555.42.02

(cherry picked from commit 5a1c474040)
This commit is contained in:
Bernhard Stoeckner
2024-05-21 15:11:46 +02:00
committed by Gaurav Juvekar
parent caa2dd11a0
commit 3084c04453
1004 changed files with 172522 additions and 150960 deletions

View File

@@ -159,14 +159,7 @@ static int lkca_aead_internal(struct crypto_aead *aead,
}
if (rc != 0) {
if (enc) {
pr_info("aead.c: Encryption failed with error %i\n", rc);
} else {
pr_info("aead.c: Decryption failed with error %i\n", rc);
if (rc == -EBADMSG) {
pr_info("aead.c: Authentication tag mismatch!\n");
}
}
pr_info("Encryption FAILED\n");
}
*data_out_size = data_in_size;

View File

@@ -1,42 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "os-interface.h"
#include "internal_crypt_lib.h"
#include "library/cryptlib.h"
/*
 * libspdm_check_crypto_backend - report whether a real crypto backend is
 * compiled into the driver.
 *
 * Returns true when the Linux Kernel Crypto API (LKCA) wrappers were built
 * in (USE_LKCA).  Note: even then, individual LKCA calls can still fail at
 * runtime if the required kernel crypto modules have not been loaded.
 * Returns false when only the stub implementations are present, in which
 * case libspdm cannot perform any cryptographic operation.
 */
bool libspdm_check_crypto_backend(void)
{
#ifdef USE_LKCA
nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA wrappers found.\n");
nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA calls may still fail if modules have not been loaded!\n");
return true;
#else
nv_printf(NV_DBG_ERRORS, "libspdm_check_crypto_backend: Error - libspdm expects LKCA but found stubs!\n");
return false;
#endif
}

View File

@@ -39,7 +39,9 @@
#define RSA_PSS_PADDING_ZEROS_SIZE_BYTE (8)
#define RSA_PSS_TRAILER_FIELD (0xbc)
#define SHIFT_RIGHT_AND_GET_BYTE(val, x) ((val >> x) & 0xFF)
#ifndef BITS_TO_BYTES
#define BITS_TO_BYTES(b) (b >> 3)
#endif
static const unsigned char zeroes[RSA_PSS_PADDING_ZEROS_SIZE_BYTE] = { 0 };

View File

@@ -66,6 +66,9 @@ static NvBool battery_present = NV_FALSE;
#define ACPI_VIDEO_CLASS "video"
#endif
/* Maximum size of ACPI _DSM method's 4th argument */
#define NV_MAX_ACPI_DSM_PARAM_SIZE 1024
// Used for NVPCF event handling
static acpi_handle nvpcf_handle = NULL;
static acpi_handle nvpcf_device_handle = NULL;
@@ -73,21 +76,6 @@ static nv_acpi_t *nvpcf_nv_acpi_object = NULL;
#define ACPI_NVPCF_EVENT_CHANGE 0xC0
static int nv_acpi_get_device_handle(nv_state_t *nv, acpi_handle *dev_handle)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
#if defined(DEVICE_ACPI_HANDLE)
*dev_handle = DEVICE_ACPI_HANDLE(nvl->dev);
return NV_TRUE;
#elif defined (ACPI_HANDLE)
*dev_handle = ACPI_HANDLE(nvl->dev);
return NV_TRUE;
#else
return NV_FALSE;
#endif
}
/*
* This callback will be invoked by the acpi_notifier_call_chain()
*/
@@ -174,7 +162,7 @@ static void nv_acpi_nvpcf_event(acpi_handle handle, u32 event_type, void *data)
}
else
{
nv_printf(NV_DBG_INFO,"NVRM: %s: NVPCF event 0x%x is not supported\n", event_type, __FUNCTION__);
nv_printf(NV_DBG_INFO,"NVRM: %s: NVPCF event 0x%x is not supported\n", __FUNCTION__, event_type);
}
}
@@ -267,11 +255,10 @@ static void nv_acpi_notify_event(acpi_handle handle, u32 event_type, void *data)
void nv_acpi_register_notifier(nv_linux_state_t *nvl)
{
acpi_handle dev_handle = NULL;
acpi_handle dev_handle = ACPI_HANDLE(nvl->dev);
/* Install the ACPI notifier corresponding to dGPU ACPI device. */
if ((nvl->nv_acpi_object == NULL) &&
nv_acpi_get_device_handle(NV_STATE_PTR(nvl), &dev_handle) &&
(dev_handle != NULL))
{
nvl->nv_acpi_object = nv_install_notifier(dev_handle, nv_acpi_notify_event, nvl);
@@ -657,64 +644,36 @@ static NV_STATUS nv_acpi_nvif_method(
return NV_OK;
}
#define MAX_INPUT_PARAM_SIZE 1024
/*
* This function executes a _DSM ACPI method.
*/
NV_STATUS NV_API_CALL nv_acpi_dsm_method(
nv_state_t *nv,
NvU8 *pAcpiDsmGuid,
NvU32 acpiDsmRev,
NvBool acpiNvpcfDsmFunction,
NvU32 acpiDsmSubFunction,
void *pInParams,
NvU16 inParamSize,
NvU32 *outStatus,
void *pOutData,
NvU16 *pSize
static NV_STATUS nv_acpi_evaluate_dsm_method(
acpi_handle dev_handle,
NvU8 *pathname,
NvU8 *pAcpiDsmGuid,
NvU32 acpiDsmRev,
NvU32 acpiDsmSubFunction,
void *arg3,
NvU16 arg3Size,
NvBool bArg3Integer,
NvU32 *outStatus,
void *pOutData,
NvU16 *pSize
)
{
NV_STATUS status = NV_ERR_OPERATING_SYSTEM;
acpi_status acpi_status;
NV_STATUS rmStatus = NV_OK;
acpi_status status;
struct acpi_object_list input;
union acpi_object *dsm = NULL;
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object dsm_params[4];
NvU8 *argument3 = NULL;
NvU32 data_size;
acpi_handle dev_handle = NULL;
if (!nv_acpi_get_device_handle(nv, &dev_handle))
return NV_ERR_NOT_SUPPORTED;
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
if ((!pInParams) || (inParamSize > MAX_INPUT_PARAM_SIZE) || (!pOutData) || (!pSize))
{
nv_printf(NV_DBG_INFO,
"NVRM: %s: invalid argument(s)!\n", __FUNCTION__);
return NV_ERR_INVALID_ARGUMENT;
}
if (!NV_MAY_SLEEP())
{
#if defined(DEBUG)
nv_printf(NV_DBG_INFO,
"NVRM: %s: invalid argument(s)!\n", __FUNCTION__);
nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid context!\n", __FUNCTION__);
#endif
return NV_ERR_NOT_SUPPORTED;
}
status = os_alloc_mem((void **)&argument3, inParamSize);
if (status != NV_OK)
return status;
//
// dsm_params[0].buffer.pointer and dsm_params[1].integer.value set in
// switch below based on acpiDsmFunction
//
dsm_params[0].buffer.type = ACPI_TYPE_BUFFER;
dsm_params[0].buffer.length = 0x10;
dsm_params[0].buffer.pointer = pAcpiDsmGuid;
@@ -725,35 +684,28 @@ NV_STATUS NV_API_CALL nv_acpi_dsm_method(
dsm_params[2].integer.type = ACPI_TYPE_INTEGER;
dsm_params[2].integer.value = acpiDsmSubFunction;
dsm_params[3].buffer.type = ACPI_TYPE_BUFFER;
dsm_params[3].buffer.length = inParamSize;
memcpy(argument3, pInParams, dsm_params[3].buffer.length);
dsm_params[3].buffer.pointer = argument3;
if (bArg3Integer)
{
dsm_params[3].integer.type = ACPI_TYPE_INTEGER;
dsm_params[3].integer.value = *((NvU32 *)arg3);
}
else
{
dsm_params[3].buffer.type = ACPI_TYPE_BUFFER;
dsm_params[3].buffer.length = arg3Size;
dsm_params[3].buffer.pointer = arg3;
}
// parameters for dsm calls (GUID, rev, subfunction, data)
input.count = 4;
input.pointer = dsm_params;
if (acpiNvpcfDsmFunction)
{
//
// acpi_evaluate_object() can operate with either valid object pathname or
// valid object handle. For NVPCF DSM function, use valid pathname as we do
// not have device handle for NVPCF device
//
dev_handle = NULL;
acpi_status = acpi_evaluate_object(dev_handle, "\\_SB.NPCF._DSM", &input, &output);
}
else
{
acpi_status = acpi_evaluate_object(dev_handle, "_DSM", &input, &output);
}
if (ACPI_FAILURE(acpi_status))
status = acpi_evaluate_object(dev_handle, pathname, &input, &output);
if (ACPI_FAILURE(status))
{
nv_printf(NV_DBG_INFO,
"NVRM: %s: failed to evaluate _DSM method!\n", __FUNCTION__);
goto exit;
return NV_ERR_OPERATING_SYSTEM;
}
dsm = output.pointer;
@@ -767,20 +719,80 @@ NV_STATUS NV_API_CALL nv_acpi_dsm_method(
dsm->buffer.pointer[0];
}
status = nv_acpi_extract_object(dsm, pOutData, *pSize, &data_size);
rmStatus = nv_acpi_extract_object(dsm, pOutData, *pSize, &data_size);
*pSize = data_size;
kfree(output.pointer);
}
if (status != NV_OK)
else
{
*pSize = 0;
}
if (rmStatus != NV_OK)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: %s: DSM data invalid!\n", __FUNCTION__);
}
exit:
return rmStatus;
}
/*
* This function executes a _DSM ACPI method.
*/
NV_STATUS NV_API_CALL nv_acpi_dsm_method(
nv_state_t *nv,
NvU8 *pAcpiDsmGuid,
NvU32 acpiDsmRev,
NvBool acpiNvpcfDsmFunction,
NvU32 acpiDsmSubFunction,
void *pInParams,
NvU16 inParamSize,
NvU32 *outStatus,
void *pOutData,
NvU16 *pSize
)
{
NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM;
NvU8 *argument3 = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
acpi_handle dev_handle = ACPI_HANDLE(nvl->dev);
NvU8 *pathname = "_DSM";
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
if ((!pInParams) || (inParamSize > NV_MAX_ACPI_DSM_PARAM_SIZE) || (!pOutData) || (!pSize))
{
nv_printf(NV_DBG_INFO,
"NVRM: %s: invalid argument(s)!\n", __FUNCTION__);
return NV_ERR_INVALID_ARGUMENT;
}
rmStatus = os_alloc_mem((void **)&argument3, inParamSize);
if (rmStatus != NV_OK)
return rmStatus;
memcpy(argument3, pInParams, inParamSize);
if (acpiNvpcfDsmFunction)
{
//
// acpi_evaluate_object() can operate with either valid object pathname or
// valid object handle. For NVPCF DSM function, use valid pathname as we do
// not have device handle for NVPCF device
//
dev_handle = NULL;
pathname = "\\_SB.NPCF._DSM";
}
rmStatus = nv_acpi_evaluate_dsm_method(dev_handle, pathname, pAcpiDsmGuid, acpiDsmRev,
acpiDsmSubFunction, argument3, inParamSize,
NV_FALSE, NULL, pOutData, pSize);
os_free_mem(argument3);
return status;
return rmStatus;
}
/*
@@ -796,13 +808,11 @@ NV_STATUS NV_API_CALL nv_acpi_ddc_method(
acpi_status status;
union acpi_object *ddc = NULL;
NvU32 i, largestEdidSize;
acpi_handle dev_handle = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
acpi_handle dev_handle = ACPI_HANDLE(nvl->dev);
acpi_handle lcd_dev_handle = NULL;
acpi_handle handle = NULL;
if (!nv_acpi_get_device_handle(nv, &dev_handle))
return NV_ERR_NOT_SUPPORTED;
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
@@ -836,7 +846,7 @@ NV_STATUS NV_API_CALL nv_acpi_ddc_method(
case 0x0400:
case 0xA420:
lcd_dev_handle = handle;
nv_printf(NV_DBG_INFO, "NVRM: %s Found LCD: %x\n",
nv_printf(NV_DBG_INFO, "NVRM: %s Found LCD: %llx\n",
__FUNCTION__, device_id);
break;
default:
@@ -915,12 +925,10 @@ NV_STATUS NV_API_CALL nv_acpi_rom_method(
union acpi_object *rom;
union acpi_object rom_arg[2];
struct acpi_object_list input = { 2, rom_arg };
acpi_handle dev_handle = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
acpi_handle dev_handle = ACPI_HANDLE(nvl->dev);
uint32_t offset, length;
if (!nv_acpi_get_device_handle(nv, &dev_handle))
return NV_ERR_NOT_SUPPORTED;
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
@@ -982,12 +990,10 @@ NV_STATUS NV_API_CALL nv_acpi_dod_method(
acpi_status status;
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *dod;
acpi_handle dev_handle = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
acpi_handle dev_handle = ACPI_HANDLE(nvl->dev);
NvU32 i, count = (*pSize / sizeof(NvU32));
if (!nv_acpi_get_device_handle(nv, &dev_handle))
return NV_ERR_NOT_SUPPORTED;
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
@@ -1129,17 +1135,11 @@ NvBool nv_acpi_power_resource_method_present(
struct pci_dev *pdev
)
{
acpi_handle handle = NULL;
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *object_package, *object_reference;
acpi_status status;
#if defined(DEVICE_ACPI_HANDLE)
handle = DEVICE_ACPI_HANDLE(&pdev->dev);
#elif defined (ACPI_HANDLE)
handle = ACPI_HANDLE(&pdev->dev);
#endif
if (!handle)
return NV_FALSE;
@@ -1198,7 +1198,8 @@ NV_STATUS NV_API_CALL nv_acpi_mux_method(
union acpi_object *mux = NULL;
union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
struct acpi_object_list input = { 1, &mux_arg };
acpi_handle dev_handle = NULL;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
acpi_handle dev_handle = ACPI_HANDLE(nvl->dev);
acpi_handle mux_dev_handle = NULL;
acpi_handle handle = NULL;
unsigned long long device_id = 0;
@@ -1216,9 +1217,6 @@ NV_STATUS NV_API_CALL nv_acpi_mux_method(
__FUNCTION__, pMethodName);
}
if (!nv_acpi_get_device_handle(nv, &dev_handle))
return NV_ERR_NOT_SUPPORTED;
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
@@ -1384,6 +1382,34 @@ NvBool NV_API_CALL nv_acpi_is_battery_present(void)
return NV_FALSE;
}
/*
 * nv_acpi_d3cold_dsm_for_upstream_port - evaluate a _DSM method on the GPU's
 * upstream (parent) port for D3cold control.
 *
 * Resolves the ACPI handle of the parent device of the GPU (nvl->dev->parent)
 * and evaluates its _DSM with the given GUID, revision and sub-function.
 * Argument 3 is passed as a single 32-bit ACPI integer taken from *data
 * (bArg3Integer == NV_TRUE in the helper call).
 *
 * On NV_OK, *data is overwritten with the 32-bit value returned by the
 * method; on failure *data is left untouched.
 *
 * Returns NV_ERR_INVALID_ARGUMENT if the parent has no ACPI handle, otherwise
 * the status propagated from nv_acpi_evaluate_dsm_method().
 */
NV_STATUS NV_API_CALL nv_acpi_d3cold_dsm_for_upstream_port(
nv_state_t *nv,
NvU8 *pAcpiDsmGuid,
NvU32 acpiDsmRev,
NvU32 acpiDsmSubFunction,
NvU32 *data
)
{
NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
/* _DSM lives on the upstream port, i.e. the GPU's parent ACPI device. */
acpi_handle dev_handle = ACPI_HANDLE(nvl->dev->parent);
NvU32 outData = 0;
NvU16 outDatasize = sizeof(NvU32);
NvU16 inParamSize = sizeof(NvU32);
if (!dev_handle)
return NV_ERR_INVALID_ARGUMENT;
/* NV_TRUE: pass *data as an ACPI integer argument, not a buffer. */
rmStatus = nv_acpi_evaluate_dsm_method(dev_handle, "_DSM", pAcpiDsmGuid, acpiDsmRev,
acpiDsmSubFunction, data, inParamSize, NV_TRUE,
NULL, &outData, &outDatasize);
if (rmStatus == NV_OK)
*data = outData;
return rmStatus;
}
#else // NV_LINUX_ACPI_EVENTS_SUPPORTED
void NV_API_CALL nv_acpi_methods_init(NvU32 *handlePresent)
@@ -1426,6 +1452,17 @@ NV_STATUS NV_API_CALL nv_acpi_dsm_method(
return NV_ERR_NOT_SUPPORTED;
}
/*
 * Stub used when the kernel lacks ACPI event support
 * (!NV_LINUX_ACPI_EVENTS_SUPPORTED): the D3cold _DSM cannot be evaluated,
 * so always report NV_ERR_NOT_SUPPORTED and leave *data untouched.
 */
NV_STATUS NV_API_CALL nv_acpi_d3cold_dsm_for_upstream_port(
nv_state_t *nv,
NvU8 *pAcpiDsmGuid,
NvU32 acpiDsmRev,
NvU32 acpiDsmSubFunction,
NvU32 *data
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_acpi_ddc_method(
nv_state_t *nv,
void *pEdidBuffer,

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,6 +24,7 @@
#include "nv-linux.h"
extern int NVreg_ImexChannelCount;
extern int NVreg_CreateImexChannel0;
static int nv_caps_imex_open(struct inode *inode, struct file *file)
{
@@ -104,6 +105,10 @@ int NV_API_CALL nv_caps_imex_init(void)
if (NVreg_ImexChannelCount == 0)
{
nv_printf(NV_DBG_INFO, "nv-caps-imex is disabled.\n");
// Disable channel creation as well
NVreg_CreateImexChannel0 = 0;
return 0;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,8 @@
#include "nv-procfs.h"
#include "nv-hash.h"
#include "nvmisc.h"
extern int NVreg_ModifyDeviceFiles;
/* sys_close() or __close_fd() */
@@ -49,7 +51,7 @@ typedef struct nv_cap_table_entry
struct hlist_node hlist;
} nv_cap_table_entry_t;
#define NV_CAP_NUM_ENTRIES(_table) (sizeof(_table) / sizeof(_table[0]))
#define NV_CAP_NUM_ENTRIES(_table) (NV_ARRAY_ELEMENTS(_table))
static nv_cap_table_entry_t g_nv_cap_nvlink_table[] =
{
@@ -361,18 +363,28 @@ static ssize_t nv_cap_procfs_write(struct file *file,
nv_cap_file_private_t *private = NULL;
unsigned long bytes_left;
char *proc_buffer;
int status;
status = nv_down_read_interruptible(&nv_system_pm_lock);
if (status < 0)
{
nv_printf(NV_DBG_ERRORS, "nv-caps: failed to lock the nv_system_pm_lock!\n");
return status;
}
private = ((struct seq_file *)file->private_data)->private;
bytes_left = (sizeof(private->buffer) - private->offset - 1);
if (count == 0)
{
return -EINVAL;
count = -EINVAL;
goto done;
}
if ((bytes_left == 0) || (count > bytes_left))
{
return -ENOSPC;
count = -ENOSPC;
goto done;
}
proc_buffer = &private->buffer[private->offset];
@@ -380,7 +392,8 @@ static ssize_t nv_cap_procfs_write(struct file *file,
if (copy_from_user(proc_buffer, buffer, count))
{
nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n");
return -EFAULT;
count = -EFAULT;
goto done;
}
private->offset += count;
@@ -388,17 +401,28 @@ static ssize_t nv_cap_procfs_write(struct file *file,
*pos = private->offset;
done:
up_read(&nv_system_pm_lock);
return count;
}
static int nv_cap_procfs_read(struct seq_file *s, void *v)
{
int status;
nv_cap_file_private_t *private = s->private;
status = nv_down_read_interruptible(&nv_system_pm_lock);
if (status < 0)
{
return status;
}
seq_printf(s, "%s: %d\n", "DeviceFileMinor", private->minor);
seq_printf(s, "%s: %d\n", "DeviceFileMode", private->permissions);
seq_printf(s, "%s: %d\n", "DeviceFileModify", private->modify);
up_read(&nv_system_pm_lock);
return 0;
}
@@ -423,14 +447,6 @@ static int nv_cap_procfs_open(struct inode *inode, struct file *file)
if (rc < 0)
{
NV_KFREE(private, sizeof(nv_cap_file_private_t));
return rc;
}
rc = nv_down_read_interruptible(&nv_system_pm_lock);
if (rc < 0)
{
single_release(inode, file);
NV_KFREE(private, sizeof(nv_cap_file_private_t));
}
return rc;
@@ -449,8 +465,6 @@ static int nv_cap_procfs_release(struct inode *inode, struct file *file)
private = s->private;
}
up_read(&nv_system_pm_lock);
single_release(inode, file);
if (private != NULL)

View File

@@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
// Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node
if (i == (attempts - 1))
if ((i == (attempts - 1)))
break;
// Get the NUMA node where the first page of the stack is resident. If

View File

@@ -28,12 +28,21 @@
* teardown.
*/
#define NV_MEM_LOGGER_STACK_TRACE 0
#if defined(NV_STACK_TRACE_PRESENT) && defined(NV_MEM_LOGGER) && defined(DEBUG)
#define NV_MEM_LOGGER_STACK_TRACE 1
#endif
/*
 * One tracked allocation in the memory-debug red-black tree, keyed by its
 * address.  When NV_MEM_LOGGER_STACK_TRACE is enabled, the allocation call
 * stack is captured so leaks reported at teardown can show where the memory
 * was allocated.
 */
typedef struct {
struct rb_node rb_node;   /* linkage into g_nv_memdbg's rb-tree */
void *addr;               /* address of the tracked allocation */
NvU64 size;               /* allocation size in bytes */
NvU32 line;               /* source line of the allocation site */
const char *file;         /* source file of the allocation site */
#if NV_MEM_LOGGER_STACK_TRACE == 1
/* call stack captured at allocation time via stack_trace_save() */
unsigned long stack_trace[32];
#endif
} nv_memdbg_node_t;
struct
@@ -117,6 +126,12 @@ void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
node->size = size;
node->file = file;
node->line = line;
#if NV_MEM_LOGGER_STACK_TRACE == 1
memset(node->stack_trace, '\0', sizeof(node->stack_trace));
stack_trace_save(node->stack_trace, NV_ARRAY_ELEMENTS(node->stack_trace), 0);
#endif
}
NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);
@@ -209,6 +224,10 @@ void nv_memdbg_exit(void)
node->size, node->addr);
}
#if NV_MEM_LOGGER_STACK_TRACE == 1
stack_trace_print(node->stack_trace, NV_ARRAY_ELEMENTS(node->stack_trace), 1);
#endif
rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
kfree(node);
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,7 +62,7 @@ nvidia_nano_timer_callback(
nv_linux_state_t *nvl = nv_nstimer->nv_linux_state;
nvidia_stack_t *sp = NULL;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
if (nv_kmem_cache_alloc_stack_atomic(&sp) != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: no cache memory \n");
return;
@@ -189,12 +189,6 @@ void NV_API_CALL nv_start_nano_timer(
NvU32 time_us;
time_us = (NvU32)(time_ns / 1000);
if (time_us == 0)
{
nv_printf(NV_DBG_WARNINGS, "NVRM: Timer value cannot be less than 1 usec.\n");
}
time_jiffies = usecs_to_jiffies(time_us);
mod_timer(&nv_nstimer->jiffy_timer, jiffies + time_jiffies);
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2011-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,8 @@
#include "nv-p2p.h"
#include "rmp2pdefines.h"
#include "nvmisc.h"
typedef enum nv_p2p_page_table_type {
NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT = 0,
NV_P2P_PAGE_TABLE_TYPE_PERSISTENT,
@@ -50,6 +52,7 @@ typedef struct nv_p2p_mem_info {
struct semaphore lock;
} dma_mapping_list;
void *private;
void *mig_info;
} nv_p2p_mem_info_t;
// declared and created in nv.c
@@ -73,7 +76,7 @@ static struct nvidia_status_mapping {
};
#define NVIDIA_STATUS_MAPPINGS \
(sizeof(nvidia_status_mappings) / sizeof(struct nvidia_status_mapping))
NV_ARRAY_ELEMENTS(nvidia_status_mappings)
static int nvidia_p2p_map_status(NV_STATUS status)
{
@@ -314,7 +317,7 @@ static NV_STATUS nv_p2p_put_pages(
* callback which can free it unlike non-persistent page_table.
*/
mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table);
status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table);
status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table, mem_info->mig_info);
}
else
{
@@ -412,6 +415,17 @@ static int nv_p2p_get_pages(
NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
int rc;
if (!NV_IS_ALIGNED64(virtual_address, NVRM_P2P_PAGESIZE_BIG_64K) ||
!NV_IS_ALIGNED64(length, NVRM_P2P_PAGESIZE_BIG_64K))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Invalid argument in nv_p2p_get_pages,"
"address or length are not aligned "
"address=0x%llx, length=0x%llx\n",
virtual_address, length);
return -EINVAL;
}
rc = nv_kmem_cache_alloc_stack(&sp);
if (rc != 0)
{
@@ -495,7 +509,7 @@ static int nv_p2p_get_pages(
status = rm_p2p_get_pages_persistent(sp, virtual_address, length,
&mem_info->private,
physical_addresses, &entries,
*page_table, gpu_info);
*page_table, gpu_info, &mem_info->mig_info);
if (status != NV_OK)
{
goto failed;

View File

@@ -328,7 +328,7 @@ static NvU32 find_gpu_numa_nodes_in_srat(nv_linux_state_t *nvl)
gi = (struct acpi_srat_generic_affinity *) subtable_header;
gi_dbdf = *((NvU16 *)(&gi->device_handle[0])) << 16 |
*((NvU16 *)(&gi->device_handle[2]));
if (gi_dbdf == dev_dbdf) {
numa_node = pxm_to_node(gi->proximity_domain);
if (numa_node < MAX_NUMNODES) {
@@ -353,7 +353,6 @@ exit:
acpi_put_table(table_header);
return pxm_count;
}
#endif
static void
@@ -379,6 +378,7 @@ nv_init_coherent_link_info
return;
gi_found = find_gpu_numa_nodes_in_srat(nvl);
if (!gi_found &&
(device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-start", &pxm_start) != 0 ||
device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-count", &pxm_count) != 0))
@@ -534,7 +534,6 @@ nv_pci_probe
if (pci_dev->is_virtfn)
{
#if defined(NV_VGPU_KVM_BUILD)
#if defined(NV_BUS_TYPE_HAS_IOMMU_OPS)
if (pci_dev->dev.bus->iommu_ops == NULL)
#else
@@ -677,8 +676,8 @@ next_bar:
// Invalid 32 or 64-bit BAR.
nv_printf(NV_DBG_ERRORS,
"NVRM: This PCI I/O region assigned to your NVIDIA device is invalid:\n"
"NVRM: BAR%d is %dM @ 0x%llx (PCI:%04x:%02x:%02x.%x)\n", i,
(NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20),
"NVRM: BAR%d is %" NvU64_fmtu "M @ 0x%" NvU64_fmtx " (PCI:%04x:%02x:%02x.%x)\n", i,
(NvU64)(NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20),
(NvU64)NV_PCI_RESOURCE_START(pci_dev, i),
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
@@ -698,10 +697,10 @@ next_bar:
nv_device_name))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: request_mem_region failed for %dM @ 0x%llx. This can\n"
"NVRM: request_mem_region failed for %" NvU64_fmtu "M @ 0x%" NvU64_fmtx ". This can\n"
"NVRM: occur when a driver such as rivatv is loaded and claims\n"
"NVRM: ownership of the device's registers.\n",
(NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index) >> 20),
(NvU64)(NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index) >> 20),
(NvU64)NV_PCI_RESOURCE_START(pci_dev, regs_bar_index));
goto failed;
}

View File

@@ -197,28 +197,25 @@ nv_procfs_read_power(
{
nv_state_t *nv = s->private;
nvidia_stack_t *sp = NULL;
const char *vidmem_power_status;
const char *dynamic_power_status;
const char *gc6_support;
const char *gcoff_support;
nv_power_info_t power_info;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return 0;
}
dynamic_power_status = rm_get_dynamic_power_management_status(sp, nv);
seq_printf(s, "Runtime D3 status: %s\n", dynamic_power_status);
vidmem_power_status = rm_get_vidmem_power_status(sp, nv);
seq_printf(s, "Video Memory: %s\n\n", vidmem_power_status);
rm_get_power_info(sp, nv, &power_info);
seq_printf(s, "Runtime D3 status: %s\n", power_info.dynamic_power_status);
seq_printf(s, "Video Memory: %s\n\n", power_info.vidmem_power_status);
seq_printf(s, "GPU Hardware Support:\n");
gc6_support = rm_get_gpu_gcx_support(sp, nv, NV_TRUE);
seq_printf(s, " Video Memory Self Refresh: %s\n", gc6_support);
seq_printf(s, " Video Memory Self Refresh: %s\n", power_info.gc6_support);
seq_printf(s, " Video Memory Off: %s\n\n", power_info.gcoff_support);
gcoff_support = rm_get_gpu_gcx_support(sp, nv, NV_FALSE);
seq_printf(s, " Video Memory Off: %s\n", gcoff_support);
seq_printf(s, "S0ix Power Management:\n");
seq_printf(s, " Platform Support: %s\n",
nv_platform_supports_s0ix() ? "Supported" : "Not Supported");
seq_printf(s, " Status: %s\n", power_info.s0ix_status);
nv_kmem_cache_free_stack(sp);
return 0;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2006-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -869,6 +869,8 @@
* NVreg_ModifyDeviceFiles, NVreg_DeviceFileGID, NVreg_DeviceFileUID
* and NVreg_DeviceFileMode will be honored by nvidia-modprobe.
*
* Also, refer to the NVreg_CreateImexChannel0 option.
*
* Possible values:
* 0 - Disable IMEX using CUDA driver's fabric handles.
* N - N IMEX channels will be enabled in the driver to facilitate N
@@ -878,6 +880,29 @@
#define __NV_IMEX_CHANNEL_COUNT ImexChannelCount
#define NV_REG_IMEX_CHANNEL_COUNT NV_REG_STRING(__NV_IMEX_CHANNEL_COUNT)
/*
* Option: NVreg_CreateImexChannel0
*
* Description:
*
* This option allows users to specify whether the NVIDIA driver must create
* the IMEX channel 0 by default. The channel will be created automatically
* when an application (e.g. nvidia-smi, nvidia-persistenced) is run.
*
* Note that users are advised to enable this option only in trusted
* environments where it is acceptable for applications to share the same
* IMEX channel.
*
* For more details on IMEX channels, refer to the NVreg_ImexChannelCount
* option.
*
* Possible values:
* 0 - Do not create IMEX channel 0 (default).
* 1 - Create IMEX channel 0.
*/
#define __NV_CREATE_IMEX_CHANNEL_0 CreateImexChannel0
#define NV_CREATE_IMEX_CHANNEL_0 NV_REG_STRING(__CREATE_IMEX_CHANNEL_0)
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
@@ -927,6 +952,7 @@ NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IMEX_CHANNEL_COUNT, 2048);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_CREATE_IMEX_CHANNEL_0, 0);
/*
*----------------registry database definition----------------------
@@ -974,6 +1000,7 @@ nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IMEX_CHANNEL_COUNT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_CREATE_IMEX_CHANNEL_0),
{NULL, NULL}
};

View File

@@ -514,7 +514,6 @@ NV_STATUS nv_alloc_system_pages(
struct device *dev = at->dev;
dma_addr_t bus_addr;
// Order should be zero except for EGM allocations.
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);
@@ -523,7 +522,7 @@ NV_STATUS nv_alloc_system_pages(
unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %u: %u order0 pages, %u order\n", __FUNCTION__, at->num_pages, at->order);
"NVRM: VM: %s: %u order0 pages, %u order\n", __FUNCTION__, at->num_pages, at->order);
gfp_mask = nv_compute_gfp_mask(nv, at);
@@ -641,7 +640,6 @@ void nv_free_system_pages(
unsigned int i;
struct device *dev = at->dev;
// Order should be zero except for EGM allocations.
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;

View File

@@ -29,7 +29,7 @@
NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address)
{
/* direct-mapped kernel address */
if (virt_addr_valid(address))
if (virt_addr_valid((void *)address))
return __pa(address);
nv_printf(NV_DBG_ERRORS,

View File

@@ -3131,6 +3131,7 @@ NV_STATUS NV_API_CALL
nv_alias_pages(
nv_state_t *nv,
NvU32 page_cnt,
NvU64 page_size,
NvU32 contiguous,
NvU32 cache_type,
NvU64 guest_id,
@@ -3152,7 +3153,14 @@ nv_alias_pages(
at->cache_type = cache_type;
if (contiguous)
{
at->flags.contig = NV_TRUE;
at->order = get_order(at->num_pages * PAGE_SIZE);
}
else
{
at->order = get_order(page_size);
}
#if defined(NVCPU_AARCH64)
if (at->cache_type != NV_MEMORY_CACHED)
at->flags.aliased = NV_TRUE;
@@ -3160,8 +3168,6 @@ nv_alias_pages(
at->flags.guest = NV_TRUE;
at->order = get_order(at->num_pages * PAGE_SIZE);
for (i=0; i < at->num_pages; ++i)
{
page_ptr = at->page_table[i];
@@ -3271,7 +3277,7 @@ NV_STATUS NV_API_CALL nv_register_user_pages(
nv_linux_state_t *nvl;
nvidia_pte_t *page_ptr;
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_register_user_pages: 0x%x\n", page_count);
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_register_user_pages: 0x%" NvU64_fmtx"\n", page_count);
user_pages = *priv_data;
nvl = NV_GET_NVL_FROM_NV_STATE(nv);
@@ -3332,7 +3338,7 @@ void NV_API_CALL nv_unregister_user_pages(
{
nv_alloc_t *at = *priv_data;
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_user_pages: 0x%x\n", page_count);
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_user_pages: 0x%" NvU64_fmtx "\n", page_count);
NV_PRINT_AT(NV_DBG_MEMINFO, at);
@@ -6133,7 +6139,10 @@ void NV_API_CALL nv_get_screen_info(
{
NvU64 physAddr = screen_info.lfb_base;
#if defined(VIDEO_CAPABILITY_64BIT_BASE)
physAddr |= (NvU64)screen_info.ext_lfb_base << 32;
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
{
physAddr |= (NvU64)screen_info.ext_lfb_base << 32;
}
#endif
/* Make sure base address is mapped to GPU BAR */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -45,11 +45,6 @@ typedef struct gpuObject *gpuObjectHandle;
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
NV_STATUS calculatePCIELinkRateMBps(NvU32 lanes,
NvU32 pciLinkMaxSpeed,
NvU32 *pcieLinkRate);
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
@@ -290,12 +285,15 @@ NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(gpuFaultInfo *pFaultInfo,
NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo,
NvBool bEnable);
NV_STATUS nvGpuOpsKeyRotationChannelDisable(struct gpuChannel *channelList[],
NvU32 channelListCount);
// Interface used for CCSL
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount);
NV_STATUS nvGpuOpsCcslContextUpdate(UvmCslContext *contextList[],
NvU32 contextListCount);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
@@ -313,7 +311,6 @@ NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
@@ -329,8 +326,7 @@ NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 increment,
NvU8 *iv);
NV_STATUS nvGpuOpsLogEncryption(struct ccslContext_t *ctx,
NvU8 direction,
NvU32 bufferSize);
NV_STATUS nvGpuOpsLogDeviceEncryption(struct ccslContext_t *ctx,
NvU32 bufferSize);
#endif /* _NV_GPU_OPS_H_*/

View File

@@ -1478,6 +1478,15 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
}
EXPORT_SYMBOL(nvUvmInterfacePagingChannelPushStream);
NV_STATUS nvUvmInterfaceKeyRotationChannelDisable(uvmGpuChannelHandle channelList[],
NvU32 channeListCount)
{
nvidia_stack_t *sp = nvUvmGetSafeStack();
return rm_gpu_ops_key_rotation_channel_disable(sp, ((gpuChannelHandle *)channelList), channeListCount);
}
EXPORT_SYMBOL(nvUvmInterfaceKeyRotationChannelDisable);
NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
uvmGpuChannelHandle channel)
{
@@ -1516,23 +1525,17 @@ void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext)
}
EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext);
NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount)
NV_STATUS nvUvmInterfaceCslUpdateContext(UvmCslContext *contextList[],
NvU32 contextListCount)
{
NV_STATUS status;
nvidia_stack_t *sp;
nvidia_stack_t *sp = contextList[0]->nvidia_stack;
if ((contextList == NULL) || (contextListCount == 0) || (contextList[0] == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
sp = contextList[0]->nvidia_stack;
status = rm_gpu_ops_ccsl_rotate_key(sp, contextList, contextListCount);
status = rm_gpu_ops_ccsl_context_update(sp, contextList, contextListCount);
return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslRotateKey);
EXPORT_SYMBOL(nvUvmInterfaceCslUpdateContext);
NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
UvmCslOperation operation)
@@ -1569,7 +1572,6 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
NvU8 const *inputBuffer,
UvmCslIv const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
@@ -1583,7 +1585,6 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
bufferSize,
inputBuffer,
(NvU8 *)decryptIv,
keyRotationId,
outputBuffer,
addAuthData,
addAuthDataSize,
@@ -1634,18 +1635,17 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
}
EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv);
NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU32 bufferSize)
NV_STATUS nvUvmInterfaceCslLogExternalEncryption(UvmCslContext *uvmCslContext,
NvU32 bufferSize)
{
NV_STATUS status;
nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
status = rm_gpu_ops_ccsl_log_encryption(sp, uvmCslContext->ctx, operation, bufferSize);
status = rm_gpu_ops_ccsl_log_device_encryption(sp, uvmCslContext->ctx, bufferSize);
return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslLogEncryption);
EXPORT_SYMBOL(nvUvmInterfaceCslLogExternalEncryption);
#else // NV_UVM_ENABLE

View File

@@ -41,7 +41,6 @@ NVIDIA_SOURCES += nvidia/libspdm_rsa.c
NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c
NVIDIA_SOURCES += nvidia/libspdm_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c
NVIDIA_SOURCES += nvidia/libspdm_internal_crypt_lib.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c
NVIDIA_SOURCES += nvidia/libspdm_ec.c
NVIDIA_SOURCES += nvidia/libspdm_x509.c

View File

@@ -161,7 +161,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
@@ -195,6 +195,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += stack_trace
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
@@ -227,8 +228,9 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_clear_in
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_mem_from_gscco
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_platform_is_fpga
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_platform_is_sim
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -38,4 +38,4 @@ bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
bool libspdm_check_crypto_backend(void);

View File

@@ -36,28 +36,10 @@ static inline int nv_follow_pfn(struct vm_area_struct *vma,
unsigned long address,
unsigned long *pfn)
{
#if defined(NV_FOLLOW_PFN_PRESENT)
return follow_pfn(vma, address, pfn);
#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT)
return unsafe_follow_pfn(vma, address, pfn);
#else
#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
int status = 0;
spinlock_t *ptl;
pte_t *ptep;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return status;
status = follow_pte(vma, address, &ptep, &ptl);
if (status)
return status;
*pfn = pte_pfn(ptep_get(ptep));
// The lock is acquired inside follow_pte()
pte_unmap_unlock(ptep, ptl);
return 0;
#else // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
return -1;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
return follow_pfn(vma, address, pfn);
#endif
}