535.86.05

Bernhard Stoeckner
2023-07-18 15:54:53 +02:00
parent 22a077c4fe
commit 337e28efda
264 changed files with 67251 additions and 107479 deletions

View File

@@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.54.03\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.86.05\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)

View File

@@ -211,6 +211,7 @@
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/memory.h>
#include <linux/workqueue.h> /* workqueue */
#include "nv-kthread-q.h" /* kthread based queue */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,12 +36,21 @@ typedef int vm_fault_t;
* pin_user_pages() was added by commit eddb1c228f7951d399240
* ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
*
* Removed vmas parameter from pin_user_pages() by commit 40896a02751
* ("mm/gup: remove vmas parameter from pin_user_pages()")
* in linux-next, expected in v6.5-rc1 (2023-05-17)
*
*/
#include <linux/mm.h>
#include <linux/sched.h>
#if defined(NV_PIN_USER_PAGES_PRESENT)
#define NV_PIN_USER_PAGES pin_user_pages
#if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES pin_user_pages
#else
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages, vmas) \
pin_user_pages(start, nr_pages, gup_flags, pages)
#endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS
#define NV_UNPIN_USER_PAGE unpin_user_page
#else
#define NV_PIN_USER_PAGES NV_GET_USER_PAGES
@@ -64,11 +73,18 @@ typedef int vm_fault_t;
* commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40 which cherry-picked the
* replacement of the write and force parameters with gup_flags
*
* Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
* ("mm/gup: remove unused vmas parameter from get_user_pages()")
* in linux-next, expected in v6.5-rc1 (2023-05-17)
*
*/
#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(start, nr_pages, flags, pages)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#define NV_GET_USER_PAGES get_user_pages
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
#else
@@ -81,13 +97,13 @@ typedef int vm_fault_t;
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE)
#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
return get_user_pages(start, nr_pages, write, force, pages, vmas);
#else
// NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
// NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
return get_user_pages(current, current->mm, start, nr_pages, write,
force, pages, vmas);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
@@ -100,15 +116,22 @@ typedef int vm_fault_t;
* 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
* in v5.9-rc1 (2020-08-11).
*
* Removed unused vmas parameter from pin_user_pages_remote() by commit
* 83bcc2e132 ("mm/gup: remove unused vmas parameter from pin_user_pages_remote()")
* in linux-next, expected in v6.5-rc1 (2023-05-14)
*
*/
#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
#if defined (NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#else
#elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK
#else
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS
#else
#define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT
@@ -135,22 +158,30 @@ typedef int vm_fault_t;
* commit 64019a2e467a ("mm/gup: remove task_struct pointer for
* all gup code") in v5.9-rc1 (2020-08-11).
*
* Removed vmas parameter from get_user_pages_remote() by commit a4bde14d549
* ("mm/gup: remove vmas parameter from get_user_pages_remote()")
* in linux-next, expected in v6.5-rc1 (2023-05-14)
*
*/
#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)
#else
// NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE
// NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
@@ -167,7 +198,7 @@ typedef int vm_fault_t;
}
#endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE)
#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS)
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
@@ -185,7 +216,7 @@ typedef int vm_fault_t;
#else
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT
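/*
 * A minimal caller sketch (illustrative only; the function name is
 * hypothetical) of how the wrappers above give the driver one five-argument
 * calling convention: the vmas argument is always passed, and on kernels
 * whose gup/pup variants no longer take it the macros simply drop it.
 * Assumes the caller already holds the mmap lock, as gup requires.
 */
static inline long nv_example_pin_one_user_page(unsigned long uaddr,
                                                struct page **page)
{
    return NV_PIN_USER_PAGES(uaddr & PAGE_MASK, 1, FOLL_WRITE, page, NULL);
}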
/*

View File

@@ -852,6 +852,14 @@ typedef union UvmFaultMetadataPacket_tag
NvU8 _padding[32];
} UvmFaultMetadataPacket;
// This struct shall not be accessed nor modified directly by UVM as it is
// entirely managed by the RM layer
typedef struct UvmCslContext_tag
{
struct ccslContext_t *ctx;
void *nvidia_stack;
} UvmCslContext;
typedef struct UvmGpuFaultInfo_tag
{
struct
@@ -909,6 +917,10 @@ typedef struct UvmGpuFaultInfo_tag
// Confidential Computing is disabled.
UvmFaultMetadataPacket *bufferMetadata;
// CSL context used for performing decryption of replayable faults when
// Confidential Computing is enabled.
UvmCslContext cslCtx;
// Indicates whether UVM owns the replayable fault buffer.
// The value of this field is always NV_TRUE when Confidential Computing
// is disabled.
@@ -1047,14 +1059,6 @@ typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
typedef UvmPmaAllocationOptions gpuPmaAllocationOptions;
// This struct shall not be accessed nor modified directly by UVM as it is
// entirely managed by the RM layer
typedef struct UvmCslContext_tag
{
struct ccslContext_t *ctx;
void *nvidia_stack;
} UvmCslContext;
typedef struct UvmCslIv
{
NvU8 iv[12];

View File

@@ -1135,6 +1135,23 @@ compile_test() {
compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_DMA_UNMAP" "" "types"
;;
vfio_device_ops_has_bind_iommufd)
#
# Determine if 'vfio_device_ops' struct has 'bind_iommufd' field.
#
# Added by commit a4d1f91db5021 ("vfio-iommufd: Support iommufd
# for physical VFIO devices") in v6.2
#
CODE="
#include <linux/pci.h>
#include <linux/vfio.h>
int conftest_vfio_device_ops_has_bind_iommufd(void) {
return offsetof(struct vfio_device_ops, bind_iommufd);
}"
compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_BIND_IOMMUFD" "" "types"
;;
pci_irq_vector_helpers)
#
# Determine if pci_alloc_irq_vectors(), pci_free_irq_vectors()
@@ -2410,6 +2427,10 @@ compile_test() {
# commit 768ae309a961 ("mm: replace get_user_pages() write/force
# parameters with gup_flags") in v4.9 (2016-10-13)
#
# Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
# ("mm/gup: remove unused vmas parameter from get_user_pages()")
# in linux-next, expected in v6.5-rc1
#
# linux-4.4.168 cherry-picked commit 768ae309a961 without
# c12d2da56d0e which is covered in Conftest #3.
#
@@ -2419,22 +2440,28 @@ compile_test() {
# passing conftest's
#
set_get_user_pages_defines () {
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" ]; then
echo "#define NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" | append_conftest "functions"
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" | append_conftest "functions"
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" ]; then
echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" ]; then
echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_FLAGS" ]; then
@@ -2442,6 +2469,7 @@ compile_test() {
else
echo "#undef NV_GET_USER_PAGES_HAS_ARGS_FLAGS" | append_conftest "functions"
fi
}
# Conftest #1: Check if get_user_pages accepts 6 arguments.
@@ -2462,14 +2490,15 @@ compile_test() {
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE"
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS"
rm -f conftest$$.o
return
fi
# Conftest #2: Check if get_user_pages has gup_flags instead of
# write and force parameters. And that gup doesn't accept a
# task_struct and mm_struct as its first arguments.
# task_struct and mm_struct as its first arguments. get_user_pages
# has vm_area_struct as its last argument.
# Return if available.
# Fall through to conftest #3 on failure.
@@ -2487,16 +2516,17 @@ compile_test() {
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_FLAGS"
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS"
rm -f conftest$$.o
return
fi
# Conftest #3: Check if get_user_pages has gup_flags instead of
# write and force parameters AND that gup has task_struct and
# mm_struct as its first arguments.
# write and force parameters, and that gup has task_struct and
# mm_struct as its first arguments. get_user_pages has
# vm_area_struct as its last argument.
# Return if available.
# Fall through to default case if absent.
# Fall through to conftest #4 on failure.
echo "$CONFTEST_PREAMBLE
#include <linux/mm.h>
@@ -2514,12 +2544,35 @@ compile_test() {
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS"
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS"
rm -f conftest$$.o
return
fi
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE"
# Conftest #4: gup doesn't accept a task_struct and mm_struct as
# its first arguments. Check if get_user_pages() does not take
# the vmas argument.
# Fall through to default case otherwise.
echo "$CONFTEST_PREAMBLE
#include <linux/mm.h>
long get_user_pages(unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages) {
return 0;
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_FLAGS"
rm -f conftest$$.o
return
fi
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS"
return
;;
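# For reference, when only probe #4 compiles, set_get_user_pages_defines()
# leaves the generated conftest header with the following (illustrative
# listing; only the get_user_pages defines are shown):
#
#     #undef  NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
#     #undef  NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#     #undef  NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS
#     #undef  NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS
#     #define NV_GET_USER_PAGES_HAS_ARGS_FLAGS
#
# nv-mm.h keys its wrapper selection off exactly one of these being defined.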
@@ -2546,6 +2599,10 @@ compile_test() {
# commit 64019a2e467a ("mm/gup: remove task_struct pointer for
# all gup code") in v5.9-rc1 (2020-08-11).
#
# Removed vmas parameter from get_user_pages_remote() by commit
# a4bde14d549 ("mm/gup: remove vmas parameter from get_user_pages_remote()")
# in linux-next, expected in v6.5-rc1
#
#
# This function sets the NV_GET_USER_PAGES_REMOTE_* macros as per
@@ -2558,22 +2615,28 @@ compile_test() {
echo "#define NV_GET_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" ]; then
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" ]; then
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" ]; then
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" | append_conftest "functions"
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" | append_conftest "functions"
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS" ]; then
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS" | append_conftest "functions"
else
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" ]; then
@@ -2581,6 +2644,7 @@ compile_test() {
else
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" | append_conftest "functions"
fi
}
# conftest #1: check if get_user_pages_remote() is available
@@ -2603,8 +2667,8 @@ compile_test() {
fi
#
# conftest #2: check if get_user_pages_remote() has write and
# force arguments. Return if these arguments are present
# conftest #2: check if get_user_pages_remote() has write, force
# and vmas arguments. Return if these arguments are present.
# Fall through to conftest #3 if these args are absent.
#
echo "$CONFTEST_PREAMBLE
@@ -2624,14 +2688,14 @@ compile_test() {
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE"
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS"
rm -f conftest$$.o
return
fi
#
# conftest #3: check if get_user_pages_remote() has gpu_flags
# arguments. Return if these arguments are present
# conftest #3: check if get_user_pages_remote() has gup_flags and
# vmas arguments. Return if these arguments are present.
# Fall through to conftest #4 if these args are absent.
#
echo "$CONFTEST_PREAMBLE
@@ -2650,13 +2714,14 @@ compile_test() {
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS"
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS"
rm -f conftest$$.o
return
fi
#
# conftest #4: check if get_user_pages_remote() has locked argument
# conftest #4: check if get_user_pages_remote() has locked and
# vmas arguments.
# Return if these arguments are present. Fall through to conftest #5
# if these args are absent.
#
@@ -2677,7 +2742,7 @@ compile_test() {
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED"
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS"
rm -f conftest$$.o
return
fi
@@ -2701,10 +2766,34 @@ compile_test() {
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS"
rm -f conftest$$.o
fi
#
# conftest #6: check if get_user_pages_remote() does not take
# vmas argument.
#
echo "$CONFTEST_PREAMBLE
#include <linux/mm.h>
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages,
int *locked) {
return 0;
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED"
rm -f conftest$$.o
fi
;;
pin_user_pages)
@@ -2716,17 +2805,65 @@ compile_test() {
# pin_user_pages() was added by commit eddb1c228f7951d399240
# ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in
# v5.6-rc1 (2020-01-30)
#
# Removed vmas parameter from pin_user_pages() by commit
# 40896a02751 ("mm/gup: remove vmas parameter from pin_user_pages()")
# in linux-next, expected in v6.5-rc1
set_pin_user_pages_defines () {
if [ "$1" = "" ]; then
echo "#undef NV_PIN_USER_PAGES_PRESENT" | append_conftest "functions"
else
echo "#define NV_PIN_USER_PAGES_PRESENT" | append_conftest "functions"
fi
if [ "$1" = "NV_PIN_USER_PAGES_HAS_ARGS_VMAS" ]; then
echo "#define NV_PIN_USER_PAGES_HAS_ARGS_VMAS" | append_conftest "functions"
else
echo "#undef NV_PIN_USER_PAGES_HAS_ARGS_VMAS" | append_conftest "functions"
fi
}
# conftest #1: check if pin_user_pages() is available
# Return if not available.
# Fall through to conftest #2 if it is present.
#
CODE="
echo "$CONFTEST_PREAMBLE
#include <linux/mm.h>
void conftest_pin_user_pages(void) {
pin_user_pages();
}"
}" > conftest$$.c
compile_check_conftest "$CODE" "NV_PIN_USER_PAGES_PRESENT" "" "functions"
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_pin_user_pages_defines ""
rm -f conftest$$.o
return
fi
# conftest #2: Check if pin_user_pages() has vmas argument
echo "$CONFTEST_PREAMBLE
#include <linux/mm.h>
long pin_user_pages(unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas) {
return 0;
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_pin_user_pages_defines "NV_PIN_USER_PAGES_HAS_ARGS_VMAS"
rm -f conftest$$.o
else
set_pin_user_pages_defines "NV_PIN_USER_PAGES_PRESENT"
fi
;;
pin_user_pages_remote)
@@ -2739,6 +2876,10 @@ compile_test() {
# pin_user_pages_remote() removed 'tsk' parameter by
# commit 64019a2e467a ("mm/gup: remove task_struct pointer for
# all gup code") in v5.9-rc1 (2020-08-11).
#
# Removed unused vmas parameter from pin_user_pages_remote() by
# commit 83bcc2e132 ("mm/gup: remove unused vmas parameter from
# pin_user_pages_remote()") in linux-next, expected in v6.5-rc1
#
# This function sets the NV_PIN_USER_PAGES_REMOTE_* macros as per
@@ -2751,10 +2892,16 @@ compile_test() {
echo "#define NV_PIN_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions"
fi
if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" ]; then
echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions"
if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" ]; then
echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions"
else
echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions"
echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions"
fi
if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" ]; then
echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" | append_conftest "functions"
else
echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" | append_conftest "functions"
fi
}
@@ -2777,7 +2924,11 @@ compile_test() {
return
fi
# conftest #2: Check if pin_user_pages_remote() has tsk argument
# conftest #2: Check if pin_user_pages_remote() has tsk and
# vmas arguments.
# Return if these arguments are present, else fall through to
# conftest #3
echo "$CONFTEST_PREAMBLE
#include <linux/mm.h>
long pin_user_pages_remote(struct task_struct *tsk,
@@ -2795,11 +2946,34 @@ compile_test() {
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK"
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS"
rm -f conftest$$.o
return
fi
# conftest #3: Check if pin_user_pages_remote() has vmas argument
echo "$CONFTEST_PREAMBLE
#include <linux/mm.h>
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages,
struct vm_area_struct **vmas,
int *locked) {
return 0;
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS"
rm -f conftest$$.o
else
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_PRESENT"
fi
;;
vfio_pin_pages_has_vfio_device_arg)
@@ -6067,6 +6241,68 @@ compile_test() {
compile_check_conftest "$CODE" "NV_MEMORY_FAILURE_MF_SW_SIMULATED_DEFINED" "" "types"
;;
crypto)
#
# Determine if we support various crypto functions.
# This test is not complete and may return false positives.
#
CODE="
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/hash.h>
#include <crypto/internal/ecc.h>
#include <crypto/kpp.h>
#include <crypto/public_key.h>
#include <crypto/sm3.h>
#include <keys/asymmetric-type.h>
#include <linux/crypto.h>
void conftest_crypto(void) {
struct shash_desc sd;
struct crypto_shash cs;
(void)crypto_shash_tfm_digest;
}"
compile_check_conftest "$CODE" "NV_CRYPTO_PRESENT" "" "symbols"
;;
mempolicy_has_unified_nodes)
#
# Determine if the 'mempolicy' structure has
# nodes union.
#
# nodes field was added by commit 269fbe72cd ("mm/mempolicy:
# use unified 'nodes' for bind/interleave/prefer policies") in
# v5.14 (2021-06-30).
#
CODE="
#include <linux/mempolicy.h>
int conftest_mempolicy_has_unified_nodes(void) {
return offsetof(struct mempolicy, nodes);
}"
compile_check_conftest "$CODE" "NV_MEMPOLICY_HAS_UNIFIED_NODES" "" "types"
;;
mempolicy_has_home_node)
#
# Determine if the 'mempolicy' structure has
# home_node field.
#
# home_node field was added by commit c6018b4b254
# ("mm/mempolicy: add set_mempolicy_home_node syscall") in v5.17
# (2022-01-14).
#
CODE="
#include <linux/mempolicy.h>
int conftest_mempolicy_has_home_node(void) {
return offsetof(struct mempolicy, home_node);
}"
compile_check_conftest "$CODE" "NV_MEMPOLICY_HAS_HOME_NODE" "" "types"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit.
#

View File

@@ -65,6 +65,9 @@
static bool output_rounding_fix = true;
module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);
static bool disable_vrr_memclk_switch = false;
module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);
/* These parameters are used for fault injection tests. Normally the defaults
* should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
@@ -91,6 +94,11 @@ NvBool nvkms_output_rounding_fix(void)
return output_rounding_fix;
}
NvBool nvkms_disable_vrr_memclk_switch(void)
{
return disable_vrr_memclk_switch;
}
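/*
 * Hedged usage note: as a boolean parameter registered with 0400 permissions,
 * disable_vrr_memclk_switch is read-only at runtime and can only be set at
 * load time, e.g. "nvidia-modeset.disable_vrr_memclk_switch=1" on the kernel
 * command line or the equivalent modprobe option (module name assumed).
 */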
#define NVKMS_SYNCPT_STUBS_NEEDED
/*************************************************************************

View File

@@ -98,6 +98,8 @@ typedef struct {
NvBool nvkms_output_rounding_fix(void);
NvBool nvkms_disable_vrr_memclk_switch(void);
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,
NvBool zero);

View File

@@ -108,5 +108,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_device_range
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_mm_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg

View File

@@ -24,6 +24,7 @@
#include "uvm_va_range.h"
#include "uvm_ats_faults.h"
#include "uvm_migrate_pageable.h"
#include <linux/mempolicy.h>
// TODO: Bug 2103669: Implement a real prefetching policy and remove or adapt
// these experimental parameters. These are intended to help guide that policy.
@@ -79,7 +80,7 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
NvU64 start,
size_t length,
uvm_fault_access_type_t access_type,
uvm_fault_client_type_t client_type)
uvm_ats_fault_context_t *ats_context)
{
uvm_va_space_t *va_space = gpu_va_space->va_space;
struct mm_struct *mm = va_space->va_space_mm.mm;
@@ -95,17 +96,18 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
// 2) guest physical -> host physical
//
// The overall ATS translation will fault if either of those translations is
// invalid. The get_user_pages() call above handles translation #1, but not
// #2. We don't know if we're running as a guest, but in case we are we can
// force that translation to be valid by touching the guest physical address
// from the CPU. If the translation is not valid then the access will cause
// a hypervisor fault. Note that dma_map_page() can't establish mappings
// used by GPU ATS SVA translations. GPU accesses to host physical addresses
// obtained as a result of the address translation request uses the CPU
// address space instead of the IOMMU address space since the translated
// host physical address isn't necessarily an IOMMU address. The only way to
// establish guest physical to host physical mapping in the CPU address
// space is to touch the page from the CPU.
// invalid. The pin_user_pages() call within uvm_migrate_pageable() call
// below handles translation #1, but not #2. We don't know if we're running
// as a guest, but in case we are we can force that translation to be valid
// by touching the guest physical address from the CPU. If the translation
// is not valid then the access will cause a hypervisor fault. Note that
// dma_map_page() can't establish mappings used by GPU ATS SVA translations.
// GPU accesses to host physical addresses obtained as a result of the
// address translation request use the CPU address space instead of the
// IOMMU address space since the translated host physical address isn't
// necessarily an IOMMU address. The only way to establish guest physical to
// host physical mapping in the CPU address space is to touch the page from
// the CPU.
//
// We assume that the hypervisor mappings are all VM_PFNMAP, VM_SHARED, and
// VM_WRITE, meaning that the mappings are all granted write access on any
@@ -116,20 +118,26 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
uvm_migrate_args_t uvm_migrate_args =
{
.va_space = va_space,
.mm = mm,
.dst_id = gpu_va_space->gpu->parent->id,
.dst_node_id = -1,
.populate_permissions = write ? UVM_POPULATE_PERMISSIONS_WRITE : UVM_POPULATE_PERMISSIONS_ANY,
.touch = true,
.skip_mapped = true,
.user_space_start = &user_space_start,
.user_space_length = &user_space_length,
.va_space = va_space,
.mm = mm,
.dst_id = ats_context->residency_id,
.dst_node_id = ats_context->residency_node,
.populate_permissions = write ? UVM_POPULATE_PERMISSIONS_WRITE : UVM_POPULATE_PERMISSIONS_ANY,
.touch = true,
.skip_mapped = true,
.populate_on_cpu_alloc_failures = true,
.user_space_start = &user_space_start,
.user_space_length = &user_space_length,
};
UVM_ASSERT(uvm_ats_can_service_faults(gpu_va_space, mm));
expand_fault_region(vma, start, length, client_type, &uvm_migrate_args.start, &uvm_migrate_args.length);
expand_fault_region(vma,
start,
length,
ats_context->client_type,
&uvm_migrate_args.start,
&uvm_migrate_args.length);
// We are trying to use migrate_vma API in the kernel (if it exists) to
// populate and map the faulting region on the GPU. We want to do this only
@@ -165,6 +173,58 @@ static void flush_tlb_write_faults(uvm_gpu_va_space_t *gpu_va_space,
uvm_tlb_batch_invalidate(&ats_invalidate->write_faults_tlb_batch, addr, size, PAGE_SIZE, UVM_MEMBAR_NONE);
}
static void ats_batch_select_residency(uvm_gpu_va_space_t *gpu_va_space,
struct vm_area_struct *vma,
uvm_ats_fault_context_t *ats_context)
{
uvm_gpu_t *gpu = gpu_va_space->gpu;
int residency = uvm_gpu_numa_node(gpu);
#if defined(NV_MEMPOLICY_HAS_UNIFIED_NODES)
struct mempolicy *vma_policy = vma_policy(vma);
unsigned short mode;
if (!vma_policy)
goto done;
mode = vma_policy->mode;
if ((mode == MPOL_BIND) || (mode == MPOL_PREFERRED_MANY) || (mode == MPOL_PREFERRED)) {
int home_node = NUMA_NO_NODE;
#if defined(NV_MEMPOLICY_HAS_HOME_NODE)
if ((mode != MPOL_PREFERRED) && (vma_policy->home_node != NUMA_NO_NODE))
home_node = vma_policy->home_node;
#endif
// Prefer home_node if set. Otherwise, prefer the faulting GPU if it's
// in the list of preferred nodes, else prefer the closest_cpu_numa_node
// to the GPU if closest_cpu_numa_node is in the list of preferred
// nodes. Fallback to the faulting GPU if all else fails.
if (home_node != NUMA_NO_NODE) {
residency = home_node;
}
else if (!node_isset(residency, vma_policy->nodes)) {
int closest_cpu_numa_node = gpu->parent->closest_cpu_numa_node;
if ((closest_cpu_numa_node != NUMA_NO_NODE) && node_isset(closest_cpu_numa_node, vma_policy->nodes))
residency = gpu->parent->closest_cpu_numa_node;
else
residency = first_node(vma_policy->nodes);
}
}
// Update gpu if residency is not the faulting gpu.
if (residency != uvm_gpu_numa_node(gpu))
gpu = uvm_va_space_find_gpu_with_memory_node_id(gpu_va_space->va_space, residency);
done:
#endif
ats_context->residency_id = gpu ? gpu->parent->id : UVM_ID_CPU;
ats_context->residency_node = residency;
}
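// Worked example of the selection above (numbers hypothetical): for a VMA
// bound with MPOL_BIND to nodes {0, 1}, a faulting GPU whose memory is NUMA
// node 8, and closest_cpu_numa_node == 1, home_node stays unset, node 8 is
// not in the policy mask but node 1 is, so residency becomes 1; the GPU
// lookup then finds no GPU owning that node and residency_id falls back to
// UVM_ID_CPU while residency_node reports 1.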
NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
struct vm_area_struct *vma,
NvU64 base,
@@ -205,6 +265,8 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
uvm_page_mask_zero(write_fault_mask);
}
ats_batch_select_residency(gpu_va_space, vma, ats_context);
for_each_va_block_subregion_in_mask(subregion, write_fault_mask, region) {
NvU64 start = base + (subregion.first * PAGE_SIZE);
size_t length = uvm_va_block_region_num_pages(subregion) * PAGE_SIZE;
@@ -215,7 +277,7 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
UVM_ASSERT(start >= vma->vm_start);
UVM_ASSERT((start + length) <= vma->vm_end);
status = service_ats_faults(gpu_va_space, vma, start, length, access_type, client_type);
status = service_ats_faults(gpu_va_space, vma, start, length, access_type, ats_context);
if (status != NV_OK)
return status;
@@ -244,11 +306,12 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
for_each_va_block_subregion_in_mask(subregion, read_fault_mask, region) {
NvU64 start = base + (subregion.first * PAGE_SIZE);
size_t length = uvm_va_block_region_num_pages(subregion) * PAGE_SIZE;
uvm_fault_access_type_t access_type = UVM_FAULT_ACCESS_TYPE_READ;
UVM_ASSERT(start >= vma->vm_start);
UVM_ASSERT((start + length) <= vma->vm_end);
status = service_ats_faults(gpu_va_space, vma, start, length, UVM_FAULT_ACCESS_TYPE_READ, client_type);
status = service_ats_faults(gpu_va_space, vma, start, length, access_type, ats_context);
if (status != NV_OK)
return status;

View File

@@ -152,7 +152,7 @@ static NvU32 uvm_channel_update_progress_with_max(uvm_channel_t *channel,
break;
if (entry->type == UVM_GPFIFO_ENTRY_TYPE_NORMAL) {
uvm_pushbuffer_mark_completed(channel->pool->manager->pushbuffer, entry);
uvm_pushbuffer_mark_completed(channel, entry);
list_add_tail(&entry->push_info->available_list_node, &channel->available_push_infos);
}
@@ -1035,6 +1035,57 @@ static NV_STATUS internal_channel_submit_work_indirect_sec2(uvm_push_t *push,
return status;
}
// When the Confidential Computing feature is enabled, the CPU is unable to
// access and read the pushbuffer. This is because it is located in the CPR of
// vidmem in this configuration. This function allows UVM to retrieve the
// content of the pushbuffer in an encrypted form for later decryption, hence,
// simulating the original access pattern. E.g, reading timestamp semaphores.
// See also: decrypt_push().
static void encrypt_push(uvm_push_t *push)
{
NvU64 push_protected_gpu_va;
NvU64 push_unprotected_gpu_va;
uvm_gpu_address_t auth_tag_gpu_va;
uvm_channel_t *channel = push->channel;
uvm_push_crypto_bundle_t *crypto_bundle;
uvm_gpu_t *gpu = uvm_push_get_gpu(push);
NvU32 push_size = uvm_push_get_size(push);
uvm_push_info_t *push_info = uvm_push_info_from_push(push);
uvm_pushbuffer_t *pushbuffer = channel->pool->manager->pushbuffer;
unsigned auth_tag_offset = UVM_CONF_COMPUTING_AUTH_TAG_SIZE * push->push_info_index;
if (!uvm_conf_computing_mode_enabled(gpu))
return;
if (!push_info->on_complete)
return;
if (!uvm_channel_is_ce(channel))
return;
if (push_size == 0)
return;
UVM_ASSERT(!uvm_channel_is_wlc(channel));
UVM_ASSERT(!uvm_channel_is_lcic(channel));
UVM_ASSERT(channel->conf_computing.push_crypto_bundles != NULL);
crypto_bundle = channel->conf_computing.push_crypto_bundles + push->push_info_index;
auth_tag_gpu_va = uvm_rm_mem_get_gpu_va(channel->conf_computing.push_crypto_bundle_auth_tags, gpu, false);
auth_tag_gpu_va.address += auth_tag_offset;
crypto_bundle->push_size = push_size;
push_protected_gpu_va = uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push);
push_unprotected_gpu_va = uvm_pushbuffer_get_unprotected_gpu_va_for_push(pushbuffer, push);
uvm_conf_computing_log_gpu_encryption(channel, &crypto_bundle->iv);
gpu->parent->ce_hal->encrypt(push,
uvm_gpu_address_virtual_unprotected(push_unprotected_gpu_va),
uvm_gpu_address_virtual(push_protected_gpu_va),
push_size,
auth_tag_gpu_va);
}
void uvm_channel_end_push(uvm_push_t *push)
{
uvm_channel_t *channel = push->channel;
@@ -1051,6 +1102,8 @@ void uvm_channel_end_push(uvm_push_t *push)
channel_pool_lock(channel->pool);
encrypt_push(push);
new_tracking_value = ++channel->tracking_sem.queued_value;
new_payload = (NvU32)new_tracking_value;
@@ -1561,11 +1614,15 @@ static void free_conf_computing_buffers(uvm_channel_t *channel)
uvm_rm_mem_free(channel->conf_computing.static_pb_protected_vidmem);
uvm_rm_mem_free(channel->conf_computing.static_pb_unprotected_sysmem);
uvm_rm_mem_free(channel->conf_computing.static_notifier_unprotected_sysmem);
uvm_rm_mem_free(channel->conf_computing.push_crypto_bundle_auth_tags);
uvm_kvfree(channel->conf_computing.static_pb_protected_sysmem);
uvm_kvfree(channel->conf_computing.push_crypto_bundles);
channel->conf_computing.static_pb_protected_vidmem = NULL;
channel->conf_computing.static_pb_unprotected_sysmem = NULL;
channel->conf_computing.static_notifier_unprotected_sysmem = NULL;
channel->conf_computing.push_crypto_bundle_auth_tags = NULL;
channel->conf_computing.static_pb_protected_sysmem = NULL;
channel->conf_computing.push_crypto_bundles = NULL;
uvm_rm_mem_free(channel->tracking_sem.semaphore.conf_computing.encrypted_payload);
uvm_rm_mem_free(channel->tracking_sem.semaphore.conf_computing.notifier);
@@ -1702,14 +1759,34 @@ static NV_STATUS alloc_conf_computing_buffers(uvm_channel_t *channel)
{
NV_STATUS status;
status = alloc_conf_computing_buffers_semaphore(channel);
UVM_ASSERT(uvm_channel_is_secure_ce(channel));
status = alloc_conf_computing_buffers_semaphore(channel);
if (status != NV_OK)
return status;
if (uvm_channel_is_wlc(channel))
if (uvm_channel_is_wlc(channel)) {
status = alloc_conf_computing_buffers_wlc(channel);
else if (uvm_channel_is_lcic(channel))
}
else if (uvm_channel_is_lcic(channel)) {
status = alloc_conf_computing_buffers_lcic(channel);
}
else {
uvm_gpu_t *gpu = channel->pool->manager->gpu;
void *push_crypto_bundles = uvm_kvmalloc_zero(sizeof(*channel->conf_computing.push_crypto_bundles) *
channel->num_gpfifo_entries);
if (push_crypto_bundles == NULL)
return NV_ERR_NO_MEMORY;
channel->conf_computing.push_crypto_bundles = push_crypto_bundles;
status = uvm_rm_mem_alloc_and_map_cpu(gpu,
UVM_RM_MEM_TYPE_SYS,
channel->num_gpfifo_entries * UVM_CONF_COMPUTING_AUTH_TAG_SIZE,
UVM_CONF_COMPUTING_BUF_ALIGNMENT,
&channel->conf_computing.push_crypto_bundle_auth_tags);
}
return status;
}

View File

@@ -355,6 +355,13 @@ struct uvm_channel_struct
// Encryption auth tags have to be located in unprotected sysmem.
void *launch_auth_tag_cpu;
NvU64 launch_auth_tag_gpu_va;
// Used to decrypt the push back to protected sysmem.
// This happens when profilers register callbacks for migration data.
uvm_push_crypto_bundle_t *push_crypto_bundles;
// Accompanying authentication tags for the crypto bundles
uvm_rm_mem_t *push_crypto_bundle_auth_tags;
} conf_computing;
// RM channel information

View File

@@ -26,6 +26,7 @@
#include "uvm_conf_computing.h"
#include "uvm_kvmalloc.h"
#include "uvm_gpu.h"
#include "uvm_hal.h"
#include "uvm_mem.h"
#include "uvm_processors.h"
#include "uvm_tracker.h"
@@ -60,8 +61,7 @@ NV_STATUS uvm_conf_computing_init_parent_gpu(const uvm_parent_gpu_t *parent)
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
// TODO: Bug 2844714.
// Since we have no routine to traverse parent gpus,
// TODO: Bug 2844714: since we have no routine to traverse parent GPUs,
// find first child GPU and get its parent.
first = uvm_global_processor_mask_find_first_gpu(&g_uvm_global.retained_gpus);
if (!first)
@@ -448,3 +448,51 @@ NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
return status;
}
NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
void *dst_plain,
const void *src_cipher,
const void *auth_tag_buffer,
NvU8 valid)
{
NV_STATUS status;
// There is no dedicated lock for the CSL context associated with replayable
// faults. The mutual exclusion required by the RM CSL API is enforced by
// relying on the GPU replayable service lock (ISR lock), since fault
// decryption is invoked as part of fault servicing.
UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));
UVM_ASSERT(!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu));
status = nvUvmInterfaceCslDecrypt(&parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx,
parent_gpu->fault_buffer_hal->entry_size(parent_gpu),
(const NvU8 *) src_cipher,
NULL,
(NvU8 *) dst_plain,
&valid,
sizeof(valid),
(const NvU8 *) auth_tag_buffer);
if (status != NV_OK)
UVM_ERR_PRINT("nvUvmInterfaceCslDecrypt() failed: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name);
return status;
}
void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu, NvU64 increment)
{
NV_STATUS status;
// See comment in uvm_conf_computing_fault_decrypt
UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));
UVM_ASSERT(!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu));
status = nvUvmInterfaceCslIncrementIv(&parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx,
UVM_CSL_OPERATION_DECRYPT,
increment,
NULL);
UVM_ASSERT(status == NV_OK);
}

View File

@@ -177,4 +177,28 @@ NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
const UvmCslIv *src_iv,
size_t size,
const void *auth_tag_buffer);
// CPU decryption of a single replayable fault, encrypted by GSP-RM.
//
// Replayable fault decryption depends not only on the encrypted fault contents
// and the authentication tag, but also on the plaintext valid bit associated
// with the fault.
//
// When decrypting data previously encrypted by the Copy Engine, use
// uvm_conf_computing_cpu_decrypt instead.
//
// Locking: this function must be invoked while holding the replayable ISR lock.
NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
void *dst_plain,
const void *src_cipher,
const void *auth_tag_buffer,
NvU8 valid);
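//
// A hedged usage sketch (buffer names hypothetical), assuming the replayable
// ISR lock is already held as required:
//
//     status = uvm_conf_computing_fault_decrypt(parent_gpu,
//                                               plain_entry,   // destination
//                                               cipher_entry,  // GSP-RM ciphertext
//                                               auth_tag,      // matching auth tag
//                                               valid_bit);
//     if (status != NV_OK)
//         uvm_global_set_fatal_error(status);   // error handling illustrative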
// Increment the CPU-side decrypt IV of the CSL context associated with
// replayable faults. The function is a no-op if the given increment is zero.
//
// The IV associated with a fault CSL context is a 64-bit counter.
//
// Locking: this function must be invoked while holding the replayable ISR lock.
void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu, NvU64 increment);
#endif // __UVM_CONF_COMPUTING_H__

View File

@@ -50,6 +50,7 @@ typedef struct uvm_channel_struct uvm_channel_t;
typedef struct uvm_user_channel_struct uvm_user_channel_t;
typedef struct uvm_push_struct uvm_push_t;
typedef struct uvm_push_info_struct uvm_push_info_t;
typedef struct uvm_push_crypto_bundle_struct uvm_push_crypto_bundle_t;
typedef struct uvm_push_acquire_info_struct uvm_push_acquire_info_t;
typedef struct uvm_pushbuffer_struct uvm_pushbuffer_t;
typedef struct uvm_gpfifo_entry_struct uvm_gpfifo_entry_t;

View File

@@ -198,6 +198,12 @@ typedef struct
// Client type of the service requestor.
uvm_fault_client_type_t client_type;
// New residency ID of the faulting region.
uvm_processor_id_t residency_id;
// New residency NUMA node ID of the faulting region.
int residency_node;
} uvm_ats_fault_context_t;
struct uvm_fault_service_batch_context_struct

View File

@@ -177,31 +177,34 @@ bool uvm_gpu_non_replayable_faults_pending(uvm_parent_gpu_t *parent_gpu)
return has_pending_faults == NV_TRUE;
}
static NvU32 fetch_non_replayable_fault_buffer_entries(uvm_gpu_t *gpu)
static NV_STATUS fetch_non_replayable_fault_buffer_entries(uvm_parent_gpu_t *parent_gpu, NvU32 *cached_faults)
{
NV_STATUS status;
NvU32 i = 0;
NvU32 cached_faults = 0;
uvm_fault_buffer_entry_t *fault_cache;
NvU32 entry_size = gpu->parent->fault_buffer_hal->entry_size(gpu->parent);
uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable;
NvU32 i;
NvU32 entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &parent_gpu->fault_buffer_info.non_replayable;
char *current_hw_entry = (char *)non_replayable_faults->shadow_buffer_copy;
uvm_fault_buffer_entry_t *fault_entry = non_replayable_faults->fault_cache;
fault_cache = non_replayable_faults->fault_cache;
UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.non_replayable_faults.service_lock));
UVM_ASSERT(parent_gpu->non_replayable_faults_supported);
UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.non_replayable_faults.service_lock));
UVM_ASSERT(gpu->parent->non_replayable_faults_supported);
status = nvUvmInterfaceGetNonReplayableFaults(&parent_gpu->fault_buffer_info.rm_info,
current_hw_entry,
cached_faults);
status = nvUvmInterfaceGetNonReplayableFaults(&gpu->parent->fault_buffer_info.rm_info,
non_replayable_faults->shadow_buffer_copy,
&cached_faults);
UVM_ASSERT(status == NV_OK);
if (status != NV_OK) {
UVM_ERR_PRINT("nvUvmInterfaceGetNonReplayableFaults() failed: %s, GPU %s\n",
nvstatusToString(status),
parent_gpu->name);
uvm_global_set_fatal_error(status);
return status;
}
// Parse all faults
for (i = 0; i < cached_faults; ++i) {
uvm_fault_buffer_entry_t *fault_entry = &non_replayable_faults->fault_cache[i];
gpu->parent->fault_buffer_hal->parse_non_replayable_entry(gpu->parent, current_hw_entry, fault_entry);
for (i = 0; i < *cached_faults; ++i) {
parent_gpu->fault_buffer_hal->parse_non_replayable_entry(parent_gpu, current_hw_entry, fault_entry);
// The GPU aligns the fault addresses to 4k, but all of our tracking is
// done in PAGE_SIZE chunks which might be larger.
@@ -226,9 +229,10 @@ static NvU32 fetch_non_replayable_fault_buffer_entries(uvm_gpu_t *gpu)
}
current_hw_entry += entry_size;
fault_entry++;
}
return cached_faults;
return NV_OK;
}
// In SRIOV, the UVM (guest) driver does not have access to the privileged
@@ -705,21 +709,28 @@ exit_no_channel:
uvm_va_space_up_read(va_space);
uvm_va_space_mm_release_unlock(va_space, mm);
if (status != NV_OK)
UVM_DBG_PRINT("Error servicing non-replayable faults on GPU: %s\n", uvm_gpu_name(gpu));
return status;
}
void uvm_gpu_service_non_replayable_fault_buffer(uvm_gpu_t *gpu)
{
NV_STATUS status = NV_OK;
NvU32 cached_faults;
// If this handler is modified to handle fewer than all of the outstanding
// faults, then special handling will need to be added to uvm_suspend()
// to guarantee that fault processing has completed before control is
// returned to the RM.
while ((cached_faults = fetch_non_replayable_fault_buffer_entries(gpu)) > 0) {
do {
NV_STATUS status;
NvU32 i;
status = fetch_non_replayable_fault_buffer_entries(gpu->parent, &cached_faults);
if (status != NV_OK)
return;
// Unlike replayable faults, we do not batch up and preprocess
// non-replayable faults since getting multiple faults on the same
// memory region is not very likely
@@ -728,10 +739,7 @@ void uvm_gpu_service_non_replayable_fault_buffer(uvm_gpu_t *gpu)
for (i = 0; i < cached_faults; ++i) {
status = service_fault(gpu, &gpu->parent->fault_buffer_info.non_replayable.fault_cache[i]);
if (status != NV_OK)
break;
return;
}
}
if (status != NV_OK)
UVM_DBG_PRINT("Error servicing non-replayable faults on GPU: %s\n", uvm_gpu_name(gpu));
} while (cached_faults > 0);
}

View File

@@ -486,7 +486,9 @@ static NV_STATUS cancel_fault_precise_va(uvm_gpu_t *gpu,
return status;
}
static NV_STATUS push_replay_on_gpu(uvm_gpu_t *gpu, uvm_fault_replay_type_t type, uvm_fault_service_batch_context_t *batch_context)
static NV_STATUS push_replay_on_gpu(uvm_gpu_t *gpu,
uvm_fault_replay_type_t type,
uvm_fault_service_batch_context_t *batch_context)
{
NV_STATUS status;
uvm_push_t push;
@@ -572,6 +574,19 @@ static NV_STATUS hw_fault_buffer_flush_locked(uvm_parent_gpu_t *parent_gpu)
return status;
}
static void fault_buffer_skip_replayable_entry(uvm_parent_gpu_t *parent_gpu, NvU32 index)
{
UVM_ASSERT(parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, index));
// Flushed faults are never decrypted, but the decryption IV associated with
// replayable faults still requires manual adjustment so it is kept in sync
// with the encryption IV on the GSP-RM's side.
if (!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu))
uvm_conf_computing_fault_increment_decrypt_iv(parent_gpu, 1);
parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, index);
}
static NV_STATUS fault_buffer_flush_locked(uvm_gpu_t *gpu,
uvm_gpu_buffer_flush_mode_t flush_mode,
uvm_fault_replay_type_t fault_replay,
@@ -610,7 +625,7 @@ static NV_STATUS fault_buffer_flush_locked(uvm_gpu_t *gpu,
// Wait until valid bit is set
UVM_SPIN_WHILE(!parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, get), &spin);
parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, get);
fault_buffer_skip_replayable_entry(parent_gpu, get);
++get;
if (get == replayable_faults->max_faults)
get = 0;
@@ -785,9 +800,9 @@ static bool fetch_fault_buffer_try_merge_entry(uvm_fault_buffer_entry_t *current
// This optimization cannot be performed during fault cancel on Pascal GPUs
// (fetch_mode == FAULT_FETCH_MODE_ALL) since we need accurate tracking of all
// the faults in each uTLB in order to guarantee precise fault attribution.
static void fetch_fault_buffer_entries(uvm_gpu_t *gpu,
uvm_fault_service_batch_context_t *batch_context,
fault_fetch_mode_t fetch_mode)
static NV_STATUS fetch_fault_buffer_entries(uvm_gpu_t *gpu,
uvm_fault_service_batch_context_t *batch_context,
fault_fetch_mode_t fetch_mode)
{
NvU32 get;
NvU32 put;
@@ -796,6 +811,7 @@ static void fetch_fault_buffer_entries(uvm_gpu_t *gpu,
NvU32 utlb_id;
uvm_fault_buffer_entry_t *fault_cache;
uvm_spin_loop_t spin;
NV_STATUS status = NV_OK;
uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable;
const bool in_pascal_cancel_path = (!gpu->parent->fault_cancel_va_supported && fetch_mode == FAULT_FETCH_MODE_ALL);
const bool may_filter = uvm_perf_fault_coalesce && !in_pascal_cancel_path;
@@ -851,7 +867,9 @@ static void fetch_fault_buffer_entries(uvm_gpu_t *gpu,
smp_mb__after_atomic();
// Got valid bit set. Let's cache.
gpu->parent->fault_buffer_hal->parse_entry(gpu->parent, get, current_entry);
status = gpu->parent->fault_buffer_hal->parse_replayable_entry(gpu->parent, get, current_entry);
if (status != NV_OK)
goto done;
// The GPU aligns the fault addresses to 4k, but all of our tracking is
// done in PAGE_SIZE chunks which might be larger.
@@ -918,6 +936,8 @@ done:
batch_context->num_cached_faults = fault_index;
batch_context->num_coalesced_faults = num_coalesced_faults;
return status;
}
// Sort comparator for pointers to fault buffer entries that sorts by
@@ -2475,7 +2495,10 @@ static NV_STATUS cancel_faults_precise_tlb(uvm_gpu_t *gpu, uvm_fault_service_bat
batch_context->has_throttled_faults = false;
// 5) Fetch all faults from buffer
fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_ALL);
status = fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_ALL);
if (status != NV_OK)
break;
++batch_context->batch_id;
UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults);
@@ -2612,7 +2635,10 @@ void uvm_gpu_service_replayable_faults(uvm_gpu_t *gpu)
batch_context->has_fatal_faults = false;
batch_context->has_throttled_faults = false;
fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_BATCH_READY);
status = fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_BATCH_READY);
if (status != NV_OK)
break;
if (batch_context->num_cached_faults == 0)
break;

View File

@@ -373,7 +373,7 @@ static uvm_hal_class_ops_t fault_buffer_table[] =
.read_get = uvm_hal_maxwell_fault_buffer_read_get_unsupported,
.write_get = uvm_hal_maxwell_fault_buffer_write_get_unsupported,
.get_ve_id = uvm_hal_maxwell_fault_buffer_get_ve_id_unsupported,
.parse_entry = uvm_hal_maxwell_fault_buffer_parse_entry_unsupported,
.parse_replayable_entry = uvm_hal_maxwell_fault_buffer_parse_replayable_entry_unsupported,
.entry_is_valid = uvm_hal_maxwell_fault_buffer_entry_is_valid_unsupported,
.entry_clear_valid = uvm_hal_maxwell_fault_buffer_entry_clear_valid_unsupported,
.entry_size = uvm_hal_maxwell_fault_buffer_entry_size_unsupported,
@@ -396,7 +396,7 @@ static uvm_hal_class_ops_t fault_buffer_table[] =
.read_put = uvm_hal_pascal_fault_buffer_read_put,
.read_get = uvm_hal_pascal_fault_buffer_read_get,
.write_get = uvm_hal_pascal_fault_buffer_write_get,
.parse_entry = uvm_hal_pascal_fault_buffer_parse_entry,
.parse_replayable_entry = uvm_hal_pascal_fault_buffer_parse_replayable_entry,
.entry_is_valid = uvm_hal_pascal_fault_buffer_entry_is_valid,
.entry_clear_valid = uvm_hal_pascal_fault_buffer_entry_clear_valid,
.entry_size = uvm_hal_pascal_fault_buffer_entry_size,
@@ -411,7 +411,7 @@ static uvm_hal_class_ops_t fault_buffer_table[] =
.read_get = uvm_hal_volta_fault_buffer_read_get,
.write_get = uvm_hal_volta_fault_buffer_write_get,
.get_ve_id = uvm_hal_volta_fault_buffer_get_ve_id,
.parse_entry = uvm_hal_volta_fault_buffer_parse_entry,
.parse_replayable_entry = uvm_hal_volta_fault_buffer_parse_replayable_entry,
.parse_non_replayable_entry = uvm_hal_volta_fault_buffer_parse_non_replayable_entry,
.get_fault_type = uvm_hal_volta_fault_buffer_get_fault_type,
}

View File

@@ -485,11 +485,24 @@ typedef NvU32 (*uvm_hal_fault_buffer_read_get_t)(uvm_parent_gpu_t *parent_gpu);
typedef void (*uvm_hal_fault_buffer_write_get_t)(uvm_parent_gpu_t *parent_gpu, NvU32 get);
typedef NvU8 (*uvm_hal_fault_buffer_get_ve_id_t)(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type);
// Parse the entry on the given buffer index. This also clears the valid bit of
// the entry in the buffer.
typedef void (*uvm_hal_fault_buffer_parse_entry_t)(uvm_parent_gpu_t *gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
// Parse the replayable entry at the given buffer index. This also clears the
// valid bit of the entry in the buffer.
typedef NV_STATUS (*uvm_hal_fault_buffer_parse_replayable_entry_t)(uvm_parent_gpu_t *gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
NV_STATUS uvm_hal_maxwell_fault_buffer_parse_replayable_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
NV_STATUS uvm_hal_pascal_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
NV_STATUS uvm_hal_volta_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
typedef bool (*uvm_hal_fault_buffer_entry_is_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index);
typedef void (*uvm_hal_fault_buffer_entry_clear_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index);
typedef NvU32 (*uvm_hal_fault_buffer_entry_size_t)(uvm_parent_gpu_t *parent_gpu);
@@ -508,9 +521,6 @@ NvU32 uvm_hal_maxwell_fault_buffer_read_put_unsupported(uvm_parent_gpu_t *parent
NvU32 uvm_hal_maxwell_fault_buffer_read_get_unsupported(uvm_parent_gpu_t *parent_gpu);
void uvm_hal_maxwell_fault_buffer_write_get_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index);
NvU8 uvm_hal_maxwell_fault_buffer_get_ve_id_unsupported(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type);
void uvm_hal_maxwell_fault_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
uvm_fault_type_t uvm_hal_maxwell_fault_buffer_get_fault_type_unsupported(const NvU32 *fault_entry);
void uvm_hal_pascal_enable_replayable_faults(uvm_parent_gpu_t *parent_gpu);
@@ -519,18 +529,14 @@ void uvm_hal_pascal_clear_replayable_faults(uvm_parent_gpu_t *parent_gpu, NvU32
NvU32 uvm_hal_pascal_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu);
NvU32 uvm_hal_pascal_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu);
void uvm_hal_pascal_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index);
void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
uvm_fault_type_t uvm_hal_pascal_fault_buffer_get_fault_type(const NvU32 *fault_entry);
NvU32 uvm_hal_volta_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu);
NvU32 uvm_hal_volta_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu);
void uvm_hal_volta_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index);
NvU8 uvm_hal_volta_fault_buffer_get_ve_id(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type);
void uvm_hal_volta_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry);
uvm_fault_type_t uvm_hal_volta_fault_buffer_get_fault_type(const NvU32 *fault_entry);
void uvm_hal_turing_disable_replayable_faults(uvm_parent_gpu_t *parent_gpu);
@@ -772,7 +778,7 @@ struct uvm_fault_buffer_hal_struct
uvm_hal_fault_buffer_read_get_t read_get;
uvm_hal_fault_buffer_write_get_t write_get;
uvm_hal_fault_buffer_get_ve_id_t get_ve_id;
uvm_hal_fault_buffer_parse_entry_t parse_entry;
uvm_hal_fault_buffer_parse_replayable_entry_t parse_replayable_entry;
uvm_hal_fault_buffer_entry_is_valid_t entry_is_valid;
uvm_hal_fault_buffer_entry_clear_valid_t entry_clear_valid;
uvm_hal_fault_buffer_entry_size_t entry_size;
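
For orientation, a hedged call-site sketch of the reworked parse entry point driven through the HAL table; the loop structure and the cached_get field are assumptions for illustration, not code from this change.

// Hypothetical sketch: drain replayable fault entries via the per-GPU HAL table.
// parse_replayable_entry() clears the entry's valid bit itself and now reports
// decryption failures through NV_STATUS instead of returning void.
static NV_STATUS drain_replayable_faults_sketch(uvm_parent_gpu_t *parent_gpu)
{
    NvU32 get = parent_gpu->fault_buffer_info.replayable.cached_get; // assumed field
    uvm_fault_buffer_entry_t entry;

    while (parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, get)) {
        NV_STATUS status = parent_gpu->fault_buffer_hal->parse_replayable_entry(parent_gpu, get, &entry);

        if (status != NV_OK)
            return status;

        get = (get + 1) % parent_gpu->fault_buffer_info.replayable.max_faults;
    }

    return NV_OK;
}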

View File

@@ -128,6 +128,13 @@ static uvm_gpu_address_t uvm_gpu_address_virtual(NvU64 va)
return address;
}
static uvm_gpu_address_t uvm_gpu_address_virtual_unprotected(NvU64 va)
{
uvm_gpu_address_t address = uvm_gpu_address_virtual(va);
address.is_unprotected = true;
return address;
}
// Create a physical GPU address
static uvm_gpu_address_t uvm_gpu_address_physical(uvm_aperture_t aperture, NvU64 pa)
{

View File

@@ -153,6 +153,10 @@ static inline const struct cpumask *uvm_cpumask_of_node(int node)
#define VM_MIXEDMAP 0x00000000
#endif
#if !defined(MPOL_PREFERRED_MANY)
#define MPOL_PREFERRED_MANY 5
#endif
//
// printk.h already defined pr_fmt, so we have to redefine it so the pr_*
// routines pick up our version

View File

@@ -68,11 +68,12 @@ uvm_fault_type_t uvm_hal_maxwell_fault_buffer_get_fault_type_unsupported(const N
return UVM_FAULT_TYPE_COUNT;
}
void uvm_hal_maxwell_fault_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry)
NV_STATUS uvm_hal_maxwell_fault_buffer_parse_replayable_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry)
{
UVM_ASSERT_MSG(false, "fault_buffer_parse_entry is not supported on GPU: %s.\n", parent_gpu->name);
return NV_ERR_NOT_SUPPORTED;
}
bool uvm_hal_maxwell_fault_buffer_entry_is_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index)

View File

@@ -944,17 +944,18 @@ NV_STATUS uvm_api_migrate(UVM_MIGRATE_PARAMS *params, struct file *filp)
if (type == UVM_API_RANGE_TYPE_ATS) {
uvm_migrate_args_t uvm_migrate_args =
{
.va_space = va_space,
.mm = mm,
.start = params->base,
.length = params->length,
.dst_id = (dest_gpu ? dest_gpu->id : UVM_ID_CPU),
.dst_node_id = (int)params->cpuNumaNode,
.populate_permissions = UVM_POPULATE_PERMISSIONS_INHERIT,
.touch = false,
.skip_mapped = false,
.user_space_start = &params->userSpaceStart,
.user_space_length = &params->userSpaceLength,
.va_space = va_space,
.mm = mm,
.start = params->base,
.length = params->length,
.dst_id = (dest_gpu ? dest_gpu->id : UVM_ID_CPU),
.dst_node_id = (int)params->cpuNumaNode,
.populate_permissions = UVM_POPULATE_PERMISSIONS_INHERIT,
.touch = false,
.skip_mapped = false,
.populate_on_cpu_alloc_failures = false,
.user_space_start = &params->userSpaceStart,
.user_space_length = &params->userSpaceLength,
};
status = uvm_migrate_pageable(&uvm_migrate_args);

View File

@@ -507,6 +507,22 @@ static NV_STATUS migrate_vma_copy_pages(struct vm_area_struct *vma,
return NV_OK;
}
void migrate_vma_cleanup_pages(unsigned long *dst, unsigned long npages)
{
unsigned long i;
for (i = 0; i < npages; i++) {
struct page *dst_page = migrate_pfn_to_page(dst[i]);
if (!dst_page)
continue;
unlock_page(dst_page);
__free_page(dst_page);
dst[i] = 0;
}
}
void uvm_migrate_vma_alloc_and_copy(struct migrate_vma *args, migrate_vma_state_t *state)
{
struct vm_area_struct *vma = args->vma;
@@ -531,6 +547,10 @@ void uvm_migrate_vma_alloc_and_copy(struct migrate_vma *args, migrate_vma_state_
if (state->status == NV_OK)
state->status = tracker_status;
// Mark all pages as not migrating if we're failing
if (state->status != NV_OK)
migrate_vma_cleanup_pages(args->dst, state->num_pages);
}
void uvm_migrate_vma_alloc_and_copy_helper(struct vm_area_struct *vma,
@@ -802,7 +822,7 @@ static NV_STATUS migrate_pageable_vma_region(struct vm_area_struct *vma,
// If the destination is the CPU, signal user-space to retry with a
// different node. Otherwise, just try to populate anywhere in the
// system
if (UVM_ID_IS_CPU(uvm_migrate_args->dst_id)) {
if (UVM_ID_IS_CPU(uvm_migrate_args->dst_id) && !uvm_migrate_args->populate_on_cpu_alloc_failures) {
*next_addr = start + find_first_bit(state->scratch2_mask, num_pages) * PAGE_SIZE;
return NV_ERR_MORE_PROCESSING_REQUIRED;
}
@@ -961,13 +981,10 @@ NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args)
// We only check that dst_node_id is a valid node in the system and it
// doesn't correspond to a GPU node. This is fine because
// alloc_pages_node will clamp the allocation to
// cpuset_current_mems_allowed, and uvm_migrate_pageable is only called
// from process context (uvm_migrate) when dst_id is CPU. UVM bottom
// half never calls uvm_migrate_pageable when dst_id is CPU. So, assert
// that we're in a user thread. However, this would need to change if we
// wanted to call this function from a bottom half with CPU dst_id.
UVM_ASSERT(!(current->flags & PF_KTHREAD));
// cpuset_current_mems_allowed when uvm_migrate_pageable is called from
// process context (uvm_migrate) when dst_id is CPU. UVM bottom half
// calls uvm_migrate_pageable with CPU dst_id only when the VMA memory
// policy is set to dst_node_id and dst_node_id is not NUMA_NO_NODE.
if (!nv_numa_node_has_memory(dst_node_id) ||
uvm_va_space_find_gpu_with_memory_node_id(va_space, dst_node_id) != NULL)
return NV_ERR_INVALID_ARGUMENT;

View File

@@ -43,6 +43,7 @@ typedef struct
uvm_populate_permissions_t populate_permissions;
bool touch : 1;
bool skip_mapped : 1;
bool populate_on_cpu_alloc_failures : 1;
NvU64 *user_space_start;
NvU64 *user_space_length;
} uvm_migrate_args_t;
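
A hedged caller sketch for the new bit-field: setting populate_on_cpu_alloc_failures makes a failed CPU-node allocation fall back to populating anywhere in the system instead of returning NV_ERR_MORE_PROCESSING_REQUIRED to user space. The surrounding variables are placeholders mirroring the initializer shown earlier in this change.

uvm_migrate_args_t uvm_migrate_args =
{
    .va_space                       = va_space,
    .mm                             = mm,
    .start                          = start,
    .length                         = length,
    .dst_id                         = UVM_ID_CPU,
    .dst_node_id                    = dst_node_id,
    .populate_permissions           = UVM_POPULATE_PERMISSIONS_INHERIT,
    .touch                          = false,
    .skip_mapped                    = false,
    .populate_on_cpu_alloc_failures = true,   // new field: do not bounce back to user space
    .user_space_start               = &user_space_start,
    .user_space_length              = &user_space_length,
};

status = uvm_migrate_pageable(&uvm_migrate_args);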

View File

@@ -214,9 +214,9 @@ static UvmFaultMetadataPacket *get_fault_buffer_entry_metadata(uvm_parent_gpu_t
return fault_entry_metadata + index;
}
void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry)
NV_STATUS uvm_hal_pascal_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry)
{
NvU32 *fault_entry;
NvU64 addr_hi, addr_lo;
@@ -280,6 +280,8 @@ void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
// Automatically clear valid bit for the entry in the fault buffer
uvm_hal_pascal_fault_buffer_entry_clear_valid(parent_gpu, index);
return NV_OK;
}
bool uvm_hal_pascal_fault_buffer_entry_is_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index)

View File

@@ -1455,7 +1455,18 @@ static uvm_perf_thrashing_hint_t get_hint_for_migration_thrashing(va_space_thras
hint.type = UVM_PERF_THRASHING_HINT_TYPE_NONE;
closest_resident_id = uvm_va_block_page_get_closest_resident(va_block, page_index, requester);
UVM_ASSERT(UVM_ID_IS_VALID(closest_resident_id));
if (uvm_va_block_is_hmm(va_block)) {
// HMM pages always start out resident on the CPU but may not be
// recorded in the va_block state because hmm_range_fault() or
// similar functions haven't been called to get an accurate snapshot
// of the Linux state. We can assume pages are CPU resident for the
// purpose of deciding where to migrate to reduce thrashing.
if (UVM_ID_IS_INVALID(closest_resident_id))
closest_resident_id = UVM_ID_CPU;
}
else {
UVM_ASSERT(UVM_ID_IS_VALID(closest_resident_id));
}
if (thrashing_processors_can_access(va_space, page_thrashing, preferred_location)) {
// The logic in uvm_va_block_select_residency chooses the preferred

View File

@@ -64,6 +64,14 @@ typedef enum
UVM_PUSH_FLAG_COUNT,
} uvm_push_flag_t;
struct uvm_push_crypto_bundle_struct {
// Initialization vector used to decrypt the push
UvmCslIv iv;
// Size of the pushbuffer that is encrypted/decrypted
NvU32 push_size;
};
struct uvm_push_struct
{
// Location of the first method of the push

View File

@@ -776,15 +776,6 @@ static NV_STATUS test_timestamp_on_gpu(uvm_gpu_t *gpu)
NvU32 i;
NvU64 last_stamp = 0;
// TODO: Bug 3988992: [UVM][HCC] RFE - Support encrypted semaphore for secure CE channels
// This test is waived when Confidential Computing is enabled because it
// assumes that CPU can directly read the result of a semaphore timestamp
// operation. Instead, the operation needs to be followed up by an encrypt-
// decrypt trip to be accessible to the CPU. This will be cleaner and simpler
// once encrypted semaphores are available.
if (uvm_conf_computing_mode_enabled(gpu))
return NV_OK;
for (i = 0; i < 10; ++i) {
status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "Releasing a timestamp");
if (status != NV_OK)

View File

@@ -449,21 +449,68 @@ static uvm_pushbuffer_chunk_t *gpfifo_to_chunk(uvm_pushbuffer_t *pushbuffer, uvm
return chunk;
}
void uvm_pushbuffer_mark_completed(uvm_pushbuffer_t *pushbuffer, uvm_gpfifo_entry_t *gpfifo)
static void decrypt_push(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo)
{
NV_STATUS status;
NvU32 auth_tag_offset;
void *auth_tag_cpu_va;
void *push_protected_cpu_va;
void *push_unprotected_cpu_va;
NvU32 pushbuffer_offset = gpfifo->pushbuffer_offset;
NvU32 push_info_index = gpfifo->push_info - channel->push_infos;
uvm_pushbuffer_t *pushbuffer = channel->pool->manager->pushbuffer;
uvm_push_crypto_bundle_t *crypto_bundle = channel->conf_computing.push_crypto_bundles + push_info_index;
if (channel->conf_computing.push_crypto_bundles == NULL)
return;
// When the crypto bundle is used, the push size cannot be zero
if (crypto_bundle->push_size == 0)
return;
UVM_ASSERT(!uvm_channel_is_wlc(channel));
UVM_ASSERT(!uvm_channel_is_lcic(channel));
push_protected_cpu_va = (char *)get_base_cpu_va(pushbuffer) + pushbuffer_offset;
push_unprotected_cpu_va = (char *)uvm_rm_mem_get_cpu_va(pushbuffer->memory_unprotected_sysmem) + pushbuffer_offset;
auth_tag_offset = push_info_index * UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
auth_tag_cpu_va = (char *)uvm_rm_mem_get_cpu_va(channel->conf_computing.push_crypto_bundle_auth_tags) +
auth_tag_offset;
status = uvm_conf_computing_cpu_decrypt(channel,
push_protected_cpu_va,
push_unprotected_cpu_va,
&crypto_bundle->iv,
crypto_bundle->push_size,
auth_tag_cpu_va);
// A decryption failure here is not fatal because it does not
// prevent UVM from running fine in the future and cannot be used
// maliciously to leak information or otherwise derail UVM from its
// regular duties.
UVM_ASSERT_MSG_RELEASE(status == NV_OK, "Pushbuffer decryption failure: %s\n", nvstatusToString(status));
// Avoid reusing the bundle across multiple pushes
crypto_bundle->push_size = 0;
}
void uvm_pushbuffer_mark_completed(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo)
{
uvm_pushbuffer_chunk_t *chunk;
uvm_push_info_t *push_info = gpfifo->push_info;
bool need_to_update_chunk = false;
uvm_push_info_t *push_info = gpfifo->push_info;
uvm_pushbuffer_t *pushbuffer = channel->pool->manager->pushbuffer;
UVM_ASSERT(gpfifo->type == UVM_GPFIFO_ENTRY_TYPE_NORMAL);
chunk = gpfifo_to_chunk(pushbuffer, gpfifo);
if (push_info->on_complete != NULL)
if (push_info->on_complete != NULL) {
decrypt_push(channel, gpfifo);
push_info->on_complete(push_info->on_complete_data);
push_info->on_complete = NULL;
push_info->on_complete_data = NULL;
push_info->on_complete = NULL;
push_info->on_complete_data = NULL;
}
uvm_spin_lock(&pushbuffer->lock);

View File

@@ -258,7 +258,7 @@ NV_STATUS uvm_pushbuffer_begin_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *pu
// Complete a pending push
// Updates the chunk state the pending push used
void uvm_pushbuffer_mark_completed(uvm_pushbuffer_t *pushbuffer, uvm_gpfifo_entry_t *gpfifo);
void uvm_pushbuffer_mark_completed(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo);
// Get the GPU VA for an ongoing push
NvU64 uvm_pushbuffer_get_gpu_va_for_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push);

View File

@@ -275,13 +275,15 @@ static NV_STATUS alloc_and_init_mem(uvm_gpu_t *gpu, uvm_mem_t **mem, size_t size
TEST_NV_CHECK_GOTO(ce_memset_gpu(gpu, *mem, size, 0xdead), err);
}
else {
if (type == MEM_ALLOC_TYPE_SYSMEM_DMA)
if (type == MEM_ALLOC_TYPE_SYSMEM_DMA) {
TEST_NV_CHECK_RET(uvm_mem_alloc_sysmem_dma(size, gpu, NULL, mem));
else
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(*mem, gpu), err);
}
else {
TEST_NV_CHECK_RET(uvm_mem_alloc_sysmem(size, NULL, mem));
}
TEST_NV_CHECK_GOTO(uvm_mem_map_cpu_kernel(*mem), err);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(*mem, gpu), err);
write_range_cpu(*mem, size, 0xdeaddead);
}
@@ -443,7 +445,6 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu, size_t copy_size, siz
cpu_encrypt(push.channel, src_cipher, src_plain, auth_tag_mem, size, copy_size);
gpu_decrypt(&push, dst_plain, src_cipher, auth_tag_mem, size, copy_size);
// Wait for SEC2 before launching the CE part.
// SEC2 is only allowed to release semaphores in unprotected sysmem,
// and CE can only acquire semaphores in protected vidmem.

View File

@@ -2083,12 +2083,6 @@ static uvm_processor_id_t block_page_get_closest_resident_in_mask(uvm_va_block_t
return id;
}
// HMM va_blocks don't know if a page is CPU resident until either
// migrate_vma_setup() or hmm_range_fault() is called. If a page isn't
// resident anywhere, assume it is CPU resident.
if (uvm_va_block_is_hmm(va_block))
return UVM_ID_CPU;
return UVM_ID_INVALID;
}
@@ -2888,7 +2882,7 @@ static uvm_va_block_region_t block_phys_contig_region(uvm_va_block_t *block,
{
if (UVM_ID_IS_CPU(resident_id)) {
uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index);
return uvm_va_block_region(page_index, page_index + uvm_cpu_chunk_num_pages(chunk));
return uvm_cpu_chunk_block_region(block, chunk, page_index);
}
else {
uvm_chunk_size_t chunk_size;

View File

@@ -25,7 +25,8 @@
#include "uvm_global.h"
#include "uvm_gpu.h"
#include "uvm_hal.h"
#include "uvm_push.h"
#include "uvm_conf_computing.h"
#include "nv_uvm_types.h"
#include "hwref/volta/gv100/dev_fault.h"
#include "hwref/volta/gv100/dev_fb.h"
#include "clc369.h"
@@ -246,6 +247,20 @@ static NvU32 *get_fault_buffer_entry(uvm_parent_gpu_t *parent_gpu, NvU32 index)
return fault_entry;
}
// See uvm_pascal_fault_buffer.c::get_fault_buffer_entry_metadata
static UvmFaultMetadataPacket *get_fault_buffer_entry_metadata(uvm_parent_gpu_t *parent_gpu, NvU32 index)
{
UvmFaultMetadataPacket *fault_entry_metadata;
UVM_ASSERT(index < parent_gpu->fault_buffer_info.replayable.max_faults);
UVM_ASSERT(!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu));
fault_entry_metadata = parent_gpu->fault_buffer_info.rm_info.replayable.bufferMetadata;
UVM_ASSERT(fault_entry_metadata != NULL);
return fault_entry_metadata + index;
}
static void parse_fault_entry_common(uvm_parent_gpu_t *parent_gpu,
NvU32 *fault_entry,
uvm_fault_buffer_entry_t *buffer_entry)
@@ -323,24 +338,47 @@ static void parse_fault_entry_common(uvm_parent_gpu_t *parent_gpu,
UVM_ASSERT_MSG(replayable_fault_enabled, "Fault with REPLAYABLE_FAULT_EN bit unset\n");
}
void uvm_hal_volta_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry)
NV_STATUS uvm_hal_volta_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
NvU32 index,
uvm_fault_buffer_entry_t *buffer_entry)
{
fault_buffer_entry_c369_t entry;
NvU32 *fault_entry;
BUILD_BUG_ON(NVC369_BUF_SIZE > UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE);
BUILD_BUG_ON(sizeof(entry) > UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE);
// Valid bit must be set before this function is called
UVM_ASSERT(parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, index));
fault_entry = get_fault_buffer_entry(parent_gpu, index);
// When Confidential Computing is enabled, faults are encrypted by RM, so
// they need to be decrypted before they can be parsed
if (!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu)) {
NV_STATUS status;
UvmFaultMetadataPacket *fault_entry_metadata = get_fault_buffer_entry_metadata(parent_gpu, index);
status = uvm_conf_computing_fault_decrypt(parent_gpu,
&entry,
fault_entry,
fault_entry_metadata->authTag,
fault_entry_metadata->valid);
if (status != NV_OK) {
uvm_global_set_fatal_error(status);
return status;
}
fault_entry = (NvU32 *) &entry;
}
parse_fault_entry_common(parent_gpu, fault_entry, buffer_entry);
UVM_ASSERT(buffer_entry->is_replayable);
// Automatically clear valid bit for the entry in the fault buffer
parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, index);
return NV_OK;
}
void uvm_hal_volta_fault_buffer_parse_non_replayable_entry(uvm_parent_gpu_t *parent_gpu,

View File

@@ -939,6 +939,12 @@ nvswitch_os_get_os_version
NvU32 *pBuildNum
);
NvlStatus
nvswitch_os_get_pid
(
NvU32 *pPid
);
void
nvswitch_lib_smbpbi_log_sxid
(

View File

@@ -0,0 +1,211 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_AEAD_H
#define CRYPTLIB_AEAD_H
/*=====================================================================================
* Authenticated Encryption with Associated data (AEAD) Cryptography Primitives
*=====================================================================================
*/
#if LIBSPDM_AEAD_GCM_SUPPORT
/**
* Performs AEAD AES-GCM authenticated encryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16 or 32, otherwise false is returned.
* tag_size must be 12, 13, 14, 15, 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD AES-GCM authenticated encryption succeeded.
* @retval false AEAD AES-GCM authenticated encryption failed.
**/
extern bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD AES-GCM authenticated decryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16 or 32, otherwise false is returned.
* tag_size must be 12, 13, 14, 15, 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD AES-GCM authenticated decryption succeeded.
* @retval false AEAD AES-GCM authenticated decryption failed.
**/
extern bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_GCM_SUPPORT */
#if LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
/**
* Performs AEAD ChaCha20Poly1305 authenticated encryption on a data buffer and additional
* authenticated data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 32, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD ChaCha20Poly1305 authenticated encryption succeeded.
* @retval false AEAD ChaCha20Poly1305 authenticated encryption failed.
**/
extern bool libspdm_aead_chacha20_poly1305_encrypt(
const uint8_t *key, size_t key_size, const uint8_t *iv,
size_t iv_size, const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size, uint8_t *tag_out,
size_t tag_size, uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD ChaCha20Poly1305 authenticated decryption on a data buffer and additional authenticated data (AAD).
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 32, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD ChaCha20Poly1305 authenticated decryption succeeded.
* @retval false AEAD ChaCha20Poly1305 authenticated decryption failed.
*
**/
extern bool libspdm_aead_chacha20_poly1305_decrypt(
const uint8_t *key, size_t key_size, const uint8_t *iv,
size_t iv_size, const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size, const uint8_t *tag,
size_t tag_size, uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT */
#if LIBSPDM_AEAD_SM4_SUPPORT
/**
* Performs AEAD SM4-GCM authenticated encryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be encrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[out] tag_out Pointer to a buffer that receives the authentication tag output.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the encryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD SM4-GCM authenticated encryption succeeded.
* @retval false AEAD SM4-GCM authenticated encryption failed.
**/
extern bool libspdm_aead_sm4_gcm_encrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
/**
* Performs AEAD SM4-GCM authenticated decryption on a data buffer and additional authenticated
* data.
*
* iv_size must be 12, otherwise false is returned.
* key_size must be 16, otherwise false is returned.
* tag_size must be 16, otherwise false is returned.
*
* If data verification fails, false is returned.
*
* @param[in] key Pointer to the encryption key.
* @param[in] key_size Size of the encryption key in bytes.
* @param[in] iv Pointer to the IV value.
* @param[in] iv_size Size of the IV value in bytes.
* @param[in] a_data Pointer to the additional authenticated data.
* @param[in] a_data_size Size of the additional authenticated data in bytes.
* @param[in] data_in Pointer to the input data buffer to be decrypted.
* @param[in] data_in_size Size of the input data buffer in bytes.
* @param[in] tag Pointer to a buffer that contains the authentication tag.
* @param[in] tag_size Size of the authentication tag in bytes.
* @param[out] data_out Pointer to a buffer that receives the decryption output.
* @param[out] data_out_size Size of the output data buffer in bytes.
*
* @retval true AEAD SM4-GCM authenticated decryption succeeded.
* @retval false AEAD SM4-GCM authenticated decryption failed.
**/
extern bool libspdm_aead_sm4_gcm_decrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_SM4_SUPPORT */
#endif /* CRYPTLIB_AEAD_H */
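
For reference, a minimal usage sketch of the AES-GCM helpers declared above; the key, IV, and tag sizes follow the documented constraints, while the buffers and key material are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

// Hedged sketch: AES-256-GCM round trip with a 12-byte IV and a 16-byte tag.
static bool aead_roundtrip_sketch(const uint8_t key[32], const uint8_t iv[12],
                                  const uint8_t *aad, size_t aad_size,
                                  const uint8_t *plain, size_t plain_size,
                                  uint8_t *cipher, uint8_t *decrypted)
{
    uint8_t tag[16];
    size_t cipher_size = 0;
    size_t decrypted_size = 0;

    if (!libspdm_aead_aes_gcm_encrypt(key, 32, iv, 12, aad, aad_size,
                                      plain, plain_size, tag, sizeof(tag),
                                      cipher, &cipher_size))
        return false;

    // Decryption fails if either the ciphertext or the tag was tampered with.
    if (!libspdm_aead_aes_gcm_decrypt(key, 32, iv, 12, aad, aad_size,
                                      cipher, cipher_size, tag, sizeof(tag),
                                      decrypted, &decrypted_size))
        return false;

    return decrypted_size == plain_size && memcmp(plain, decrypted, plain_size) == 0;
}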

View File

@@ -0,0 +1,416 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_CERT_H
#define CRYPTLIB_CERT_H
/**
* Retrieve the tag and length of the tag.
*
* @param ptr The position in the ASN.1 data.
* @param end End of data.
* @param length The variable that will receive the length.
* @param tag The expected tag.
*
* @retval true Get tag successful.
* @retval false Failed to get tag, or the tag does not match.
**/
extern bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length, uint32_t tag);
/**
* Retrieve the subject bytes from one X.509 certificate.
*
* If cert is NULL, then return false.
* If subject_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] cert_subject Pointer to the retrieved certificate subject bytes.
* @param[in, out] subject_size The size in bytes of the cert_subject buffer on input,
* and the size of buffer returned cert_subject on output.
*
* @retval true The certificate subject retrieved successfully.
* @retval false Invalid certificate, or the subject_size is too small for the result.
* The subject_size will be updated with the required size.
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_subject,
size_t *subject_size);
/**
* Retrieve the version from one X.509 certificate.
*
* If cert is NULL, then return false.
* If cert_size is 0, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] version Pointer to the retrieved version integer.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size, size_t *version);
/**
* Retrieve the serialNumber from one X.509 certificate.
*
* If cert is NULL, then return false.
* If cert_size is 0, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] serial_number Pointer to the retrieved certificate serial_number bytes.
* @param[in, out] serial_number_size The size in bytes of the serial_number buffer on input,
* and the size of buffer returned serial_number on output.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
uint8_t *serial_number,
size_t *serial_number_size);
/**
* Retrieve the issuer bytes from one X.509 certificate.
*
* If cert is NULL, then return false.
* If issuer_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] cert_issuer Pointer to the retrieved certificate subject bytes.
* @param[in, out] issuer_size The size in bytes of the cert_issuer buffer on input,
* and the size of buffer returned cert_issuer on output.
*
* @retval true The certificate issuer retrieved successfully.
* @retval false Invalid certificate, or the issuer_size is too small for the result.
* The issuer_size will be updated with the required size.
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_issuer,
size_t *issuer_size);
/**
* Retrieve Extension data from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[in] oid Object identifier buffer
* @param[in] oid_size Object identifier buffer size
* @param[out] extension_data Extension bytes.
* @param[in, out] extension_data_size Extension bytes size.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
const uint8_t *oid, size_t oid_size,
uint8_t *extension_data,
size_t *extension_data_size);
/**
* Retrieve the Validity from one X.509 certificate
*
* If cert is NULL, then return false.
* If CertIssuerSize is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] from notBefore Pointer to date_time object.
* @param[in,out] from_size notBefore date_time object size.
* @param[out] to notAfter Pointer to date_time object.
* @param[in,out] to_size notAfter date_time object size.
*
* Note: Use libspdm_x509_compare_date_time to compare date_time objects and
* libspdm_x509_set_date_time to get a date_time object from a date_time_str.
*
* @retval true The certificate Validity retrieved successfully.
* @retval false Invalid certificate, or Validity retrieve failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
uint8_t *from, size_t *from_size, uint8_t *to,
size_t *to_size);
/**
* Format a date_time object into DataTime buffer
*
* If date_time_str is NULL, then return false.
* If date_time_size is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] date_time_str date_time string like YYYYMMDDhhmmssZ
* Ref: https://www.w3.org/TR/NOTE-datetime
* Z stand for UTC time
* @param[out] date_time Pointer to a date_time object.
* @param[in,out] date_time_size date_time object buffer size.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_set_date_time(const char *date_time_str, void *date_time,
size_t *date_time_size);
/**
* Compare date_time1 object and date_time2 object.
*
* If date_time1 is NULL, then return -2.
* If date_time2 is NULL, then return -2.
* If date_time1 == date_time2, then return 0
* If date_time1 > date_time2, then return 1
* If date_time1 < date_time2, then return -1
*
* @param[in] date_time1 Pointer to a date_time Object
* @param[in] date_time2 Pointer to a date_time Object
*
* @retval 0 If date_time1 == date_time2
* @retval 1 If date_time1 > date_time2
* @retval -1 If date_time1 < date_time2
**/
extern int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2);
/**
* Retrieve the key usage from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage (LIBSPDM_CRYPTO_X509_KU_*)
*
* @retval true The certificate key usage retrieved successfully.
* @retval false Invalid certificate, or usage is NULL
* @retval false This interface is not supported.
**/
extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, size_t *usage);
/**
* Retrieve the Extended key usage from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage bytes.
* @param[in, out] usage_size Key usage buffer size in bytes.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
size_t cert_size, uint8_t *usage,
size_t *usage_size);
/**
* Retrieve the basic constraints from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] basic_constraints Basic constraints bytes.
* @param[in, out] basic_constraints_size Basic constraints buffer size in bytes.
*
* @retval true
* @retval false
**/
extern bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
size_t cert_size,
uint8_t *basic_constraints,
size_t *basic_constraints_size);
/**
* Verify one X509 certificate was issued by the trusted CA.
*
* If cert is NULL, then return false.
* If ca_cert is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate to be verified.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[in] ca_cert Pointer to the DER-encoded trusted CA certificate.
* @param[in] ca_cert_size Size of the CA Certificate in bytes.
*
* @retval true The certificate was issued by the trusted CA.
* @retval false Invalid certificate or the certificate was not issued by the given
* trusted CA.
* @retval false This interface is not supported.
*
**/
extern bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
const uint8_t *ca_cert, size_t ca_cert_size);
/**
* Verify one X509 certificate was issued by the trusted CA.
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Certificate itself, and each
* subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] root_cert Trusted Root Certificate buffer.
*
* @param[in] root_cert_length Trusted Root Certificate buffer length.
*
* @retval true All certificates were issued by the first certificate in X509Certchain.
* @retval false Invalid certificate or the certificate was not issued by the given
* trusted CA.
**/
extern bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
const uint8_t *cert_chain,
size_t cert_chain_length);
/**
* Get one X509 certificate from cert_chain.
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Certificate itself, and each
* subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] cert_index Index of the certificate. An index of -1 indicates the
* last certificate in cert_chain.
*
* @param[out] cert The certificate at the index of cert_chain.
* @param[out] cert_length The length of the certificate at the index of cert_chain.
*
* @retval true Success.
* @retval false Failed to get certificate from certificate chain.
**/
extern bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
size_t cert_chain_length,
const int32_t cert_index, const uint8_t **cert,
size_t *cert_length);
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/**
* Retrieve the RSA public key from one DER-encoded X509 certificate.
*
* If cert is NULL, then return false.
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] rsa_context Pointer to new-generated RSA context which contain the retrieved
* RSA public key component. Use libspdm_rsa_free() function to free the
* resource.
*
* @retval true RSA public key was retrieved successfully.
* @retval false Fail to retrieve RSA public key from X509 certificate.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **rsa_context);
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
#if LIBSPDM_ECDSA_SUPPORT
/**
* Retrieve the EC public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ec_context Pointer to new-generated EC DSA context which contain the retrieved
* EC public key component. Use libspdm_ec_free() function to free the
* resource.
*
* If cert is NULL, then return false.
* If ec_context is NULL, then return false.
*
* @retval true EC public key was retrieved successfully.
* @retval false Fail to retrieve EC public key from X509 certificate.
*
**/
extern bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ec_context);
#endif /* LIBSPDM_ECDSA_SUPPORT */
#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
* Retrieve the Ed public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ecd_context Pointer to new-generated Ed DSA context which contain the retrieved
* Ed public key component. Use libspdm_ecd_free() function to free the
* resource.
*
* If cert is NULL, then return false.
* If ecd_context is NULL, then return false.
*
* @retval true Ed public key was retrieved successfully.
* @retval false Fail to retrieve Ed public key from X509 certificate.
*
**/
extern bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ecd_context);
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
#if LIBSPDM_SM2_DSA_SUPPORT
/**
* Retrieve the sm2 public key from one DER-encoded X509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] sm2_context Pointer to new-generated sm2 context which contain the retrieved
* sm2 public key component. Use sm2_free() function to free the
* resource.
*
* If cert is NULL, then return false.
* If sm2_context is NULL, then return false.
*
* @retval true sm2 public key was retrieved successfully.
* @retval false Fail to retrieve sm2 public key from X509 certificate.
*
**/
extern bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **sm2_context);
#endif /* LIBSPDM_SM2_DSA_SUPPORT */
#if LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
/**
* Generate a CSR.
*
* @param[in] hash_nid hash algo for sign
* @param[in] asym_nid asym algo for sign
*
* @param[in] requester_info requester info to gen CSR
* @param[in] requester_info_length The len of requester info
*
* @param[in] context Pointer to asymmetric context
* @param[in] subject_name Subject name: fields should be separated with ',',
* for example: "C=AA,CN=BB"
*
* Subject names should contain a comma-separated list of OID types and values:
* The valid OID type name is in:
* {"CN", "commonName", "C", "countryName", "O", "organizationName","L",
* "OU", "organizationalUnitName", "ST", "stateOrProvinceName", "emailAddress",
* "serialNumber", "postalAddress", "postalCode", "dnQualifier", "title",
* "SN","givenName","GN", "initials", "pseudonym", "generationQualifier", "domainComponent", "DC"}.
* Note: The object of C and countryName should be CSR Supported Country Codes
*
* @param[in] csr_len For input, csr_len is the size of the buffer to store the CSR.
* For output, csr_len is the length of the CSR in DER format.
* @param[in] csr_pointer For input, csr_pointer is the buffer address to store the CSR.
* For output, csr_pointer is the address of the stored CSR.
* The csr_pointer address will be changed.
*
* @retval true Success.
* @retval false Failed to gen CSR.
**/
extern bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
uint8_t *requester_info, size_t requester_info_length,
void *context, char *subject_name,
size_t *csr_len, uint8_t **csr_pointer);
#endif /* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP */
#endif /* CRYPTLIB_CERT_H */
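
For reference, a hedged sketch of walking a certificate chain with the helpers declared above: verify the DER chain against a trusted root, then extract the leaf certificate and its subject. The 256-byte subject buffer is an illustrative assumption.

#include <stdbool.h>
#include <stdint.h>

static bool check_cert_chain_sketch(const uint8_t *root_cert, size_t root_cert_len,
                                    const uint8_t *cert_chain, size_t cert_chain_len)
{
    const uint8_t *leaf_cert = NULL;
    size_t leaf_cert_len = 0;
    uint8_t subject[256];
    size_t subject_size = sizeof(subject);

    if (!libspdm_x509_verify_cert_chain(root_cert, root_cert_len,
                                        cert_chain, cert_chain_len))
        return false;

    // cert_index == -1 selects the last (leaf) certificate in the chain.
    if (!libspdm_x509_get_cert_from_cert_chain(cert_chain, cert_chain_len, -1,
                                               &leaf_cert, &leaf_cert_len))
        return false;

    return libspdm_x509_get_subject_name(leaf_cert, leaf_cert_len,
                                         subject, &subject_size);
}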

View File

@@ -0,0 +1,98 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_DH_H
#define CRYPTLIB_DH_H
/*=====================================================================================
* Diffie-Hellman Key Exchange Primitives
*=====================================================================================
*/
#if LIBSPDM_FFDHE_SUPPORT
/**
* Allocates and initializes one Diffie-Hellman context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Diffie-Hellman context that has been initialized.
* If the allocation fails, libspdm_dh_new_by_nid() returns NULL.
* If the interface is not supported, libspdm_dh_new_by_nid() returns NULL.
**/
extern void *libspdm_dh_new_by_nid(size_t nid);
/**
* Release the specified DH context.
*
* @param[in] dh_context Pointer to the DH context to be released.
**/
void libspdm_dh_free(void *dh_context);
/**
* Generates DH public key.
*
* This function generates random secret exponent, and computes the public key, which is
* returned via parameter public_key and public_key_size. DH context is updated accordingly.
* If the public_key buffer is too small to hold the public key, false is returned and
* public_key_size is set to the required buffer size to obtain the public key.
*
* If dh_context is NULL, then return false.
* If public_key_size is NULL, then return false.
* If public_key_size is large enough but public_key is NULL, then return false.
* If this interface is not supported, then return false.
*
* For FFDHE2048, the public_size is 256.
* For FFDHE3072, the public_size is 384.
* For FFDHE4096, the public_size is 512.
*
* @param[in, out] dh_context Pointer to the DH context.
* @param[out] public_key Pointer to the buffer to receive generated public key.
* @param[in, out] public_key_size On input, the size of public_key buffer in bytes.
* On output, the size of data returned in public_key buffer in
* bytes.
*
* @retval true DH public key generation succeeded.
* @retval false DH public key generation failed.
* @retval false public_key_size is not large enough.
* @retval false This interface is not supported.
**/
extern bool libspdm_dh_generate_key(void *dh_context, uint8_t *public_key, size_t *public_key_size);
/**
* Computes exchanged common key.
*
* Given peer's public key, this function computes the exchanged common key, based on its own
* context including value of prime modulus and random secret exponent.
*
* If dh_context is NULL, then return false.
* If peer_public_key is NULL, then return false.
* If key_size is NULL, then return false.
* If key is NULL, then return false.
* If key_size is not large enough, then return false.
* If this interface is not supported, then return false.
*
* For FFDHE2048, the peer_public_size and key_size are 256.
* For FFDHE3072, the peer_public_size and key_size are 384.
* For FFDHE4096, the peer_public_size and key_size are 512.
*
* @param[in, out] dh_context Pointer to the DH context.
* @param[in] peer_public_key Pointer to the peer's public key.
* @param[in] peer_public_key_size size of peer's public key in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in, out] key_size On input, the size of key buffer in bytes.
* On output, the size of data returned in key buffer in
* bytes.
*
* @retval true DH exchanged key generation succeeded.
* @retval false DH exchanged key generation failed.
* @retval false key_size is not large enough.
* @retval false This interface is not supported.
**/
extern bool libspdm_dh_compute_key(void *dh_context, const uint8_t *peer_public_key,
size_t peer_public_key_size, uint8_t *key,
size_t *key_size);
#endif /* LIBSPDM_FFDHE_SUPPORT */
#endif /* CRYPTLIB_DH_H */
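
For reference, a hedged sketch of one side of an FFDHE2048 exchange using the helpers declared above; the NID constant name is assumed from the wider cryptlib headers, and the 256-byte sizes follow the documentation.

#include <stdbool.h>
#include <stdint.h>

static bool ffdhe_side_sketch(const uint8_t *peer_public, size_t peer_public_size,
                              uint8_t shared_key[256], size_t *shared_key_size)
{
    uint8_t my_public[256];
    size_t my_public_size = sizeof(my_public);
    bool ok;

    void *dh = libspdm_dh_new_by_nid(LIBSPDM_CRYPTO_NID_FFDHE2048); // assumed NID name
    if (dh == NULL)
        return false;

    *shared_key_size = 256;
    ok = libspdm_dh_generate_key(dh, my_public, &my_public_size) &&
         libspdm_dh_compute_key(dh, peer_public, peer_public_size,
                                shared_key, shared_key_size);

    // In a real exchange, my_public would be sent to the peer out of band.
    libspdm_dh_free(dh);
    return ok;
}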

View File

@@ -0,0 +1,162 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_EC_H
#define CRYPTLIB_EC_H
/*=====================================================================================
* Elliptic Curve Primitives
*=====================================================================================*/
#if (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT)
/**
* Allocates and Initializes one Elliptic Curve context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Elliptic Curve context that has been initialized.
* If the allocation fails, libspdm_ec_new_by_nid() returns NULL.
**/
extern void *libspdm_ec_new_by_nid(size_t nid);
/**
* Release the specified EC context.
*
* @param[in] ec_context Pointer to the EC context to be released.
**/
extern void libspdm_ec_free(void *ec_context);
#endif /* (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) */
#if LIBSPDM_ECDHE_SUPPORT
/**
* Generates EC key and returns EC public key (X, Y).
*
* This function generates random secret, and computes the public key (X, Y), which is
* returned via parameter public, public_size.
* X is the first half of public with size being public_size / 2,
* Y is the second half of public with size being public_size / 2.
* EC context is updated accordingly.
* If the public buffer is too small to hold the public X, Y, false is returned and
* public_size is set to the required buffer size to obtain the public X, Y.
*
* For P-256, the public_size is 64. first 32-byte is X, second 32-byte is Y.
* For P-384, the public_size is 96. first 48-byte is X, second 48-byte is Y.
* For P-521, the public_size is 132. first 66-byte is X, second 66-byte is Y.
*
* If ec_context is NULL, then return false.
* If public_size is NULL, then return false.
* If public_size is large enough but public is NULL, then return false.
*
* @param[in, out] ec_context Pointer to the EC context.
* @param[out] public Pointer to the buffer to receive generated public X,Y.
* @param[in, out] public_size On input, the size of public buffer in bytes.
* On output, the size of data returned in public buffer in bytes.
*
* @retval true EC public X,Y generation succeeded.
* @retval false EC public X,Y generation failed.
* @retval false public_size is not large enough.
**/
extern bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_key, size_t *public_key_size);
/**
* Computes exchanged common key.
*
* Given peer's public key (X, Y), this function computes the exchanged common key,
* based on its own context including value of curve parameter and random secret.
* X is the first half of peer_public with size being peer_public_size / 2,
* Y is the second half of peer_public with size being peer_public_size / 2.
*
* If ec_context is NULL, then return false.
* If peer_public is NULL, then return false.
* If peer_public_size is 0, then return false.
* If key is NULL, then return false.
* If key_size is not large enough, then return false.
*
* For P-256, the peer_public_size is 64. first 32-byte is X, second 32-byte is Y.
* The key_size is 32.
* For P-384, the peer_public_size is 96. first 48-byte is X, second 48-byte is Y.
* The key_size is 48.
* For P-521, the peer_public_size is 132. first 66-byte is X, second 66-byte is Y.
* The key_size is 66.
*
* @param[in, out] ec_context Pointer to the EC context.
* @param[in] peer_public Pointer to the peer's public X,Y.
* @param[in] peer_public_size Size of peer's public X,Y in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in, out] key_size On input, the size of key buffer in bytes.
* On output, the size of data returned in key buffer in bytes.
*
* @retval true EC exchanged key generation succeeded.
* @retval false EC exchanged key generation failed.
* @retval false key_size is not large enough.
**/
extern bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
#endif /* LIBSPDM_ECDHE_SUPPORT */
#if LIBSPDM_ECDSA_SUPPORT
/**
* Carries out the EC-DSA signature.
*
* This function carries out the EC-DSA signature.
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If ec_context is NULL, then return false.
* If message_hash is NULL, then return false.
* hash_size must match the hash_nid. hash_nid could be SHA256, SHA384, SHA512, SHA3_256,
* SHA3_384, or SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* For P-256, the sig_size is 64. first 32-byte is R, second 32-byte is S.
* For P-384, the sig_size is 96. first 48-byte is R, second 48-byte is S.
* For P-521, the sig_size is 132. first 66-byte is R, second 66-byte is S.
*
* @param[in] ec_context Pointer to EC context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive EC-DSA signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in EC-DSA.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the EC-DSA signature.
*
* If ec_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* hash_size must match the hash_nid. hash_nid could be SHA256, SHA384, SHA512, SHA3_256,
* SHA3_384, or SHA3_512.
*
* For P-256, the sig_size is 64. first 32-byte is R, second 32-byte is S.
* For P-384, the sig_size is 96. first 48-byte is R, second 48-byte is S.
* For P-521, the sig_size is 132. first 66-byte is R, second 66-byte is S.
*
* @param[in] ec_context Pointer to EC context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to EC-DSA signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in EC-DSA.
* @retval false Invalid signature or invalid EC context.
**/
extern bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif /* LIBSPDM_ECDSA_SUPPORT */
#endif /* CRYPTLIB_EC_H */
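
For reference, a hedged sketch of ECDSA P-256 sign/verify over a precomputed SHA-256 digest using the helpers declared above; the NID constant names are assumptions from the wider cryptlib headers, and the 64-byte raw (R || S) signature size follows the documentation.

#include <stdbool.h>
#include <stdint.h>

static bool ecdsa_p256_roundtrip_sketch(const uint8_t digest[32])
{
    uint8_t signature[64];
    size_t sig_size = sizeof(signature);
    bool ok;

    void *ec = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1); // assumed NID name
    if (ec == NULL)
        return false;

    // A real signer would load or generate a private key first; key setup is
    // provided elsewhere in cryptlib and omitted from this sketch.
    ok = libspdm_ecdsa_sign(ec, LIBSPDM_CRYPTO_NID_SHA256, // assumed NID name
                            digest, 32, signature, &sig_size) &&
         libspdm_ecdsa_verify(ec, LIBSPDM_CRYPTO_NID_SHA256,
                              digest, 32, signature, sig_size);

    libspdm_ec_free(ec);
    return ok;
}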

View File

@@ -0,0 +1,100 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_ECD_H
#define CRYPTLIB_ECD_H
/*=====================================================================================
* Edwards-Curve Primitives
*=====================================================================================*/
#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
* Allocates and Initializes one Edwards-Curve context for subsequent use with the NID.
*
* @param nid cipher NID
*
* @return Pointer to the Edwards-Curve context that has been initialized.
* If the allocation fails, libspdm_ecd_new_by_nid() returns NULL.
**/
extern void *libspdm_ecd_new_by_nid(size_t nid);
/**
* Release the specified Ed context.
*
* @param[in] ecd_context Pointer to the Ed context to be released.
**/
extern void libspdm_ecd_free(void *ecd_context);
/**
* Carries out the Ed-DSA signature.
*
* This function carries out the Ed-DSA signature.
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If ecd_context is NULL, then return false.
* If message is NULL, then return false.
* hash_nid must be NULL.
* If sig_size is large enough but signature is NULL, then return false.
*
* For ed25519, context must be NULL and context_size must be 0.
* For ed448, context must be at most 255 octets.
*
* For ed25519, the sig_size is 64. first 32-byte is R, second 32-byte is S.
* For ed448, the sig_size is 114. first 57-byte is R, second 57-byte is S.
*
* @param[in] ecd_context Pointer to Ed context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] context The EDDSA signing context.
* @param[in] context_size Size of EDDSA signing context.
* @param[in] message Pointer to octet message to be signed (before hash).
* @param[in] size size of the message in bytes.
* @param[out] signature Pointer to buffer to receive Ed-DSA signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in Ed-DSA.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_eddsa_sign(const void *ecd_context, size_t hash_nid,
const uint8_t *context, size_t context_size,
const uint8_t *message, size_t size, uint8_t *signature,
size_t *sig_size);
/**
* Verifies the Ed-DSA signature.
*
* If ecd_context is NULL, then return false.
* If message is NULL, then return false.
* If signature is NULL, then return false.
* hash_nid must be NULL.
*
* For ed25519, context must be NULL and context_size must be 0.
* For ed448, context must be at most 255 octets.
*
* For ed25519, the sig_size is 64. first 32-byte is R, second 32-byte is S.
* For ed448, the sig_size is 114. first 57-byte is R, second 57-byte is S.
*
* @param[in] ecd_context Pointer to Ed context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] context The EDDSA signing context.
* @param[in] context_size Size of EDDSA signing context.
* @param[in] message Pointer to octet message to be checked (before hash).
* @param[in] size Size of the message in bytes.
* @param[in] signature Pointer to Ed-DSA signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in Ed-DSA.
* @retval false Invalid signature or invalid Ed context.
**/
extern bool libspdm_eddsa_verify(const void *ecd_context, size_t hash_nid,
const uint8_t *context, size_t context_size,
const uint8_t *message, size_t size,
const uint8_t *signature, size_t sig_size);
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
#endif /* CRYPTLIB_ECD_H */
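
For reference, a hedged sketch of Ed25519 sign/verify of a raw message with the helpers declared above (EdDSA hashes internally, so no pre-hashing is needed). Key setup for the Ed context is assumed to happen elsewhere; per the documentation, the signing context is NULL with size 0 for ed25519, and hash_nid is passed as 0 here since it is unused.

#include <stdbool.h>
#include <stdint.h>

static bool ed25519_roundtrip_sketch(const void *key_context,
                                     const uint8_t *message, size_t message_size)
{
    uint8_t signature[64];
    size_t sig_size = sizeof(signature);

    if (!libspdm_eddsa_sign(key_context, 0 /* hash_nid unused */, NULL, 0,
                            message, message_size, signature, &sig_size))
        return false;

    return libspdm_eddsa_verify(key_context, 0, NULL, 0,
                                message, message_size, signature, sig_size);
}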

View File

@@ -0,0 +1,772 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_HASH_H
#define CRYPTLIB_HASH_H
/* SHA-256 digest size in bytes. */
#define LIBSPDM_SHA256_DIGEST_SIZE 32
/* SHA-384 digest size in bytes. */
#define LIBSPDM_SHA384_DIGEST_SIZE 48
/* SHA-512 digest size in bytes. */
#define LIBSPDM_SHA512_DIGEST_SIZE 64
/* SHA3-256 digest size in bytes. */
#define LIBSPDM_SHA3_256_DIGEST_SIZE 32
/* SHA3-384 digest size in bytes. */
#define LIBSPDM_SHA3_384_DIGEST_SIZE 48
/* SHA3-512 digest size in bytes. */
#define LIBSPDM_SHA3_512_DIGEST_SIZE 64
/* SM3_256 digest size in bytes. */
#define LIBSPDM_SM3_256_DIGEST_SIZE 32
/*=====================================================================================
* One-way cryptographic hash SHA2 primitives.
*=====================================================================================
*/
#if LIBSPDM_SHA256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
 * If the allocation fails, libspdm_sha256_new() returns NULL.
**/
extern void *libspdm_sha256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha256_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha256_free(void *sha256_context);
/**
* Initializes user-supplied memory pointed to by sha256_context as SHA-256 hash context for
* subsequent use.
*
* If sha256_context is NULL, then return false.
*
* @param[out] sha256_context Pointer to SHA-256 context being initialized.
*
* @retval true SHA-256 context initialization succeeded.
* @retval false SHA-256 context initialization failed.
**/
extern bool libspdm_sha256_init(void *sha256_context);
/**
* Makes a copy of an existing SHA-256 context.
*
* If sha256_context is NULL, then return false.
* If new_sha256_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha256_context Pointer to SHA-256 context being copied.
* @param[out] new_sha256_context Pointer to new SHA-256 context.
*
* @retval true SHA-256 context copy succeeded.
* @retval false SHA-256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha256_duplicate(const void *sha256_context, void *new_sha256_context);
/**
* Digests the input data and updates SHA-256 context.
*
* This function performs SHA-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-256 context should be already correctly initialized by libspdm_sha256_init(), and must not
* have been finalized by libspdm_sha256_final(). Behavior with invalid context is undefined.
*
* If sha256_context is NULL, then return false.
*
* @param[in, out] sha256_context Pointer to the SHA-256 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-256 data digest succeeded.
* @retval false SHA-256 data digest failed.
**/
extern bool libspdm_sha256_update(void *sha256_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-256 digest value.
*
* This function completes SHA-256 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-256 context cannot
* be used again. SHA-256 context should be already correctly initialized by libspdm_sha256_init(),
* and must not have been finalized by libspdm_sha256_final(). Behavior with invalid SHA-256 context
* is undefined.
*
* If sha256_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha256_context Pointer to the SHA-256 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-256 digest
* value (32 bytes).
*
* @retval true SHA-256 digest computation succeeded.
* @retval false SHA-256 digest computation failed.
**/
extern bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value);
/**
* Computes the SHA-256 message digest of an input data buffer.
*
* This function performs the SHA-256 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-256 digest value (32 bytes).
*
* @retval true SHA-256 digest computation succeeded.
* @retval false SHA-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
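/*
 * Illustrative usage sketch (an editor's addition, not part of this header): the
 * streaming interface (new -> init -> update -> final -> free) and the one-shot
 * libspdm_sha256_hash_all() produce the same 32-byte digest; streaming is useful
 * when the data arrives in pieces. Error handling is collapsed to a single flag.
 */
static bool example_sha256_digest(const void *data, size_t data_size,
                                  uint8_t digest[LIBSPDM_SHA256_DIGEST_SIZE])
{
    void *ctx = libspdm_sha256_new();
    bool result;

    if (ctx == NULL) {
        return false;
    }
    result = libspdm_sha256_init(ctx) &&
             libspdm_sha256_update(ctx, data, data_size) &&
             libspdm_sha256_final(ctx, digest);
    libspdm_sha256_free(ctx);

    /* One-shot form: equivalent when the whole buffer is already in memory. */
    return result && libspdm_sha256_hash_all(data, data_size, digest);
}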
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-384 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
 * If the allocation fails, libspdm_sha384_new() returns NULL.
**/
extern void *libspdm_sha384_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha384_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha384_free(void *sha384_context);
/**
* Initializes user-supplied memory pointed to by sha384_context as SHA-384 hash context for
* subsequent use.
*
* If sha384_context is NULL, then return false.
*
* @param[out] sha384_context Pointer to SHA-384 context being initialized.
*
* @retval true SHA-384 context initialization succeeded.
* @retval false SHA-384 context initialization failed.
**/
extern bool libspdm_sha384_init(void *sha384_context);
/**
* Makes a copy of an existing SHA-384 context.
*
* If sha384_context is NULL, then return false.
* If new_sha384_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha384_context Pointer to SHA-384 context being copied.
* @param[out] new_sha384_context Pointer to new SHA-384 context.
*
* @retval true SHA-384 context copy succeeded.
* @retval false SHA-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha384_duplicate(const void *sha384_context, void *new_sha384_context);
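/*
 * Illustrative usage sketch (an editor's addition, not part of this header):
 * libspdm_sha384_duplicate() lets a caller snapshot a running hash -- for example a
 * message transcript -- and finalize only the copy, while the original context keeps
 * accumulating data. Allocating the copy with libspdm_sha384_new() is an assumption
 * of this sketch.
 */
static bool example_sha384_intermediate_digest(const void *running_ctx,
                                               uint8_t digest[LIBSPDM_SHA384_DIGEST_SIZE])
{
    void *snapshot = libspdm_sha384_new();
    bool result;

    if (snapshot == NULL) {
        return false;
    }
    /* Copy the live context, then finalize the copy only. */
    result = libspdm_sha384_duplicate(running_ctx, snapshot) &&
             libspdm_sha384_final(snapshot, digest);
    libspdm_sha384_free(snapshot);
    return result;
}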
/**
* Digests the input data and updates SHA-384 context.
*
* This function performs SHA-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-384 context should be already correctly initialized by libspdm_sha384_init(), and must not
* have been finalized by libspdm_sha384_final(). Behavior with invalid context is undefined.
*
* If sha384_context is NULL, then return false.
*
* @param[in, out] sha384_context Pointer to the SHA-384 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-384 data digest succeeded.
* @retval false SHA-384 data digest failed.
**/
extern bool libspdm_sha384_update(void *sha384_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-384 digest value.
*
* This function completes SHA-384 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-384 context cannot
* be used again. SHA-384 context should be already correctly initialized by libspdm_sha384_init(),
* and must not have been finalized by libspdm_sha384_final(). Behavior with invalid SHA-384 context
* is undefined.
*
* If sha384_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha384_context Pointer to the SHA-384 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest
* value (48 bytes).
*
* @retval true SHA-384 digest computation succeeded.
* @retval false SHA-384 digest computation failed.
**/
extern bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value);
/**
* Computes the SHA-384 message digest of an input data buffer.
*
* This function performs the SHA-384 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest value (48 bytes).
*
* @retval true SHA-384 digest computation succeeded.
* @retval false SHA-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA-512 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
 * If the allocation fails, libspdm_sha512_new() returns NULL.
**/
extern void *libspdm_sha512_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha512_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha512_free(void *sha512_context);
/**
 * Initializes user-supplied memory pointed to by sha512_context as SHA-512 hash context for
* subsequent use.
*
* If sha512_context is NULL, then return false.
*
* @param[out] sha512_context Pointer to SHA-512 context being initialized.
*
* @retval true SHA-512 context initialization succeeded.
* @retval false SHA-512 context initialization failed.
**/
extern bool libspdm_sha512_init(void *sha512_context);
/**
* Makes a copy of an existing SHA-512 context.
*
* If sha512_context is NULL, then return false.
* If new_sha512_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha512_context Pointer to SHA-512 context being copied.
* @param[out] new_sha512_context Pointer to new SHA-512 context.
*
* @retval true SHA-512 context copy succeeded.
* @retval false SHA-512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha512_duplicate(const void *sha512_context, void *new_sha512_context);
/**
* Digests the input data and updates SHA-512 context.
*
* This function performs SHA-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA-512 context should be already correctly initialized by libspdm_sha512_init(), and must not
* have been finalized by libspdm_sha512_final(). Behavior with invalid context is undefined.
*
* If sha512_context is NULL, then return false.
*
* @param[in, out] sha512_context Pointer to the SHA-512 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA-512 data digest succeeded.
* @retval false SHA-512 data digest failed.
**/
extern bool libspdm_sha512_update(void *sha512_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA-512 digest value.
*
* This function completes SHA-512 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA-512 context cannot
* be used again. SHA-512 context should be already correctly initialized by libspdm_sha512_init(),
* and must not have been finalized by libspdm_sha512_final(). Behavior with invalid SHA-512 context
* is undefined.
*
* If sha512_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha512_context Pointer to the SHA-512 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest
* value (64 bytes).
*
* @retval true SHA-512 digest computation succeeded.
* @retval false SHA-512 digest computation failed.
**/
extern bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value);
/**
* Computes the SHA-512 message digest of an input data buffer.
*
* This function performs the SHA-512 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest value (64 bytes).
*
* @retval true SHA-512 digest computation succeeded.
* @retval false SHA-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
/*=====================================================================================
* One-way cryptographic hash SHA3 primitives.
*=====================================================================================
*/
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
 * If the allocation fails, libspdm_sha3_256_new() returns NULL.
**/
extern void *libspdm_sha3_256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_256_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_256_free(void *sha3_256_context);
/**
 * Initializes user-supplied memory pointed to by sha3_256_context as SHA3-256 hash context for
* subsequent use.
*
* If sha3_256_context is NULL, then return false.
*
* @param[out] sha3_256_context Pointer to SHA3-256 context being initialized.
*
* @retval true SHA3-256 context initialization succeeded.
* @retval false SHA3-256 context initialization failed.
**/
extern bool libspdm_sha3_256_init(void *sha3_256_context);
/**
* Makes a copy of an existing SHA3-256 context.
*
* If sha3_256_context is NULL, then return false.
* If new_sha3_256_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_256_context Pointer to SHA3-256 context being copied.
* @param[out] new_sha3_256_context Pointer to new SHA3-256 context.
*
* @retval true SHA3-256 context copy succeeded.
* @retval false SHA3-256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_256_duplicate(const void *sha3_256_context, void *new_sha3_256_context);
/**
* Digests the input data and updates SHA3-256 context.
*
* This function performs SHA3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-256 context should be already correctly initialized by libspdm_sha3_256_init(), and must not
* have been finalized by libspdm_sha3_256_final(). Behavior with invalid context is undefined.
*
* If sha3_256_context is NULL, then return false.
*
* @param[in, out] sha3_256_context Pointer to the SHA3-256 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
 * @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-256 data digest succeeded.
* @retval false SHA3-256 data digest failed.
**/
extern bool libspdm_sha3_256_update(void *sha3_256_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-256 digest value.
*
* This function completes SHA3-256 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA3-256 context cannot
* be used again. SHA3-256 context should be already correctly initialized by
* libspdm_sha3_256_init(), and must not have been finalized by libspdm_sha3_256_final().
* Behavior with invalid SHA3-256 context is undefined.
*
* If sha3_256_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_256_context Pointer to the SHA3-256 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-256 digest
* value (32 bytes).
*
* @retval true SHA3-256 digest computation succeeded.
* @retval false SHA3-256 digest computation failed.
**/
extern bool libspdm_sha3_256_final(void *sha3_256_context, uint8_t *hash_value);
/**
* Computes the SHA3-256 message digest of an input data buffer.
*
* This function performs the SHA3-256 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-256 digest value (32 bytes).
*
* @retval true SHA3-256 digest computation succeeded.
* @retval false SHA3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-384 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
 * If the allocation fails, libspdm_sha3_384_new() returns NULL.
**/
extern void *libspdm_sha3_384_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_384_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_384_free(void *sha3_384_context);
/**
 * Initializes user-supplied memory pointed to by sha3_384_context as SHA3-384 hash context for
* subsequent use.
*
* If sha3_384_context is NULL, then return false.
*
* @param[out] sha3_384_context Pointer to SHA3-384 context being initialized.
*
* @retval true SHA3-384 context initialization succeeded.
* @retval false SHA3-384 context initialization failed.
**/
extern bool libspdm_sha3_384_init(void *sha3_384_context);
/**
* Makes a copy of an existing SHA3-384 context.
*
* If sha3_384_context is NULL, then return false.
* If new_sha3_384_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_384_context Pointer to SHA3-384 context being copied.
* @param[out] new_sha3_384_context Pointer to new SHA3-384 context.
*
* @retval true SHA3-384 context copy succeeded.
* @retval false SHA3-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_384_duplicate(const void *sha3_384_context, void *new_sha3_384_context);
/**
* Digests the input data and updates SHA3-384 context.
*
* This function performs SHA3-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-384 context should be already correctly initialized by libspdm_sha3_384_init(), and must not
* have been finalized by libspdm_sha3_384_final(). Behavior with invalid context is undefined.
*
* If sha3_384_context is NULL, then return false.
*
* @param[in, out] sha3_384_context Pointer to the SHA3-384 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-384 data digest succeeded.
* @retval false SHA3-384 data digest failed.
**/
extern bool libspdm_sha3_384_update(void *sha3_384_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-384 digest value.
*
* This function completes SHA3-384 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA3-384 context cannot
* be used again. SHA3-384 context should be already correctly initialized by
* libspdm_sha3_384_init(), and must not have been finalized by libspdm_sha3_384_final().
* Behavior with invalid SHA3-384 context is undefined.
*
* If sha3_384_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_384_context Pointer to the SHA3-384 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest
* value (48 bytes).
*
* @retval true SHA3-384 digest computation succeeded.
* @retval false SHA3-384 digest computation failed.
*
**/
extern bool libspdm_sha3_384_final(void *sha3_384_context, uint8_t *hash_value);
/**
* Computes the SHA3-384 message digest of an input data buffer.
*
* This function performs the SHA3-384 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest value (48 bytes).
*
* @retval true SHA3-384 digest computation succeeded.
* @retval false SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SHA3-512 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
 * If the allocation fails, libspdm_sha3_512_new() returns NULL.
**/
extern void *libspdm_sha3_512_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sha3_512_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sha3_512_free(void *sha3_512_context);
/**
 * Initializes user-supplied memory pointed to by sha3_512_context as SHA3-512 hash context for
* subsequent use.
*
* If sha3_512_context is NULL, then return false.
*
* @param[out] sha3_512_context Pointer to SHA3-512 context being initialized.
*
* @retval true SHA3-512 context initialization succeeded.
* @retval false SHA3-512 context initialization failed.
**/
extern bool libspdm_sha3_512_init(void *sha3_512_context);
/**
* Makes a copy of an existing SHA3-512 context.
*
* If sha3_512_context is NULL, then return false.
* If new_sha3_512_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sha3_512_context Pointer to SHA3-512 context being copied.
* @param[out] new_sha3_512_context Pointer to new SHA3-512 context.
*
* @retval true SHA3-512 context copy succeeded.
* @retval false SHA3-512 context copy failed.
* @retval false This interface is not supported.
*
**/
extern bool libspdm_sha3_512_duplicate(const void *sha3_512_context, void *new_sha3_512_context);
/**
* Digests the input data and updates SHA3-512 context.
*
* This function performs SHA3-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* SHA3-512 context should be already correctly initialized by libspdm_sha3_512_init(), and must not
* have been finalized by libspdm_sha3_512_final(). Behavior with invalid context is undefined.
*
* If sha3_512_context is NULL, then return false.
*
* @param[in, out] sha3_512_context Pointer to the SHA3-512 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SHA3-512 data digest succeeded.
* @retval false SHA3-512 data digest failed.
**/
extern bool libspdm_sha3_512_update(void *sha3_512_context, const void *data, size_t data_size);
/**
* Completes computation of the SHA3-512 digest value.
*
* This function completes SHA3-512 hash computation and populates the digest value into
* the specified memory. After this function has been called, the SHA3-512 context cannot
* be used again. SHA3-512 context should be already correctly initialized by
* libspdm_sha3_512_init(), and must not have been finalized by libspdm_sha3_512_final().
* Behavior with invalid SHA3-512 context is undefined.
*
* If sha3_512_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sha3_512_context Pointer to the SHA3-512 context.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest
* value (64 bytes).
*
* @retval true SHA3-512 digest computation succeeded.
* @retval false SHA3-512 digest computation failed.
**/
extern bool libspdm_sha3_512_final(void *sha3_512_context, uint8_t *hash_value);
/**
* Computes the SHA3-512 message digest of an input data buffer.
*
* This function performs the SHA3-512 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest value (64 bytes).
*
* @retval true SHA3-512 digest computation succeeded.
* @retval false SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sha3_512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
/*=====================================================================================
 * One-way cryptographic hash SM3 primitives.
*=====================================================================================
*/
#if LIBSPDM_SM3_256_SUPPORT
/**
* Allocates and initializes one HASH_CTX context for subsequent SM3-256 use.
*
* @return Pointer to the HASH_CTX context that has been initialized.
 * If the allocation fails, libspdm_sm3_256_new() returns NULL.
**/
extern void *libspdm_sm3_256_new(void);
/**
* Release the specified HASH_CTX context.
*
* @param[in] sm3_context Pointer to the HASH_CTX context to be released.
**/
extern void libspdm_sm3_256_free(void *sm3_context);
/**
 * Initializes user-supplied memory pointed to by sm3_context as SM3 hash context for
* subsequent use.
*
* If sm3_context is NULL, then return false.
*
* @param[out] sm3_context Pointer to SM3 context being initialized.
*
* @retval true SM3 context initialization succeeded.
* @retval false SM3 context initialization failed.
**/
extern bool libspdm_sm3_256_init(void *sm3_context);
/**
* Makes a copy of an existing SM3 context.
*
* If sm3_context is NULL, then return false.
* If new_sm3_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] sm3_context Pointer to SM3 context being copied.
* @param[out] new_sm3_context Pointer to new SM3 context.
*
* @retval true SM3 context copy succeeded.
* @retval false SM3 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sm3_256_duplicate(const void *sm3_context, void *new_sm3_context);
/**
* Digests the input data and updates SM3 context.
*
* This function performs SM3 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
 * SM3 context should be already correctly initialized by libspdm_sm3_256_init(), and must not
 * have been finalized by libspdm_sm3_256_final(). Behavior with invalid context is undefined.
*
* If sm3_context is NULL, then return false.
*
* @param[in, out] sm3_context Pointer to the SM3 context.
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true SM3 data digest succeeded.
* @retval false SM3 data digest failed.
**/
extern bool libspdm_sm3_256_update(void *sm3_context, const void *data, size_t data_size);
/**
* Completes computation of the SM3 digest value.
*
* This function completes SM3 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the SM3 context cannot
 * be used again. SM3 context should be already correctly initialized by libspdm_sm3_256_init(),
 * and must not have been finalized by libspdm_sm3_256_final(). Behavior with invalid SM3 context is undefined.
*
* If sm3_context is NULL, then return false.
* If hash_value is NULL, then return false.
*
* @param[in, out] sm3_context Pointer to the SM3 context.
* @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
*
* @retval true SM3 digest computation succeeded.
* @retval false SM3 digest computation failed.
**/
extern bool libspdm_sm3_256_final(void *sm3_context, uint8_t *hash_value);
/**
* Computes the SM3 message digest of an input data buffer.
*
* This function performs the SM3 message digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be hashed.
* @param[in] data_size Size of data buffer in bytes.
* @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
*
* @retval true SM3 digest computation succeeded.
* @retval false SM3 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_sm3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_HASH_H */

View File

@@ -0,0 +1,266 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_HKDF_H
#define CRYPTLIB_HKDF_H
/*=====================================================================================
* Key Derivation Function Primitives
*=====================================================================================*/
#if LIBSPDM_SHA256_SUPPORT
/**
 * HKDF-Extract using HMAC-SHA256: derives a pseudorandom key (PRK) from the input key and salt.
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive prk value.
* @param[in] prk_out_size Size of prk bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
 * HKDF-Expand using HMAC-SHA256: derives output keying material from the PRK and info.
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
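/*
 * Illustrative usage sketch (an editor's addition, not part of this header): the
 * usual RFC 5869 flow is extract (input keying material + salt -> PRK) followed by
 * expand (PRK + info -> output keying material). The 32-byte PRK size matches
 * LIBSPDM_SHA256_DIGEST_SIZE from cryptlib_hash.h, and the info label is purely
 * illustrative.
 */
static bool example_hkdf_sha256_derive(const uint8_t *ikm, size_t ikm_size,
                                       const uint8_t *salt, size_t salt_size,
                                       uint8_t *out, size_t out_size)
{
    uint8_t prk[32];
    static const uint8_t info[] = "example key expansion";

    return libspdm_hkdf_sha256_extract(ikm, ikm_size, salt, salt_size,
                                       prk, sizeof(prk)) &&
           libspdm_hkdf_sha256_expand(prk, sizeof(prk), info,
                                      sizeof(info) - 1, out, out_size);
}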
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
 * HKDF-Extract using HMAC-SHA384: derives a pseudorandom key (PRK) from the input key and salt.
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
 * HKDF-Expand using HMAC-SHA384: derives output keying material from the PRK and info.
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
 * HKDF-Extract using HMAC-SHA512: derives a pseudorandom key (PRK) from the input key and salt.
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
 * HKDF-Expand using HMAC-SHA512: derives output keying material from the PRK and info.
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA512_SUPPORT */
#if LIBSPDM_SHA3_256_SUPPORT
/**
 * HKDF-Extract using HMAC-SHA3-256: derives a pseudorandom key (PRK) from the input key and salt.
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
 * HKDF-Expand using HMAC-SHA3-256: derives output keying material from the PRK and info.
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
 * HKDF-Extract using HMAC-SHA3-384: derives a pseudorandom key (PRK) from the input key and salt.
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
 * HKDF-Expand using HMAC-SHA3-384: derives output keying material from the PRK and info.
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
 * HKDF-Extract using HMAC-SHA3-512: derives a pseudorandom key (PRK) from the input key and salt.
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
 * HKDF-Expand using HMAC-SHA3-512: derives output keying material from the PRK and info.
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sha3_512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
#if LIBSPDM_SM3_256_SUPPORT
/**
 * HKDF-Extract using HMAC-SM3-256: derives a pseudorandom key (PRK) from the input key and salt.
*
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[in] salt Pointer to the salt value.
* @param[in] salt_size Salt size in bytes.
* @param[out] prk_out Pointer to buffer to receive hkdf value.
* @param[in] prk_out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sm3_256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size);
/**
 * HKDF-Expand using HMAC-SM3-256: derives output keying material from the PRK and info.
*
* @param[in] prk Pointer to the user-supplied key.
* @param[in] prk_size Key size in bytes.
* @param[in] info Pointer to the application specific info.
* @param[in] info_size Info size in bytes.
* @param[out] out Pointer to buffer to receive hkdf value.
* @param[in] out_size Size of hkdf bytes to generate.
*
* @retval true Hkdf generated successfully.
* @retval false Hkdf generation failed.
**/
extern bool libspdm_hkdf_sm3_256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_HKDF_H */

View File

@@ -0,0 +1,833 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_MAC_H
#define CRYPTLIB_MAC_H
/*=====================================================================================
* Message Authentication Code (MAC) Primitives
*=====================================================================================
*/
#if LIBSPDM_SHA256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
 * If the allocation fails, libspdm_hmac_sha256_new() returns NULL.
**/
extern void *libspdm_hmac_sha256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha256_free(void *hmac_sha256_ctx);
/**
 * Sets the user-supplied key for subsequent use. This must be done before any
 * call to libspdm_hmac_sha256_update().
*
* If hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha256_ctx Pointer to HMAC-SHA256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
 * @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA256 context.
*
* If hmac_sha256_ctx is NULL, then return false.
* If new_hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha256_ctx Pointer to HMAC-SHA256 context being copied.
* @param[out] new_hmac_sha256_ctx Pointer to new HMAC-SHA256 context.
*
* @retval true HMAC-SHA256 context copy succeeded.
* @retval false HMAC-SHA256 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx, void *new_hmac_sha256_ctx);
/**
* Digests the input data and updates HMAC-SHA256 context.
*
* This function performs HMAC-SHA256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should not be
* finalized by libspdm_hmac_sha256_final(). Behavior with invalid context is undefined.
*
* If hmac_sha256_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA256 data digest succeeded.
* @retval false HMAC-SHA256 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA256 digest value.
*
* This function completes HMAC-SHA256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA256 context cannot
* be used again. HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should
* not be finalized by libspdm_hmac_sha256_final(). Behavior with invalid HMAC-SHA256 context is
* undefined.
*
* If hmac_sha256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA256 digest computation succeeded.
* @retval false HMAC-SHA256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value);
/**
 * Computes the HMAC-SHA256 digest of an input data buffer.
*
* This function performs the HMAC-SHA256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA256 digest computation succeeded.
* @retval false HMAC-SHA256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
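/*
 * Illustrative usage sketch (an editor's addition, not part of this header): the
 * same 32-byte HMAC-SHA256 value can be produced with the one-shot helper or with
 * the streaming interface (new -> set_key -> update -> final -> free); the streaming
 * form suits data that arrives in pieces.
 */
static bool example_hmac_sha256(const uint8_t *key, size_t key_size,
                                const void *data, size_t data_size,
                                uint8_t mac[32])
{
    void *ctx;
    bool result;

    /* One-shot form. */
    if (!libspdm_hmac_sha256_all(data, data_size, key, key_size, mac)) {
        return false;
    }

    /* Streaming form, producing the same value. */
    ctx = libspdm_hmac_sha256_new();
    if (ctx == NULL) {
        return false;
    }
    result = libspdm_hmac_sha256_set_key(ctx, key, key_size) &&
             libspdm_hmac_sha256_update(ctx, data, data_size) &&
             libspdm_hmac_sha256_final(ctx, mac);
    libspdm_hmac_sha256_free(ctx);
    return result;
}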
#endif /* LIBSPDM_SHA256_SUPPORT */
#if LIBSPDM_SHA384_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA384 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
 * If the allocation fails, libspdm_hmac_sha384_new() returns NULL.
**/
extern void *libspdm_hmac_sha384_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha384_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha384_free(void *hmac_sha384_ctx);
/**
 * Sets the user-supplied key for subsequent use. This must be done before any
 * call to libspdm_hmac_sha384_update().
*
* If hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha384_ctx Pointer to HMAC-SHA384 context.
* @param[in] key Pointer to the user-supplied key.
 * @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
 * @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA384 context.
*
* If hmac_sha384_ctx is NULL, then return false.
* If new_hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha384_ctx Pointer to HMAC-SHA384 context being copied.
* @param[out] new_hmac_sha384_ctx Pointer to new HMAC-SHA384 context.
*
* @retval true HMAC-SHA384 context copy succeeded.
* @retval false HMAC-SHA384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx, void *new_hmac_sha384_ctx);
/**
* Digests the input data and updates HMAC-SHA384 context.
*
* This function performs HMAC-SHA384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should not be
* finalized by libspdm_hmac_sha384_final(). Behavior with invalid context is undefined.
*
* If hmac_sha384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA384 data digest succeeded.
* @retval false HMAC-SHA384 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA384 digest value.
*
* This function completes HMAC-SHA384 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA384 context cannot
* be used again. HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should
* not be finalized by libspdm_hmac_sha384_final(). Behavior with invalid HMAC-SHA384 context is
* undefined.
*
* If hmac_sha384_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA384 digest computation succeeded.
* @retval false HMAC-SHA384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value);
/**
 * Computes the HMAC-SHA384 digest of an input data buffer.
*
* This function performs the HMAC-SHA384 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA384 digest computation succeeded.
* @retval false HMAC-SHA384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA384_SUPPORT */
#if LIBSPDM_SHA512_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA512 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
 * If the allocation fails, libspdm_hmac_sha512_new() returns NULL.
**/
extern void *libspdm_hmac_sha512_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha512_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha512_free(void *hmac_sha512_ctx);
/**
 * Sets the user-supplied key for subsequent use. This must be done before any
 * call to libspdm_hmac_sha512_update().
*
* If hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha512_ctx Pointer to HMAC-SHA512 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
 * @retval false The key could not be set.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA512 context.
*
* If hmac_sha512_ctx is NULL, then return false.
* If new_hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha512_ctx Pointer to HMAC-SHA512 context being copied.
* @param[out] new_hmac_sha512_ctx Pointer to new HMAC-SHA512 context.
*
* @retval true HMAC-SHA512 context copy succeeded.
* @retval false HMAC-SHA512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx, void *new_hmac_sha512_ctx);
/**
* Digests the input data and updates HMAC-SHA512 context.
*
* This function performs HMAC-SHA512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should not be
* finalized by libspdm_hmac_sha512_final(). Behavior with invalid context is undefined.
*
* If hmac_sha512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA512 data digest succeeded.
* @retval false HMAC-SHA512 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA512 digest value.
*
* This function completes HMAC-SHA512 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA512 context cannot
* be used again. HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should
* not be finalized by libspdm_hmac_sha512_final(). Behavior with invalid HMAC-SHA512 context is
* undefined.
*
* If hmac_sha512_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA512 digest computation succeeded.
* @retval false HMAC-SHA512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value);
/**
 * Computes the HMAC-SHA512 digest of an input data buffer.
*
* This function performs the HMAC-SHA512 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA512 digest computation succeeded.
* @retval false HMAC-SHA512 digest computation failed.
* @retval false This interface is not supported.
*
**/
extern bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
#if LIBSPDM_SHA3_256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
 * If the allocation fails, libspdm_hmac_sha3_256_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_256_free(void *hmac_sha3_256_ctx);
/**
 * Sets the user-supplied key for subsequent use. This must be done before any
 * call to libspdm_hmac_sha3_256_update().
*
* If hmac_sha3_256_ctx is NULL, then return false.
*
* @param[out] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
 * @retval false The key could not be set.
**/
extern bool libspdm_hmac_sha3_256_set_key(void *hmac_sha3_256_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-256 context.
*
* If hmac_sha3_256_ctx is NULL, then return false.
* If new_hmac_sha3_256_ctx is NULL, then return false.
*
* @param[in] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context being copied.
* @param[out] new_hmac_sha3_256_ctx Pointer to new HMAC-SHA3-256 context.
*
* @retval true HMAC-SHA3-256 context copy succeeded.
* @retval false HMAC-SHA3-256 context copy failed.
**/
extern bool libspdm_hmac_sha3_256_duplicate(const void *hmac_sha3_256_ctx,
void *new_hmac_sha3_256_ctx);
/**
* Digests the input data and updates HMAC-SHA3-256 context.
*
* This function performs HMAC-SHA3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and should not be
* finalized by libspdm_hmac_sha3_256_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_256_ctx is NULL, then return false.
*
* @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-256 data digest succeeded.
* @retval false HMAC-SHA3-256 data digest failed.
**/
extern bool libspdm_hmac_sha3_256_update(void *hmac_sha3_256_ctx,
const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA3-256 digest value.
*
* This function completes HMAC-SHA3-256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-256 context cannot
* be used again. HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and
* should not be finalized by libspdm_hmac_sha3_256_final(). Behavior with invalid HMAC-SHA3-256
* context is undefined.
*
* If hmac_sha3_256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
*
* @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA3-256 digest computation succeeded.
* @retval false HMAC-SHA3-256 digest computation failed.
**/
extern bool libspdm_hmac_sha3_256_final(void *hmac_sha3_256_ctx, uint8_t *hmac_value);
/**
 * Computes the HMAC-SHA3-256 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SHA3-256 digest computation succeeded.
* @retval false HMAC-SHA3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
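/**
 * Usage sketch (illustrative only): a minimal streaming HMAC-SHA3-256
 * computation with the context-based functions above, assuming the caller
 * supplies the key and message buffers.
 *
 *     uint8_t mac[32];
 *     void *ctx = libspdm_hmac_sha3_256_new();
 *     if (ctx != NULL) {
 *         if (libspdm_hmac_sha3_256_set_key(ctx, key, key_size) &&
 *             libspdm_hmac_sha3_256_update(ctx, msg, msg_size) &&
 *             libspdm_hmac_sha3_256_final(ctx, mac)) {
 *             ... mac now holds the 32-byte digest ...
 *         }
 *         libspdm_hmac_sha3_256_free(ctx);
 *     }
 *
 * The HMAC-SHA3-384/512 and HMAC-SM3-256 families below follow the same
 * pattern; only the digest size (48, 64, or 32 bytes) differs. For one-shot
 * computation, libspdm_hmac_sha3_256_all() combines these steps.
 **/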
#endif /* LIBSPDM_SHA3_256_SUPPORT */
#if LIBSPDM_SHA3_384_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-384 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
 * If the allocation fails, libspdm_hmac_sha3_384_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_384_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_384_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_384_free(void *hmac_sha3_384_ctx);
/**
 * Set the user-supplied key for subsequent use. It must be done before any
 * call to libspdm_hmac_sha3_384_update().
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_set_key(void *hmac_sha3_384_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-384 context.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If new_hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context being copied.
* @param[out] new_hmac_sha3_384_ctx Pointer to new HMAC-SHA3-384 context.
*
* @retval true HMAC-SHA3-384 context copy succeeded.
* @retval false HMAC-SHA3-384 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_duplicate(const void *hmac_sha3_384_ctx,
void *new_hmac_sha3_384_ctx);
/**
* Digests the input data and updates HMAC-SHA3-384 context.
*
* This function performs HMAC-SHA3-384 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and should not be
* finalized by libspdm_hmac_sha3_384_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-384 data digest succeeded.
* @retval false HMAC-SHA3-384 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_update(void *hmac_sha3_384_ctx, const void *data,
size_t data_size);
/**
* Completes computation of the HMAC-SHA3-384 digest value.
*
* This function completes HMAC-SHA3-384 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-384 context cannot
* be used again. HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and
* should not be finalized by libspdm_hmac_sha3_384_final(). Behavior with invalid HMAC-SHA3-384
* context is undefined.
*
* If hmac_sha3_384_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA3-384 digest computation succeeded.
* @retval false HMAC-SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_final(void *hmac_sha3_384_ctx, uint8_t *hmac_value);
/**
 * Computes the HMAC-SHA3-384 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-384 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest
* value (48 bytes).
*
* @retval true HMAC-SHA3-384 digest computation succeeded.
* @retval false HMAC-SHA3-384 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_384_SUPPORT */
#if LIBSPDM_SHA3_512_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-512 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
 * If the allocation fails, libspdm_hmac_sha3_512_new() returns NULL.
**/
extern void *libspdm_hmac_sha3_512_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sha3_512_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sha3_512_free(void *hmac_sha3_512_ctx);
/**
 * Set the user-supplied key for subsequent use. It must be done before any
 * call to libspdm_hmac_sha3_512_update().
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_set_key(void *hmac_sha3_512_ctx,
const uint8_t *key,
size_t key_size);
/**
* Makes a copy of an existing HMAC-SHA3-512 context.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If new_hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context being copied.
* @param[out] new_hmac_sha3_512_ctx Pointer to new HMAC-SHA3-512 context.
*
* @retval true HMAC-SHA3-512 context copy succeeded.
* @retval false HMAC-SHA3-512 context copy failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_duplicate(const void *hmac_sha3_512_ctx,
void *new_hmac_sha3_512_ctx);
/**
* Digests the input data and updates HMAC-SHA3-512 context.
*
* This function performs HMAC-SHA3-512 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and should not be
* finalized by libspdm_hmac_sha3_512_final(). Behavior with invalid context is undefined.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SHA3-512 data digest succeeded.
* @retval false HMAC-SHA3-512 data digest failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_update(void *hmac_sha3_512_ctx,
const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SHA3-512 digest value.
*
* This function completes HMAC-SHA3-512 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SHA3-512 context cannot
* be used again. HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and
* should not be finalized by libspdm_hmac_sha3_512_final(). Behavior with invalid HMAC-SHA3-512
* context is undefined.
*
* If hmac_sha3_512_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA3-512 digest computation succeeded.
* @retval false HMAC-SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_final(void *hmac_sha3_512_ctx, uint8_t *hmac_value);
/**
 * Computes the HMAC-SHA3-512 digest of an input data buffer.
*
* This function performs the HMAC-SHA3-512 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest
* value (64 bytes).
*
* @retval true HMAC-SHA3-512 digest computation succeeded.
* @retval false HMAC-SHA3-512 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sha3_512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_512_SUPPORT */
#if LIBSPDM_SM3_256_SUPPORT
/**
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SM3-256 use.
*
* @return Pointer to the HMAC_CTX context that has been initialized.
 * If the allocation fails, libspdm_hmac_sm3_256_new() returns NULL.
**/
extern void *libspdm_hmac_sm3_256_new(void);
/**
* Release the specified HMAC_CTX context.
*
* @param[in] hmac_sm3_256_ctx Pointer to the HMAC_CTX context to be released.
**/
extern void libspdm_hmac_sm3_256_free(void *hmac_sm3_256_ctx);
/**
 * Set the user-supplied key for subsequent use. It must be done before any
 * call to libspdm_hmac_sm3_256_update().
*
* If hmac_sm3_256_ctx is NULL, then return false.
*
* @param[out] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
*
* @retval true The key is set successfully.
* @retval false The key is set unsuccessfully.
**/
extern bool libspdm_hmac_sm3_256_set_key(void *hmac_sm3_256_ctx,
const uint8_t *key, size_t key_size);
/**
* Makes a copy of an existing HMAC-SM3-256 context.
*
* If hmac_sm3_256_ctx is NULL, then return false.
* If new_hmac_sm3_256_ctx is NULL, then return false.
*
* @param[in] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context being copied.
* @param[out] new_hmac_sm3_256_ctx Pointer to new HMAC-SM3-256 context.
*
* @retval true HMAC-SM3-256 context copy succeeded.
* @retval false HMAC-SM3-256 context copy failed.
**/
extern bool libspdm_hmac_sm3_256_duplicate(const void *hmac_sm3_256_ctx,
void *new_hmac_sm3_256_ctx);
/**
* Digests the input data and updates HMAC-SM3-256 context.
*
* This function performs HMAC-SM3-256 digest on a data buffer of the specified size.
* It can be called multiple times to compute the digest of long or discontinuous data streams.
* HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and should not be
* finalized by libspdm_hmac_sm3_256_final(). Behavior with invalid context is undefined.
*
* If hmac_sm3_256_ctx is NULL, then return false.
*
* @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context.
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
*
* @retval true HMAC-SM3-256 data digest succeeded.
* @retval false HMAC-SM3-256 data digest failed.
**/
extern bool libspdm_hmac_sm3_256_update(void *hmac_sm3_256_ctx, const void *data, size_t data_size);
/**
* Completes computation of the HMAC-SM3-256 digest value.
*
* This function completes HMAC-SM3-256 hash computation and retrieves the digest value into
* the specified memory. After this function has been called, the HMAC-SM3-256 context cannot
* be used again. HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and
* should not be finalized by libspdm_hmac_sm3_256_final(). Behavior with invalid HMAC-SM3-256
* context is undefined.
*
* If hmac_sm3_256_ctx is NULL, then return false.
* If hmac_value is NULL, then return false.
*
* @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SM3-256 digest computation succeeded.
* @retval false HMAC-SM3-256 digest computation failed.
**/
extern bool libspdm_hmac_sm3_256_final(void *hmac_sm3_256_ctx, uint8_t *hmac_value);
/**
 * Computes the HMAC-SM3-256 digest of an input data buffer.
*
* This function performs the HMAC-SM3-256 digest of a given data buffer, and places
* the digest value into the specified memory.
*
* If this interface is not supported, then return false.
*
* @param[in] data Pointer to the buffer containing the data to be digested.
* @param[in] data_size Size of data buffer in bytes.
* @param[in] key Pointer to the user-supplied key.
* @param[in] key_size Key size in bytes.
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest
* value (32 bytes).
*
* @retval true HMAC-SM3-256 digest computation succeeded.
* @retval false HMAC-SM3-256 digest computation failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_hmac_sm3_256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value);
#endif /* LIBSPDM_SM3_256_SUPPORT */
#endif /* CRYPTLIB_MAC_H */

View File

@@ -0,0 +1,30 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_RNG_H
#define CRYPTLIB_RNG_H
/*=====================================================================================
* Random Number Generation Primitive
*=====================================================================================*/
/**
* Generates a random byte stream of the specified size. If initialization, testing, or seeding of
* the (pseudo)random number generator is required it should be done before this function is called.
*
* If output is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[out] output Pointer to buffer to receive random value.
* @param[in] size Size of random bytes to generate.
*
* @retval true Random byte stream generated successfully.
* @retval false Generation of random byte stream failed.
* @retval false This interface is not supported.
**/
extern bool libspdm_random_bytes(uint8_t *output, size_t size);
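/**
 * Usage sketch (illustrative only): filling a caller-provided buffer with
 * random bytes and checking for failure or missing support.
 *
 *     uint8_t nonce[32];
 *     if (!libspdm_random_bytes(nonce, sizeof(nonce))) {
 *         ... handle RNG failure ...
 *     }
 **/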
#endif /* CRYPTLIB_RNG_H */

View File

@@ -0,0 +1,264 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_RSA_H
#define CRYPTLIB_RSA_H
/*=====================================================================================
* RSA Cryptography Primitives
*=====================================================================================
*/
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/* RSA key Tags Definition used in libspdm_rsa_set_key() function for key component
* identification.
*/
typedef enum {
LIBSPDM_RSA_KEY_N, /*< RSA public Modulus (N)*/
LIBSPDM_RSA_KEY_E, /*< RSA public exponent (e)*/
LIBSPDM_RSA_KEY_D, /*< RSA Private exponent (d)*/
LIBSPDM_RSA_KEY_P, /*< RSA secret prime factor of Modulus (p)*/
    LIBSPDM_RSA_KEY_Q, /*< RSA secret prime factor of Modulus (q)*/
LIBSPDM_RSA_KEY_DP, /*< p's CRT exponent (== d mod (p - 1))*/
LIBSPDM_RSA_KEY_DQ, /*< q's CRT exponent (== d mod (q - 1))*/
LIBSPDM_RSA_KEY_Q_INV /*< The CRT coefficient (== 1/q mod p)*/
} libspdm_rsa_key_tag_t;
/**
* Allocates and initializes one RSA context for subsequent use.
*
* @return Pointer to the RSA context that has been initialized.
 * If the allocation fails, libspdm_rsa_new() returns NULL.
**/
extern void *libspdm_rsa_new(void);
/**
* Release the specified RSA context.
*
* If rsa_context is NULL, then return false.
*
* @param[in] rsa_context Pointer to the RSA context to be released.
**/
extern void libspdm_rsa_free(void *rsa_context);
/**
* Sets the tag-designated key component into the established RSA context.
*
* This function sets the tag-designated RSA key component into the established
* RSA context from the user-specified non-negative integer (octet string format
* represented in RSA PKCS#1).
* If big_number is NULL, then the specified key component in RSA context is cleared.
* If rsa_context is NULL, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] key_tag tag of RSA key component being set.
* @param[in] big_number Pointer to octet integer buffer.
* If NULL, then the specified key component in RSA
* context is cleared.
* @param[in] bn_size Size of big number buffer in bytes.
* If big_number is NULL, then it is ignored.
*
* @retval true RSA key component was set successfully.
* @retval false Invalid RSA key component tag.
**/
extern bool libspdm_rsa_set_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
const uint8_t *big_number, size_t bn_size);
/**
* Gets the tag-designated RSA key component from the established RSA context.
*
* This function retrieves the tag-designated RSA key component from the
* established RSA context as a non-negative integer (octet string format
* represented in RSA PKCS#1).
 * If the specified key component has not been set or has been cleared, then the returned
* bn_size is set to 0.
* If the big_number buffer is too small to hold the contents of the key, false
* is returned and bn_size is set to the required buffer size to obtain the key.
*
* If rsa_context is NULL, then return false.
* If bn_size is NULL, then return false.
* If bn_size is large enough but big_number is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] key_tag Tag of RSA key component being set.
* @param[out] big_number Pointer to octet integer buffer.
* @param[in, out] bn_size On input, the size of big number buffer in bytes.
* On output, the size of data returned in big number buffer in bytes.
*
* @retval true RSA key component was retrieved successfully.
* @retval false Invalid RSA key component tag.
* @retval false bn_size is too small.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_get_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
uint8_t *big_number, size_t *bn_size);
/**
* Generates RSA key components.
*
* This function generates RSA key components. It takes RSA public exponent E and
* length in bits of RSA modulus N as input, and generates all key components.
* If public_exponent is NULL, the default RSA public exponent (0x10001) will be used.
*
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] modulus_length Length of RSA modulus N in bits.
* @param[in] public_exponent Pointer to RSA public exponent.
* @param[in] public_exponent_size Size of RSA public exponent buffer in bytes.
*
* @retval true RSA key component was generated successfully.
* @retval false Invalid RSA key component tag.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_generate_key(void *rsa_context, size_t modulus_length,
const uint8_t *public_exponent,
size_t public_exponent_size);
/**
* Validates key components of RSA context.
* NOTE: This function performs integrity checks on all the RSA key material, so
* the RSA key structure must contain all the private key data.
*
* This function validates key components of RSA context in following aspects:
* - Whether p is a prime
* - Whether q is a prime
* - Whether n = p * q
* - Whether d*e = 1 mod lcm(p-1,q-1)
*
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] rsa_context Pointer to RSA context to check.
*
* @retval true RSA key components are valid.
* @retval false RSA key components are not valid.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_check_key(void *rsa_context);
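/**
 * Usage sketch (illustrative only): generating a 2048-bit key pair with the
 * default public exponent and validating it before use.
 *
 *     void *rsa = libspdm_rsa_new();
 *     if (rsa != NULL) {
 *         bool ok = libspdm_rsa_generate_key(rsa, 2048, NULL, 0) &&
 *                   libspdm_rsa_check_key(rsa);
 *         ... use the context for signing if ok ...
 *         libspdm_rsa_free(rsa);
 *     }
 **/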
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
#if LIBSPDM_RSA_SSA_SUPPORT
/**
* Carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme.
*
* This function carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme
* defined in RSA PKCS#1. If the signature buffer is too small to hold the contents of signature,
* false is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, or SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA PKCS1-v1_5 signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in PKCS1-v1_5.
* @retval false signature generation failed.
* @retval false sig_size is too small.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_pkcs1_sign_with_nid(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash,
size_t hash_size, uint8_t *signature,
size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PKCS1-v1_5 encoding scheme defined in RSA PKCS#1.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, or SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to RSA PKCS1-v1_5 signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in PKCS1-v1_5.
* @retval false Invalid signature or invalid RSA context.
**/
extern bool libspdm_rsa_pkcs1_verify_with_nid(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash,
size_t hash_size, const uint8_t *signature,
size_t sig_size);
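/**
 * Usage sketch (illustrative only): signing a 32-byte SHA-256 message hash
 * and verifying the result with the same context. The context is assumed to
 * already hold a private key, e.g. from libspdm_rsa_generate_key() or
 * libspdm_rsa_set_key().
 *
 *     uint8_t sig[512];
 *     size_t sig_size = sizeof(sig);
 *     bool ok = libspdm_rsa_pkcs1_sign_with_nid(rsa, LIBSPDM_CRYPTO_NID_SHA256,
 *                                               hash, 32, sig, &sig_size) &&
 *               libspdm_rsa_pkcs1_verify_with_nid(rsa, LIBSPDM_CRYPTO_NID_SHA256,
 *                                                 hash, 32, sig, sig_size);
 *
 * The RSA-PSS pair below (libspdm_rsa_pss_sign()/libspdm_rsa_pss_verify())
 * follows the same calling pattern.
 **/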
#endif /* LIBSPDM_RSA_SSA_SUPPORT */
#if LIBSPDM_RSA_PSS_SUPPORT
/**
* Carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme.
*
* This function carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme defined
* in RSA PKCS#1 v2.2.
*
 * The salt length is the same as the digest length.
*
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, or SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in RSA-SSA PSS.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2.
*
 * The salt length is the same as the digest length.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid may be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, or SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size Size of the message hash in bytes.
* @param[in] signature Pointer to RSA-SSA PSS signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in RSA-SSA PSS.
* @retval false Invalid signature or invalid RSA context.
**/
extern bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif /* LIBSPDM_RSA_PSS_SUPPORT */
#endif /* CRYPTLIB_RSA_H */

View File

@@ -0,0 +1,194 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_SM2_H
#define CRYPTLIB_SM2_H
/*=====================================================================================
* Shang-Mi2 Primitives
*=====================================================================================*/
#if LIBSPDM_SM2_DSA_SUPPORT
/**
 * Allocates and initializes one Shang-Mi2 context for subsequent use.
*
* @param nid cipher NID
*
* @return Pointer to the Shang-Mi2 context that has been initialized.
 * If the allocation fails, libspdm_sm2_dsa_new_by_nid() returns NULL.
**/
extern void *libspdm_sm2_dsa_new_by_nid(size_t nid);
/**
* Release the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be released.
**/
extern void libspdm_sm2_dsa_free(void *sm2_context);
/**
* Carries out the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
*
* This function carries out the SM2 signature.
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If sm2_context is NULL, then return false.
* If message is NULL, then return false.
* hash_nid must be SM3_256.
* If sig_size is large enough but signature is NULL, then return false.
*
* The id_a_size must be smaller than 2^16-1.
 * The sig_size is 64 bytes: the first 32 bytes are R, the second 32 bytes are S.
*
* @param[in] sm2_context Pointer to sm2 context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] id_a The ID-A of the signing context.
* @param[in] id_a_size Size of ID-A signing context.
* @param[in] message Pointer to octet message to be signed (before hash).
* @param[in] size Size of the message in bytes.
* @param[out] signature Pointer to buffer to receive SM2 signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in SM2.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_sm2_dsa_sign(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *message, size_t size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
*
* If sm2_context is NULL, then return false.
* If message is NULL, then return false.
* If signature is NULL, then return false.
* hash_nid must be SM3_256.
*
* The id_a_size must be smaller than 2^16-1.
 * The sig_size is 64 bytes: the first 32 bytes are R, the second 32 bytes are S.
*
* @param[in] sm2_context Pointer to SM2 context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] id_a The ID-A of the signing context.
* @param[in] id_a_size Size of ID-A signing context.
* @param[in] message Pointer to octet message to be checked (before hash).
* @param[in] size Size of the message in bytes.
* @param[in] signature Pointer to SM2 signature to be verified.
* @param[in] sig_size Size of signature in bytes.
*
* @retval true Valid signature encoded in SM2.
* @retval false Invalid signature or invalid sm2 context.
*
**/
extern bool libspdm_sm2_dsa_verify(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *message, size_t size,
const uint8_t *signature, size_t sig_size);
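/**
 * Usage sketch (illustrative only): signing a message and verifying the
 * resulting 64-byte R||S signature with the same context. The context is
 * assumed to already hold the required key material, and id_a/id_a_size are
 * the caller's signing identity.
 *
 *     uint8_t sig[64];
 *     size_t sig_size = sizeof(sig);
 *     bool ok = libspdm_sm2_dsa_sign(sm2_ctx, LIBSPDM_CRYPTO_NID_SM3_256,
 *                                    id_a, id_a_size, msg, msg_size,
 *                                    sig, &sig_size) &&
 *               libspdm_sm2_dsa_verify(sm2_ctx, LIBSPDM_CRYPTO_NID_SM3_256,
 *                                      id_a, id_a_size, msg, msg_size,
 *                                      sig, sig_size);
 **/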
#endif /* LIBSPDM_SM2_DSA_SUPPORT */
#if LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
/**
 * Allocates and initializes one Shang-Mi2 context for subsequent use.
*
* @param nid cipher NID
*
* @return Pointer to the Shang-Mi2 context that has been initialized.
 * If the allocation fails, libspdm_sm2_key_exchange_new_by_nid() returns NULL.
**/
extern void *libspdm_sm2_key_exchange_new_by_nid(size_t nid);
/**
* Release the specified sm2 context.
*
* @param[in] sm2_context Pointer to the sm2 context to be released.
*
**/
extern void libspdm_sm2_key_exchange_free(void *sm2_context);
/**
* Initialize the specified sm2 context.
*
 * @param[in] sm2_context Pointer to the sm2 context to be initialized.
* @param[in] hash_nid hash NID, only SM3 is valid.
* @param[in] id_a The ID-A of the key exchange context.
* @param[in] id_a_size Size of ID-A key exchange context.
* @param[in] id_b The ID-B of the key exchange context.
* @param[in] id_b_size Size of ID-B key exchange context.
* @param[in] is_initiator If the caller is initiator.
*
* @retval true sm2 context is initialized.
* @retval false sm2 context is not initialized.
**/
extern bool libspdm_sm2_key_exchange_init(const void *sm2_context, size_t hash_nid,
const uint8_t *id_a, size_t id_a_size,
const uint8_t *id_b, size_t id_b_size,
bool is_initiator);
/**
* Generates sm2 key and returns sm2 public key (X, Y), based upon GB/T 32918.3-2016: SM2 - Part3.
*
 * This function generates a random secret and computes the public key (X, Y), which is
 * returned via the parameters public_data and public_size.
* X is the first half of public with size being public_size / 2,
* Y is the second half of public with size being public_size / 2.
* sm2 context is updated accordingly.
* If the public buffer is too small to hold the public X, Y, false is returned and
* public_size is set to the required buffer size to obtain the public X, Y.
*
 * The public_size is 64 bytes: the first 32 bytes are X, the second 32 bytes are Y.
*
* If sm2_context is NULL, then return false.
* If public_size is NULL, then return false.
* If public_size is large enough but public is NULL, then return false.
*
* @param[in, out] sm2_context Pointer to the sm2 context.
* @param[out] public_data Pointer to the buffer to receive generated public X,Y.
* @param[in, out] public_size On input, the size of public buffer in bytes.
* On output, the size of data returned in public buffer in bytes.
*
* @retval true sm2 public X,Y generation succeeded.
* @retval false sm2 public X,Y generation failed.
* @retval false public_size is not large enough.
**/
extern bool libspdm_sm2_key_exchange_generate_key(void *sm2_context, uint8_t *public_data,
size_t *public_size);
/**
* Computes exchanged common key, based upon GB/T 32918.3-2016: SM2 - Part3.
*
* Given peer's public key (X, Y), this function computes the exchanged common key,
* based on its own context including value of curve parameter and random secret.
* X is the first half of peer_public with size being peer_public_size / 2,
* Y is the second half of peer_public with size being peer_public_size / 2.
*
* If sm2_context is NULL, then return false.
* If peer_public is NULL, then return false.
* If peer_public_size is 0, then return false.
* If key is NULL, then return false.
*
* The id_a_size and id_b_size must be smaller than 2^16-1.
 * The peer_public_size is 64 bytes: the first 32 bytes are X, the second 32 bytes are Y.
* The key_size must be smaller than 2^32-1, limited by KDF function.
*
* @param[in, out] sm2_context Pointer to the sm2 context.
* @param[in] peer_public Pointer to the peer's public X,Y.
* @param[in] peer_public_size Size of peer's public X,Y in bytes.
* @param[out] key Pointer to the buffer to receive generated key.
* @param[in] key_size On input, the size of key buffer in bytes.
*
* @retval true sm2 exchanged key generation succeeded.
* @retval false sm2 exchanged key generation failed.
**/
extern bool libspdm_sm2_key_exchange_compute_key(void *sm2_context,
const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
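/**
 * Usage sketch (illustrative only): one side of the key exchange using the
 * three functions above. The peer's 64-byte public X,Y and the identities
 * id_a/id_b are assumed to be exchanged by the caller; the 16-byte key size
 * is an arbitrary example.
 *
 *     uint8_t my_public[64];
 *     size_t my_public_size = sizeof(my_public);
 *     uint8_t key[16];
 *     size_t key_size = sizeof(key);
 *     void *ctx = libspdm_sm2_key_exchange_new_by_nid(
 *         LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256);
 *     if (ctx != NULL) {
 *         bool ok = libspdm_sm2_key_exchange_init(ctx, LIBSPDM_CRYPTO_NID_SM3_256,
 *                                                 id_a, id_a_size,
 *                                                 id_b, id_b_size, true) &&
 *                   libspdm_sm2_key_exchange_generate_key(ctx, my_public,
 *                                                         &my_public_size) &&
 *                   libspdm_sm2_key_exchange_compute_key(ctx, peer_public, 64,
 *                                                        key, &key_size);
 *         libspdm_sm2_key_exchange_free(ctx);
 *     }
 **/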
#endif /* LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT */
#endif /* CRYPTLIB_SM2_H */

View File

@@ -0,0 +1,71 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef LIBSPDM_LIB_CONFIG_H
#define LIBSPDM_LIB_CONFIG_H
#ifndef LIBSPDM_CONFIG
#include "library/spdm_lib_config.h"
#else
#include LIBSPDM_CONFIG
#endif
#if defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) && \
!defined(LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use " \
"LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a " \
"future release.")
#else
#warning LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use \
LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a \
future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) */
#if defined(LIBSPDM_ENABLE_CHUNK_CAP) && !defined(LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP " \
"instead. This warning will be removed in a future release.")
#else
#warning LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP \
instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_CHUNK_CAP) */
#if defined(MDEPKG_NDEBUG) && !defined(LIBSPDM_DEBUG_ENABLE)
#ifdef _MSC_VER
#pragma message("MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE " \
"instead. This warning will be removed in a future release.")
#else
#warning MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE \
instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(MDEPKG_NDEBUG) */
#if defined(LIBSPDM_DEBUG_ENABLE)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE
#define LIBSPDM_DEBUG_ASSERT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_PRINT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_BLOCK_ENABLE (LIBSPDM_DEBUG_ENABLE)
#elif defined(MDEPKG_NDEBUG)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE
#define LIBSPDM_DEBUG_ASSERT_ENABLE 0
#define LIBSPDM_DEBUG_PRINT_ENABLE 0
#define LIBSPDM_DEBUG_BLOCK_ENABLE 0
#endif /* defined(LIBSPDM_DEBUG_ENABLE) */
#if LIBSPDM_CHECK_MACRO
#include "internal/libspdm_macro_check.h"
#endif /* LIBSPDM_CHECK_MACRO */
#endif /* LIBSPDM_LIB_CONFIG_H */

View File

@@ -0,0 +1,154 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __INTERNAL_CRYPT_LIB_H__
#define __INTERNAL_CRYPT_LIB_H__
/*
 * This code uses the Linux Kernel Crypto API (LKCA) extensively. The web page
 * written by Stephan Mueller and Marek Vasut is a good starting reference on
 * how the Linux kernel provides its crypto API.
*/
#include "conftest.h"
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/random.h>
#include <linux/string.h>
// Check if ECDH/ECDSA are there, on some platforms they might not be...
#ifndef AUTOCONF_INCLUDED
#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#endif
#if \
(defined(CONFIG_CRYPTO_AEAD) || defined(CONFIG_CRYPTO_AEAD_MODULE)) && \
(defined(CONFIG_CRYPTO_AKCIPHER) || defined(CONFIG_CRYPTO_AKCIPHER_MODULE)) && \
(defined(CONFIG_CRYPTO_SKCIPHER) || defined(CONFIG_CRYPTO_SKCIPHER_MODULE)) && \
(defined(CONFIG_CRYPTO_HASH) || defined(CONFIG_CRYPTO_HASH_MODULE)) && \
(defined(CONFIG_CRYPTO_HMAC) || defined(CONFIG_CRYPTO_HMAC_MODULE)) && \
(defined(CONFIG_CRYPTO_ECDH) || defined(CONFIG_CRYPTO_ECDH_MODULE)) && \
(defined(CONFIG_CRYPTO_ECDSA) || defined(CONFIG_CRYPTO_ECDSA_MODULE)) && \
(defined(CONFIG_X509_CERTIFICATE_PARSER) || defined(CONFIG_X509_CERTIFICATE_PARSER_MODULE))
#define NV_CONFIG_CRYPTO_PRESENT 1
#endif
/*
 * It is possible that we don't have access to all the functions we need. This
 * could be because we are running a non-GPL kernel, because the kernel is too
 * old, or simply because the user disabled them. If we can use LKCA, include
 * its headers; otherwise define stubs that return errors.
*/
#if defined(NV_CRYPTO_PRESENT) && defined (NV_CONFIG_CRYPTO_PRESENT)
#define USE_LKCA 1
#endif
#ifdef USE_LKCA
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sm3.h>
// HASH_MAX_DIGESTSIZE is available since 4.20.
// This value is accurate as of 6.1
#ifndef HASH_MAX_DIGESTSIZE
#define HASH_MAX_DIGESTSIZE 64
#endif
#else
// Just stub everything out
struct shash_desc;
struct crypto_shash;
#define crypto_shash_setkey(...) -ENOMEM
#define crypto_shash_init(...) -ENOMEM
#define crypto_shash_update(...) -ENOMEM
#define crypto_shash_final(...) -ENOMEM
#endif
#define CHAR_BIT 8U
#undef SIZE_MAX
#define SIZE_MAX 8
#include "library/cryptlib.h"
#define LIBSPDM_ASSERT(...)
struct lkca_aead_ctx;
int lkca_aead_alloc(struct lkca_aead_ctx **ctx, char const *alg);
void lkca_aead_free(struct lkca_aead_ctx *ctx);
int lkca_aead_ex(struct lkca_aead_ctx *ctx,
const uint8_t *key, size_t key_size,
uint8_t *iv, size_t iv_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc);
int libspdm_aead(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc, char const *alg);
void *lkca_hash_new(const char* alg_name);
void lkca_hash_free(struct shash_desc *ctx);
bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hash_all(const char* alg_name, const void *data,
size_t data_size, uint8_t *hash_value);
bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hmac_set_key(struct shash_desc *ctx, const uint8_t *key, size_t key_size);
bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
const uint8_t *data, size_t data_size, uint8_t *hash_value);
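/*
 * Usage sketch (illustrative only): one-shot hashing and HMAC through the
 * helpers above, using Linux Kernel Crypto API algorithm names. Availability
 * depends on USE_LKCA and the kernel configuration.
 *
 *     uint8_t digest[48], mac[48];
 *     bool ok = lkca_hash_all("sha384", data, data_size, digest) &&
 *               lkca_hmac_all("hmac(sha384)", key, key_size,
 *                             data, data_size, mac);
 */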
bool lkca_hkdf_extract_and_expand(const char *alg_name,
const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
bool lkca_hkdf_expand(const char *alg_name,
const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size);
bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size);
bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size);
bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
size_t *public_key_size);
bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size);
bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size);
bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif

View File

@@ -0,0 +1,109 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
/** @file
* Defines base cryptographic library APIs.
 * The Base Cryptographic Library provides implementations of basic cryptographic
 * primitives (hash families, HMAC, AES, RSA, Diffie-Hellman, Elliptic Curve, etc.)
 * to enable security functionality.
**/
#ifndef CRYPTLIB_H
#define CRYPTLIB_H
#include "internal/libspdm_lib_config.h"
#define LIBSPDM_CRYPTO_NID_NULL 0x0000
/* Hash */
#define LIBSPDM_CRYPTO_NID_SHA256 0x0001
#define LIBSPDM_CRYPTO_NID_SHA384 0x0002
#define LIBSPDM_CRYPTO_NID_SHA512 0x0003
#define LIBSPDM_CRYPTO_NID_SHA3_256 0x0004
#define LIBSPDM_CRYPTO_NID_SHA3_384 0x0005
#define LIBSPDM_CRYPTO_NID_SHA3_512 0x0006
#define LIBSPDM_CRYPTO_NID_SM3_256 0x0007
/* Signing */
#define LIBSPDM_CRYPTO_NID_RSASSA2048 0x0101
#define LIBSPDM_CRYPTO_NID_RSASSA3072 0x0102
#define LIBSPDM_CRYPTO_NID_RSASSA4096 0x0103
#define LIBSPDM_CRYPTO_NID_RSAPSS2048 0x0104
#define LIBSPDM_CRYPTO_NID_RSAPSS3072 0x0105
#define LIBSPDM_CRYPTO_NID_RSAPSS4096 0x0106
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P256 0x0107
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P384 0x0108
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P521 0x0109
#define LIBSPDM_CRYPTO_NID_SM2_DSA_P256 0x010A
#define LIBSPDM_CRYPTO_NID_EDDSA_ED25519 0x010B
#define LIBSPDM_CRYPTO_NID_EDDSA_ED448 0x010C
/* Key Exchange */
#define LIBSPDM_CRYPTO_NID_FFDHE2048 0x0201
#define LIBSPDM_CRYPTO_NID_FFDHE3072 0x0202
#define LIBSPDM_CRYPTO_NID_FFDHE4096 0x0203
#define LIBSPDM_CRYPTO_NID_SECP256R1 0x0204
#define LIBSPDM_CRYPTO_NID_SECP384R1 0x0205
#define LIBSPDM_CRYPTO_NID_SECP521R1 0x0206
#define LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256 0x0207
#define LIBSPDM_CRYPTO_NID_CURVE_X25519 0x0208
#define LIBSPDM_CRYPTO_NID_CURVE_X448 0x0209
/* AEAD */
#define LIBSPDM_CRYPTO_NID_AES_128_GCM 0x0301
#define LIBSPDM_CRYPTO_NID_AES_256_GCM 0x0302
#define LIBSPDM_CRYPTO_NID_CHACHA20_POLY1305 0x0303
#define LIBSPDM_CRYPTO_NID_SM4_128_GCM 0x0304
/* X.509 v3 key usage extension flags. */
#define LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE 0x80 /* bit 0 */
#define LIBSPDM_CRYPTO_X509_KU_NON_REPUDIATION 0x40 /* bit 1 */
#define LIBSPDM_CRYPTO_X509_KU_KEY_ENCIPHERMENT 0x20 /* bit 2 */
#define LIBSPDM_CRYPTO_X509_KU_DATA_ENCIPHERMENT 0x10 /* bit 3 */
#define LIBSPDM_CRYPTO_X509_KU_KEY_AGREEMENT 0x08 /* bit 4 */
#define LIBSPDM_CRYPTO_X509_KU_KEY_CERT_SIGN 0x04 /* bit 5 */
#define LIBSPDM_CRYPTO_X509_KU_CRL_SIGN 0x02 /* bit 6 */
#define LIBSPDM_CRYPTO_X509_KU_ENCIPHER_ONLY 0x01 /* bit 7 */
#define LIBSPDM_CRYPTO_X509_KU_DECIPHER_ONLY 0x8000 /* bit 8 */
/* These constants comply with the DER encoded ASN.1 type tags. */
#define LIBSPDM_CRYPTO_ASN1_BOOLEAN 0x01
#define LIBSPDM_CRYPTO_ASN1_INTEGER 0x02
#define LIBSPDM_CRYPTO_ASN1_BIT_STRING 0x03
#define LIBSPDM_CRYPTO_ASN1_OCTET_STRING 0x04
#define LIBSPDM_CRYPTO_ASN1_NULL 0x05
#define LIBSPDM_CRYPTO_ASN1_OID 0x06
#define LIBSPDM_CRYPTO_ASN1_UTF8_STRING 0x0C
#define LIBSPDM_CRYPTO_ASN1_SEQUENCE 0x10
#define LIBSPDM_CRYPTO_ASN1_SET 0x11
#define LIBSPDM_CRYPTO_ASN1_PRINTABLE_STRING 0x13
#define LIBSPDM_CRYPTO_ASN1_T61_STRING 0x14
#define LIBSPDM_CRYPTO_ASN1_IA5_STRING 0x16
#define LIBSPDM_CRYPTO_ASN1_UTC_TIME 0x17
#define LIBSPDM_CRYPTO_ASN1_GENERALIZED_TIME 0x18
#define LIBSPDM_CRYPTO_ASN1_UNIVERSAL_STRING 0x1C
#define LIBSPDM_CRYPTO_ASN1_BMP_STRING 0x1E
#define LIBSPDM_CRYPTO_ASN1_PRIMITIVE 0x00
#define LIBSPDM_CRYPTO_ASN1_CONSTRUCTED 0x20
#define LIBSPDM_CRYPTO_ASN1_CONTEXT_SPECIFIC 0x80
#define LIBSPDM_CRYPTO_ASN1_TAG_CLASS_MASK 0xC0
#define LIBSPDM_CRYPTO_ASN1_TAG_PC_MASK 0x20
#define LIBSPDM_CRYPTO_ASN1_TAG_VALUE_MASK 0x1F
#include "hal/library/cryptlib/cryptlib_hash.h"
#include "hal/library/cryptlib/cryptlib_mac.h"
#include "hal/library/cryptlib/cryptlib_aead.h"
#include "hal/library/cryptlib/cryptlib_cert.h"
#include "hal/library/cryptlib/cryptlib_hkdf.h"
#include "hal/library/cryptlib/cryptlib_rsa.h"
#include "hal/library/cryptlib/cryptlib_ec.h"
#include "hal/library/cryptlib/cryptlib_dh.h"
#include "hal/library/cryptlib/cryptlib_ecd.h"
#include "hal/library/cryptlib/cryptlib_sm2.h"
#include "hal/library/cryptlib/cryptlib_rng.h"
#endif /* CRYPTLIB_H */

View File

@@ -0,0 +1,415 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef SPDM_LIB_CONFIG_H
#define SPDM_LIB_CONFIG_H
/* Enables assertions and debug printing. When `LIBSPDM_DEBUG_ENABLE` is defined it overrides or
* sets the values of `LIBSPDM_DEBUG_PRINT_ENABLE`, `LIBSPDM_DEBUG_ASSERT_ENABLE`, and
 * `LIBSPDM_DEBUG_BLOCK_ENABLE` to the value of `LIBSPDM_DEBUG_ENABLE`.
*
* Note that if this file is used with CMake and `DTARGET=Release` is defined, then all debugging
* is disabled.
*/
#ifndef LIBSPDM_DEBUG_ENABLE
#define LIBSPDM_DEBUG_ENABLE 1
#endif
/* The SPDM specification allows a Responder to return up to 256 version entries in the `VERSION`
* response to the Requester, including duplicate entries. For a Requester this value specifies the
* maximum number of entries that libspdm will tolerate in a `VERSION` response before returning an
 * error. A similar macro, `SPDM_MAX_VERSION_COUNT`, exists for the Responder. However, this macro
* is not meant to be configured by the Integrator.
*/
#ifndef LIBSPDM_MAX_VERSION_COUNT
#define LIBSPDM_MAX_VERSION_COUNT 5
#endif
/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.RequesterContext` and,
* if supported by the Responder, `PSK_EXCHANGE_RSP.ResponderContext` fields. The fields are
* typically random or monotonically increasing numbers.
*/
#ifndef LIBSPDM_PSK_CONTEXT_LENGTH
#define LIBSPDM_PSK_CONTEXT_LENGTH LIBSPDM_MAX_HASH_SIZE
#endif
/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.PSKHint` field.*/
#ifndef LIBSPDM_PSK_MAX_HINT_LENGTH
#define LIBSPDM_PSK_MAX_HINT_LENGTH 16
#endif
/* libspdm allows an Integrator to specify multiple root certificates as trust anchors when
* verifying certificate chains from an endpoint. This value specifies the maximum number of root
* certificates that libspdm can support.
*/
#ifndef LIBSPDM_MAX_ROOT_CERT_SUPPORT
#define LIBSPDM_MAX_ROOT_CERT_SUPPORT 10
#endif
/* If the Responder supports it a Requester is allowed to establish multiple secure sessions with
* the Responder. This value specifies the maximum number of sessions libspdm can support.
*/
#ifndef LIBSPDM_MAX_SESSION_COUNT
#define LIBSPDM_MAX_SESSION_COUNT 4
#endif
/* This value specifies the maximum size, in bytes, of a certificate chain that can be stored in a
* libspdm context.
*/
#ifndef LIBSPDM_MAX_CERT_CHAIN_SIZE
#define LIBSPDM_MAX_CERT_CHAIN_SIZE 0x1000
#endif
#ifndef LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE
#define LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE 0x1000
#endif
/* Partial certificates can be retrieved from a Requester or Responder and through multiple messages
* the complete certificate chain can be constructed. This value specifies the maximum size,
* in bytes, of a partial certificate that can be sent or received.
*/
#ifndef LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN
#define LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN 1024
#endif
#ifndef LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_BUFFER_SIZE 0x1200
#endif
#ifndef LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE 0x100 /* to hold message_a before negotiate*/
#endif
#ifndef LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE 0x300 /* to hold message_k before finished_key is ready*/
#endif
/* If the Responder replies with a Busy `ERROR` response to a request then the Requester is free to
* retry sending the request. This value specifies the maximum number of times libspdm will retry
* sending the request before returning an error. If its value is 0 then libspdm will not send any
* retry requests.
*/
#ifndef LIBSPDM_MAX_REQUEST_RETRY_TIMES
#define LIBSPDM_MAX_REQUEST_RETRY_TIMES 3
#endif
#ifndef LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM
#define LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM 4
#endif
#ifndef LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM
#define LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM 4
#endif
#ifndef LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM
#define LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM 4
#endif
#ifndef LIBSPDM_MAX_CSR_SIZE
#define LIBSPDM_MAX_CSR_SIZE 0x1000
#endif
/* To ensure integrity in communication between the Requester and the Responder libspdm calculates
* cryptographic digests and signatures over multiple requests and responses. This value specifies
* whether libspdm will use a running calculation over the transcript, where requests and responses
* are discarded as they are cryptographically consumed, or whether libspdm will buffer the entire
* transcript before calculating the digest or signature.
*/
#ifndef LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT
#define LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT 0
#endif
/* Cryptography Configuration
* In each category, at least one should be selected.
 * NOTE: Not all combinations are supported. E.g. do not mix NIST algorithms with SMx.*/
#ifndef LIBSPDM_RSA_SSA_SUPPORT
#define LIBSPDM_RSA_SSA_SUPPORT 1
#endif
#ifndef LIBSPDM_RSA_PSS_SUPPORT
#define LIBSPDM_RSA_PSS_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDSA_SUPPORT
#define LIBSPDM_ECDSA_SUPPORT 1
#endif
#ifndef LIBSPDM_SM2_DSA_SUPPORT
#define LIBSPDM_SM2_DSA_SUPPORT 1
#endif
#ifndef LIBSPDM_EDDSA_ED25519_SUPPORT
#define LIBSPDM_EDDSA_ED25519_SUPPORT 1
#endif
#ifndef LIBSPDM_EDDSA_ED448_SUPPORT
#define LIBSPDM_EDDSA_ED448_SUPPORT 1
#endif
#ifndef LIBSPDM_FFDHE_SUPPORT
#define LIBSPDM_FFDHE_SUPPORT 1
#endif
#ifndef LIBSPDM_ECDHE_SUPPORT
#define LIBSPDM_ECDHE_SUPPORT 1
#endif
#ifndef LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_GCM_SUPPORT
#define LIBSPDM_AEAD_GCM_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 1
#endif
#ifndef LIBSPDM_AEAD_SM4_SUPPORT
#define LIBSPDM_AEAD_SM4_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA256_SUPPORT
#define LIBSPDM_SHA256_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA384_SUPPORT
#define LIBSPDM_SHA384_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA512_SUPPORT
#define LIBSPDM_SHA512_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_256_SUPPORT
#define LIBSPDM_SHA3_256_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_384_SUPPORT
#define LIBSPDM_SHA3_384_SUPPORT 1
#endif
#ifndef LIBSPDM_SHA3_512_SUPPORT
#define LIBSPDM_SHA3_512_SUPPORT 1
#endif
#ifndef LIBSPDM_SM3_256_SUPPORT
#define LIBSPDM_SM3_256_SUPPORT 1
#endif
/* Code space optimization for Optional request/response messages.*/
/* Consumers of libspdm may wish to not fully implement all of the optional
* SPDM request/response messages. Therefore we have provided these
 * SPDM_ENABLE_CAPABILITY_***_CAP compile-time switches as an optimization
 * to disable the code (#if 0) related to said optional capability, thereby
* reducing the code space used in the image.*/
/* A single switch may enable/disable a single capability or group of related
* capabilities.*/
/* LIBSPDM_ENABLE_CAPABILITY_CERT_CAP - Enable/Disable single CERT capability.
* LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP - Enable/Disable single CHAL capability.
 * LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP - Enable/Disable multiple MEAS capabilities:
* (MEAS_CAP_NO_SIG, MEAS_CAP_SIG, MEAS_FRESH_CAP)*/
/* LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP - Enable/Disable single Key Exchange capability.
* LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP - Enable/Disable PSK_EX and PSK_FINISH.*/
/* LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP - Enable/Disable mutual authentication.
* LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP - Enable/Disable encapsulated message.*/
/* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP - Enable/Disable get csr capability.
* LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP - Enable/Disable set certificate capability. */
#ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP
#define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP
#define LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
#define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
#define LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP
#define LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP 1
#endif
#ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
#endif
/*
* MinDataTransferSize = 42
*
* H = HashLen = HmacLen = [32, 64]
* S = SigLen = [64, 512]
* D = ExchangeDataLen = [64, 512]
* R = RequesterContextLen >= 32
* R = ResponderContextLen >= 0
* O = OpaqueDataLen <= 1024
*
* Max Chunk No = 1, if (message size <= 42)
* Max Chunk No = [(message size + 4) / 30] roundup, if (message size > 42)
*
* +==========================+==========================================+=========+
* | Command | Size |MaxChunk |
* +==========================+==========================================+=========+
* | GET_VERSION | 4 | 1 |
* | VERSION {1.0, 1.1, 1.2} | 6 + 2 * 3 = 12 | 1 |
* +--------------------------+------------------------------------------+---------+
* | GET_CAPABILITIES 1.2 | 20 | 1 |
* | CAPABILITIES 1.2 | 20 | 1 |
* +--------------------------+------------------------------------------+---------+
* | ERROR | 4 | 1 |
* | ERROR(ResponseTooLarge) | 4 + 4 = 8 | 1 |
* | ERROR(LargeResponse) | 4 + 1 = 5 | 1 |
* | ERROR(ResponseNotReady) | 4 + 4 = 8 | 1 |
* +--------------------------+------------------------------------------+---------+
* | CHUNK_SEND header | 12 + L0 (0 or 4) | 1 |
* | CHUNK_RESPONSE header | 12 + L0 (0 or 4) | 1 |
* +==========================+==========================================+=========+
* | NEGOTIATE_ALGORITHMS 1.2 | 32 + 4 * 4 = 48 | 2 |
* | ALGORITHMS 1.2 | 36 + 4 * 4 = 52 | 2 |
* +--------------------------+------------------------------------------+---------+
* | GET_DIGESTS 1.2 | 4 | 1 |
* | DIGESTS 1.2 | 4 + H * SlotNum = [36, 516] | [1, 18] |
* +--------------------------+------------------------------------------+---------+
* | GET_CERTIFICATE 1.2 | 8 | 1 |
* | CERTIFICATE 1.2 | 8 + PortionLen | [1, ] |
* +--------------------------+------------------------------------------+---------+
* | CHALLENGE 1.2 | 40 | 1 |
* | CHALLENGE_AUTH 1.2 | 38 + H * 2 + S [+ O] = [166, 678] | [6, 23] |
* +--------------------------+------------------------------------------+---------+
 * | GET_MEASUREMENTS 1.2     | 5 + Nonce (0 or 32)                      | 1       |
* | MEASUREMENTS 1.2 | 42 + MeasRecLen (+ S) [+ O] = [106, 554] | [4, 19] |
* +--------------------------+------------------------------------------+---------+
* | KEY_EXCHANGE 1.2 | 42 + D [+ O] = [106, 554] | [4, 19] |
* | KEY_EXCHANGE_RSP 1.2 | 42 + D + H + S (+ H) [+ O] = [234, 1194] | [8, 40] |
* +--------------------------+------------------------------------------+---------+
* | FINISH 1.2 | 4 (+ S) + H = [100, 580] | [4, 20] |
* | FINISH_RSP 1.2 | 4 (+ H) = [36, 69] | [1, 3] |
* +--------------------------+------------------------------------------+---------+
* | PSK_EXCHANGE 1.2 | 12 [+ PSKHint] + R [+ O] = 44 | 2 |
* | PSK_EXCHANGE_RSP 1.2 | 12 + R + H (+ H) [+ O] = [108, 172] | [4, 6] |
* +--------------------------+------------------------------------------+---------+
* | PSK_FINISH 1.2 | 4 + H = [36, 68] | [1, 3] |
* | PSK_FINISH_RSP 1.2 | 4 | 1 |
* +--------------------------+------------------------------------------+---------+
* | GET_CSR 1.2 | 8 + RequesterInfoLen [+ O] | [1, ] |
* | CSR 1.2 | 8 + CSRLength | [1, ] |
* +--------------------------+------------------------------------------+---------+
* | SET_CERTIFICATE 1.2 | 4 + CertChainLen | [1, ] |
* | SET_CERTIFICATE_RSP 1.2 | 4 | 1 |
* +==========================+==========================================+=========+
*/
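/*
 * Worked example (illustrative): a 1024-byte CERTIFICATE response does not fit
 * within MinDataTransferSize = 42, so per the formula above it needs
 * roundup((1024 + 4) / 30) = 35 chunks.
 */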
/* Maximum size of a large SPDM message.
 * If chunking is unsupported, it must be the same as LIBSPDM_DATA_TRANSFER_SIZE.
 * If chunking is supported, it must be larger than LIBSPDM_DATA_TRANSFER_SIZE.
* It matches MaxSPDMmsgSize in SPDM specification. */
#ifndef LIBSPDM_MAX_SPDM_MSG_SIZE
#define LIBSPDM_MAX_SPDM_MSG_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#endif
/* Maximum size of a single SPDM message.
* It matches DataTransferSize in SPDM specification. */
#ifndef LIBSPDM_DATA_TRANSFER_SIZE
#define LIBSPDM_DATA_TRANSFER_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#endif
/* Required sender/receiver buffer for device I/O.
* NOTE: This is transport specific. Below configuration is just an example.
* +-------+--------+---------------------------+------+--+------+---+--------+-----+
* | TYPE |TransHdr| EncryptionHeader |AppHdr| |Random|MAC|AlignPad|FINAL|
* | | |SessionId|SeqNum|Len|AppLen| | | | | | |
* +-------+--------+---------------------------+------+ +------+---+--------+-----+
* | MCTP | 1 | 4 | 2 | 2 | 2 | 1 | | 32 | 12| 0 | 56 |
* |PCI_DOE| 8 | 4 | 0 | 2 | 2 | 0 | | 0 | 12| 3 | 31 |
* +-------+--------+---------------------------+------+--+------+---+--------+-----+
*/
#ifndef LIBSPDM_TRANSPORT_ADDITIONAL_SIZE
#define LIBSPDM_TRANSPORT_ADDITIONAL_SIZE 64
#endif
#ifndef LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE
#define LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE (LIBSPDM_DATA_TRANSFER_SIZE + \
LIBSPDM_TRANSPORT_ADDITIONAL_SIZE)
#endif
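/*
 * Worked example (illustrative): for the MCTP row above the overhead adds up
 * to 1 + 4 + 2 + 2 + 2 + 1 + 32 + 12 + 0 = 56 bytes, and for PCI_DOE to
 * 8 + 4 + 0 + 2 + 2 + 0 + 0 + 12 + 3 = 31 bytes, both of which fit within the
 * 64-byte LIBSPDM_TRANSPORT_ADDITIONAL_SIZE default above.
 */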
/* Required scratch buffer size for libspdm internal usage.
* It may be used to hold the encrypted/decrypted message and/or last sent/received message.
* It may be used to hold the large request/response and intermediate send/receive buffer
* in case of chunking.
*
* If chunking is not supported, it may be just LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE.
 * If chunking is supported, it should be at least as large as the layout below.
*
* +---------------+--------------+--------------------------+------------------------------+
* |SECURE_MESSAGE |LARGE_MESSAGE | SENDER_RECEIVER | LARGE_SENDER_RECEIVER |
* +---------------+--------------+--------------------------+------------------------------+
* |<-Secure msg ->|<-Large msg ->|<-Snd/Rcv buf for chunk ->|<-Snd/Rcv buf for large msg ->|
*
 * The value is NOT configurable.
 * The value MAY change in a different libspdm version.
 * It is exposed here only so that a libspdm consumer can account for it at build time.
*/
#if LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
/* first section */
#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_OFFSET 0
#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
/* second section */
#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_OFFSET (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
/* third section */
#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_OFFSET \
(LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
/* fourth section */
#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_OFFSET \
(LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)
#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY + \
LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY \
)
#else
#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE)
#endif
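/*
 * Illustrative sketch (not part of libspdm): with chunking enabled, a consumer
 * that owns a LIBSPDM_SCRATCH_BUFFER_SIZE allocation could locate the second
 * region described above like this.  The helper name is an assumption used
 * only for illustration.
 */
#if LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
static inline unsigned char *
spdm_scratch_large_message_region(unsigned char *scratch_buffer)
{
    /* Second region: holds a reassembled large request/response. */
    return scratch_buffer + LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_OFFSET;
}
#endif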
/* Enable message logging.
* See https://github.com/DMTF/libspdm/blob/main/doc/user_guide.md#message-logging
* for more information */
#ifndef LIBSPDM_ENABLE_MSG_LOG
#define LIBSPDM_ENABLE_MSG_LOG 1
#endif
/* Enable macro checking during compilation. */
#ifndef LIBSPDM_CHECK_MACRO
#define LIBSPDM_CHECK_MACRO 0
#endif
#endif /* SPDM_LIB_CONFIG_H */

View File

@@ -0,0 +1,470 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#include "nvspdm_cryptlib_extensions.h"
#ifdef USE_LKCA
#define BUFFER_SIZE (2 * 1024 * 1024)
#define AUTH_TAG_SIZE 16
struct lkca_aead_ctx
{
struct crypto_aead *aead;
struct aead_request *req;
char *a_data_buffer;
char *in_buffer;
char *out_buffer;
char tag[AUTH_TAG_SIZE];
};
#endif
int libspdm_aead_prealloc(void **context, char const *alg)
{
#ifndef USE_LKCA
return -ENODEV;
#else
struct lkca_aead_ctx *ctx;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL) {
return -ENOMEM;
}
memset(ctx, 0, sizeof(*ctx));
ctx->aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
if (IS_ERR(ctx->aead)) {
pr_notice("could not allocate AEAD algorithm\n");
kfree(ctx);
return -ENODEV;
}
ctx->req = aead_request_alloc(ctx->aead, GFP_KERNEL);
if (ctx->req == NULL) {
pr_info("could not allocate skcipher request\n");
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
ctx->a_data_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
if (ctx->a_data_buffer == NULL) {
aead_request_free(ctx->req);
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
ctx->in_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
if (ctx->in_buffer == NULL) {
kfree(ctx->a_data_buffer);
aead_request_free(ctx->req);
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
ctx->out_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
if (ctx->out_buffer == NULL) {
kfree(ctx->a_data_buffer);
kfree(ctx->in_buffer);
aead_request_free(ctx->req);
crypto_free_aead(ctx->aead);
kfree(ctx);
return -ENOMEM;
}
*context = ctx;
return 0;
#endif
}
void libspdm_aead_free(void *context)
{
#ifdef USE_LKCA
struct lkca_aead_ctx *ctx = context;
crypto_free_aead(ctx->aead);
aead_request_free(ctx->req);
kfree(ctx->a_data_buffer);
kfree(ctx->in_buffer);
kfree(ctx->out_buffer);
kfree(ctx);
#endif
}
#define SG_AEAD_AAD 0
#define SG_AEAD_TEXT 1
#define SG_AEAD_SIG 2
// Number of fields in AEAD scatterlist
#define SG_AEAD_LEN 3
#ifdef USE_LKCA
// This function doesn't do any allocs, it uses temp buffers instead
static int lkca_aead_internal(struct crypto_aead *aead,
struct aead_request *req,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
struct scatterlist sg_in[],
struct scatterlist sg_out[],
size_t a_data_size,
size_t data_in_size,
size_t *data_out_size,
size_t tag_size,
bool enc)
{
DECLARE_CRYPTO_WAIT(wait);
int rc = 0;
if (crypto_aead_setkey(aead, key, key_size)) {
pr_info("key could not be set\n");
return -EINVAL;
}
if (crypto_aead_ivsize(aead) != iv_size) {
pr_info("iv could not be set\n");
return -EINVAL;
}
aead_request_set_ad(req, a_data_size);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
if (enc) {
aead_request_set_crypt(req, sg_in, sg_out, data_in_size, (u8 *) iv);
rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
} else {
aead_request_set_crypt(req, sg_in, sg_out, data_in_size + tag_size, (u8 *) iv);
rc = crypto_wait_req(crypto_aead_decrypt(req), &wait);
}
if (rc != 0) {
pr_info("Encryption FAILED\n");
}
*data_out_size = data_in_size;
return rc;
}
#endif
int libspdm_aead_prealloced(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc)
{
#ifndef USE_LKCA
return -ENODEV;
#else
int rc = 0;
struct scatterlist sg_in[SG_AEAD_LEN];
struct scatterlist sg_out[SG_AEAD_LEN];
struct lkca_aead_ctx *ctx = context;
sg_init_table(sg_in, SG_AEAD_LEN);
sg_init_table(sg_out, SG_AEAD_LEN);
if (!virt_addr_valid(a_data)) {
if (a_data_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
memcpy(ctx->a_data_buffer, a_data, a_data_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
}
if (!virt_addr_valid(data_in)) {
if (data_in_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_TEXT], ctx->in_buffer, data_in_size);
memcpy(ctx->in_buffer, data_in, data_in_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
}
if (!virt_addr_valid(data_out)) {
if (data_in_size > BUFFER_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_out[SG_AEAD_TEXT], ctx->out_buffer, data_in_size);
} else {
sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
}
// Tag is small enough that memcpy is cheaper than checking if page is virtual
if(tag_size > AUTH_TAG_SIZE) {
return -ENOMEM;
}
sg_set_buf(&sg_in[SG_AEAD_SIG], ctx->tag, tag_size);
sg_set_buf(&sg_out[SG_AEAD_SIG], ctx->tag, tag_size);
if(!enc)
memcpy(ctx->tag, tag, tag_size);
rc = lkca_aead_internal(ctx->aead, ctx->req, key, key_size, iv, iv_size,
sg_in, sg_out, a_data_size, data_in_size,
data_out_size, tag_size, enc);
if (enc) {
memcpy(tag, ctx->tag, tag_size);
}
if (!virt_addr_valid(data_out)) {
memcpy(data_out, ctx->out_buffer, data_in_size);
}
return rc;
#endif
}
int libspdm_aead(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size,
bool enc, char const *alg)
{
#ifndef USE_LKCA
return -ENODEV;
#else
struct crypto_aead *aead = NULL;
struct aead_request *req = NULL;
struct scatterlist sg_in[SG_AEAD_LEN];
struct scatterlist sg_out[SG_AEAD_LEN];
uint8_t *a_data_shadow = NULL;
uint8_t *data_in_shadow = NULL;
uint8_t *data_out_shadow = NULL;
uint8_t *tag_shadow = NULL;
int rc = 0;
aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
if (IS_ERR(aead)) {
pr_notice("could not allocate AEAD algorithm\n");
return -ENODEV;
}
req = aead_request_alloc(aead, GFP_KERNEL);
if (req == NULL) {
pr_info("could not allocate skcipher request\n");
rc = -ENOMEM;
goto out;
}
sg_init_table(sg_in, SG_AEAD_LEN);
sg_init_table(sg_out, SG_AEAD_LEN);
if (!virt_addr_valid(a_data)) {
a_data_shadow = kmalloc(a_data_size, GFP_KERNEL);
if (a_data_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_in[SG_AEAD_AAD], a_data_shadow, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], a_data_shadow, a_data_size);
memcpy(a_data_shadow, a_data, a_data_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
}
if (!virt_addr_valid(data_in)) {
data_in_shadow = kmalloc(data_in_size, GFP_KERNEL);
if (data_in_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in_shadow, data_in_size);
memcpy(data_in_shadow, data_in, data_in_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
}
if (!virt_addr_valid(data_out)) {
data_out_shadow = kmalloc(data_in_size, GFP_KERNEL);
if (data_out_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out_shadow, data_in_size);
} else {
sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
}
if (!virt_addr_valid(tag)) {
tag_shadow = kmalloc(tag_size, GFP_KERNEL);
if (tag_shadow == NULL) {
rc = -ENOMEM;
goto out;
}
sg_set_buf(&sg_in[SG_AEAD_SIG], tag_shadow, tag_size);
sg_set_buf(&sg_out[SG_AEAD_SIG], tag_shadow, tag_size);
if(!enc)
memcpy(tag_shadow, tag, tag_size);
} else {
sg_set_buf(&sg_in[SG_AEAD_SIG], tag, tag_size);
sg_set_buf(&sg_out[SG_AEAD_SIG], tag, tag_size);
}
rc = lkca_aead_internal(aead, req, key, key_size, iv, iv_size,
sg_in, sg_out, a_data_size, data_in_size,
data_out_size, tag_size, enc);
if (enc && (tag_shadow != NULL))
memcpy((uint8_t *) tag, tag_shadow, tag_size);
if (data_out_shadow != NULL)
memcpy(data_out, data_out_shadow, data_in_size);
out:
if (a_data_shadow != NULL)
kfree(a_data_shadow);
if (data_in_shadow != NULL)
kfree(data_in_shadow);
    if (data_out_shadow != NULL)
        kfree(data_out_shadow);
    if (tag_shadow != NULL)
        kfree(tag_shadow);
if (aead != NULL)
crypto_free_aead(aead);
if (req != NULL)
aead_request_free(req);
return rc;
#endif
}
// Wrappers to match the libspdm interface
bool libspdm_aead_gcm_prealloc(void **context)
{
return libspdm_aead_prealloc(context, "gcm(aes)") == 0;
}
bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int32_t ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
a_data, a_data_size, data_in, data_in_size,
tag_out, tag_size, data_out, data_out_size, true);
*data_out_size = data_in_size;
return ret == 0;
}
bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
a_data, a_data_size, data_in, data_in_size,
(uint8_t *) tag, tag_size, data_out, data_out_size, false);
*data_out_size = data_in_size;
return ret == 0;
}
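/*
 * Illustrative usage sketch (not part of the driver): one possible
 * encrypt-one-message flow for the preallocated AEAD wrappers above.  The
 * function name, buffer sizes and key/IV values are assumptions used only
 * for illustration.
 */
static int __maybe_unused lkca_aead_prealloc_example(void)
{
    void *aead_ctx = NULL;
    uint8_t key[32] = { 0 };   /* AES-256 key (placeholder)             */
    uint8_t iv[12] = { 0 };    /* 96-bit GCM IV (placeholder)           */
    uint8_t aad[16] = { 0 };   /* additional authenticated data         */
    uint8_t msg[64] = { 0 };   /* plaintext to protect                  */
    uint8_t out[64];           /* ciphertext, same length as the input  */
    uint8_t tag[16];           /* 128-bit authentication tag            */
    size_t out_size = sizeof(out);
    bool ok;

    if (!libspdm_aead_gcm_prealloc(&aead_ctx))
        return -ENODEV;

    ok = libspdm_aead_aes_gcm_encrypt_prealloc(aead_ctx, key, sizeof(key),
                                               iv, sizeof(iv),
                                               aad, sizeof(aad),
                                               msg, sizeof(msg),
                                               tag, sizeof(tag),
                                               out, &out_size);

    libspdm_aead_free(aead_ctx);
    return ok ? 0 : -EIO;
}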

View File

@@ -0,0 +1,117 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int32_t ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
data_in, data_in_size, tag_out, tag_size,
data_out, data_out_size, true, "gcm(aes)");
*data_out_size = data_in_size;
return ret == 0;
}
bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size)
{
int ret;
if (data_in_size > INT_MAX) {
return false;
}
if (a_data_size > INT_MAX) {
return false;
}
if (iv_size != 12) {
return false;
}
switch (key_size) {
case 16:
case 24:
case 32:
break;
default:
return false;
}
if ((tag_size < 12) || (tag_size > 16)) {
return false;
}
if (data_out_size != NULL) {
if ((*data_out_size > INT_MAX) ||
(*data_out_size < data_in_size)) {
return false;
}
}
ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
data_in, data_in_size, tag, tag_size,
data_out, data_out_size, false, "gcm(aes)");
*data_out_size = data_in_size;
return ret == 0;
}

View File

@@ -0,0 +1,172 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
static bool lkca_ecdsa_sign(void *ec_context,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size)
{
return false;
}
bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size)
{
if (ec_context == NULL || public_key == NULL) {
return false;
}
return lkca_ec_set_pub_key(ec_context, public_key, public_key_size);
}
bool libspdm_ec_get_pub_key(void *ec_context, uint8_t *public_key,
size_t *public_key_size)
{
if (ec_context == NULL || public_key_size == NULL) {
return false;
}
if (public_key == NULL && *public_key_size != 0) {
return false;
}
return lkca_ec_get_pub_key(ec_context, public_key, public_key_size);
}
bool libspdm_ec_check_key(const void *ec_context)
{
/* TBD*/
return true;
}
bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size)
{
if (ec_context == NULL || public_size == NULL) {
return false;
}
if (public_data == NULL && *public_size != 0) {
return false;
}
return lkca_ec_generate_key(ec_context, public_data, public_size);
}
bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size)
{
if (ec_context == NULL || peer_public == NULL || key_size == NULL ||
key == NULL) {
return false;
}
if (peer_public_size > INT_MAX) {
return false;
}
return lkca_ec_compute_key(ec_context, peer_public, peer_public_size, key,
key_size);
}
bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size)
{
if (ec_context == NULL || message_hash == NULL) {
return false;
}
if (signature == NULL) {
return false;
}
switch (hash_nid) {
case LIBSPDM_CRYPTO_NID_SHA256:
if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA384:
if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA512:
if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) {
return false;
}
break;
default:
return false;
}
return lkca_ecdsa_sign(ec_context, message_hash, hash_size, signature, sig_size);
}
bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size)
{
if (ec_context == NULL || message_hash == NULL || signature == NULL) {
return false;
}
if (sig_size > INT_MAX || sig_size == 0) {
return false;
}
switch (hash_nid) {
case LIBSPDM_CRYPTO_NID_SHA256:
if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA384:
if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) {
return false;
}
break;
case LIBSPDM_CRYPTO_NID_SHA512:
if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) {
return false;
}
break;
default:
return false;
}
return lkca_ecdsa_verify(ec_context, hash_nid, message_hash, hash_size,
signature, sig_size);
}

View File

@@ -0,0 +1,326 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#include <linux/module.h>
MODULE_SOFTDEP("pre: ecdh_generic,ecdsa_generic");
#include <crypto/akcipher.h>
#include <crypto/ecdh.h>
#include <crypto/internal/ecc.h>
struct ecc_ctx {
unsigned int curve_id;
u64 priv_key[ECC_MAX_DIGITS]; // In big endian
struct {
        // ecdsa wants the byte preceding pub_key to be set to 0x04 (uncompressed point marker)
u64 pub_key_prefix;
u64 pub_key[2 * ECC_MAX_DIGITS];
};
bool pub_key_set;
bool priv_key_set;
char const *name;
int size;
};
#endif
void *libspdm_ec_new_by_nid(size_t nid)
{
#ifndef USE_LKCA
return NULL;
#else
struct ecc_ctx *ctx;
if ((nid != LIBSPDM_CRYPTO_NID_SECP256R1) && (nid != LIBSPDM_CRYPTO_NID_SECP384R1)){
return NULL;
}
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
return NULL;
}
if (nid == LIBSPDM_CRYPTO_NID_SECP256R1) {
ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->size = 64;
ctx->name = "ecdsa-nist-p256";
} else {
ctx->curve_id = ECC_CURVE_NIST_P384;
ctx->size = 96;
ctx->name = "ecdsa-nist-p384";
}
ctx->pub_key_set = false;
ctx->priv_key_set = false;
return ctx;
#endif
}
void libspdm_ec_free(void *ec_context)
{
#ifdef USE_LKCA
kfree(ec_context);
#endif
}
bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = context;
unsigned int ndigits = ctx->size / 16;
if (key_size != (ctx->size / 2)) {
return false;
}
memcpy(ctx->priv_key, key, key_size);
// XXX: if this fails, do we want to retry generating new key?
if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
return false;
}
ctx->pub_key_set = true;
ctx->priv_key_set = true;
return true;
#endif
}
bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
struct ecc_point pub_key;
unsigned int ndigits;
if (public_key_size != ctx->size) {
return false;
}
// We can reuse pub_key for now
ndigits = ctx->size / 16;
pub_key = ECC_POINT_INIT(ctx->pub_key, ctx->pub_key + ndigits, ndigits);
ecc_swap_digits(public_key, ctx->pub_key, ndigits);
ecc_swap_digits(((u64 *)public_key) + ndigits, ctx->pub_key + ndigits, ndigits);
if(ecc_is_pubkey_valid_full(ecc_get_curve(ctx->curve_id), &pub_key)) {
return false;
}
memcpy(ctx->pub_key, public_key, public_key_size);
ctx->pub_key_set = true;
return true;
#endif
}
bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
size_t *public_key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
if (*public_key_size < ctx->size) {
*public_key_size = ctx->size;
return false;
}
*public_key_size = ctx->size;
memcpy(public_key, ctx->pub_key, ctx->size);
return true;
#endif
}
bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
size_t *public_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
unsigned int ndigits = ctx->size / 16;
if(ecc_gen_privkey(ctx->curve_id, ndigits, ctx->priv_key)) {
return false;
}
// XXX: if this fails, do we want to retry generating new key?
if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
return false;
}
memcpy(public_data, ctx->pub_key, ctx->size);
*public_size = ctx->size;
ctx->priv_key_set = true;
ctx->pub_key_set = true;
return true;
#endif
}
bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
size_t peer_public_size, uint8_t *key,
size_t *key_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
if (peer_public_size != ctx->size) {
return false;
}
if (!ctx->priv_key_set) {
return false;
}
if ((ctx->size / 2) > *key_size) {
return false;
}
if (crypto_ecdh_shared_secret(ctx->curve_id, ctx->size / 16,
(const u64 *) ctx->priv_key,
(const u64 *) peer_public,
(u64 *) key)) {
return false;
}
*key_size = ctx->size / 2;
return true;
#endif
}
bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size)
{
#ifndef USE_LKCA
return false;
#else
struct ecc_ctx *ctx = ec_context;
    // Roundabout way: the kernel akcipher API wants the raw (r,s) signature
    // re-encoded as a BER SEQUENCE, with the message digest appended after it
u64 ber_max_len = 3 + 2 * (4 + (ECC_MAX_BYTES));
u64 ber_len = 0;
u8 *ber = NULL;
u8 *pub_key;
struct akcipher_request *req = NULL;
struct crypto_akcipher *tfm = NULL;
struct scatterlist sg;
DECLARE_CRYPTO_WAIT(wait);
int err;
if (sig_size != ctx->size) {
return false;
}
if(ctx->pub_key_set == false){
return false;
}
tfm = crypto_alloc_akcipher(ctx->name, CRYPTO_ALG_TYPE_AKCIPHER, 0);
if (IS_ERR(tfm)) {
pr_info("ALLOC FAILED\n");
return false;
}
pub_key = (u8 *) ctx->pub_key;
    pub_key--;    // Step back into the pub_key_prefix byte
    *pub_key = 4; // and set it to 0x04 (uncompressed point marker), as the kernel expects
if ((err = crypto_akcipher_set_pub_key(tfm, pub_key, ctx->size + 1)) != 0) {
pr_info("SET PUB KEY FAILED: %d\n", -err);
goto failTfm;
}
req = akcipher_request_alloc(tfm, GFP_KERNEL);
    if (req == NULL) {
        pr_info("REQUEST ALLOC FAILED\n");
        err = -ENOMEM;
        goto failTfm;
    }
// We concatenate signature and hash and ship it to kernel
ber = kmalloc(ber_max_len + hash_size, GFP_KERNEL);
    if (ber == NULL) {
        err = -ENOMEM;
        goto failReq;
    }
    // XXX: NOTE: this only works for 256- and 384-bit curves. For larger keys
    // the BER length field needs more than one byte, which is not handled here!
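    //
    // Illustrative layout (for P-256, sig_size = 64, hash_size = 32, before any
    // leading-zero padding) of the buffer assembled below:
    //
    //   0x30 0x44              SEQUENCE, 68 bytes
    //     0x02 0x20 r[32]      INTEGER r
    //     0x02 0x20 s[32]      INTEGER s
    //   hash[32]               digest appended for akcipher_request_set_crypt()
    //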
// Signature
ber[ber_len++] = 0x30;
ber[ber_len++] = 2 * (2 + ctx->size / 2);
ber[ber_len++] = 0x02;
if (signature[0] > 127) {
ber[ber_len++] = ctx->size / 2 + 1;
ber[1]++;
ber[ber_len++] = 0;
} else {
ber[ber_len++] = ctx->size / 2;
}
memcpy(ber + ber_len, signature, sig_size / 2);
ber_len += sig_size / 2;
ber[ber_len++] = 0x02;
if (signature[sig_size / 2] > 127) {
ber[ber_len++] = ctx->size / 2 + 1;
ber[1]++;
ber[ber_len++] = 0;
} else {
ber[ber_len++] = ctx->size / 2;
}
memcpy(ber + ber_len, signature + sig_size / 2, sig_size / 2);
ber_len += sig_size / 2;
// Just append hash, for scatterlists it can't be on stack anyway
memcpy(ber + ber_len, message_hash, hash_size);
sg_init_one(&sg, ber, ber_len + hash_size);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
akcipher_request_set_crypt(req, &sg, NULL, ber_len, hash_size);
err = crypto_wait_req(crypto_akcipher_verify(req), &wait);
if (err != 0){
pr_info("Verify FAILED %d\n", -err);
}
kfree(ber);
failReq:
akcipher_request_free(req);
failTfm:
crypto_free_akcipher(tfm);
return err == 0;
#endif
}

View File

@@ -0,0 +1,158 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
// RFC 5869 has some very non-intuitive points; reading it is advised
static bool lkca_hkdf_expand_only(struct crypto_shash *alg,
const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
return false;
#else
int ret;
int i;
uint8_t ctr = 1;
uint8_t tmp[HASH_MAX_DIGESTSIZE];
SHASH_DESC_ON_STACK(desc, alg);
desc->tfm = alg;
ret = crypto_shash_setkey(desc->tfm, prk, prk_size);
if (ret != 0) {
pr_info("key size mismatch %ld\n", prk_size);
return false;
}
for (i = 0, ctr = 1; i < out_size; i += prk_size, ctr++) {
ret = crypto_shash_init(desc);
if (ret) {
return false;
}
if (i != 0) {
ret = crypto_shash_update(desc, out + i - prk_size, prk_size);
if (ret) {
return false;
}
}
if (info_size > 0) {
ret = crypto_shash_update(desc, info, info_size);
if (ret) {
return false;
}
}
ret = crypto_shash_update(desc, &ctr, 1);
if (ret)
return false;
if ((out_size - i) < prk_size) {
ret = crypto_shash_final(desc, tmp);
if (ret) {
return false;
}
memcpy(out + i, tmp, out_size - i);
memzero_explicit(tmp, sizeof(tmp));
} else {
ret = crypto_shash_final(desc, out + i);
if (ret) {
return false;
}
}
}
return true;
#endif
}
bool lkca_hkdf_extract_and_expand(const char *alg_name,
const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
return false;
#else
int ret = 0;
struct crypto_shash *alg;
uint8_t prk[HASH_MAX_DIGESTSIZE];
if (key == NULL || salt == NULL || info == NULL || out == NULL ||
key_size > sizeof(prk) || salt_size > INT_MAX || info_size > INT_MAX ||
out_size > (sizeof(prk) * 255)) {
return false;
}
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = crypto_shash_setkey(alg, salt, salt_size);
if (ret != 0) {
goto out;
}
ret = crypto_shash_tfm_digest(alg, key, key_size, prk);
if (ret != 0) {
goto out;
}
ret = !lkca_hkdf_expand_only(alg, prk, crypto_shash_digestsize(alg), info, info_size, out, out_size);
out:
crypto_free_shash(alg);
return ret == 0;
#endif
}
bool lkca_hkdf_expand(const char *alg_name,
const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
return false;
#else
bool ret = false;
struct crypto_shash *alg;
if (prk == NULL || info == NULL || out == NULL || prk_size > (512 / 8) ||
info_size > INT_MAX || (out_size > (prk_size * 255))) {
return false;
}
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = lkca_hkdf_expand_only(alg, prk, prk_size, info, info_size, out, out_size);
crypto_free_shash(alg);
return ret;
#endif
}
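/*
 * Illustrative usage sketch (not part of the driver): deriving a 32-byte
 * session key with HKDF-SHA256 through the helpers above.  The function name
 * and the placeholder inputs are assumptions used only for illustration.
 */
static bool __maybe_unused lkca_hkdf_example(uint8_t *derived_key /* 32 bytes */)
{
    uint8_t ikm[32] = { 0 };   /* input keying material (placeholder)      */
    uint8_t salt[32] = { 0 };  /* optional salt (placeholder)              */
    uint8_t info[8] = { 0 };   /* context/application specific info        */

    /* HKDF-Extract followed by HKDF-Expand, as described in RFC 5869. */
    return lkca_hkdf_extract_and_expand("hmac(sha256)", ikm, sizeof(ikm),
                                        salt, sizeof(salt),
                                        info, sizeof(info),
                                        derived_key, 32);
}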

View File

@@ -0,0 +1,111 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
bool libspdm_hkdf_sha256_extract_and_expand(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_extract_and_expand("hmac(sha256)", key, key_size,
salt, salt_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size)
{
if (prk_out_size != (256 / 8))
return false;
return libspdm_hmac_sha256_all(key, key_size, salt, salt_size, prk_out);
}
bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_expand("hmac(sha256)", prk, prk_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha384_extract_and_expand(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_extract_and_expand("hmac(sha384)", key, key_size,
salt, salt_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size)
{
if (prk_out_size != (384 / 8))
return false;
return libspdm_hmac_sha384_all(key, key_size, salt, salt_size, prk_out);
}
bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_expand("hmac(sha384)", prk, prk_size, info, info_size,
out, out_size);
}
bool libspdm_hkdf_sha512_extract_and_expand(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_extract_and_expand("hmac(sha512)", key, key_size,
salt, salt_size, info, info_size, out,
out_size);
}
bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
const uint8_t *salt, size_t salt_size,
uint8_t *prk_out, size_t prk_out_size)
{
if (prk_out_size != (512 / 8))
return false;
return libspdm_hmac_sha512_all(key, key_size, salt, salt_size, prk_out);
}
bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
const uint8_t *info, size_t info_size,
uint8_t *out, size_t out_size)
{
return lkca_hkdf_expand("hmac(sha512)", prk, prk_size, info, info_size,
out, out_size);
}

View File

@@ -0,0 +1,282 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
void *libspdm_hmac_sha256_new(void)
{
return lkca_hash_new("hmac(sha256)");
}
void libspdm_hmac_sha256_free(void *hmac_sha256_ctx)
{
lkca_hash_free(hmac_sha256_ctx);
}
bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key,
size_t key_size)
{
if (hmac_sha256_ctx == NULL)
return false;
return lkca_hmac_set_key(hmac_sha256_ctx, key, key_size);
}
bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx,
void *new_hmac_sha256_ctx)
{
if (hmac_sha256_ctx == NULL || new_hmac_sha256_ctx == NULL) {
return false;
}
return lkca_hmac_duplicate(new_hmac_sha256_ctx, hmac_sha256_ctx);
}
bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data,
size_t data_size)
{
int32_t ret;
if (hmac_sha256_ctx == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(hmac_sha256_ctx, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value)
{
int32_t ret;
if (hmac_sha256_ctx == NULL || hmac_value == NULL) {
return false;
}
ret = crypto_shash_final(hmac_sha256_ctx, hmac_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value)
{
if (hmac_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hmac_all("hmac(sha256)", key, key_size, data, data_size, hmac_value);
}
void *libspdm_hmac_sha384_new(void)
{
return lkca_hash_new("hmac(sha384)");
}
void libspdm_hmac_sha384_free(void *hmac_sha384_ctx)
{
lkca_hash_free(hmac_sha384_ctx);
}
bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key,
size_t key_size)
{
if (hmac_sha384_ctx == NULL)
return false;
return lkca_hmac_set_key(hmac_sha384_ctx, key, key_size);
}
bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx,
void *new_hmac_sha384_ctx)
{
if (hmac_sha384_ctx == NULL || new_hmac_sha384_ctx == NULL) {
return false;
}
return lkca_hmac_duplicate(new_hmac_sha384_ctx, hmac_sha384_ctx);
}
bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data,
size_t data_size)
{
int32_t ret;
if (hmac_sha384_ctx == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(hmac_sha384_ctx, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value)
{
int32_t ret;
if (hmac_sha384_ctx == NULL || hmac_value == NULL) {
return false;
}
ret = crypto_shash_final(hmac_sha384_ctx, hmac_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value)
{
if (hmac_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hmac_all("hmac(sha384)", key, key_size, data, data_size, hmac_value);
}
void *libspdm_hmac_sha512_new(void)
{
return lkca_hash_new("hmac(sha512)");
}
void libspdm_hmac_sha512_free(void *hmac_sha512_ctx)
{
lkca_hash_free(hmac_sha512_ctx);
}
bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key,
size_t key_size)
{
if (hmac_sha512_ctx == NULL)
return false;
return lkca_hmac_set_key(hmac_sha512_ctx, key, key_size);
}
bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx,
void *new_hmac_sha512_ctx)
{
    if (hmac_sha512_ctx == NULL || new_hmac_sha512_ctx == NULL) {
return false;
}
return lkca_hmac_duplicate(new_hmac_sha512_ctx, hmac_sha512_ctx);
}
bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data,
size_t data_size)
{
int32_t ret;
if (hmac_sha512_ctx == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(hmac_sha512_ctx, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value)
{
int32_t ret;
if (hmac_sha512_ctx == NULL || hmac_value == NULL) {
return false;
}
ret = crypto_shash_final(hmac_sha512_ctx, hmac_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
const uint8_t *key, size_t key_size,
uint8_t *hmac_value)
{
if (hmac_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hmac_all("hmac(sha512)", key, key_size, data, data_size, hmac_value);
}

View File

@@ -0,0 +1,37 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
// get_random_bytes() is a non-GPL symbol and not part of LKCA, so there is no need to stub it out
bool libspdm_random_bytes(uint8_t *output, size_t size)
{
get_random_bytes(output, size);
return true;
}
// Ignoring the seed is specifically allowed by SPDM
bool libspdm_random_seed(const uint8_t *seed, size_t seed_size)
{
return true;
}

View File

@@ -0,0 +1,264 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
void *libspdm_sha256_new(void)
{
return lkca_hash_new("sha256");
}
void libspdm_sha256_free(void *sha256_ctx)
{
lkca_hash_free(sha256_ctx);
}
bool libspdm_sha256_init(void *sha256_context)
{
return crypto_shash_init(sha256_context) == 0;
}
bool libspdm_sha256_duplicate(const void *sha256_context,
void *new_sha256_context)
{
if (sha256_context == NULL || new_sha256_context == NULL) {
return false;
}
return lkca_hash_duplicate(new_sha256_context, sha256_context);
}
bool libspdm_sha256_update(void *sha256_context, const void *data,
size_t data_size)
{
int32_t ret;
if (sha256_context == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(sha256_context, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value)
{
int32_t ret;
if (sha256_context == NULL || hash_value == NULL) {
return false;
}
ret = crypto_shash_final(sha256_context, hash_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha256_hash_all(const void *data, size_t data_size,
uint8_t *hash_value)
{
if (hash_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hash_all("sha256", data, data_size, hash_value);
}
void *libspdm_sha384_new(void)
{
return lkca_hash_new("sha384");
}
void libspdm_sha384_free(void *sha384_ctx)
{
lkca_hash_free(sha384_ctx);
}
bool libspdm_sha384_init(void *sha384_context)
{
return crypto_shash_init(sha384_context) == 0;
}
bool libspdm_sha384_duplicate(const void *sha384_context,
void *new_sha384_context)
{
if (sha384_context == NULL || new_sha384_context == NULL) {
return false;
}
return lkca_hash_duplicate(new_sha384_context, sha384_context);
}
bool libspdm_sha384_update(void *sha384_context, const void *data,
size_t data_size)
{
int32_t ret;
if (sha384_context == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(sha384_context, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value)
{
int32_t ret;
if (sha384_context == NULL || hash_value == NULL) {
return false;
}
ret = crypto_shash_final(sha384_context, hash_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha384_hash_all(const void *data, size_t data_size,
uint8_t *hash_value)
{
if (hash_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hash_all("sha384", data, data_size, hash_value);
}
void *libspdm_sha512_new(void)
{
return lkca_hash_new("sha512");
}
void libspdm_sha512_free(void *sha512_ctx)
{
lkca_hash_free(sha512_ctx);
}
bool libspdm_sha512_init(void *sha512_context)
{
return crypto_shash_init(sha512_context) == 0;
}
bool libspdm_sha512_duplicate(const void *sha512_context,
void *new_sha512_context)
{
if (sha512_context == NULL || new_sha512_context == NULL) {
return false;
}
return lkca_hash_duplicate(new_sha512_context, sha512_context);
}
bool libspdm_sha512_update(void *sha512_context, const void *data,
size_t data_size)
{
int32_t ret;
if (sha512_context == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
ret = crypto_shash_update(sha512_context, data, data_size);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value)
{
int32_t ret;
if (sha512_context == NULL || hash_value == NULL) {
return false;
}
ret = crypto_shash_final(sha512_context, hash_value);
if (ret != 0) {
return false;
}
return true;
}
bool libspdm_sha512_hash_all(const void *data, size_t data_size,
uint8_t *hash_value)
{
if (hash_value == NULL) {
return false;
}
if (data == NULL && data_size != 0) {
return false;
}
if (data_size > INT_MAX) {
return false;
}
return lkca_hash_all("sha512", data, data_size, hash_value);
}

View File

@@ -0,0 +1,160 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal_crypt_lib.h"
void *lkca_hash_new(const char* alg_name)
{
#ifndef USE_LKCA
    return NULL;
#else
    //XXX: can we reuse the crypto_shash part and just allocate the desc?
struct crypto_shash *alg;
struct shash_desc *desc;
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
printk (KERN_INFO "Failed to alloc %s\n", alg_name);
return NULL;
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(alg), GFP_KERNEL);
if (desc == NULL){
printk (KERN_INFO "Kernel out of mem\n");
crypto_free_shash(alg);
return NULL;
}
desc->tfm = alg;
return desc;
#endif
}
void lkca_hash_free(struct shash_desc *ctx)
{
#ifndef USE_LKCA
#else
crypto_free_shash(ctx->tfm);
kfree(ctx);
#endif
}
bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
return false;
#else
SHASH_DESC_ON_STACK(tmp, src);
if (crypto_shash_export((struct shash_desc *) src, tmp)) {
return false;
}
if (crypto_shash_import(dst, tmp)) {
return false;
}
return true;
#endif
}
bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
return false;
#else
    // In LKCA, hmac export does not export the ipad/opad state, so we need to work around that here
struct crypto_shash *src_tfm = src->tfm;
struct crypto_shash *dst_tfm = dst->tfm;
char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
int ss = crypto_shash_statesize(dst_tfm);
memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);
return lkca_hash_duplicate(dst, src);
#endif
}
bool lkca_hash_all(const char* alg_name, const void *data,
size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
return false;
#else
int ret;
struct crypto_shash *alg;
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);
crypto_free_shash(alg);
return (ret == 0);
#endif
}
bool lkca_hmac_set_key(struct shash_desc *desc, const uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
return false;
#else
int ret;
ret = crypto_shash_setkey(desc->tfm, key, key_size);
if (ret == 0) {
ret = crypto_shash_init(desc);
}
return ret == 0;
#endif
}
bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
const uint8_t *data, size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
return false;
#else
int ret;
struct crypto_shash *alg;
alg = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(alg)) {
return false;
}
ret = crypto_shash_setkey(alg, key, key_size);
if (ret == 0){
ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);
}
crypto_free_shash(alg);
return (ret == 0);
#endif
}

View File

@@ -0,0 +1,456 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* libspdm_x509_verify_cert_chain, libspdm_x509_get_cert_from_cert_chain, check
* and prototypes taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
#endif
bool libspdm_x509_construct_certificate(const uint8_t *cert, size_t cert_size,
uint8_t **single_x509_cert)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_construct_certificate_stack(uint8_t **x509_stack, ...)
{
LIBSPDM_ASSERT(false);
return false;
}
void libspdm_x509_free(void *x509_cert)
{
LIBSPDM_ASSERT(false);
}
void libspdm_x509_stack_free(void *x509_stack)
{
LIBSPDM_ASSERT(false);
}
static bool lkca_asn1_get_tag(uint8_t const *ptr, uint8_t const *end,
size_t *length, uint32_t tag)
{
uint64_t max_len = end - ptr;
// Chain must be less than 1 GB
if ((max_len < 2) || (max_len > (1024 * 1024 * 1024))) {
return false;
}
// We only deal with universal and application tags
if (ptr[0] != tag) {
return false;
}
if (ptr[1] < 0x80) {
*length = ptr[1] + 2;
} else if (ptr[1] == 0x81) {
if (max_len < 3) {
return false;
}
*length = ptr[2] + 3;
} else if (ptr[1] == 0x82) {
if (max_len < 4) {
return false;
}
*length = (ptr[2] << 8) + ptr[3] + 4;
} else {
// Length encodings of three or more bytes (bodies larger than 64 KB) are not supported
return false;
}
if (*length > max_len) {
return false;
}
return true;
}
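
/*
 * Worked example of the DER length forms accepted above (tag = SEQUENCE, 0x30):
 *
 *     30 2A ...          short form:      *length = 0x2A   + 2 header bytes
 *     30 81 9F ...       long form (1B):  *length = 0x9F   + 3 header bytes
 *     30 82 01 F4 ...    long form (2B):  *length = 0x01F4 + 4 header bytes
 *
 * The returned *length therefore covers the full TLV, header included; bodies
 * that need three or more length bytes (over 64 KB) are rejected.
 */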
bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length,
uint32_t tag)
{
return lkca_asn1_get_tag(*ptr, end, length, tag);
}
bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_subject,
size_t *subject_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_common_name(const uint8_t *cert, size_t cert_size,
char *common_name,
size_t *common_name_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool
libspdm_x509_get_organization_name(const uint8_t *cert, size_t cert_size,
char *name_buffer,
size_t *name_buffer_size)
{
LIBSPDM_ASSERT(false);
return false;
}
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **rsa_context)
{
LIBSPDM_ASSERT(false);
return false;
}
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ec_context)
{
#ifdef USE_LKCA
bool ret = false;
uint32_t key_size = 0;
struct key_preparsed_payload lkca_cert;
struct public_key *pub;
if (cert == NULL) {
return false;
}
lkca_cert.data = cert;
lkca_cert.datalen = cert_size;
if(key_type_asymmetric.preparse(&lkca_cert)) {
return false;
}
pub = lkca_cert.payload.data[asym_crypto];
// Subtract 1 because LKCA prepends the 0x04 uncompressed-point marker to public keys
key_size = pub->keylen - 1;
if (key_size == (2 * 256 / 8)) {
*ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
} else if (key_size == (2 * 384 / 8)) {
*ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP384R1);
} else {
goto err;
}
if (*ec_context == NULL) {
goto err;
}
// Again skip the leading 0x04 byte so the key matches the SPDM wire format;
// it is added back during ECDSA verification
if (!lkca_ec_set_pub_key(*ec_context, (char *) pub->key + 1, key_size)) {
libspdm_ec_free(*ec_context);
goto err;
}
ret = true;
err:
key_type_asymmetric.free_preparse(&lkca_cert);
return ret;
#else
return false;
#endif
}
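
/*
 * For reference (sketch of the size check above): an uncompressed SEC1 EC
 * public key is 0x04 || X || Y, so LKCA reports keylen = 1 + 2 * field_size.
 * Subtracting the 0x04 marker gives key_size = 64 for P-256 and 96 for P-384,
 * which is what selects the SECP256R1 or SECP384R1 context.
 */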
bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **ecd_context)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
void **sm2_context)
{
LIBSPDM_ASSERT(false);
return false;
}
static int lkca_x509_verify_cert(const uint8_t *cert, size_t cert_size,
const uint8_t *ca_cert, size_t ca_cert_size)
{
#ifdef USE_LKCA
int ret;
struct key_preparsed_payload lkca_cert;
struct key_preparsed_payload lkca_ca_cert;
lkca_cert.data = cert;
lkca_cert.datalen = cert_size;
lkca_ca_cert.data = ca_cert;
lkca_ca_cert.datalen = ca_cert_size;
ret = key_type_asymmetric.preparse(&lkca_cert);
if (ret) {
return ret;
}
ret = key_type_asymmetric.preparse(&lkca_ca_cert);
if (ret) {
key_type_asymmetric.free_preparse(&lkca_cert);
return ret;
}
ret = public_key_verify_signature(lkca_ca_cert.payload.data[asym_crypto],
lkca_cert.payload.data[asym_auth]);
key_type_asymmetric.free_preparse(&lkca_cert);
key_type_asymmetric.free_preparse(&lkca_ca_cert);
return ret;
#else
return false;
#endif
}
bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
const uint8_t *ca_cert, size_t ca_cert_size)
{
return lkca_x509_verify_cert(cert, cert_size, ca_cert, ca_cert_size) == 0;
}
bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
const uint8_t *cert_chain, size_t cert_chain_length)
{
size_t preceding_cert_len;
const uint8_t *preceding_cert;
size_t current_cert_len;
const uint8_t *current_cert;
bool verify_flag;
int ret;
verify_flag = false;
preceding_cert = root_cert;
preceding_cert_len = root_cert_length;
current_cert = cert_chain;
/* Get the current certificate from the chain buffer and verify it against the preceding cert */
do {
if (!lkca_asn1_get_tag(
current_cert, cert_chain + cert_chain_length, &current_cert_len,
LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
break;
}
ret = lkca_x509_verify_cert(current_cert, current_cert_len,
preceding_cert, preceding_cert_len);
if (ret != 0) {
verify_flag = false;
break;
} else {
verify_flag = true;
}
preceding_cert = current_cert;
preceding_cert_len = current_cert_len;
current_cert = current_cert + current_cert_len;
} while (true);
return verify_flag;
}
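
/*
 * Sketch of the verification order above for a chain [certA][certB][certC]
 * (raw DER certificates laid out back to back):
 *
 *     root_cert verifies certA, certA verifies certB, certB verifies certC.
 *
 * The loop exits with verify_flag == true only if every certificate in the
 * buffer was successfully verified against its predecessor.
 */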
bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
size_t cert_chain_length,
const int32_t cert_index, const uint8_t **cert,
size_t *cert_length)
{
size_t asn1_len;
int32_t current_index;
size_t current_cert_len;
const uint8_t *current_cert;
current_cert_len = 0;
/* Check input parameters.*/
if ((cert_chain == NULL) || (cert == NULL) || (cert_index < -1) ||
(cert_length == NULL)) {
return false;
}
current_cert = cert_chain;
current_index = -1;
/* Traverse the certificate chain*/
while (true) {
/* Get asn1 tag len*/
if (!lkca_asn1_get_tag(
current_cert, cert_chain + cert_chain_length, &asn1_len,
LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
break;
}
current_cert_len = asn1_len;
current_index++;
if (current_index == cert_index) {
*cert = current_cert;
*cert_length = current_cert_len;
return true;
}
current_cert = current_cert + current_cert_len;
}
/* If cert_index is -1, return the last certificate */
if (cert_index == -1 && current_index >= 0) {
*cert = current_cert - current_cert_len;
*cert_length = current_cert_len;
return true;
}
return false;
}
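
/*
 * Example (illustrative sketch only): fetching the leaf certificate of a raw
 * DER certificate chain by passing cert_index == -1. The returned pointer
 * aliases 'chain'; no copy is made. This helper is hypothetical and not used
 * by this file.
 */
static bool __maybe_unused example_get_leaf_cert(const uint8_t *chain,
                                                 size_t chain_len,
                                                 const uint8_t **leaf,
                                                 size_t *leaf_len)
{
    return libspdm_x509_get_cert_from_cert_chain(chain, chain_len, -1,
                                                 leaf, leaf_len);
}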
bool libspdm_x509_get_tbs_cert(const uint8_t *cert, size_t cert_size,
uint8_t **tbs_cert, size_t *tbs_cert_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size,
size_t *version)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
uint8_t *serial_number,
size_t *serial_number_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_issuer,
size_t *issuer_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool
libspdm_x509_get_issuer_common_name(const uint8_t *cert, size_t cert_size,
char *common_name,
size_t *common_name_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool
libspdm_x509_get_issuer_orgnization_name(const uint8_t *cert, size_t cert_size,
char *name_buffer,
size_t *name_buffer_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
size_t cert_size, uint8_t *oid,
size_t *oid_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
const uint8_t *oid, size_t oid_size,
uint8_t *extension_data,
size_t *extension_data_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
uint8_t *from, size_t *from_size, uint8_t *to,
size_t *to_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size,
size_t *usage)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
size_t cert_size, uint8_t *usage,
size_t *usage_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
size_t cert_size,
uint8_t *basic_constraints,
size_t *basic_constraints_size)
{
LIBSPDM_ASSERT(false);
return false;
}
bool libspdm_x509_set_date_time(char const *date_time_str, void *date_time, size_t *date_time_size)
{
LIBSPDM_ASSERT(false);
return false;
}
int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2)
{
LIBSPDM_ASSERT(false);
return -3;
}
bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
uint8_t *requester_info, size_t requester_info_length,
void *context, char *subject_name,
size_t *csr_len, uint8_t **csr_pointer)
{
LIBSPDM_ASSERT(false);
return false;
}

View File

@@ -2691,3 +2691,17 @@ nvswitch_os_get_supported_register_events_params
*os_descriptor = NV_FALSE;
return NVL_SUCCESS;
}
NvlStatus
nvswitch_os_get_pid
(
NvU32 *pPid
)
{
if (pPid != NULL)
{
*pPid = task_pid_nr(current);
}
return NVL_SUCCESS;
}

View File

@@ -126,6 +126,19 @@ static void nvUvmFreeSafeStack(nvidia_stack_t *sp)
nv_kmem_cache_free_stack(sp);
}
static NV_STATUS nvUvmDestroyFaultInfoAndStacks(nvidia_stack_t *sp,
uvmGpuDeviceHandle device,
UvmGpuFaultInfo *pFaultInfo)
{
nv_kmem_cache_free_stack(pFaultInfo->replayable.cslCtx.nvidia_stack);
nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_bh_sp);
nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp);
return rm_gpu_ops_destroy_fault_info(sp,
(gpuDeviceHandle)device,
pFaultInfo);
}
NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo)
{
nvidia_stack_t *sp = NULL;
@@ -855,6 +868,7 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
{
nvidia_stack_t *sp = NULL;
NV_STATUS status;
int err;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
@@ -864,36 +878,48 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
status = rm_gpu_ops_init_fault_info(sp,
(gpuDeviceHandle)device,
pFaultInfo);
if (status != NV_OK)
{
goto done;
}
// Preallocate a stack for functions called from ISR top half
pFaultInfo->nonReplayable.isr_sp = NULL;
pFaultInfo->nonReplayable.isr_bh_sp = NULL;
if (status == NV_OK)
pFaultInfo->replayable.cslCtx.nvidia_stack = NULL;
// NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC.
// Therefore, the pointer can be NULL on success. Always use the
// returned error code to determine if the operation was successful.
err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp);
if (err)
{
// NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC.
// Therefore, the pointer can be NULL on success. Always use the
// returned error code to determine if the operation was successful.
int err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp);
if (!err)
{
err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp);
if (err)
{
nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp);
pFaultInfo->nonReplayable.isr_sp = NULL;
}
}
if (err)
{
rm_gpu_ops_destroy_fault_info(sp,
(gpuDeviceHandle)device,
pFaultInfo);
status = NV_ERR_NO_MEMORY;
}
goto error;
}
err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp);
if (err)
{
goto error;
}
// The cslCtx.ctx pointer is non-NULL only when Confidential Computing is enabled.
if (pFaultInfo->replayable.cslCtx.ctx != NULL)
{
err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->replayable.cslCtx.nvidia_stack);
if (err)
{
goto error;
}
}
goto done;
error:
nvUvmDestroyFaultInfoAndStacks(sp,
device,
pFaultInfo);
status = NV_ERR_NO_MEMORY;
done:
nv_kmem_cache_free_stack(sp);
return status;
}
@@ -949,23 +975,9 @@ NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device,
nvidia_stack_t *sp = nvUvmGetSafeStack();
NV_STATUS status;
// Free the preallocated stack for functions called from ISR
if (pFaultInfo->nonReplayable.isr_sp != NULL)
{
nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_sp);
pFaultInfo->nonReplayable.isr_sp = NULL;
}
if (pFaultInfo->nonReplayable.isr_bh_sp != NULL)
{
nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_bh_sp);
pFaultInfo->nonReplayable.isr_bh_sp = NULL;
}
status = rm_gpu_ops_destroy_fault_info(sp,
(gpuDeviceHandle)device,
pFaultInfo);
status = nvUvmDestroyFaultInfoAndStacks(sp,
device,
pFaultInfo);
nvUvmFreeSafeStack(sp);
return status;
}

View File

@@ -32,6 +32,17 @@ NVIDIA_SOURCES += nvidia/nv-msi.c
NVIDIA_SOURCES += nvidia/nv-caps.c
NVIDIA_SOURCES += nvidia/nv-frontend.c
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
NVIDIA_SOURCES += nvidia/libspdm_aead.c
NVIDIA_SOURCES += nvidia/libspdm_ecc.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf.c
NVIDIA_SOURCES += nvidia/libspdm_rand.c
NVIDIA_SOURCES += nvidia/libspdm_shash.c
NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c
NVIDIA_SOURCES += nvidia/libspdm_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c
NVIDIA_SOURCES += nvidia/libspdm_ec.c
NVIDIA_SOURCES += nvidia/libspdm_x509.c
NVIDIA_SOURCES += nvidia/nvlink_linux.c
NVIDIA_SOURCES += nvidia/nvlink_caps.c
NVIDIA_SOURCES += nvidia/linux_nvswitch.c

View File

@@ -222,6 +222,8 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_set_init
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_clear_init_cb
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_mem_from_gscco
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops

View File

@@ -46,6 +46,11 @@ NvlStatus nvlink_lib_unload(void);
*/
NvlStatus nvlink_lib_ioctl_ctrl(nvlink_ioctrl_params *ctrl_params);
/*
* Gets the number of devices of type deviceType
*/
NvlStatus nvlink_lib_return_device_count_by_type(NvU32 deviceType, NvU32 *numDevices);
#ifdef __cplusplus
}
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,9 @@ extern "C" {
#include "nvlink_common.h"
#define TOP_LEVEL_LOCKING_DISABLED 1
#define PER_LINK_LOCKING_DISABLED 1
#define NVLINK_FREE(x) nvlink_free((void *)x)
// Memory management functions

View File

@@ -0,0 +1,41 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
bool libspdm_aead_gcm_prealloc(void **context);
void libspdm_aead_free(void *context);
bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
uint8_t *tag_out, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *key, size_t key_size,
const uint8_t *iv, size_t iv_size,
const uint8_t *a_data, size_t a_data_size,
const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size);
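
/*
 * Example (sketch of the intended call sequence, hypothetical caller): the
 * context is allocated once and reused across encryptions. The key/IV/tag
 * sizes shown (32/12/16 bytes) are the usual AES-256-GCM choices and are
 * assumptions of this example, not requirements imposed by these prototypes.
 *
 *     void *ctx = NULL;
 *     uint8_t tag[16];
 *     size_t out_size = data_in_size;
 *
 *     if (!libspdm_aead_gcm_prealloc(&ctx))
 *         return false;
 *
 *     ok = libspdm_aead_aes_gcm_encrypt_prealloc(ctx,
 *                                                key, 32, iv, 12,
 *                                                aad, aad_size,
 *                                                data_in, data_in_size,
 *                                                tag, sizeof(tag),
 *                                                data_out, &out_size);
 *
 *     libspdm_aead_free(ctx);
 */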

View File

@@ -1787,6 +1787,10 @@ NV_STATUS NV_API_CALL os_numa_memblock_size
NvU64 *memblock_size
)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_memory_block_size_bytes
*memblock_size = memory_block_size_bytes();
return NV_OK;
#endif
if (nv_ctl_device.numa_memblock_size == 0)
return NV_ERR_INVALID_STATE;
*memblock_size = nv_ctl_device.numa_memblock_size;
@@ -2118,6 +2122,53 @@ void NV_API_CALL os_nv_cap_close_fd
nv_cap_close_fd(fd);
}
typedef struct os_numa_gpu_mem_hotplug_notifier_s
{
NvU64 start_pa;
NvU64 size;
nv_pci_info_t pci_info;
struct notifier_block memory_notifier;
} os_numa_gpu_mem_hotplug_notifier_t;
static int os_numa_verify_gpu_memory_zone(struct notifier_block *nb,
unsigned long action, void *data)
{
os_numa_gpu_mem_hotplug_notifier_t *notifier = container_of(nb,
os_numa_gpu_mem_hotplug_notifier_t,
memory_notifier);
struct memory_notify *mhp = data;
NvU64 start_pa = PFN_PHYS(mhp->start_pfn);
NvU64 size = PFN_PHYS(mhp->nr_pages);
if (action == MEM_GOING_ONLINE)
{
// Check if onlining memory falls in the GPU memory range
if ((start_pa >= notifier->start_pa) &&
(start_pa + size) <= (notifier->start_pa + notifier->size))
{
/*
* Verify that the GPU memory NUMA node onlines memory only into
* ZONE_MOVABLE. With an incorrect auto-online setting, the memory could
* be onlined into a zone where kernel allocations can land, which would
* make the GPU memory impossible to hot-unplug without a system reboot.
*/
if (page_zonenum((pfn_to_page(mhp->start_pfn))) != ZONE_MOVABLE)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Failing GPU memory onlining as the onlining zone "
"is not movable. pa: 0x%llx size: 0x%llx\n"
"NVRM: The NVIDIA GPU %04x:%02x:%02x.%x installed in the system\n"
"NVRM: requires auto onlining mode online_movable enabled in\n"
"NVRM: /sys/devices/system/memory/auto_online_blocks\n",
start_pa, size, notifier->pci_info.domain, notifier->pci_info.bus,
notifier->pci_info.slot, notifier->pci_info.function);
return NOTIFY_BAD;
}
}
}
return NOTIFY_OK;
}
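
/*
 * Note (sketch of the flow below): os_numa_add_gpu_memory() registers this
 * callback immediately before add_memory_driver_managed() and unregisters it
 * right after, so the MEM_GOING_ONLINE check above can veto (NOTIFY_BAD) any
 * attempt to online the GPU range into a non-movable zone.
 */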
NV_STATUS NV_API_CALL os_numa_add_gpu_memory
(
void *handle,
@@ -2129,8 +2180,16 @@ NV_STATUS NV_API_CALL os_numa_add_gpu_memory
#if defined(NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT)
int node = 0;
nv_linux_state_t *nvl = pci_get_drvdata(handle);
nv_state_t *nv = NV_STATE_PTR(nvl);
NvU64 base = offset + nvl->coherent_link_info.gpu_mem_pa;
int ret;
os_numa_gpu_mem_hotplug_notifier_t notifier =
{
.start_pa = base,
.size = size,
.pci_info = nv->pci_info,
.memory_notifier.notifier_call = os_numa_verify_gpu_memory_zone,
};
if (nodeId == NULL)
{
@@ -2149,21 +2208,31 @@ NV_STATUS NV_API_CALL os_numa_add_gpu_memory
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS);
ret = register_memory_notifier(&notifier.memory_notifier);
if (ret)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Memory hotplug notifier registration failed\n");
goto failed;
}
#ifdef NV_ADD_MEMORY_DRIVER_MANAGED_HAS_MHP_FLAGS_ARG
ret = add_memory_driver_managed(node, base, size, "System RAM (NVIDIA)", MHP_NONE);
#else
ret = add_memory_driver_managed(node, base, size, "System RAM (NVIDIA)");
#endif
unregister_memory_notifier(&notifier.memory_notifier);
if (ret == 0)
{
struct zone *zone = &NODE_DATA(node)->node_zones[ZONE_MOVABLE];
NvU64 start_pfn = base >> PAGE_SHIFT;
NvU64 end_pfn = (base + size) >> PAGE_SHIFT;
/* Verify that the full GPU memory range passed in was onlined */
if (zone->zone_start_pfn != start_pfn ||
zone_end_pfn(zone) != end_pfn)
{
nv_printf(NV_DBG_ERRORS, "GPU memory zone movable auto onlining failed!\n");
nv_printf(NV_DBG_ERRORS, "NVRM: GPU memory zone movable auto onlining failed!\n");
#ifdef NV_OFFLINE_AND_REMOVE_MEMORY_PRESENT
#ifdef NV_REMOVE_MEMORY_HAS_NID_ARG
if (offline_and_remove_memory(node, base, size) != 0)
@@ -2171,7 +2240,7 @@ NV_STATUS NV_API_CALL os_numa_add_gpu_memory
if (offline_and_remove_memory(base, size) != 0)
#endif
{
nv_printf(NV_DBG_ERRORS, "offline_and_remove_memory failed\n");
nv_printf(NV_DBG_ERRORS, "NVRM: offline_and_remove_memory failed\n");
}
#endif
goto failed;