mirror of
https://github.com/NVIDIA/open-gpu-kernel-modules.git
550.78
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -7,7 +7,7 @@ extern "C" {
 #endif
 
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -59,6 +59,12 @@ typedef struct
     NvU64 length;
     NvU64 flags;
     NvU64 submittedWorkId;   // Payload to poll for async completion
+
+    NvBool bSecureCopy;      // The copy encrypts/decrypts when copying to/from unprotected memory
+    NvBool bEncrypt;         // Toggle encrypt/decrypt
+    NvU64 authTagAddr;       // encryption authTag address. Same aperture as unencrypted operand assumed. 16 byte aligned
+    NvU64 encryptIvAddr;     // IV value that was used for encryption, requirements are the same as for authTagAddr. Required
+
 } CEUTILS_MEMCOPY_PARAMS;
 
 struct KernelChannel;
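The six lines added above extend CEUTILS_MEMCOPY_PARAMS for Confidential Computing copies. A minimal sketch of how a caller might fill the new fields for an encrypting copy out to unprotected memory (the helper, the values, and the trimmed stand-in struct are illustrative assumptions, not code from this commit):

    /* Stand-ins so the sketch compiles on its own; the real types come from nvtypes.h. */
    typedef unsigned long long NvU64;
    typedef unsigned char NvBool;
    #define NV_TRUE ((NvBool)1)

    typedef struct {
        NvU64  length;
        NvU64  flags;
        NvU64  submittedWorkId;
        NvBool bSecureCopy;
        NvBool bEncrypt;
        NvU64  authTagAddr;
        NvU64  encryptIvAddr;
    } SKETCH_MEMCOPY_PARAMS; /* trimmed copy of CEUTILS_MEMCOPY_PARAMS above */

    /* Both side-band addresses must be 16-byte aligned and sit in the same
     * aperture as the unencrypted operand, per the field comments above. */
    static void fillSecureCopyParams(SKETCH_MEMCOPY_PARAMS *p, NvU64 size,
                                     NvU64 authTagVa, NvU64 ivVa)
    {
        p->length        = size;
        p->flags         = 0;
        p->bSecureCopy   = NV_TRUE; /* crossing the protected-memory boundary */
        p->bEncrypt      = NV_TRUE; /* encrypt on the way to unprotected memory */
        p->authTagAddr   = authTagVa;
        p->encryptIvAddr = ivVa;
    }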
@@ -90,8 +96,6 @@ struct CeUtils {
     struct Object *__nvoc_pbase_Object;
     struct CeUtils *__nvoc_pbase_CeUtils;
     NvHandle hClient;
-    NvHandle hDevice;
-    NvHandle hSubdevice;
     OBJCHANNEL *pChannel;
     struct OBJGPU *pGpu;
     struct KernelCE *pKCe;
@@ -7,7 +7,7 @@ extern "C" {
 #endif
 
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,6 +39,7 @@ extern "C" {
 #include "gpu/spdm/spdm.h"
 #include "ctrl/ctrl2080/ctrl2080spdm.h"
 #include "ctrl/ctrl2080/ctrl2080internal.h"
+#include "ctrl/ctrlc56f.h"
 #include "cc_drv.h"
 #include "conf_compute/cc_keystore.h"
 #include "kernel/gpu/fifo/kernel_channel.h"
@@ -154,12 +155,16 @@ struct ConfidentialCompute {
     NvU32 keyRotationEnableMask;
     KEY_ROTATION_STATS_INFO lowerThreshold;
     KEY_ROTATION_STATS_INFO upperThreshold;
+    NvU64 attackerAdvantage;
     NvU8 PRIVATE_FIELD(m_exportMasterKey)[32];
     void *PRIVATE_FIELD(m_keySlot);
     KEY_ROTATION_STATUS PRIVATE_FIELD(keyRotationState)[62];
     KEY_ROTATION_STATS_INFO PRIVATE_FIELD(aggregateStats)[62];
     KEY_ROTATION_STATS_INFO PRIVATE_FIELD(freedChannelAggregateStats)[62];
     PTMR_EVENT PRIVATE_FIELD(ppKeyRotationTimer)[62];
+    NvU64 PRIVATE_FIELD(keyRotationLimitDelta);
+    NvU64 PRIVATE_FIELD(keyRotationUpperLimit);
+    NvU64 PRIVATE_FIELD(keyRotationLowerLimit);
 };
 
 #ifndef __NVOC_CLASS_ConfidentialCompute_TYPEDEF__
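The new attackerAdvantage field ties the security policy into key rotation: confComputeSetKeyRotationThreshold (declared further down in this diff) derives the private keyRotationLowerLimit/keyRotationUpperLimit from it, and the per-key aggregateStats are compared against those limits. A toy model of that comparison, assuming the stats carry simple usage counters (the actual KEY_ROTATION_STATS_INFO layout lives in the ctrl2080 headers, not here):

    typedef unsigned long long NvU64; /* stand-ins for the nvtypes */
    typedef unsigned char NvBool;

    typedef struct { /* assumed shape, for illustration only */
        NvU64 totalBytesEncrypted;
        NvU64 totalBytesDecrypted;
    } TOY_KEY_ROTATION_STATS;

    /* Rotation becomes mandatory once either direction crosses the upper limit. */
    static NvBool toyUpperThresholdCrossed(const TOY_KEY_ROTATION_STATS *pStats,
                                           NvU64 upperLimit)
    {
        return (pStats->totalBytesEncrypted >= upperLimit) ||
               (pStats->totalBytesDecrypted >= upperLimit);
    }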
@@ -711,6 +716,39 @@ static inline NV_STATUS confComputeUpdateFreedChannelStats(struct OBJGPU *pGpu,
 #define confComputeUpdateFreedChannelStats(pGpu, pConfCompute, pKernelChannel) confComputeUpdateFreedChannelStats_IMPL(pGpu, pConfCompute, pKernelChannel)
 #endif //__nvoc_conf_compute_h_disabled
 
+NV_STATUS confComputeSetKeyRotationThreshold_IMPL(struct ConfidentialCompute *pConfCompute, NvU64 attackerAdvantage);
+
+#ifdef __nvoc_conf_compute_h_disabled
+static inline NV_STATUS confComputeSetKeyRotationThreshold(struct ConfidentialCompute *pConfCompute, NvU64 attackerAdvantage) {
+    NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_conf_compute_h_disabled
+#define confComputeSetKeyRotationThreshold(pConfCompute, attackerAdvantage) confComputeSetKeyRotationThreshold_IMPL(pConfCompute, attackerAdvantage)
+#endif //__nvoc_conf_compute_h_disabled
+
+NvBool confComputeIsUpperThresholdCrossed_IMPL(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+
+#ifdef __nvoc_conf_compute_h_disabled
+static inline NvBool confComputeIsUpperThresholdCrossed(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo) {
+    NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_conf_compute_h_disabled
+#define confComputeIsUpperThresholdCrossed(pConfCompute, pStatsInfo) confComputeIsUpperThresholdCrossed_IMPL(pConfCompute, pStatsInfo)
+#endif //__nvoc_conf_compute_h_disabled
+
+NvBool confComputeIsLowerThresholdCrossed_IMPL(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+
+#ifdef __nvoc_conf_compute_h_disabled
+static inline NvBool confComputeIsLowerThresholdCrossed(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo) {
+    NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_conf_compute_h_disabled
+#define confComputeIsLowerThresholdCrossed(pConfCompute, pStatsInfo) confComputeIsLowerThresholdCrossed_IMPL(pConfCompute, pStatsInfo)
+#endif //__nvoc_conf_compute_h_disabled
+
 #undef PRIVATE_FIELD
 
 #ifndef NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
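Together these three additions form the public surface of the new threshold logic. A hypothetical call sequence using only names declared above (the enclosing function and the source of pStatsInfo are assumptions):

    /* Hypothetical driver-side sequence; error paths trimmed. */
    static NV_STATUS sketchApplyPolicy(struct ConfidentialCompute *pConfCompute,
                                       NvU64 attackerAdvantage,
                                       const KEY_ROTATION_STATS_INFO *pStatsInfo)
    {
        NV_STATUS status = confComputeSetKeyRotationThreshold(pConfCompute, attackerAdvantage);
        if (status != NV_OK)
            return status;

        if (confComputeIsUpperThresholdCrossed(pConfCompute, pStatsInfo))
        {
            /* rotation must happen now */
        }
        else if (confComputeIsLowerThresholdCrossed(pConfCompute, pStatsInfo))
        {
            /* rotation is approaching; notify clients */
        }
        return NV_OK;
    }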
@@ -750,6 +788,16 @@ NV_STATUS NVOC_PRIVATE_FUNCTION(confComputeKeyStoreUpdateKey)(struct Confidentia
 #undef confComputeKeyStoreUpdateKey_HAL
 NV_STATUS NVOC_PRIVATE_FUNCTION(confComputeKeyStoreUpdateKey_HAL)(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
 
+#ifndef __nvoc_conf_compute_h_disabled
+#undef confComputeIsUpperThresholdCrossed
+NvBool NVOC_PRIVATE_FUNCTION(confComputeIsUpperThresholdCrossed)(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+#endif //__nvoc_conf_compute_h_disabled
+
+#ifndef __nvoc_conf_compute_h_disabled
+#undef confComputeIsLowerThresholdCrossed
+NvBool NVOC_PRIVATE_FUNCTION(confComputeIsLowerThresholdCrossed)(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+#endif //__nvoc_conf_compute_h_disabled
+
 #endif // NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
@@ -303,6 +303,7 @@ struct OBJGPUMGR {
     GPU_HANDLE_ID gpuHandleIDList[32];
     NvU32 numGpuHandles;
     CONF_COMPUTE_CAPS ccCaps;
+    NvU64 ccAttackerAdvantage;
    pcieP2PCapsInfoList pcieP2PCapsInfoCache;
     void *pcieP2PCapsInfoLock;
 };
@@ -83,14 +83,22 @@ static NvBool __nvoc_thunk_KernelCE_engstateIsPresent(OBJGPU *pGpu, struct OBJEN
     return kceIsPresent(pGpu, (struct KernelCE *)(((unsigned char *)pKCe) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
 }
 
-static NV_STATUS __nvoc_thunk_KernelCE_engstateStateLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
-    return kceStateLoad(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg2);
+static NV_STATUS __nvoc_thunk_KernelCE_engstateStateInitLocked(OBJGPU *arg0, struct OBJENGSTATE *arg1) {
+    return kceStateInitLocked(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
 }
 
+static NV_STATUS __nvoc_thunk_KernelCE_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKCe, NvU32 flags) {
+    return kceStateUnload(pGpu, (struct KernelCE *)(((unsigned char *)pKCe) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), flags);
+}
+
+static NV_STATUS __nvoc_thunk_KernelCE_engstateStateLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
+    return kceStateLoad(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg2);
+}
+
 static void __nvoc_thunk_KernelCE_engstateStateDestroy(OBJGPU *arg0, struct OBJENGSTATE *arg1) {
     kceStateDestroy(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
 }
 
 static void __nvoc_thunk_KernelCE_intrservRegisterIntrService(OBJGPU *arg0, struct IntrService *arg1, IntrServiceRecord arg2[171]) {
     kceRegisterIntrService(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_IntrService.offset), arg2);
 }
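Every thunk above recovers the containing KernelCE from an embedded base-class pointer by subtracting a fixed offset, which is what the __nvoc_rtti_*.offset arithmetic does. A standalone model of that cast (the structs here are simplified stand-ins, not the real NVOC definitions):

    #include <stddef.h>
    #include <stdio.h>

    struct OBJENGSTATE { int dummy; };

    struct KernelCE {
        int header;
        struct OBJENGSTATE engstate; /* embedded base, like __nvoc_base_OBJENGSTATE */
    };

    int main(void)
    {
        struct KernelCE ce;
        struct OBJENGSTATE *pEng = &ce.engstate;

        /* Equivalent of:
         * (struct KernelCE *)((unsigned char *)pEng - __nvoc_rtti_KernelCE_OBJENGSTATE.offset) */
        struct KernelCE *pCe =
            (struct KernelCE *)((unsigned char *)pEng - offsetof(struct KernelCE, engstate));

        printf("recovered container %p, expected %p\n", (void *)pCe, (void *)&ce);
        return 0;
    }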
@@ -99,10 +107,6 @@ static NV_STATUS __nvoc_thunk_KernelCE_intrservServiceNotificationInterrupt(OBJG
     return kceServiceNotificationInterrupt(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_IntrService.offset), arg2);
 }
 
-static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStateInitLocked(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
-}
-
 static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreLoad(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0);
 }
@@ -111,10 +115,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePostUnload(POBJGPU pGpu, struc
     return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0);
 }
 
-static void __nvoc_thunk_OBJENGSTATE_kceStateDestroy(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
-}
-
 static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreUnload(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0);
 }
@@ -172,6 +172,17 @@ void __nvoc_init_dataField_KernelCE(KernelCE *pThis, RmHalspecOwner *pRmhalspeco
     PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
     PORT_UNREFERENCED_VARIABLE(rmVariantHal);
     PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+
+    // Hal field -- bCcFipsSelfTestRequired
+    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+    {
+        pThis->bCcFipsSelfTestRequired = ((NvBool)(0 == 0));
+    }
+    // default
+    else
+    {
+        pThis->bCcFipsSelfTestRequired = ((NvBool)(0 != 0));
+    }
 }
 
 NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
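The generated condition packs the chip variant index into a two-level bitmask: `>> 5` selects the 32-entry word and `1UL << (idx & 0x1f)` selects the bit inside it, so word 1 with mask 0x10000000 (bit 28) matches only index 32 + 28 = 60, which the generated comment labels GH100. A standalone check of that arithmetic (the tie between index 60 and GH100 is inferred from the mask and comment, not taken from NVOC headers):

    #include <stdio.h>

    int main(void)
    {
        /* Same test as the generated code, swept over all 64 indices. */
        for (unsigned idx = 0; idx < 64; idx++) {
            if (((idx >> 5) == 1UL) && ((1UL << (idx & 0x1f)) & 0x10000000UL))
                printf("mask matches HalVarIdx %u\n", idx); /* prints 60 */
        }
        return 0;
    }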
@@ -210,13 +221,17 @@ static void __nvoc_init_funcTable_KernelCE_1(KernelCE *pThis, RmHalspecOwner *pR
     // Hal function -- kceIsPresent
     pThis->__kceIsPresent__ = &kceIsPresent_IMPL;
 
-    // Hal function -- kceStateLoad
-    pThis->__kceStateLoad__ = &kceStateLoad_GP100;
+    pThis->__kceStateInitLocked__ = &kceStateInitLocked_IMPL;
 
+    // Hal function -- kceStateUnload
+    // default
+    pThis->__kceStateUnload__ = &kceStateUnload_56cd7a;
 
+    // Hal function -- kceStateLoad
+    pThis->__kceStateLoad__ = &kceStateLoad_GP100;
 
     pThis->__kceStateDestroy__ = &kceStateDestroy_IMPL;
 
     pThis->__kceRegisterIntrService__ = &kceRegisterIntrService_IMPL;
 
     pThis->__kceServiceNotificationInterrupt__ = &kceServiceNotificationInterrupt_IMPL;
@@ -436,22 +451,22 @@ static void __nvoc_init_funcTable_KernelCE_1(KernelCE *pThis, RmHalspecOwner *pR
 
     pThis->__nvoc_base_OBJENGSTATE.__engstateIsPresent__ = &__nvoc_thunk_KernelCE_engstateIsPresent;
 
-    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelCE_engstateStateLoad;
+    pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelCE_engstateStateInitLocked;
+
+    pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelCE_engstateStateUnload;
+
+    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelCE_engstateStateLoad;
 
     pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelCE_engstateStateDestroy;
 
     pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_KernelCE_intrservRegisterIntrService;
 
     pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = &__nvoc_thunk_KernelCE_intrservServiceNotificationInterrupt;
 
-    pThis->__kceStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kceStateInitLocked;
-
     pThis->__kceStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreLoad;
 
     pThis->__kceStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kceStatePostUnload;
 
-    pThis->__kceStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kceStateDestroy;
-
     pThis->__kceStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreUnload;
 
     pThis->__kceStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kceStateInitUnlocked;
@@ -115,8 +115,10 @@ struct KernelCE {
     struct KernelCE *__nvoc_pbase_KernelCE;
     NV_STATUS (*__kceConstructEngine__)(OBJGPU *, struct KernelCE *, ENGDESCRIPTOR);
     NvBool (*__kceIsPresent__)(OBJGPU *, struct KernelCE *);
-    NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE *, NvU32);
+    NV_STATUS (*__kceStateInitLocked__)(OBJGPU *, struct KernelCE *);
     NV_STATUS (*__kceStateUnload__)(OBJGPU *, struct KernelCE *, NvU32);
+    NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE *, NvU32);
     void (*__kceStateDestroy__)(OBJGPU *, struct KernelCE *);
     void (*__kceRegisterIntrService__)(OBJGPU *, struct KernelCE *, IntrServiceRecord *);
     NV_STATUS (*__kceServiceNotificationInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceNotificationInterruptArguments *);
     NV_STATUS (*__kceGetP2PCes__)(struct KernelCE *, OBJGPU *, NvU32, NvU32 *);
@@ -136,10 +138,8 @@ struct KernelCE {
     NvU32 (*__kceGetGrceSupportedLceMask__)(OBJGPU *, struct KernelCE *);
     NvBool (*__kceIsGenXorHigherSupported__)(OBJGPU *, struct KernelCE *, NvU32);
     void (*__kceApplyGen4orHigherMapping__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32, NvU32);
-    NV_STATUS (*__kceStateInitLocked__)(POBJGPU, struct KernelCE *);
     NV_STATUS (*__kceStatePreLoad__)(POBJGPU, struct KernelCE *, NvU32);
     NV_STATUS (*__kceStatePostUnload__)(POBJGPU, struct KernelCE *, NvU32);
-    void (*__kceStateDestroy__)(POBJGPU, struct KernelCE *);
     NV_STATUS (*__kceStatePreUnload__)(POBJGPU, struct KernelCE *, NvU32);
     NV_STATUS (*__kceStateInitUnlocked__)(POBJGPU, struct KernelCE *);
     void (*__kceInitMissing__)(POBJGPU, struct KernelCE *);
@@ -156,6 +156,7 @@ struct KernelCE {
     NvBool bIsAutoConfigEnabled;
     NvBool bUseGen4Mapping;
     struct IoAperture aperture;
+    NvBool bCcFipsSelfTestRequired;
 };
 
 #ifndef __NVOC_CLASS_KernelCE_TYPEDEF__
@@ -191,10 +192,12 @@ NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
 #define kceConstructEngine(pGpu, pKCe, arg0) kceConstructEngine_DISPATCH(pGpu, pKCe, arg0)
 #define kceIsPresent(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
 #define kceIsPresent_HAL(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
-#define kceStateLoad(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
-#define kceStateLoad_HAL(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
+#define kceStateInitLocked(arg0, arg1) kceStateInitLocked_DISPATCH(arg0, arg1)
 #define kceStateUnload(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
 #define kceStateUnload_HAL(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
+#define kceStateLoad(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
+#define kceStateLoad_HAL(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
 #define kceStateDestroy(arg0, arg1) kceStateDestroy_DISPATCH(arg0, arg1)
 #define kceRegisterIntrService(arg0, arg1, arg2) kceRegisterIntrService_DISPATCH(arg0, arg1, arg2)
 #define kceServiceNotificationInterrupt(arg0, arg1, arg2) kceServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
 #define kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
@@ -231,10 +234,8 @@ NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
 #define kceIsGenXorHigherSupported_HAL(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
 #define kceApplyGen4orHigherMapping(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
 #define kceApplyGen4orHigherMapping_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
-#define kceStateInitLocked(pGpu, pEngstate) kceStateInitLocked_DISPATCH(pGpu, pEngstate)
 #define kceStatePreLoad(pGpu, pEngstate, arg0) kceStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
 #define kceStatePostUnload(pGpu, pEngstate, arg0) kceStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
-#define kceStateDestroy(pGpu, pEngstate) kceStateDestroy_DISPATCH(pGpu, pEngstate)
 #define kceStatePreUnload(pGpu, pEngstate, arg0) kceStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
 #define kceStateInitUnlocked(pGpu, pEngstate) kceStateInitUnlocked_DISPATCH(pGpu, pEngstate)
 #define kceInitMissing(pGpu, pEngstate) kceInitMissing_DISPATCH(pGpu, pEngstate)
@@ -366,10 +367,10 @@ static inline NvBool kceIsPresent_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe)
     return pKCe->__kceIsPresent__(pGpu, pKCe);
 }
 
-NV_STATUS kceStateLoad_GP100(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2);
+NV_STATUS kceStateInitLocked_IMPL(OBJGPU *arg0, struct KernelCE *arg1);
 
-static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) {
-    return arg1->__kceStateLoad__(arg0, arg1, arg2);
+static inline NV_STATUS kceStateInitLocked_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1) {
+    return arg1->__kceStateInitLocked__(arg0, arg1);
 }
 
 static inline NV_STATUS kceStateUnload_56cd7a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) {
@@ -380,6 +381,18 @@ static inline NV_STATUS kceStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelCE *p
     return pKCe->__kceStateUnload__(pGpu, pKCe, flags);
 }
 
+NV_STATUS kceStateLoad_GP100(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2);
+
+static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) {
+    return arg1->__kceStateLoad__(arg0, arg1, arg2);
+}
+
+void kceStateDestroy_IMPL(OBJGPU *arg0, struct KernelCE *arg1);
+
+static inline void kceStateDestroy_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1) {
+    arg1->__kceStateDestroy__(arg0, arg1);
+}
+
 void kceRegisterIntrService_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[171]);
 
 static inline void kceRegisterIntrService_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[171]) {
@@ -574,10 +587,6 @@ static inline void kceApplyGen4orHigherMapping_DISPATCH(OBJGPU *pGpu, struct Ker
     pCe->__kceApplyGen4orHigherMapping__(pGpu, pCe, arg0, arg1, arg2, arg3);
 }
 
-static inline NV_STATUS kceStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    return pEngstate->__kceStateInitLocked__(pGpu, pEngstate);
-}
-
 static inline NV_STATUS kceStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return pEngstate->__kceStatePreLoad__(pGpu, pEngstate, arg0);
 }
@@ -586,10 +595,6 @@ static inline NV_STATUS kceStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelC
     return pEngstate->__kceStatePostUnload__(pGpu, pEngstate, arg0);
 }
 
-static inline void kceStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    pEngstate->__kceStateDestroy__(pGpu, pEngstate);
-}
-
 static inline NV_STATUS kceStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return pEngstate->__kceStatePreUnload__(pGpu, pEngstate, arg0);
 }
@@ -7,7 +7,7 @@ extern "C" {
 #endif
 
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -337,6 +337,9 @@ typedef struct OBJCHANNEL
     // Used by Partition Scrubber
     KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
     NvHandle hPartitionRef;
+
+    NvBool bSecure;
+
 } OBJCHANNEL, *POBJCHANNEL;
 
 #define NV_METHOD(SubCh, Method, Num) \
File diff suppressed because it is too large
@@ -641,6 +641,7 @@ struct Subdevice {
     NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeRotateKeys__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeSetGpuState__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS *);
+    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalInitUserSharedData__)(struct Subdevice *, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalUserSharedDataSetDataPoll__)(struct Subdevice *, NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalGspStartTrace__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_GSP_START_TRACE_INFO_PARAMS *);
@@ -1296,6 +1297,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
 #define subdeviceCtrlCmdInternalConfComputeRotateKeys(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeRotateKeys_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalConfComputeSetGpuState(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeSetGpuState_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalInitUserSharedData(pSubdevice, pParams) subdeviceCtrlCmdInternalInitUserSharedData_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalUserSharedDataSetDataPoll(pSubdevice, pParams) subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalGspStartTrace(pSubdevice, pParams) subdeviceCtrlCmdInternalGspStartTrace_DISPATCH(pSubdevice, pParams)
@@ -4587,6 +4589,12 @@ static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeSetGpuState_DISPATCH(
     return pSubdevice->__subdeviceCtrlCmdInternalConfComputeSetGpuState__(pSubdevice, pParams);
 }
 
+NV_STATUS subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS *pParams);
+
+static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS *pParams) {
+    return pSubdevice->__subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy__(pSubdevice, pParams);
+}
+
 NV_STATUS subdeviceCtrlCmdInternalInitUserSharedData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *pParams);
 
 static inline NV_STATUS subdeviceCtrlCmdInternalInitUserSharedData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *pParams) {
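The subdevice control plumbing follows the same generated shape as the KernelCE changes earlier: a per-object function pointer, an _IMPL that does the work, and an inline _DISPATCH that routes through the pointer set up by the funcTable init. A standalone model of the pattern (simplified stand-in types, not the real Subdevice):

    #include <stdio.h>

    typedef int NV_STATUS;
    #define NV_OK 0

    struct Subdevice {
        NV_STATUS (*__setSecurityPolicy__)(struct Subdevice *, int *);
    };

    static NV_STATUS setSecurityPolicy_IMPL(struct Subdevice *pSubdevice, int *pParams)
    {
        (void)pSubdevice;
        *pParams = 1; /* pretend we applied the policy */
        return NV_OK;
    }

    static inline NV_STATUS setSecurityPolicy_DISPATCH(struct Subdevice *pSubdevice, int *pParams)
    {
        return pSubdevice->__setSecurityPolicy__(pSubdevice, pParams);
    }

    int main(void)
    {
        struct Subdevice sd = { setSecurityPolicy_IMPL }; /* funcTable init step */
        int params = 0;
        printf("status=%d params=%d\n", setSecurityPolicy_DISPATCH(&sd, &params), params);
        return 0;
    }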