535.43.09

This commit is contained in:
russellcnv
2023-09-01 21:36:45 -07:00
parent 18b7303c54
commit 17546dbdda
122 changed files with 41587 additions and 34584 deletions

View File

@@ -50,22 +50,6 @@
using namespace DisplayPort;
// These wrappers are specifically for DSC PPS library malloc and free callbacks
// Pointers to these functions are populated into dscMalloc/dscFree in DSC_InitializeCallback, which is called from both DPLib and HDMiPacketLib.
// In the HDMI case the malloc/free callback needs a client handle, so to match that function prototype these wrappers are added for the DP case.
extern "C" void * dpMallocCb(const void *clientHandle, NvLength size);
extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr);
extern "C" void * dpMallocCb(const void *clientHandle, NvLength size)
{
return dpMalloc(size);
}
extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr)
{
dpFree(pMemPtr);
}
ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink)
: main(main),
auxBus(auxBus),
@@ -158,14 +142,6 @@ ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Co
hal->applyRegkeyOverrides(dpRegkeyDatabase);
highestAssessedLC = getMaxLinkConfig();
// Initialize DSC callbacks
DSC_CALLBACK callback;
callback.clientHandle = NULL;
callback.dscPrint = NULL;
callback.dscMalloc = dpMallocCb;
callback.dscFree = dpFreeCb;
DSC_InitializeCallback(callback);
}
void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase)
@@ -1309,10 +1285,13 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth;
warData.connectorType = DSC_DP;
DSC_GENERATE_PPS_OPAQUE_WORKAREA *pScratchBuffer = nullptr;
pScratchBuffer = (DSC_GENERATE_PPS_OPAQUE_WORKAREA*) dpMalloc(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));
result = DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
&warData, availableBandwidthBitsPerSecond,
(NvU32*)(PPS),
(NvU32*)(&bitsPerPixelX16));
(NvU32*)(&bitsPerPixelX16), pScratchBuffer);
// Try max DSC compression bpp = 8 once to check whether it can support that mode.
if (result != NVT_STATUS_SUCCESS && !bDscBppForced)
@@ -1324,7 +1303,13 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
result = DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
&warData, availableBandwidthBitsPerSecond,
(NvU32*)(PPS),
(NvU32*)(&bitsPerPixelX16));
(NvU32*)(&bitsPerPixelX16), pScratchBuffer);
}
if (pScratchBuffer)
{
dpFree(pScratchBuffer);
pScratchBuffer = nullptr;
}
if (result != NVT_STATUS_SUCCESS)
@@ -1614,10 +1599,21 @@ nonDscDpIMP:
warData.dpData.dpMode = DSC_DP_SST;
warData.connectorType = DSC_DP;
if ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
&warData, availableBandwidthBitsPerSecond,
(NvU32*)(PPS),
(NvU32*)(&bitsPerPixelX16))) != NVT_STATUS_SUCCESS)
DSC_GENERATE_PPS_OPAQUE_WORKAREA *pScratchBuffer = nullptr;
pScratchBuffer = (DSC_GENERATE_PPS_OPAQUE_WORKAREA*)dpMalloc(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));
bool bPpsFailure = ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
&warData, availableBandwidthBitsPerSecond,
(NvU32*)(PPS),
(NvU32*)(&bitsPerPixelX16),
pScratchBuffer)) != NVT_STATUS_SUCCESS);
if (pScratchBuffer)
{
dpFree(pScratchBuffer);
pScratchBuffer = nullptr;
}
if (bPpsFailure)
{
compoundQueryResult = false;
pDscParams->bEnableDsc = false;

View File

@@ -43,18 +43,18 @@
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/VK535_87-128"
#define NV_BUILD_CHANGELIST_NUM (33195052)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/VK535_87-129"
#define NV_BUILD_CHANGELIST_NUM (33257513)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r535/VK535_87-128"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33195052)
#define NV_BUILD_NAME "rel/gpu_drv/r535/VK535_87-129"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33257513)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "VK535_87-8"
#define NV_BUILD_CHANGELIST_NUM (33190458)
#define NV_BUILD_BRANCH_VERSION "VK535_87-9"
#define NV_BUILD_CHANGELIST_NUM (33257513)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "537.16"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33190458)
#define NV_BUILD_NAME "537.35"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33257513)
#define NV_BUILD_BRANCH_BASE_VERSION R535
#endif
// End buildmeister python edited section

View File

@@ -158,6 +158,7 @@ static const PNPVendorId PNPVendorIds[] =
{ "CSE", _VENDOR_NAME_ENTRY("Compu Shack") },
{ "CSI", _VENDOR_NAME_ENTRY("Cabletron") },
{ "CSS", _VENDOR_NAME_ENTRY("CSS Laboratories") },
{ "CSW", _VENDOR_NAME_ENTRY("China Star Optoelectronics Technology Co., Ltd") },
{ "CTN", _VENDOR_NAME_ENTRY("Computone") },
{ "CTX", _VENDOR_NAME_ENTRY("Chuntex/CTX") },
{ "CUB", _VENDOR_NAME_ENTRY("Cubix") },

View File

@@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "535.43.08"
#define NV_VERSION_STRING "535.43.09"
#else

View File

@@ -110,7 +110,18 @@
#define NV_PFALCON_FALCON_DMEMC_AINCW 24:24 /* RWIVF */
#define NV_PFALCON_FALCON_DMEMC_AINCW_TRUE 0x00000001 /* RW--V */
#define NV_PFALCON_FALCON_DMEMC_AINCW_FALSE 0x00000000 /* RW--V */
#define NV_PFALCON_FALCON_DMEMC_AINCR 25:25 /* RWIVF */
#define NV_PFALCON_FALCON_DMEMC_AINCR_TRUE 0x00000001 /* RW--V */
#define NV_PFALCON_FALCON_DMEMC_AINCR_FALSE 0x00000000 /* RW--V */
#define NV_PFALCON_FALCON_DMEMD(i) (0x000001c4+(i)*8) /* RW-4A */
#define NV_PFALCON_FALCON_DMEMD_DATA 31:0 /* RW-VF */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(i) (0x00000300+(i)*4) /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0__SIZE_1 4 /* */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(i) (0x00000310+(i)*4) /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1__SIZE_1 4 /* */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(i) (0x00000320+(i)*4) /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2__SIZE_1 4 /* */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(i) (0x00000330+(i)*4) /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3__SIZE_1 4 /* */
#endif // __tu102_dev_falcon_v4_h__

View File

@@ -181,24 +181,6 @@ static const NVHDMIPKT_CLASS_HIERARCHY hierarchy[] =
},
};
#if defined(DSC_CALLBACK_MODIFIED)
// Callbacks for DSC PPS library
void *hdmipktMallocCb(const void *clientHandle, NvLength size);
void hdmipktFreeCb(const void *clientHandle, void *pMemPtr);
void *hdmipktMallocCb(const void *clientHandle, NvLength size)
{
const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle);
return pClass->callback.malloc(pClass->cbHandle, size);
}
void hdmipktFreeCb(const void *clientHandle, void *pMemPtr)
{
const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle);
pClass->callback.free(pClass->cbHandle, pMemPtr);
}
#endif // DSC_CALLBACK_MODIFIED
/********************************** HDMI Library interfaces *************************************/
/*
* NvHdmiPkt_PacketCtrl
@@ -581,15 +563,6 @@ NvHdmiPkt_InitializeLibrary(NvU32 const hwClass,
// 2. Constructor calls
result = NvHdmiPkt_CallConstructors(thisClassId, pClass);
#if defined(DSC_CALLBACK_MODIFIED)
DSC_CALLBACK callbacks;
NVMISC_MEMSET(&callbacks, 0, sizeof(DSC_CALLBACK));
callbacks.clientHandle = pClass;
callbacks.dscMalloc = hdmipktMallocCb;
callbacks.dscFree = hdmipktFreeCb;
DSC_InitializeCallback(callbacks);
#endif // DSC_CALLBACK_MODIFIED
NvHdmiPkt_InitializeLibrary_exit:
if (result)
{

View File

@@ -1168,18 +1168,29 @@ frlQuery_Success:
NvU64 availableLinkBw = (NvU64)(frlBitRateGbps) * (NvU64)(numLanes) * MULTIPLIER_1G;
warData.connectorType = DSC_HDMI;
DSC_GENERATE_PPS_OPAQUE_WORKAREA *pDscScratchBuffer = NULL;
pDscScratchBuffer = (DSC_GENERATE_PPS_OPAQUE_WORKAREA*)pThis->callback.malloc(pThis->cbHandle,
sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));
if ((DSC_GeneratePPS(&dscInfo,
&dscModesetInfo,
&warData,
availableLinkBw,
pFRLConfig->dscInfo.pps,
&bitsPerPixelX16)) != NVT_STATUS_SUCCESS)
&bitsPerPixelX16,
pDscScratchBuffer)) != NVT_STATUS_SUCCESS)
{
NvHdmiPkt_Print(pThis, "ERROR - DSC PPS calculation failed.");
NvHdmiPkt_Assert(0);
result = NVHDMIPKT_FAIL;
}
if (pDscScratchBuffer != NULL)
{
pThis->callback.free(pThis->cbHandle, pDscScratchBuffer);
pDscScratchBuffer = NULL;
}
// DSC lib should honor the bpp setting passed from client, assert here just in case
NvHdmiPkt_Assert(bitsPerPixelX16 == pFRLConfig->dscInfo.bitsPerPixelX16);
}

View File

@@ -33,20 +33,19 @@
#include "nvt_dsc_pps.h"
#include "nvmisc.h"
#include "displayport/displayport.h"
#include "nvctassert.h"
#include <stddef.h>
/* ------------------------ Macros ----------------------------------------- */
#if defined (DEBUG)
#define DSC_Print(...) \
do { \
if (callbacks.dscPrint) { \
callbacks.dscPrint("DSC: " __VA_ARGS__); \
} \
} while(0)
#else
//
// The DSC_Print macro was for debugging during early development of the
// DSC PPS library. The print statements are no longer logged by any
// client logger, but the print lines in this file are still useful when
// browsing the code, so DSC_Print is intentionally left as a stub
// definition to help the reader understand the PPS code.
//
#define DSC_Print(...) do { } while(0)
#endif
#define MIN_CHECK(s,a,b) { if((a)<(b)) { DSC_Print("%s (=%u) needs to be larger than %u",s,a,b); return (NVT_STATUS_ERR);} }
#define RANGE_CHECK(s,a,b,c) { if((((NvS32)(a))<(NvS32)(b))||(((NvS32)(a))>(NvS32)(c))) { DSC_Print("%s (=%u) needs to be between %u and %u",s,a,b,c); return (NVT_STATUS_ERR);} }
@@ -171,9 +170,21 @@ typedef struct
NvU32 flatness_det_thresh;
} DSC_OUTPUT_PARAMS;
/* ------------------------ Global Variables ------------------------------- */
//
// Opaque scratch space is passed in by the client for use in DSC calculations.
// An internal struct is used to cast the input buffer into the in/out params
// that the DSC PPS calculation functions work with.
//
typedef struct _DSC_GENERATE_PPS_WORKAREA
{
DSC_INPUT_PARAMS in;
DSC_OUTPUT_PARAMS out;
} DSC_GENERATE_PPS_WORKAREA;
DSC_CALLBACK callbacks;
// Compile time check to ensure Opaque workarea buffer size always covers required work area.
ct_assert(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA) >= sizeof(DSC_GENERATE_PPS_WORKAREA));
/* ------------------------ Global Variables ------------------------------- */
static const NvU8 minqp444_8b[15][37]={
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
@@ -396,8 +407,6 @@ static const NvU32 rcBufThresh[] = { 896, 1792, 2688, 3584, 4480, 5376, 6272, 67
/* ------------------------ Static Variables ------------------------------- */
/* ------------------------ Private Functions Prototype--------------------- */
static void * DSC_Malloc(NvLength size);
static void DSC_Free(void * ptr);
static NvU32
DSC_GetHigherSliceCount
(
@@ -1586,19 +1595,11 @@ static NVT_STATUS
DSC_PpsDataGen
(
const DSC_INPUT_PARAMS *in,
NvU32 out[DSC_MAX_PPS_SIZE_DWORD]
DSC_OUTPUT_PARAMS *pPpsOut,
NvU32 out[DSC_MAX_PPS_SIZE_DWORD]
)
{
NVT_STATUS ret;
DSC_OUTPUT_PARAMS *pPpsOut;
pPpsOut = (DSC_OUTPUT_PARAMS *)DSC_Malloc(sizeof(DSC_OUTPUT_PARAMS));
if (pPpsOut == NULL)
{
DSC_Print("ERROR - Memory allocation error.");
ret = NVT_STATUS_NO_MEMORY;
goto done;
}
NVMISC_MEMSET(pPpsOut, 0, sizeof(DSC_OUTPUT_PARAMS));
ret = DSC_PpsCalc(in, pPpsOut);
@@ -1612,44 +1613,9 @@ DSC_PpsDataGen
/* fall through */
done:
DSC_Free(pPpsOut);
return ret;
}
/*
* @brief Allocates memory for requested size
*
* @param[in] size Size to be allocated
*
* @returns Pointer to allocated memory
*/
static void *
DSC_Malloc(NvLength size)
{
#if defined(DSC_CALLBACK_MODIFIED)
return (callbacks.dscMalloc)(callbacks.clientHandle, size);
#else
return (callbacks.dscMalloc)(size);
#endif // DSC_CALLBACK_MODIFIED
}
/*
* @brief Frees dynamically allocated memory
*
* @param[in] ptr Pointer to a memory to be deallocated
*
*/
static void
DSC_Free(void * ptr)
{
#if defined(DSC_CALLBACK_MODIFIED)
(callbacks.dscFree)(callbacks.clientHandle, ptr);
#else
(callbacks.dscFree)(ptr);
#endif // DSC_CALLBACK_MODIFIED
}
/*
* @brief Validate the input parameters received from the caller of this function
*
@@ -1992,19 +1958,26 @@ DSC_GeneratePPS
const WAR_DATA *pWARData,
NvU64 availableBandwidthBitsPerSecond,
NvU32 pps[DSC_MAX_PPS_SIZE_DWORD],
NvU32 *pBitsPerPixelX16
NvU32 *pBitsPerPixelX16,
DSC_GENERATE_PPS_OPAQUE_WORKAREA *pOpaqueWorkarea
)
{
DSC_INPUT_PARAMS *in = NULL;
DSC_INPUT_PARAMS *in = NULL;
DSC_OUTPUT_PARAMS *out = NULL;
DSC_GENERATE_PPS_WORKAREA *pWorkarea = NULL;
NVT_STATUS ret = NVT_STATUS_ERR;
if ((!pDscInfo) || (!pModesetInfo) || (!pBitsPerPixelX16))
if ((!pDscInfo) || (!pModesetInfo) || (!pBitsPerPixelX16) || (!pOpaqueWorkarea))
{
DSC_Print("ERROR - Invalid parameter.");
ret = NVT_STATUS_INVALID_PARAMETER;
goto done;
}
pWorkarea = (DSC_GENERATE_PPS_WORKAREA*)(pOpaqueWorkarea);
in = &pWorkarea->in;
out = &pWorkarea->out;
ret = _validateInput(pDscInfo, pModesetInfo, pWARData, availableBandwidthBitsPerSecond);
if (ret != NVT_STATUS_SUCCESS)
{
@@ -2013,14 +1986,6 @@ DSC_GeneratePPS
goto done;
}
in = (DSC_INPUT_PARAMS *)DSC_Malloc(sizeof(DSC_INPUT_PARAMS));
if (in == NULL)
{
DSC_Print("ERROR - Memory allocation error.");
ret = NVT_STATUS_NO_MEMORY;
goto done;
}
NVMISC_MEMSET(in, 0, sizeof(DSC_INPUT_PARAMS));
in->bits_per_component = pModesetInfo->bitsPerComponent;
@@ -2277,42 +2242,11 @@ DSC_GeneratePPS
}
}
ret = DSC_PpsDataGen(in, pps);
ret = DSC_PpsDataGen(in, out, pps);
*pBitsPerPixelX16 = in->bits_per_pixel;
/* fall through */
done:
DSC_Free(in);
return ret;
}
/*
* @brief Initializes callbacks for print and assert
*
* @param[in] callback DSC callbacks
*
* @returns NVT_STATUS_SUCCESS if successful;
* NVT_STATUS_ERR if unsuccessful;
*/
NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback)
{
// if callbacks are initialized already, return nothing to do
if (callbacks.dscMalloc && callbacks.dscFree)
{
return NVT_STATUS_SUCCESS;
}
#if defined(DSC_CALLBACK_MODIFIED)
callbacks.clientHandle = callback.clientHandle;
#endif // DSC_CALLBACK_MODIFIED
callbacks.dscPrint = NULL;
callbacks.dscMalloc = callback.dscMalloc;
callbacks.dscFree = callback.dscFree;
#if defined (DEBUG)
callbacks.dscPrint = callback.dscPrint;
#endif
return NVT_STATUS_SUCCESS;
}

View File

@@ -43,27 +43,6 @@
/* ------------------------ Datatypes -------------------------------------- */
#define DSC_CALLBACK_MODIFIED 1
#if defined(DSC_CALLBACK_MODIFIED)
typedef struct
{
// DSC - Callbacks
const void* clientHandle; // ClientHandle is only used when calling into HDMI lib's mallocCb/freeCb
void (*dscPrint) (const char* fmtstring, ...);
void *(*dscMalloc)(const void *clientHandle, NvLength size);
void (*dscFree) (const void *clientHandle, void * ptr);
} DSC_CALLBACK;
#else
typedef struct
{
// DSC - Callbacks
void (*dscPrint) (const char* fmtstring, ...);
void *(*dscMalloc)(NvLength size);
void (*dscFree) (void * ptr);
} DSC_CALLBACK;
#endif // DSC_CALLBACK_MODIFIED
typedef struct
{
NvU32 versionMajor;
@@ -278,6 +257,16 @@ typedef struct
}dpData;
} WAR_DATA;
//
// DSC PPS calculations need a large scratch buffer to work with, which can be
// too big for the stack on some platforms. This buffer therefore needs to be
// allocated on the heap rather than as a local stack variable. Clients are
// expected to pre-allocate the buffer and pass it in to the DSC PPS interface.
//
typedef struct {
NvU8 data[512U]; // an upper bound on the combined size of DSC_INPUT_PARAMS and DSC_OUTPUT_PARAMS
} DSC_GENERATE_PPS_OPAQUE_WORKAREA;
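For illustration only (not part of this header or the diff), a minimal client-side sketch of the new calling convention, mirroring the DisplayPort call sites earlier in this commit: the client heap-allocates the opaque workarea, passes it to DSC_GeneratePPS, and frees it afterwards. The allocator (dpMalloc/dpFree) and the surrounding variables (dscInfo, modesetInfoDSC, warData, PPS, bitsPerPixelX16) are assumed to be set up as in those call sites.

// Illustrative client-side call sequence: allocate the opaque workarea on the
// heap, pass it to DSC_GeneratePPS, then free it.
DSC_GENERATE_PPS_OPAQUE_WORKAREA *pScratch =
    (DSC_GENERATE_PPS_OPAQUE_WORKAREA *)dpMalloc(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));
NVT_STATUS status = NVT_STATUS_NO_MEMORY;
if (pScratch != NULL)
{
    status = DSC_GeneratePPS(&dscInfo, &modesetInfoDSC, &warData,
                             availableBandwidthBitsPerSecond,
                             (NvU32 *)(PPS), (NvU32 *)(&bitsPerPixelX16),
                             pScratch);
    dpFree(pScratch);
    pScratch = NULL;
}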
/*
* Windows testbed compiles are done with warnings as errors
* with the maximum warning level. Here we turn off some
@@ -292,16 +281,6 @@ typedef struct
#ifdef __cplusplus
extern "C" {
#endif
/*
* @brief Initializes callbacks for print and assert
*
* @param[in] callback DSC callbacks
*
* @returns NVT_STATUS_SUCCESS if successful;
* NVT_STATUS_ERR if unsuccessful;
*/
NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback);
/*
* @brief Calculate PPS parameters based on passed down Sink,
* GPU capability and modeset info
@@ -323,7 +302,8 @@ NVT_STATUS DSC_GeneratePPS(const DSC_INFO *pDscInfo,
const WAR_DATA *pWARData,
NvU64 availableBandwidthBitsPerSecond,
NvU32 pps[DSC_MAX_PPS_SIZE_DWORD],
NvU32 *pBitsPerPixelX16);
NvU32 *pBitsPerPixelX16,
DSC_GENERATE_PPS_OPAQUE_WORKAREA *pOpaqueWorkarea);
#ifdef __cplusplus
}

View File

@@ -136,6 +136,15 @@ typedef volatile struct _clcba2_tag0 {
#define NVCBA2_ERROR_OS_APPLICATION (0x0000000D)
#define NVCBA2_ERROR_INVALID_CTXSW_REQUEST (0x0000000E)
#define NVCBA2_ERROR_BUFFER_OVERFLOW (0x0000000F)
#define NVCBA2_ERROR_IV_OVERFLOW (0x00000010)
#define NVCBA2_ERROR_INTERNAL_SETUP_FAILURE (0x00000011)
#define NVCBA2_ERROR_DECRYPT_COPY_INTERNAL_DMA_FAILURE (0x00000012)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_INTERNAL_DMA_FAILURE (0x00000013)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_HMAC_CALC_FAILURE (0x00000014)
#define NVCBA2_ERROR_NONCE_OVERFLOW (0x00000015)
#define NVCBA2_ERROR_AES_GCM_DECRYPTION_FAILURE (0x00000016)
#define NVCBA2_ERROR_SEMAPHORE_RELEASE_INTERNAL_DMA_FAILURE (0x00000017)
#define NVCBA2_ERROR_KEY_DERIVATION_FAILURE (0x00000018)
#define NVCBA2_ERROR_SCRUBBER_FAILURE (0x00000019)
#define NVCBA2_ERROR_SCRUBBER_INVALD_ADDRESS (0x0000001a)
#define NVCBA2_ERROR_SCRUBBER_INSUFFICIENT_PERMISSIONS (0x0000001b)

View File

@@ -793,6 +793,37 @@ typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
typedef struct NV2080_CTRL_INTERNAL_NV_RANGE {
NV_DECLARE_ALIGNED(NvU64 lo, 8);
NV_DECLARE_ALIGNED(NvU64 hi, 8);
} NV2080_CTRL_INTERNAL_NV_RANGE;
/*!
* NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS
*
* This structure specifies a target swizz-id and mem_range to update
*
* swizzId[IN]
* - Targeted swizz-id for which the memRange is being set
*
* memAddrRange[IN]
* - Memory Range for given GPU instance
*/
#define NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID (0x43U)
typedef struct NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS {
NvU32 swizzId;
NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NV_RANGE memAddrRange, 8);
} NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS;
#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a44) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID (0x44U)
typedef NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS;
#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a43) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */
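For illustration only (not part of the diff), a hedged sketch of populating the new parameter structure before issuing the control call; issueSubdeviceCtrl() is a hypothetical dispatch helper, and the swizz-id and address-range values are placeholders.

NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS params = {0};
params.swizzId = 0;                    // placeholder: target GPU instance swizz-id
params.memAddrRange.lo = 0x0;          // placeholder: start of the memory range
params.memAddrRange.hi = 0xFFFFFFFF;   // placeholder: end of the memory range

// issueSubdeviceCtrl() is hypothetical; the real dispatch path depends on whether
// the caller issues the kernel (KMIGMGR) or physical (MIGMGR) variant of the command.
NV_STATUS status = issueSubdeviceCtrl(NV2080_CTRL_CMD_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE,
                                      &params, sizeof(params));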
/**
* Get GR PDB properties synchronized between Kernel and Physical
*
@@ -1512,11 +1543,6 @@ typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS {
#define NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID 15
typedef struct NV2080_CTRL_INTERNAL_NV_RANGE {
NV_DECLARE_ALIGNED(NvU64 lo, 8);
NV_DECLARE_ALIGNED(NvU64 hi, 8);
} NV2080_CTRL_INTERNAL_NV_RANGE;
#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID (0x60U)
typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS {

View File

@@ -60,9 +60,6 @@ typedef struct RM_GSP_SPDM_CC_INIT_CTX {
NvU64_ALIGN32 dmaAddr; // The address RM allocates in SYS memory or FB memory.
NvU32 rmBufferSizeInByte; // The memory size allocated by RM (excluding NV_SPDM_DESC_HEADER)
} RM_GSP_SPDM_CC_INIT_CTX;
typedef struct RM_GSP_SPDM_CC_INIT_CTX *PRM_GSP_SPDM_CC_INIT_CTX;

View File

@@ -120,7 +120,8 @@
#define ALI_TRAINING_FAIL (136)
#define NVLINK_FLA_PRIV_ERR (137)
#define ROBUST_CHANNEL_DLA_ERROR (138)
#define ROBUST_CHANNEL_LAST_ERROR (ROBUST_CHANNEL_DLA_ERROR)
#define ROBUST_CHANNEL_FAST_PATH_ERROR (139)
#define ROBUST_CHANNEL_LAST_ERROR (ROBUST_CHANNEL_FAST_PATH_ERROR)
// Indexed CE reference

View File

@@ -14,7 +14,10 @@ static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
} vgpu_aliases[] = {
{ 0x20B5, 0x1642, 0x20B5, 0x1533 },
{ 0x20B8, 0x1581, 0x20B5, 0x1533 },
{ 0x20B7, 0x1804, 0x20B7, 0x1532 },
{ 0x20B7, 0x1852, 0x20B7, 0x1532 },
{ 0x20B9, 0x157F, 0x20B7, 0x1532 },
{ 0x20FD, 0x17F8, 0x20F5, 0x0 },
{ 0x2330, 0x16C0, 0x2330, 0x16C1 },
};

View File

@@ -0,0 +1,147 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LIBOS_V2_CRASHCAT_H
#define LIBOS_V2_CRASHCAT_H
#include "nv-crashcat.h"
#include "nv-crashcat-decoder.h"
// libosv2 implements the CrashCat V1 protocol with the following implementation-defined bits
typedef enum
{
LibosPanicReasonUnspecified = 0x00,
LibosPanicReasonUnrecoverableTaskCrash = 0x01,
LibosPanicReasonUnhandledState = 0x02,
LibosPanicReasonInvalidConfiguration = 0x03,
LibosPanicReasonFatalHardwareError = 0x04,
LibosPanicReasonInsufficientResources = 0x05,
LibosPanicReasonTimeout = 0x06,
LibosPanicReasonEnvCallFailed = 0x07,
LibosPanicReasonSspStackCheckFailed = 0x08,
LibosPanicReasonAsanMemoryError = 0x09,
LibosPanicReasonTest = 0x0a,
LibosPanicReasonProgrammingError = 0x0b,
LibosPanicReasonDebugAssertionFailed = 0x0c,
LibosPanicReasonCount
} LibosPanicReason;
// NV_CRASHCAT_REPORT_IMPLEMENTER_SIGNATURE (bits 63:0) - "LIBOS2.0"
#define NV_CRASHCAT_REPORT_IMPLEMENTER_SIGNATURE_LIBOS2 (0x4C49424F53322E30ull)
// NV_CRASHCAT_REPORT_V1_REPORTER_ID_IMPL_DEF (bits 63:24)
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_LIBOS2_TASK_ID 31:24
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_LIBOS2_TASK_ID_UNSPECIFIED 0xFF
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_LIBOS2_RESERVED 63:32
static NV_INLINE
void crashcatReportV1SetReporterLibos2TaskId(NvCrashCatReport_V1 *pReport, NvU8 task_id)
{
pReport->reporterId = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _LIBOS2_TASK_ID,
task_id, pReport->reporterId);
}
static NV_INLINE
NvU8 crashcatReportV1ReporterLibos2TaskId(NvCrashCatReport_V1 *pReport)
{
return (NvU8)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _LIBOS2_TASK_ID, pReport->reporterId);
}
// NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION (bits 31:0)
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION_LIBOS2_CL 23:0
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION_LIBOS2_MINOR 27:24
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION_LIBOS2_MAJOR 31:28
static NV_INLINE
void crashcatReportV1SetReporterVersionLibos2(NvCrashCatReport_V1 *pReport, NvU32 cl)
{
pReport->reporterData = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_DATA,
_VERSION_LIBOS2_MAJOR, 2, pReport->reporterData);
pReport->reporterData = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_DATA,
_VERSION_LIBOS2_MINOR, 0, pReport->reporterData);
pReport->reporterData = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_DATA,
_VERSION_LIBOS2_CL, cl, pReport->reporterData);
}
static NV_INLINE
NvU32 crashcatReportV1ReporterVersionLibos2Cl(NvCrashCatReport_V1 *pReport)
{
return DRF_VAL(_CRASHCAT, _REPORT_V1_REPORTER_DATA_VERSION, _LIBOS2_CL,
crashcatReportV1ReporterVersion(pReport));
}
static NV_INLINE
NvU8 crashcatReportV1ReporterVersionLibos2Minor(NvCrashCatReport_V1 *pReport)
{
return (NvU8)DRF_VAL(_CRASHCAT, _REPORT_V1_REPORTER_DATA_VERSION, _LIBOS2_MINOR,
crashcatReportV1ReporterVersion(pReport));
}
static NV_INLINE
NvU8 crashcatReportV1ReporterVersionLibos2Major(NvCrashCatReport_V1 *pReport)
{
return (NvU8)DRF_VAL(_CRASHCAT, _REPORT_V1_REPORTER_DATA_VERSION, _LIBOS2_MAJOR,
crashcatReportV1ReporterVersion(pReport));
}
// NV_CRASHCAT_REPORT_V1_SOURCE_ID_IMPL_DEF (63:24)
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_LIBOS2_TASK_ID 31:24
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_LIBOS2_TASK_ID_UNSPECIFIED 0xFF
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_LIBOS2_RESERVED 63:32
static NV_INLINE
void crashcatReportV1SetSourceLibos2TaskId(NvCrashCatReport_V1 *pReport, NvU8 task_id)
{
pReport->sourceId = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _LIBOS2_TASK_ID, task_id,
pReport->sourceId);
}
static NV_INLINE
NvU8 crashcatReportV1SourceLibos2TaskId(NvCrashCatReport_V1 *pReport)
{
return (NvU8)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _LIBOS2_TASK_ID, pReport->sourceId);
}
// NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_IMPL_DEF (63:32)
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_LIBOS2_REASON 39:32
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_LIBOS2_RESERVED 63:40
ct_assert(LibosPanicReasonCount <=
NVBIT(DRF_SIZE(NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_LIBOS2_REASON)));
static NV_INLINE
void crashcatReportV1SetSourceCauseLibos2Reason(NvCrashCatReport_V1 *pReport,
LibosPanicReason reason)
{
pReport->sourceCause = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_SOURCE_CAUSE, _LIBOS2_REASON,
reason, pReport->sourceCause);
}
static NV_INLINE
LibosPanicReason crashcatReportV1SourceCauseLibos2Reason(NvCrashCatReport_V1 *pReport)
{
return (LibosPanicReason)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_CAUSE, _LIBOS2_REASON,
pReport->sourceCause);
}
#endif // LIBOS_V2_CRASHCAT_H

View File

@@ -0,0 +1,244 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_CRASHCAT_DECODER_H
#define NV_CRASHCAT_DECODER_H
#include "nv-crashcat.h"
// This header defines decoder/consumer-side implementation helpers for the CrashCat protocol
//
// Wayfinder L0 Bitfield Accessors
//
static NV_INLINE
NvBool crashcatWayfinderL0Valid(NvCrashCatWayfinderL0_V1 wfl0)
{
return FLD_TEST_DRF(_CRASHCAT, _WAYFINDER_L0, _SIGNATURE, _VALID, wfl0);
}
static NV_INLINE
NV_CRASHCAT_WAYFINDER_VERSION crashcatWayfinderL0Version(NvCrashCatWayfinderL0_V1 wfl0)
{
return (NV_CRASHCAT_WAYFINDER_VERSION)DRF_VAL(_CRASHCAT, _WAYFINDER_L0, _VERSION, wfl0);
}
static NV_INLINE
NV_CRASHCAT_SCRATCH_GROUP_ID crashcatWayfinderL0V1Wfl1Location(NvCrashCatWayfinderL0_V1 wfl0)
{
return (NV_CRASHCAT_SCRATCH_GROUP_ID)DRF_VAL(_CRASHCAT, _WAYFINDER_L0_V1, _WFL1_LOCATION, wfl0);
}
//
// Wayfinder L1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_MEM_APERTURE crashcatWayfinderL1V1QueueAperture(NvCrashCatWayfinderL1_V1 wfl1)
{
return (NV_CRASHCAT_MEM_APERTURE)DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_APERTURE, wfl1);
}
static NV_INLINE
NvLength crashcatWayfinderL1V1QueueSize(NvCrashCatWayfinderL1_V1 wfl1)
{
NvU8 unitShift;
NV_CRASHCAT_MEM_UNIT_SIZE unitSize =
(NV_CRASHCAT_MEM_UNIT_SIZE)DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_UNIT_SIZE, wfl1);
switch (unitSize)
{
case NV_CRASHCAT_MEM_UNIT_SIZE_1KB: unitShift = 10; break;
case NV_CRASHCAT_MEM_UNIT_SIZE_4KB: unitShift = 12; break;
case NV_CRASHCAT_MEM_UNIT_SIZE_64KB: unitShift = 16; break;
default: return 0;
}
// Increment size, since the size in the header is size - 1 (queue of 0 size is not encodable)
return (NvLength)((DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_SIZE, wfl1) + 1) << unitShift);
}
static NV_INLINE
NvU64 crashcatWayfinderL1V1QueueOffset(NvCrashCatWayfinderL1_V1 wfl1)
{
return DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_OFFSET_1KB, wfl1) << 10;
}
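For illustration (not part of this header), a hedged consumer-side sketch that puts the wayfinder accessors above together to locate the crash report queue; readScratch32(), wfl1RegLo(), and wfl1RegHi() are assumed helpers for reading Peregrine scratch registers and mapping a scratch group ID to its register offsets.

// Hypothetical consumer-side decode of the V1 wayfinders (helper functions are assumptions).
static NV_INLINE NvBool exampleLocateCrashQueue(NvU32 wfl0RegOffset)
{
    NvCrashCatWayfinderL0_V1 wfl0 = readScratch32(wfl0RegOffset);

    if (!crashcatWayfinderL0Valid(wfl0) ||
        (crashcatWayfinderL0Version(wfl0) != NV_CRASHCAT_WAYFINDER_VERSION_1))
        return NV_FALSE;

    NV_CRASHCAT_SCRATCH_GROUP_ID loc = crashcatWayfinderL0V1Wfl1Location(wfl0);

    // The first two registers of the scratch group compose the 64-bit WFL1 value:
    // the lower-address register supplies bits 31:0, the higher-address register bits 63:32.
    NvCrashCatWayfinderL1_V1 wfl1 =
        ((NvU64)readScratch32(wfl1RegHi(loc)) << 32) | readScratch32(wfl1RegLo(loc));

    NV_CRASHCAT_MEM_APERTURE aperture   = crashcatWayfinderL1V1QueueAperture(wfl1);
    NvU64                    queueOffset = crashcatWayfinderL1V1QueueOffset(wfl1);
    NvLength                 queueSize   = crashcatWayfinderL1V1QueueSize(wfl1);

    // ... map the queue through 'aperture' at queueOffset/queueSize and poll put/get ...
    return NV_TRUE;
}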
//
// CrashCat Packet Header (Unversioned) Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_PACKET_FORMAT_VERSION crashcatPacketHeaderFormatVersion(NvCrashCatPacketHeader hdr)
{
return (NV_CRASHCAT_PACKET_FORMAT_VERSION)DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _FORMAT_VERSION,
hdr);
}
static NV_INLINE
NvLength crashcatPacketHeaderPayloadSize(NvCrashCatPacketHeader hdr)
{
NvU8 unitShift;
NV_CRASHCAT_MEM_UNIT_SIZE unitSize =
(NV_CRASHCAT_MEM_UNIT_SIZE)DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _PAYLOAD_UNIT_SIZE, hdr);
switch (unitSize)
{
case NV_CRASHCAT_MEM_UNIT_SIZE_8B: unitShift = 3; break;
case NV_CRASHCAT_MEM_UNIT_SIZE_1KB: unitShift = 10; break;
case NV_CRASHCAT_MEM_UNIT_SIZE_4KB: unitShift = 12; break;
case NV_CRASHCAT_MEM_UNIT_SIZE_64KB: unitShift = 16; break;
default: return 0;
}
// Increment size, since the size in the header is size - 1 (payload of 0 size is not encodable)
return (NvLength)((DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _PAYLOAD_SIZE, hdr) + 1) << unitShift);
}
static NV_INLINE
NvBool crashcatPacketHeaderValid(NvCrashCatPacketHeader hdr)
{
return (FLD_TEST_DRF64(_CRASHCAT, _PACKET_HEADER, _SIGNATURE, _VALID, hdr) &&
(crashcatPacketHeaderFormatVersion(hdr) > 0) &&
(crashcatPacketHeaderFormatVersion(hdr) <= NV_CRASHCAT_PACKET_FORMAT_VERSION_LAST) &&
(crashcatPacketHeaderPayloadSize(hdr) > 0));
}
//
// CrashCat Packet Header (V1) Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_PACKET_TYPE crashcatPacketHeaderV1Type(NvCrashCatPacketHeader_V1 hdr)
{
return (NV_CRASHCAT_PACKET_TYPE)DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _V1_TYPE, hdr);
}
//
// CrashCat Report V1 Bitfield Accessors
//
static NV_INLINE
NvCrashCatNvriscvPartition crashcatReportV1ReporterPartition(NvCrashCatReport_V1 *pReport)
{
return (NvCrashCatNvriscvPartition)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID,
_NVRISCV_PARTITION, pReport->reporterId);
}
static NV_INLINE
NvCrashCatNvriscvUcodeId crashcatReportV1ReporterUcodeId(NvCrashCatReport_V1 *pReport)
{
return (NvCrashCatNvriscvUcodeId)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _NVRISCV_UCODE_ID,
pReport->reporterId);
}
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatReportV1ReporterMode(NvCrashCatReport_V1 *pReport)
{
return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _RISCV_MODE,
pReport->reporterId);
}
static NV_INLINE
NvU32 crashcatReportV1ReporterVersion(NvCrashCatReport_V1 *pReport)
{
return (NvU32)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_DATA, _VERSION, pReport->reporterData);
}
static NV_INLINE
NvU32 crashcatReportV1ReporterTimestamp(NvCrashCatReport_V1 *pReport)
{
return (NvU32)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_DATA, _TIMESTAMP, pReport->reporterData);
}
static NV_INLINE
NvCrashCatNvriscvPartition crashcatReportV1SourcePartition(NvCrashCatReport_V1 *pReport)
{
return (NvCrashCatNvriscvPartition)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID,
_NVRISCV_PARTITION, pReport->sourceId);
}
static NV_INLINE
NvCrashCatNvriscvUcodeId crashcatReportV1SourceUcodeId(NvCrashCatReport_V1 *pReport)
{
return (NvCrashCatNvriscvUcodeId)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _NVRISCV_UCODE_ID,
pReport->sourceId);
}
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatReportV1SourceMode(NvCrashCatReport_V1 *pReport)
{
return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _RISCV_MODE,
pReport->sourceId);
}
static NV_INLINE
NV_CRASHCAT_CAUSE_TYPE crashcatReportV1SourceCauseType(NvCrashCatReport_V1 *pReport)
{
return (NV_CRASHCAT_CAUSE_TYPE)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_CAUSE, _TYPE,
pReport->sourceCause);
}
//
// CrashCat RISC-V 64-bit CSR State V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatRiscv64CsrStateV1Mode(NvCrashCatRiscv64CsrState_V1 *pRiscv64CsrState)
{
return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _RISCV64_CSR_STATE_V1_HEADER, _RISCV_MODE,
pRiscv64CsrState->header);
}
//
// CrashCat RISC-V 64-bit GPR State V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatRiscv64GprStateV1Mode(NvCrashCatRiscv64GprState_V1 *pRiscv64GprState)
{
return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _RISCV64_GPR_STATE_V1_HEADER, _RISCV_MODE,
pRiscv64GprState->header);
}
//
// CrashCat RISC-V 64-bit Trace V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatRiscv64TraceV1Mode(NvCrashCatRiscv64Trace_V1 *pRiscv64Trace)
{
return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _RISCV64_TRACE_V1_HEADER, _RISCV_MODE,
pRiscv64Trace->header);
}
static NV_INLINE
NV_CRASHCAT_TRACE_TYPE crashcatRiscv64TraceV1Type(NvCrashCatRiscv64Trace_V1 *pRiscv64Trace)
{
return (NV_CRASHCAT_TRACE_TYPE)DRF_VAL64(_CRASHCAT, _RISCV64_TRACE_V1_HEADER, _TRACE_TYPE,
pRiscv64Trace->header);
}
//
// CrashCat 32-bit I/O State V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_IO_APERTURE crashcatIo32StateV1Aperture(NvCrashCatIo32State_V1 *pIo32State)
{
return (NV_CRASHCAT_IO_APERTURE)DRF_VAL64(_CRASHCAT, _IO32_STATE_V1_HEADER, _APERTURE,
pIo32State->header);
}
#endif // NV_CRASHCAT_DECODER_H

View File

@@ -0,0 +1,861 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_CRASHCAT_H
#define NV_CRASHCAT_H
#include "nvtypes.h"
#include "nvmisc.h"
#include "nvctassert.h"
//
// NVIDIA CrashCat - Crash Reporting And Signaling Helpers for Peregrine
//
// When a crash occurs on a Peregrine core, NVIDIA firmware may report additional data for post-
// mortem analysis of the crash. The protocol is described in greater detail elsewhere, but in
// general is defined to be a multi-producer/single-consumer flow as follows:
//
// 1. Peregrine writes a wayfinder to a set of predefined registers to indicate a crash report is
// in progress, and its general location (first crash only)
// 2. Peregrine writes crash report data in a tagged format to a circular queue accessible to both
// reporter and consumer of the crash reports.
// 3. Peregrine completes the wayfinder by updating a put pointer to indicate the crash report is
// complete.
// 4. Peregrine raises a beacon interrupt to the consumer to signal the presence of a crash report.
//
// This header contains the shared type and bitfield definitions that are common to both producer
// and consumer sides of the CrashCat protocol.
//
#define NV_CRASHCAT_SIGNATURE 0xdead
//
// CrashCat Wayfinder Protocol Versions
// A new version is created when backward-incompatible changes are made (the wayfinders and queue
// control cannot be handled by software written for a prior version).
//
// This version indicates the handling sequence and format of the wayfinder registers, except for
// the 16-bit signature and (this) 4-bit version number in the L0 wayfinder.
//
typedef enum {
NV_CRASHCAT_WAYFINDER_VERSION_1 = 0x01,
NV_CRASHCAT_WAYFINDER_VERSION_LAST = 0x01,
} NV_CRASHCAT_WAYFINDER_VERSION;
//
// CrashCat Packet Format Versions
// A new version is created when backward-incompatible changes are made (packets cannot be handled
// by software written for a prior version).
//
// This version indicates the format of the upper 32 bits of the packet header, and, along with the
// NV_CRASHCAT_PACKET_TYPE, the format of the payload.
//
// The lower 32 bits of the packet header, which include the 16-bit signature, (this) 4-bit
// version number, and 2-bit payload size unit and 10-bit payload size, are not covered by this
// version number and their format must not change.
//
typedef enum {
NV_CRASHCAT_PACKET_FORMAT_VERSION_1 = 0x01,
NV_CRASHCAT_PACKET_FORMAT_VERSION_LAST = 0x01
} NV_CRASHCAT_PACKET_FORMAT_VERSION;
// Utility macro for ensuring the maximum enum value will fit in a DRF bitfield
#define STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(e, bf) \
ct_assert(e ## _LAST < NVBIT(DRF_SIZE(bf)))
//
// The below enum definitions are generally unversioned, and so new values must only be added to
// the end, and existing values cannot be changed or removed (except for the _LAST values).
// Note that adding a new value may require a new version of the wayfinder protocol or packet
// formats that use the enum to accommodate a new maximum value.
//
//
// CrashCat Scratch Group Identifier
// Each enum value represents an ordered set of one or more scratch registers in the Peregrine IP.
// See NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE for the canonical list of registers in each
// scratch group for version 1 of the wayfinder protocol.
//
// This enum is used by the wayfinder protocol (version 1).
//
typedef enum {
NV_CRASHCAT_SCRATCH_GROUP_ID_NONE = 0x0,
NV_CRASHCAT_SCRATCH_GROUP_ID_A = 0x1,
NV_CRASHCAT_SCRATCH_GROUP_ID_B = 0x2,
NV_CRASHCAT_SCRATCH_GROUP_ID_C = 0x3,
NV_CRASHCAT_SCRATCH_GROUP_ID_D = 0x4,
NV_CRASHCAT_SCRATCH_GROUP_ID_E = 0x5,
NV_CRASHCAT_SCRATCH_GROUP_ID_F = 0x6,
//
// Note: NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF represents a set of registers which are defined
// by the implementer instead of the protocol specification - producer and consumer must
// agree on this set definition.
//
NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF = 0x7,
NV_CRASHCAT_SCRATCH_GROUP_ID_LAST = 0x7
} NV_CRASHCAT_SCRATCH_GROUP_ID;
//
// Canonical CrashCat Scratch Group Register Mappings (V1)
// This macro defines a designated-initializer table mapping NV_CRASHCAT_SCRATCH_GROUP_ID values to
// 0-terminated arrays of register offsets (relative to the NV_PFALCON register space base offset).
// This mapping is defined for version 1 of the wayfinder protocol; future versions may use a
// different mapping.
//
// This header does not define, or include any header that defines, the register offset macros used
// in the table entries. The caller should include the appropriate header defining these register
// offsets before invoking this macro.
//
// If the implementation intends to use the NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF group, it can
// invoke NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF() macro with the list of
// registers to be used for the IMPL_DEF group (up to 4). Example:
//
// NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF(scratchOffsetTable,
// NV_PUNIT_REG0, NV_PUNIT_REG1, NV_PUNIT_REG2, NV_PUNIT_REG3);
//
// Maximum number of registers in a scratch group for now
#define NV_CRASHCAT_SCRATCH_GROUP_V1_MAX_NUM_REGISTERS 4
#define NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE(tblName) \
NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF(tblName, 0)
#define NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF(tblName, ...) \
static const NvU32 tblName[][NV_CRASHCAT_SCRATCH_GROUP_V1_MAX_NUM_REGISTERS + 1] = { \
[NV_CRASHCAT_SCRATCH_GROUP_ID_NONE] = {0}, \
[NV_CRASHCAT_SCRATCH_GROUP_ID_A] = { \
NV_PFALCON_FALCON_MAILBOX0, NV_PFALCON_FALCON_MAILBOX1, 0 }, \
[NV_CRASHCAT_SCRATCH_GROUP_ID_B] = { \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(0), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(1), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(2), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(3), 0}, \
[NV_CRASHCAT_SCRATCH_GROUP_ID_C] = { \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(0), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(1), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(2), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(3), 0}, \
[NV_CRASHCAT_SCRATCH_GROUP_ID_D] = { \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(0), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(1), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(2), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(3), 0}, \
[NV_CRASHCAT_SCRATCH_GROUP_ID_E] = { \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(0), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(1), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(2), \
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(3), 0}, \
[NV_CRASHCAT_SCRATCH_GROUP_ID_F] = {0}, \
[NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF] = { __VA_ARGS__, 0 } \
}
//
// CrashCat Memory Aperture Identifier
// Each enum value represents a target aperture through which a CrashCat memory buffer can be
// accessed.
//
typedef enum {
NV_CRASHCAT_MEM_APERTURE_SYSGPA = 0x0,
NV_CRASHCAT_MEM_APERTURE_FBGPA = 0x1,
NV_CRASHCAT_MEM_APERTURE_DMEM = 0x2,
NV_CRASHCAT_MEM_APERTURE_EMEM = 0x3,
NV_CRASHCAT_MEM_APERTURE_LAST = 0x3,
NV_CRASHCAT_MEM_APERTURE_UNKNOWN = 0xFF, // Used for error checking/translation failures
} NV_CRASHCAT_MEM_APERTURE;
//
// CrashCat Memory Unit Size
// Each enum value represents a fixed unit size for a corresponding size field. This is used to
// encode buffer sizes in compact register space.
//
typedef enum {
NV_CRASHCAT_MEM_UNIT_SIZE_8B = 0, // 8-byte units
NV_CRASHCAT_MEM_UNIT_SIZE_1KB = 1, // 1-kilobyte units
NV_CRASHCAT_MEM_UNIT_SIZE_4KB = 2, // 4-kilobyte units
NV_CRASHCAT_MEM_UNIT_SIZE_64KB = 3, // 64-kilobyte units
NV_CRASHCAT_MEM_UNIT_SIZE_LAST = 3,
} NV_CRASHCAT_MEM_UNIT_SIZE;
//
// CrashCat Packet Type
// Encoded in the CrashCat packet header to indicate the format of the data.
//
typedef enum {
NV_CRASHCAT_PACKET_TYPE_REPORT = 0x00, // Base CrashCat report packet (required)
NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE = 0x01, // Each 8-byte value is a RISC-V 64-bit CSR
NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE = 0x02, // Each 8-byte value is a RISC-V 64-bit GPR
NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE = 0x03, // Each 8-byte value is a program counter/
// virtual address from a RISC-V 64-bit trace
NV_CRASHCAT_PACKET_TYPE_IO32_STATE = 0x04, // Each 8-byte value is a 32-bit register
// address in the upper bytes combined with
// a 32-bit value in the lower bytes
NV_CRASHCAT_PACKET_TYPE_LAST = 0x04
} NV_CRASHCAT_PACKET_TYPE;
//
// CrashCat RISC-V Mode
// Indicates the execution mode of the Peregrine core.
// Note: this does not include all RISC-V standard modes, only the ones supported by NVRISC-V.
//
typedef enum {
NV_CRASHCAT_RISCV_MODE_UNSPECIFIED = 0x0,
NV_CRASHCAT_RISCV_MODE_M = 0x1, // Machine Mode
NV_CRASHCAT_RISCV_MODE_S = 0x2, // Supervisor Mode
NV_CRASHCAT_RISCV_MODE_U = 0x3, // User Mode
NV_CRASHCAT_RISCV_MODE_LAST = 0x3,
} NV_CRASHCAT_RISCV_MODE;
//
// CrashCat Partition
// Represents a NVRISC-V microcode partition index
//
typedef NvU8 NvCrashCatNvriscvPartition;
#define NV_CRASHCAT_NVRISCV_PARTITION_UNSPECIFIED NV_U8_MAX
//
// CrashCat Ucode ID
// Represents an NVRISC-V microcode ID
//
typedef NvU8 NvCrashCatNvriscvUcodeId;
#define NV_CRASHCAT_NVRISCV_UCODE_ID_UNSPECIFIED NV_U8_MAX
//
// CrashCat Crash Cause Type
// Indicates the general nature of the crash cause.
//
typedef enum {
NV_CRASHCAT_CAUSE_TYPE_EXCEPTION = 0x0, // Crash observed via Peregrine trap (exception or
// unhandled interrupt)
NV_CRASHCAT_CAUSE_TYPE_TIMEOUT = 0x1, // Crash observed via timeout or hang condition
NV_CRASHCAT_CAUSE_TYPE_PANIC = 0x2, // Crash observed via direct panic condition
NV_CRASHCAT_CAUSE_TYPE_LAST = 0x2
} NV_CRASHCAT_CAUSE_TYPE;
//
// CrashCat I/O Aperture Identifier
// Indicates the Peregrine MMIO aperture through which register offsets are accessed.
//
typedef enum {
NV_CRASHCAT_IO_APERTURE_NONE = 0x00, // Register offsets are not relative
NV_CRASHCAT_IO_APERTURE_INTIO = 0x01, // Register offsets are relative to local I/O base
NV_CRASHCAT_IO_APERTURE_EXTIO = 0x02, // Register offsets are relative to external I/O base
NV_CRASHCAT_IO_APERTURE_LAST = 0x02
} NV_CRASHCAT_IO_APERTURE;
//
// CrashCat Trace Type
// Indicates the source of trace data (PC values)
//
typedef enum {
NV_CRASHCAT_TRACE_TYPE_STACK = 0x00, // The PC values are return addresses on a stack, walked
// by CrashCat implementation
NV_CRASHCAT_TRACE_TYPE_NVRVTB = 0x01, // The PC values are entries from the NVRISC-V PC trace
// buffer
NV_CRASHCAT_TRACE_TYPE_LAST = 0x01
} NV_CRASHCAT_TRACE_TYPE;
//
// CrashCat Wayfinder Protocol is a mechanism for locating crash-reports in a programmatic way,
// since available memory for reports may vary across different Peregrines. In V1, the wayfinder
// protocol uses a single common scratch register (level 0, A.K.A. WFL0) to point to a secondary
// group of scratch registers (level 1, A.K.A. WFL1), which point to the full crash report queue.
// The queue is implemented as a circular buffer with classic put/get semantics, controlled through
// the wayfinder L1 registers.
//
// Crash Report Wayfinder Level 0 (NV_CRASHCAT_WAYFINDER_L0)
// _SIGNATURE : Initialized to NV_CRASHCAT_SIGNATURE after the level 1 wayfinder is
// initialized.
// _VERSION : NV_CRASHCAT_WAYFINDER_VERSION value of the protocol implemented for the
// crash report wayfinder on this Peregrine (must be consistent with all
// implementers on a Peregrine).
//
// Version 1 Fields:
// _V1_WFL1_LOCATION : Contains an NV_CRASHCAT_SCRATCH_GROUP_ID identifying the registers
// containing the level 1 wayfinder
// _V1_RESERVED : Reserved for future use (currently 0).
//
typedef NvU32 NvCrashCatWayfinderL0_V1;
#define NV_CRASHCAT_WAYFINDER_L0_SIGNATURE 15:0
#define NV_CRASHCAT_WAYFINDER_L0_SIGNATURE_VALID NV_CRASHCAT_SIGNATURE
#define NV_CRASHCAT_WAYFINDER_L0_VERSION 19:16
#define NV_CRASHCAT_WAYFINDER_L0_VERSION_1 NV_CRASHCAT_WAYFINDER_VERSION_1
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION 22:20
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_NONE NV_CRASHCAT_SCRATCH_GROUP_ID_NONE
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_A NV_CRASHCAT_SCRATCH_GROUP_ID_A
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_B NV_CRASHCAT_SCRATCH_GROUP_ID_B
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_C NV_CRASHCAT_SCRATCH_GROUP_ID_C
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_D NV_CRASHCAT_SCRATCH_GROUP_ID_D
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_E NV_CRASHCAT_SCRATCH_GROUP_ID_E
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_F NV_CRASHCAT_SCRATCH_GROUP_ID_F
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_IMPL_DEF NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF
#define NV_CRASHCAT_WAYFINDER_L0_V1_RESERVED 31:23
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_WAYFINDER_VERSION,
NV_CRASHCAT_WAYFINDER_L0_VERSION);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_SCRATCH_GROUP_ID,
NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION);
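As an aside (not part of this header), a hedged producer-side sketch of composing the level 0 wayfinder value with the DRF helpers from nvmisc.h; writeScratch32() and wfl0RegOffset are assumptions, and scratch group B is chosen only for illustration.

// Hedged producer-side sketch: compose a V1 level-0 wayfinder value and publish it.
NvCrashCatWayfinderL0_V1 wfl0 = DRF_DEF(_CRASHCAT, _WAYFINDER_L0, _SIGNATURE, _VALID)   |
                                DRF_DEF(_CRASHCAT, _WAYFINDER_L0, _VERSION, _1)         |
                                DRF_DEF(_CRASHCAT, _WAYFINDER_L0_V1, _WFL1_LOCATION, _B);
writeScratch32(wfl0RegOffset, wfl0);   // wfl0RegOffset: implementation-defined scratch register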
//
// Crash Report Wayfinder Level 1 and Queue Control Notes
// Depending on how many scratch registers are in the set specified by the level 0 wayfinder, the
// registers used for the level 1 wayfinder may need to be reused for the queue control registers.
//
// The first two scratch registers in the set are used to compose the NvCrashCatWayfinderL1_V1
// value, with the register with the lower address providing the bottom 32 bits and the register
// with the higher address providing the upper 32 bits.
//
// If four scratch registers are available, the last two are used for the queue put and get
// control, respectively. The producer implementation should ensure these are initialized to zero,
// and may update the put pointer without any synchronization with the consumer.
//
// If only two scratch registers are available, the WFL1 registers are reclaimed after they are
// decoded by the consumer and used for the queue put and get pointers. The producer must wait for
// the consumer to set the NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION bits of the level 0 wayfinder
// to _NONE before writing the put pointer. It is the responsibility of the consumer to clear the
// WFL1 registers before updating the level 0 wayfinder - after the producer sees the
// NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION change to _NONE, it may update the put pointer.
//
// CrashCat Wayfinder Locking Notes for Implementers
// Due to the multi-producer nature of the crash report buffer, accesses to the following registers
// must be synchronized when writing to avoid stomping other crash reports or otherwise corrupting
// the queue:
// - NV_CRASHCAT_WAYFINDER_L0
// If the signature is already present when the reporter initializes, the reporter should panic
// if the wayfinder protocol version is not the same as what the reporter implements.
// Where possible, it is recommended to compile-time assert reporter version consistency.
// - NV_CRASHCAT_WAYFINDER_L1
// Writes to these registers must be synchronized during initialization by the reporter, to
// ensure that only one chooses the value and writes it. If they are already initialized, the
// reporter should not need to update them, and should instead queue its crash reports in the
// buffer pointed to by these registers.
// - NV_CRASHCAT_QUEUE_PUT
// This register must be synchronized on during initialization and update by the reporter. The
// interface should be locked before the start of writing the crash report and released after
// this register is updated.
// - NV_CRASHCAT_QUEUE_GET
// The (single) consumer controls this register, so no explicit synchronization is needed.
// The implementation should initialize to 0 when the level 0 wayfinder is initialized, and not
// touch it afterward.
//
// If no preemption is possible, then it is sufficient for a producer to push packets into the
// queue one by one, and only update the put pointer once all packets from the report have been
// queued. If the producer can be preempted while queuing report packets, it must hold a lock
// synchronizing access to the CrashCat queue while it pushes all report packets into the queue,
// to prevent potential interleaving with packets from other reports.
//
// It may be advantageous for the Peregrine FMC to receive the report queue location as a boot
// argument and initialize the wayfinders accordingly during boot, rather than when a crash is
// observed.
//
//
// Crash Report Wayfinder Level 1 (NV_CRASHCAT_WAYFINDER_L1) V1
// _QUEUE_APERTURE : NV_CRASHCAT_MEM_APERTURE value of the aperture through which the queue can
// be accessed
// _QUEUE_UNIT_SIZE : NV_CRASHCAT_MEM_UNIT_SIZE value indicating the units of the _SIZE field
// (1KB or greater)
// _RESERVED : Reserved for future use (currently 0)
// _QUEUE_SIZE : Size of the queue in _UNIT_SIZE minus 1 (_SIZE = 0 -> queue size is 1 unit)
// _QUEUE_OFFSET_1KB : 1KB-aligned offset of the start of the queue in _QUEUE_APERTURE
//
typedef NvU64 NvCrashCatWayfinderL1_V1;
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE 2:0
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_SYSGPA NV_CRASHCAT_MEM_APERTURE_SYSGPA
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_FBGPA NV_CRASHCAT_MEM_APERTURE_FBGPA
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_DMEM NV_CRASHCAT_MEM_APERTURE_DMEM
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_EMEM NV_CRASHCAT_MEM_APERTURE_EMEM
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE 4:3
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE_1KB NV_CRASHCAT_MEM_UNIT_SIZE_1KB
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE_4KB NV_CRASHCAT_MEM_UNIT_SIZE_4KB
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE_64KB NV_CRASHCAT_MEM_UNIT_SIZE_64KB
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_RESERVED 5:5
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_SIZE 9:6
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_OFFSET_1KB 63:10
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_MEM_APERTURE,
NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_MEM_UNIT_SIZE,
NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE);
//
// CrashCat Queue Put Pointer
// Offset in bytes into the CrashCat circular queue at which the next crash report will be written.
// Reports may wrap around the end of the buffer to the start.
//
// The implementation should only update the put pointer once all packets from the report have been
// queued. This simplifies the consumer implementation, as it can assume that the report is
// complete once the put pointer is updated.
//
//
// CrashCat Queue Get Pointer
// Offset in bytes into the CrashCat circular queue at which the next crash report will be read by
// the consumer (when get is behind put). The consumer advances this pointer to allow queue memory
// to be reused by subsequent reports.
//
static NV_INLINE NvU32 crashcatQueueFreeBytes(NvU32 put, NvU32 get, NvU32 size)
{
return (put >= get) ? (size - (put - get)) : (get - put);
}
static NV_INLINE NvU32 crashcatQueueBytesToRead(NvU32 put, NvU32 get, NvU32 size)
{
return (put >= get) ? (put - get) : (size - (get - put));
}
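For illustration (not part of this header), a hedged consumer-side drain loop built on these helpers; readPutPtr(), readGetPtr(), writeGetPtr(), readQueueHeader(), and queueSizeInBytes are assumptions, and the packet-header accessors are declared in nv-crashcat-decoder.h.

// Hedged consumer-side sketch: drain complete reports from the circular queue.
NvU32 put  = readPutPtr();
NvU32 get  = readGetPtr();
NvU32 size = queueSizeInBytes;

while (crashcatQueueBytesToRead(put, get, size) >= sizeof(NvCrashCatPacketHeader_V1))
{
    NvCrashCatPacketHeader_V1 hdr = readQueueHeader(get);
    if (!crashcatPacketHeaderValid(hdr))
        break;  // stop on a malformed packet rather than walking garbage

    NvLength payloadSize = crashcatPacketHeaderPayloadSize(hdr);

    // ... decode the payload based on crashcatPacketHeaderV1Type(hdr) ...

    // Advance get past the header and payload, wrapping at the end of the queue.
    get = (NvU32)((get + sizeof(hdr) + payloadSize) % size);
    writeGetPtr(get);
}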
//
// CrashCat Packet Header (NV_CRASHCAT_PACKET_HEADER)
// _SIGNATURE : NV_CRASHCAT_SIGNATURE value to indicate the start of a new data value
// _FORMAT_VERSION : NV_CRASHCAT_PACKET_FORMAT_VERSION value
// _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_MEM_UNIT_SIZE value indicating the units of the
// _PAYLOAD_SIZE field
// _PAYLOAD_SIZE : Size of the packet payload (excluding header) in _PAYLOAD_UNIT_SIZE
// minus 1 (_PAYLOAD_SIZE = 0 -> payload size is 1 unit)
// _V1_TYPE : NV_CRASHCAT_PACKET_TYPE value
// _V1_META : Additional packet metadata bits specific to the packet type
//
typedef NvU64 NvCrashCatPacketHeader;
typedef NvU64 NvCrashCatPacketHeader_V1;
#define NV_CRASHCAT_PACKET_HEADER_SIGNATURE 15:0
#define NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID NV_CRASHCAT_SIGNATURE
#define NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION 19:16
#define NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION_1 NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE 21:20
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_1KB NV_CRASHCAT_MEM_UNIT_SIZE_1KB
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_4KB NV_CRASHCAT_MEM_UNIT_SIZE_4KB
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_64PKB NV_CRASHCAT_MEM_UNIT_SIZE_64KB
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE 31:22
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE 39:32
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_REPORT NV_CRASHCAT_PACKET_TYPE_REPORT
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_RISCV64_CSR_STATE \
NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_RISCV64_GPR_STATE \
NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_IO32_STATE NV_CRASHCAT_PACKET_TYPE_IO32_STATE
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_RISCV64_TRACE NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE
#define NV_CRASHCAT_PACKET_HEADER_V1_META 63:40
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_PACKET_FORMAT_VERSION,
NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_MEM_UNIT_SIZE,
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_PACKET_TYPE,
NV_CRASHCAT_PACKET_HEADER_V1_TYPE);
//
// CrashCat Report (NV_CRASHCAT_PACKET_TYPE_REPORT) V1
// A fixed-size packet including
// 8 bytes: packet header (see NV_CRASHCAT_PACKET_HEADER)
// 8 bytes: implementer signature
// 16 bytes: reporter information
// 32 bytes: crash source information
//
// A report packet sets the context for the remaining packets that come after it (until the next
// NV_CRASHCAT_PACKET_TYPE_REPORT packet).
//
typedef struct NvCrashCatReport_V1 {
//
// CrashCat Report V1 Header (NV_CRASHCAT_REPORT_V1_HEADER)
// _SIGNATURE : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
// _FORMAT_VERSION : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
// _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
// _PAYLOAD_SIZE : 6 (56 bytes)
// _TYPE : NV_CRASHCAT_PACKET_TYPE_REPORT value
// _RESERVED : Reserved for future use (currently 0)
//
NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_REPORT_V1_HEADER_SIGNATURE \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_REPORT_V1_HEADER_SIGNATURE_VALID \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_REPORT_V1_HEADER_FORMAT_VERSION \
NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_REPORT_V1_HEADER_FORMAT_VERSION_VALID \
NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_UNIT_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_UNIT_SIZE_VALID \
NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_SIZE_VALID \
(((sizeof(NvCrashCatReport_V1) - sizeof(NvCrashCatPacketHeader_V1)) >> 3) - 1)
#define NV_CRASHCAT_REPORT_V1_HEADER_PACKET_TYPE \
NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_REPORT_V1_HEADER_PACKET_TYPE_VALID \
NV_CRASHCAT_PACKET_TYPE_REPORT
#define NV_CRASHCAT_REPORT_V1_HEADER_RESERVED 63:40
//
// CrashCat Report V1 Implementer Signature
// Provides a unique 64-bit identifier for the decoder to use to interpret the
// implementation-defined bits
//
NvU64 implementerSignature;
//
// CrashCat Report V1 Reporter ID (NV_CRASHCAT_REPORT_V1_REPORTER_ID)
// _NVRISCV_PARTITION : Partition index of the crash reporter (depends on FMC configuration)
// _NVRISCV_UCODE_ID : Ucode ID of the crash reporter (read from the relevant curruid
// CSR/field)
// _RISCV_MODE : Current RISC-V mode of the crash reporter
// _IMPL_DEF : Implementation-defined identifier
//
NvU64 reporterId;
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_PARTITION 7:0
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_PARTITION_UNSPECIFIED \
NV_CRASHCAT_NVRISCV_PARTITION_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_UCODE_ID 15:8
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_UCODE_ID_UNSPECIFIED \
NV_CRASHCAT_NVRISCV_UCODE_ID_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE 18:16
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_M \
NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_S \
NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_U \
NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_UNSPECIFIED \
NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RESERVED 23:19
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_IMPL_DEF 63:24
//
// CrashCat Report V1 Reporter Data (NV_CRASHCAT_REPORT_V1_REPORTER_DATA)
// _VERSION : Implementation-defined version identifier (recommend CL number)
// _TIMESTAMP : Time at which the crash report was generated, in seconds since the epoch
// (Jan 1, 1970) or since cold reset. Since this value is read from a local clock
// source, the consumer is responsible for adjusting it to a relevant reference point.
//
NvU64 reporterData;
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION 31:0
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_TIMESTAMP 63:32
//
// CrashCat Report V1 Source ID (NV_CRASHCAT_REPORT_V1_SOURCE_ID)
// _NVRISCV_PARTITION : Partition ID of the crashing code (depends on FMC configuration)
// _NVRISCV_UCODE_ID : Ucode ID of the crashing code (read from the relevant curruid
// CSR/field)
// _RISCV_MODE : RISC-V mode of the crashing code
// _IMPL_DEF : Implementation-defined identifier
//
NvU64 sourceId;
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_PARTITION 7:0
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_PARTITION_UNSPECIFIED \
NV_CRASHCAT_NVRISCV_PARTITION_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_UCODE_ID 15:8
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_UCODE_ID_UNSPECIFIED \
NV_CRASHCAT_NVRISCV_UCODE_ID_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE 18:16
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_M NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_S NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_U NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_UNSPECIFIED \
NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RESERVED 23:19
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_IMPL_DEF 63:24
//
// CrashCat Report V1 Source Cause (NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE)
// _TYPE : CrashCat general failure type for the crash (i.e., how the crash was observed)
// _RESERVED: Reserved for future use (currently 0)
// _IMPL_DEF: Implementation-defined reason code for the crash
//
NvU64 sourceCause;
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE 3:0
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_EXCEPTION NV_CRASHCAT_CAUSE_TYPE_EXCEPTION
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_TIMEOUT NV_CRASHCAT_CAUSE_TYPE_TIMEOUT
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_PANIC NV_CRASHCAT_CAUSE_TYPE_PANIC
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_RESERVED 31:4
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_IMPL_DEF 63:32
//
// CrashCat Report V1 Source PC
// Program counter of the instruction where the crash occurred
//
NvU64 sourcePc;
//
// CrashCat Report V1 Source Data
// Additional crash source data (implementation-defined)
//
NvU64 sourceData;
} NvCrashCatReport_V1;
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_CAUSE_TYPE,
NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE);
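//
// Illustrative validation sketch (editor's aside): a decoder can recognize a V1 report header by
// comparing the signature, format version, and packet type fields against the _VALID values
// defined above (field positions: signature 15:0, format version 19:16, type 39:32). Shifts and
// masks are written out explicitly to keep the example self-contained.
//
static NV_INLINE NvBool exampleIsCrashCatReportHeaderV1(NvU64 header)
{
    return ((header & 0xFFFF) == NV_CRASHCAT_REPORT_V1_HEADER_SIGNATURE_VALID) &&
           (((header >> 16) & 0xF) == NV_CRASHCAT_REPORT_V1_HEADER_FORMAT_VERSION_VALID) &&
           (((header >> 32) & 0xFF) == NV_CRASHCAT_REPORT_V1_HEADER_PACKET_TYPE_VALID);
}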
//
// CrashCat RISC-V CSR State (NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE) V1
// A fixed-size packet containing values for RISC-V Control and Status Registers (CSRs) that are
// commonly relevant in debugging crashes.
//
// Note: all CSRs in this structure must be defined in the standard RISC-V specification.
// Do not add NVRISC-V-specific CSRs to this packet.
//
typedef struct NvCrashCatRiscv64CsrState_V1 {
//
// CrashCat RISC-V CSR State Header (NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER)
// _SIGNATURE : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
// _FORMAT_VERSION : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
// _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
// _PAYLOAD_SIZE : 6 (56 bytes)
// _TYPE : NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE
// _RISCV_MODE : NV_CRASHCAT_RISCV_MODE value indicating the RISC-V mode from which the
// CSR values were captured
// _RESERVED : Reserved for future use (currently 0)
//
NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_SIGNATURE \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_SIGNATURE_VALID \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_FORMAT_VERSION \
NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_FORMAT_VERSION_VALID \
NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE_VALID \
NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_SIZE_VALID \
(((sizeof(NvCrashCatRiscv64CsrState_V1) - sizeof(NvCrashCatPacketHeader_V1)) >> 3) \
- 1)
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PACKET_TYPE \
NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PACKET_TYPE_VALID \
NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE 42:40
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_M NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_S NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_U NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_UNSPECIFIED \
NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RESERVED 63:43
NvU64 xstatus; // mstatus or sstatus
NvU64 xie; // mie or sie
NvU64 xip; // mip or sip
NvU64 xepc; // mepc or sepc
NvU64 xtval; // mbadaddr, mtval or stval
NvU64 xcause; // mcause or scause
NvU64 xscratch; // mscratch or sscratch
} NvCrashCatRiscv64CsrState_V1;
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE);
//
// CrashCat RISC-V GPR State (NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE) V1
// A fixed-size packet containing values for RISC-V general purpose registers (GPRs).
//
// These are defined to match the RISC-V standard calling convention for x1-x31.
// x0 is hardwired to 0, so we don't include it in dumps, and the packet header takes its place.
//
typedef struct NvCrashCatRiscv64GprState_V1 {
//
// CrashCat RISC-V GPR State Header (NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER)
// _SIGNATURE : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
// _FORMAT_VERSION : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
// _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
// _PAYLOAD_SIZE : 30 (248 bytes)
// _TYPE : NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE
// _RESERVED : Reserved for future use (currently 0)
//
NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_SIGNATURE \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_SIGNATURE_VALID \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_FORMAT_VERSION \
NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_FORMAT_VERSION_VALID \
NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE_VALID \
NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_SIZE_VALID \
(((sizeof(NvCrashCatRiscv64GprState_V1) - sizeof(NvCrashCatPacketHeader_V1)) >> 3) \
- 1)
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PACKET_TYPE \
NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PACKET_TYPE_VALID \
NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE 42:40
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_M NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_S NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_U NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_UNSPECIFIED \
NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RESERVED 63:43
NvU64 ra; // Return address
NvU64 sp; // Stack pointer
NvU64 gp; // Global pointer
NvU64 tp; // Thread pointer
NvU64 t0; // Temporary register 0
NvU64 t1; // Temporary register 1
NvU64 t2; // Temporary register 2
NvU64 s0; // Saved register 0
NvU64 s1; // Saved register 1
NvU64 a0; // Argument/return value register 0
NvU64 a1; // Argument/return value register 1
NvU64 a2; // Argument register 2
NvU64 a3; // Argument register 3
NvU64 a4; // Argument register 4
NvU64 a5; // Argument register 5
NvU64 a6; // Argument register 6
NvU64 a7; // Argument register 7
NvU64 s2; // Saved register 2
NvU64 s3; // Saved register 3
NvU64 s4; // Saved register 4
NvU64 s5; // Saved register 5
NvU64 s6; // Saved register 6
NvU64 s7; // Saved register 7
NvU64 s8; // Saved register 8
NvU64 s9; // Saved register 9
NvU64 s10; // Saved register 10
NvU64 s11; // Saved register 11
NvU64 t3; // Temporary register 3
NvU64 t4; // Temporary register 4
NvU64 t5; // Temporary register 5
NvU64 t6; // Temporary register 6
} NvCrashCatRiscv64GprState_V1;
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE);
//
// CrashCat Trace (NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE) V1
// A variable-size packet in which each 64-bit payload value is a virtual address from a trace
// (such as from a stack or PC trace buffer). The packet header metadata includes details to help
// differentiate types of traces.
//
typedef struct NvCrashCatRiscv64Trace_V1 {
//
// CrashCat Stack Trace Header (NV_CRASHCAT_RISCV64_TRACE_V1_HEADER)
// _SIGNATURE : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
// _FORMAT_VERSION : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
// _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
// _PAYLOAD_SIZE : Variable
// _TYPE : NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE
// _RISCV_MODE : The NV_CRASHCAT_RISCV_MODE context of the trace (i.e., the RISC-V
// mode in which the trace addresses are relevant)
// _RESERVED : Reserved for future use (currently 0)
//
NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_SIGNATURE \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_SIGNATURE_VALID \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_FORMAT_VERSION \
NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_FORMAT_VERSION_VALID \
NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PAYLOAD_UNIT_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PAYLOAD_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PACKET_TYPE \
NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PACKET_TYPE_VALID \
NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE 42:40
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_M NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_S NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_U NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_UNSPECIFIED \
NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_TRACE_TYPE 43:43
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_TRACE_TYPE_STACK NV_CRASHCAT_TRACE_TYPE_STACK
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_TRACE_TYPE_NVRVTB NV_CRASHCAT_TRACE_TYPE_NVRVTB
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RESERVED 63:44
NvU64 addr[];
} NvCrashCatRiscv64Trace_V1;
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE);
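//
// Illustrative helper (editor's aside): with the 8-byte payload units documented above, the
// number of trace addresses is simply the payload unit count, i.e. the _PAYLOAD_SIZE field
// (bits 31:22, encoded as units minus one) plus one.
//
static NV_INLINE NvU32 exampleCrashCatRiscv64TraceEntryCount(const NvCrashCatRiscv64Trace_V1 *pTrace)
{
    return (NvU32)((pTrace->header >> 22) & 0x3FF) + 1;
}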
//
// CrashCat 32-bit I/O State (NV_CRASHCAT_PACKET_TYPE_IO32_STATE) V1
// A variable-size packet in which a 32-bit address and a 32-bit value are encoded into each 64-bit
// payload value.
//
typedef struct NvCrashCatIo32State_V1 {
//
// CrashCat 32-bit I/O Trace Header (NV_CRASHCAT_IO32_STATE_V1_HEADER)
// _SIGNATURE : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
// _FORMAT_VERSION : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
// _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
// _PAYLOAD_SIZE : Variable
// _TYPE : NV_CRASHCAT_PACKET_TYPE_IO32_STATE
// _APERTURE : NV_CRASHCAT_IO_APERTURE value identifying the aperture that the
// offset is relative to
// _RESERVED : Reserved for future use (currently 0)
//
NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_SIGNATURE \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_SIGNATURE_VALID \
NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_FORMAT_VERSION \
NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_FORMAT_VERSION_VALID \
NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PAYLOAD_SIZE \
NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PACKET_TYPE \
NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PACKET_TYPE_VALID \
NV_CRASHCAT_PACKET_TYPE_IO32_STATE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE 41:40
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE_NONE NV_CRASHCAT_IO_APERTURE_NONE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE_INTIO NV_CRASHCAT_IO_APERTURE_INTIO
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE_EXTIO NV_CRASHCAT_IO_APERTURE_EXTIO
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_RESERVED 63:42
struct {
NvU32 value;
NvU32 offset;
} regs[];
} NvCrashCatIo32State_V1;
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_IO_APERTURE,
NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE);
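//
// Illustrative decode loop (editor's aside): each 8-byte payload entry packs a 32-bit value and a
// 32-bit offset, so the entry count equals the payload unit count. The pExampleCallback parameter
// is a hypothetical consumer of each (offset, value) pair.
//
static NV_INLINE void exampleCrashCatWalkIo32State(const NvCrashCatIo32State_V1 *pState,
                                                   void (*pExampleCallback)(NvU32 offset, NvU32 value))
{
    NvU32 i;
    NvU32 count = (NvU32)((pState->header >> 22) & 0x3FF) + 1;  // _PAYLOAD_SIZE is (units - 1)

    for (i = 0; i < count; i++)
        pExampleCallback(pState->regs[i].offset, pState->regs[i].value);
}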
#endif // NV_CRASHCAT_H

View File

@@ -137,28 +137,51 @@ typedef struct
// Boot count. Used to determine whether to load the firmware image.
NvU64 bootCount;
// This union is organized the way it is to start at an 8-byte boundary and achieve natural
// packing of the internal struct fields.
union
{
    struct
    {
        // TODO: the partitionRpc* fields below do not really belong in this
        // structure. The values are patched in by the partition bootstrapper
        // when GSP-RM is booted in a partition, and this structure was a
        // convenient place for the bootstrapper to access them. These should
        // be moved to a different comm. mechanism between the bootstrapper
        // and the GSP-RM tasks.

        // Shared partition RPC memory (physical address)
        NvU64 partitionRpcAddr;

        // Offsets relative to partitionRpcAddr
        NvU16 partitionRpcRequestOffset;
        NvU16 partitionRpcReplyOffset;

        // Code section and dataSection offset and size.
        NvU32 elfCodeOffset;
        NvU32 elfDataOffset;
        NvU32 elfCodeSize;
        NvU32 elfDataSize;

        // Used during GSP-RM resume to check for revocation
        NvU32 lsUcodeVersion;
    };

    struct
    {
        // Pad for the partitionRpc* fields, plus 4 bytes
        NvU32 partitionRpcPadding[4];

        // CrashCat (contiguous) buffer size/location - occupies same bytes as the
        // elf(Code|Data)(Offset|Size) fields above.
        // TODO: move to GSP_FMC_INIT_PARAMS
        NvU64 sysmemAddrOfCrashReportQueue;
        NvU32 sizeOfCrashReportQueue;

        // Pad for the lsUcodeVersion field
        NvU32 lsUcodeVersionPadding[1];
    };
};
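// Editor's aside (illustrative only): the overlay above works because both anonymous structs
// pack to the same 32 bytes - partitionRpcPadding[4] (16 bytes) covers partitionRpcAddr, the two
// NvU16 offsets and elfCodeOffset, which places sysmemAddrOfCrashReportQueue at byte 16 of the
// union (naturally 8-byte aligned, over elfDataOffset/elfCodeSize) and sizeOfCrashReportQueue
// over elfDataSize. A hypothetical standalone mirror of the CrashCat view could be checked at
// compile time (e.g. with ct_assert/NV_OFFSETOF as used elsewhere in this codebase) like so:
//
//     typedef struct { NvU32 pad[4]; NvU64 queueAddr; NvU32 queueSize; NvU32 pad2; } ExampleCrashCatView;
//     ct_assert(sizeof(ExampleCrashCatView) == 32);
//     ct_assert(NV_OFFSETOF(ExampleCrashCatView, queueAddr) == 16);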
// Number of VF partitions allocating sub-heaps from the WPR heap
// Used during boot to ensure the heap is adequately sized

View File

@@ -5332,14 +5332,16 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_ERR_INVALID_ARGUMENT;
OBJGPU *pGpu;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
NV_ASSERT_OR_GOTO(((ppRanges != NULL) &&
                   (pRangeCount != NULL) &&
                   (pStaticMemInfo != NULL)), Done);
pGpu = NV_GET_NV_PRIV_PGPU(nv);
@@ -5347,12 +5349,54 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
{
KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR *) pStaticMemInfo;
NvU32 memdescPageSize = memdescGetPageSize(pMemDesc, AT_GPU);
NvU64 prologueOffset = offset;
NvU64 prologueSize = 0;
NvU64 epilogueOffset = offset;
NvU64 epilogueSize = 0;
NvU64 mainOffset = offset;
NvU64 mainSize = 0;
NvU32 mainPageCount = 0;
NvU64 alignedOffset;
NvU32 pageCount = 0;
NvU32 i = 0;
NvU32 index = 0;
alignedOffset = NV_ALIGN_UP64(offset, memdescPageSize);
if ((size > 0) && offset != alignedOffset)
{
prologueOffset = offset;
prologueSize = NV_MIN(alignedOffset - offset, size);
pageCount++;
size -= prologueSize;
}
if (size > 0)
{
mainOffset = prologueOffset + prologueSize;
mainSize = NV_ALIGN_DOWN64(size, memdescPageSize);
mainPageCount = mainSize / memdescPageSize;
pageCount += mainPageCount;
size -= mainSize;
}
if (size > 0)
{
epilogueOffset = mainOffset + mainSize;
epilogueSize = size;
pageCount++;
size -= epilogueSize;
}
if ((pageCount == 0) || (size != 0))
{
NV_ASSERT(0);
rmStatus = NV_ERR_INVALID_STATE;
goto Done;
}
rmStatus = os_alloc_mem((void **) ppRanges,
pageCount * sizeof(nv_phys_addr_range_t));
@@ -5361,15 +5405,39 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
goto Done;
}
// Fill the first unaligned segment
if (prologueSize > 0)
{
NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, prologueOffset);
(*ppRanges)[0].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
(*ppRanges)[0].len = prologueSize;
index = 1;
}
// Fill the aligned segments between first and last entries
while (mainPageCount != 0)
{
NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, alignedOffset);
(*ppRanges)[index].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
(*ppRanges)[index].len = memdescPageSize;
index++;
alignedOffset += memdescPageSize;
mainPageCount--;
}
// Fill the last unaligned segment
if (epilogueSize > 0)
{
NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, epilogueOffset);
(*ppRanges)[index].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
(*ppRanges)[index].len = epilogueSize;
index++;
}
NV_ASSERT(index == pageCount);
*pRangeCount = pageCount;
}
else

View File

@@ -0,0 +1,123 @@
#define NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_engine_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x654166 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;
void __nvoc_init_CrashCatEngine(CrashCatEngine*);
void __nvoc_init_funcTable_CrashCatEngine(CrashCatEngine*);
NV_STATUS __nvoc_ctor_CrashCatEngine(CrashCatEngine*);
void __nvoc_init_dataField_CrashCatEngine(CrashCatEngine*);
void __nvoc_dtor_CrashCatEngine(CrashCatEngine*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatEngine;
static const struct NVOC_RTTI __nvoc_rtti_CrashCatEngine_CrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatEngine,
/*offset=*/ 0,
};
static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatEngine = {
/*numRelatives=*/ 1,
/*relatives=*/ {
&__nvoc_rtti_CrashCatEngine_CrashCatEngine,
},
};
// Not instantiable because it's not derived from class "Object"
// Not instantiable because it's an abstract class with following pure virtual functions:
// crashcatEngineConfigured
// crashcatEngineVprintf
// crashcatEnginePriRead
// crashcatEnginePriWrite
// crashcatEngineMapBufferDescriptor
// crashcatEngineUnmapBufferDescriptor
// crashcatEngineSyncBufferDescriptor
// crashcatEngineGetScratchOffsets
// crashcatEngineGetWFL0Offset
const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine =
{
/*classInfo=*/ {
/*size=*/ sizeof(CrashCatEngine),
/*classId=*/ classId(CrashCatEngine),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "CrashCatEngine",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
/*pCastInfo=*/ &__nvoc_castinfo_CrashCatEngine,
/*pExportInfo=*/ &__nvoc_export_info_CrashCatEngine
};
const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatEngine =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_CrashCatEngine(CrashCatEngine *pThis) {
__nvoc_crashcatEngineDestruct(pThis);
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_dataField_CrashCatEngine(CrashCatEngine *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
NV_STATUS __nvoc_ctor_CrashCatEngine(CrashCatEngine *pThis) {
NV_STATUS status = NV_OK;
__nvoc_init_dataField_CrashCatEngine(pThis);
status = __nvoc_crashcatEngineConstruct(pThis);
if (status != NV_OK) goto __nvoc_ctor_CrashCatEngine_fail__init;
goto __nvoc_ctor_CrashCatEngine_exit; // Success
__nvoc_ctor_CrashCatEngine_fail__init:
__nvoc_ctor_CrashCatEngine_exit:
return status;
}
static void __nvoc_init_funcTable_CrashCatEngine_1(CrashCatEngine *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
pThis->__crashcatEngineUnload__ = &crashcatEngineUnload_IMPL;
pThis->__crashcatEngineConfigured__ = NULL;
pThis->__crashcatEngineVprintf__ = NULL;
pThis->__crashcatEnginePriRead__ = NULL;
pThis->__crashcatEnginePriWrite__ = NULL;
pThis->__crashcatEngineMapBufferDescriptor__ = NULL;
pThis->__crashcatEngineUnmapBufferDescriptor__ = NULL;
pThis->__crashcatEngineSyncBufferDescriptor__ = NULL;
pThis->__crashcatEngineGetScratchOffsets__ = NULL;
pThis->__crashcatEngineGetWFL0Offset__ = NULL;
}
void __nvoc_init_funcTable_CrashCatEngine(CrashCatEngine *pThis) {
__nvoc_init_funcTable_CrashCatEngine_1(pThis);
}
void __nvoc_init_CrashCatEngine(CrashCatEngine *pThis) {
pThis->__nvoc_pbase_CrashCatEngine = pThis;
__nvoc_init_funcTable_CrashCatEngine(pThis);
}

View File

@@ -0,0 +1,287 @@
#ifndef _G_CRASHCAT_ENGINE_NVOC_H_
#define _G_CRASHCAT_ENGINE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_crashcat_engine_nvoc.h"
#ifndef CRASHCAT_ENGINE_H
#define CRASHCAT_ENGINE_H
#include "containers/map.h"
#include "nvoc/object.h"
#include "nvport/inline/util_valist.h"
#include "nv-crashcat.h"
struct CrashCatReport;
#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */
struct CrashCatWayfinder;
#ifndef __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
#define __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
typedef struct CrashCatWayfinder CrashCatWayfinder;
#endif /* __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatWayfinder
#define __nvoc_class_id_CrashCatWayfinder 0x085e32
#endif /* __nvoc_class_id_CrashCatWayfinder */
typedef struct {
void *pEngPriv;
void *pMapping;
NvBool bRegistered;
NV_CRASHCAT_MEM_APERTURE aperture;
NvU64 physOffset;
NvLength size;
MapNode registeredBufferMapNode;
MapNode mappedBufferMapNode;
} CrashCatBufferDescriptor;
MAKE_INTRUSIVE_MAP(CrashCatRegisteredBufferMap, CrashCatBufferDescriptor, registeredBufferMapNode);
MAKE_INTRUSIVE_MAP(CrashCatMappedBufferMap, CrashCatBufferDescriptor, mappedBufferMapNode);
// Base class for engine-specific accessors - must be implemented by the host codebase.
#ifdef NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct CrashCatEngine {
const struct NVOC_RTTI *__nvoc_rtti;
struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
void (*__crashcatEngineUnload__)(struct CrashCatEngine *);
NvBool (*__crashcatEngineConfigured__)(struct CrashCatEngine *);
void (*__crashcatEngineVprintf__)(struct CrashCatEngine *, NvBool, const char *, va_list);
NvU32 (*__crashcatEnginePriRead__)(struct CrashCatEngine *, NvU32);
void (*__crashcatEnginePriWrite__)(struct CrashCatEngine *, NvU32, NvU32);
void *(*__crashcatEngineMapBufferDescriptor__)(struct CrashCatEngine *, CrashCatBufferDescriptor *);
void (*__crashcatEngineUnmapBufferDescriptor__)(struct CrashCatEngine *, CrashCatBufferDescriptor *);
void (*__crashcatEngineSyncBufferDescriptor__)(struct CrashCatEngine *, CrashCatBufferDescriptor *, NvU32, NvU32);
const NvU32 *(*__crashcatEngineGetScratchOffsets__)(struct CrashCatEngine *, NV_CRASHCAT_SCRATCH_GROUP_ID);
NvU32 (*__crashcatEngineGetWFL0Offset__)(struct CrashCatEngine *);
NvBool PRIVATE_FIELD(bEnabled);
struct CrashCatWayfinder *PRIVATE_FIELD(pWayfinder);
CrashCatRegisteredBufferMap PRIVATE_FIELD(registeredCrashBuffers);
CrashCatMappedBufferMap PRIVATE_FIELD(mappedCrashBuffers);
};
#ifndef __NVOC_CLASS_CrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_CrashCatEngine_TYPEDEF__
typedef struct CrashCatEngine CrashCatEngine;
#endif /* __NVOC_CLASS_CrashCatEngine_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatEngine
#define __nvoc_class_id_CrashCatEngine 0x654166
#endif /* __nvoc_class_id_CrashCatEngine */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;
#define __staticCast_CrashCatEngine(pThis) \
((pThis)->__nvoc_pbase_CrashCatEngine)
#ifdef __nvoc_crashcat_engine_h_disabled
#define __dynamicCast_CrashCatEngine(pThis) ((CrashCatEngine*)NULL)
#else //__nvoc_crashcat_engine_h_disabled
#define __dynamicCast_CrashCatEngine(pThis) \
((CrashCatEngine*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatEngine)))
#endif //__nvoc_crashcat_engine_h_disabled
NV_STATUS __nvoc_objCreateDynamic_CrashCatEngine(CrashCatEngine**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_CrashCatEngine(CrashCatEngine**, Dynamic*, NvU32);
#define __objCreate_CrashCatEngine(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_CrashCatEngine((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
#define crashcatEngineUnload(arg0) crashcatEngineUnload_DISPATCH(arg0)
#define crashcatEngineConfigured(arg0) crashcatEngineConfigured_DISPATCH(arg0)
#define crashcatEngineVprintf(arg0, bReportStart, fmt, args) crashcatEngineVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define crashcatEnginePriRead(arg0, offset) crashcatEnginePriRead_DISPATCH(arg0, offset)
#define crashcatEnginePriWrite(arg0, offset, data) crashcatEnginePriWrite_DISPATCH(arg0, offset, data)
#define crashcatEngineMapBufferDescriptor(arg0, pBufDesc) crashcatEngineMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define crashcatEngineUnmapBufferDescriptor(arg0, pBufDesc) crashcatEngineUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define crashcatEngineSyncBufferDescriptor(arg0, pBufDesc, offset, size) crashcatEngineSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define crashcatEngineGetScratchOffsets(arg0, scratchId) crashcatEngineGetScratchOffsets_DISPATCH(arg0, scratchId)
#define crashcatEngineGetWFL0Offset(arg0) crashcatEngineGetWFL0Offset_DISPATCH(arg0)
void crashcatEngineUnload_IMPL(struct CrashCatEngine *arg0);
static inline void crashcatEngineUnload_DISPATCH(struct CrashCatEngine *arg0) {
arg0->__crashcatEngineUnload__(arg0);
}
static inline NvBool crashcatEngineConfigured_DISPATCH(struct CrashCatEngine *arg0) {
return arg0->__crashcatEngineConfigured__(arg0);
}
static inline void crashcatEngineVprintf_DISPATCH(struct CrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args) {
arg0->__crashcatEngineVprintf__(arg0, bReportStart, fmt, args);
}
static inline NvU32 crashcatEnginePriRead_DISPATCH(struct CrashCatEngine *arg0, NvU32 offset) {
return arg0->__crashcatEnginePriRead__(arg0, offset);
}
static inline void crashcatEnginePriWrite_DISPATCH(struct CrashCatEngine *arg0, NvU32 offset, NvU32 data) {
arg0->__crashcatEnginePriWrite__(arg0, offset, data);
}
static inline void *crashcatEngineMapBufferDescriptor_DISPATCH(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
return arg0->__crashcatEngineMapBufferDescriptor__(arg0, pBufDesc);
}
static inline void crashcatEngineUnmapBufferDescriptor_DISPATCH(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
arg0->__crashcatEngineUnmapBufferDescriptor__(arg0, pBufDesc);
}
static inline void crashcatEngineSyncBufferDescriptor_DISPATCH(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
arg0->__crashcatEngineSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}
static inline const NvU32 *crashcatEngineGetScratchOffsets_DISPATCH(struct CrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchId) {
return arg0->__crashcatEngineGetScratchOffsets__(arg0, scratchId);
}
static inline NvU32 crashcatEngineGetWFL0Offset_DISPATCH(struct CrashCatEngine *arg0) {
return arg0->__crashcatEngineGetWFL0Offset__(arg0);
}
NV_STATUS crashcatEngineConstruct_IMPL(struct CrashCatEngine *arg_);
#define __nvoc_crashcatEngineConstruct(arg_) crashcatEngineConstruct_IMPL(arg_)
void crashcatEngineDestruct_IMPL(struct CrashCatEngine *arg0);
#define __nvoc_crashcatEngineDestruct(arg0) crashcatEngineDestruct_IMPL(arg0)
struct CrashCatReport *crashcatEngineGetNextCrashReport_IMPL(struct CrashCatEngine *arg0);
#ifdef __nvoc_crashcat_engine_h_disabled
static inline struct CrashCatReport *crashcatEngineGetNextCrashReport(struct CrashCatEngine *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
return NULL;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineGetNextCrashReport(arg0) crashcatEngineGetNextCrashReport_IMPL(arg0)
#endif //__nvoc_crashcat_engine_h_disabled
NV_STATUS crashcatEngineRegisterCrashBuffer_IMPL(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size, void *pEngPriv);
#ifdef __nvoc_crashcat_engine_h_disabled
static inline NV_STATUS crashcatEngineRegisterCrashBuffer(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size, void *pEngPriv) {
NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineRegisterCrashBuffer(arg0, aperture, offset, size, pEngPriv) crashcatEngineRegisterCrashBuffer_IMPL(arg0, aperture, offset, size, pEngPriv)
#endif //__nvoc_crashcat_engine_h_disabled
void crashcatEngineUnregisterCrashBuffer_IMPL(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size);
#ifdef __nvoc_crashcat_engine_h_disabled
static inline void crashcatEngineUnregisterCrashBuffer(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size) {
NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineUnregisterCrashBuffer(arg0, aperture, offset, size) crashcatEngineUnregisterCrashBuffer_IMPL(arg0, aperture, offset, size)
#endif //__nvoc_crashcat_engine_h_disabled
void *crashcatEngineMapCrashBuffer_IMPL(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size);
#ifdef __nvoc_crashcat_engine_h_disabled
static inline void *crashcatEngineMapCrashBuffer(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size) {
NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
return NULL;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineMapCrashBuffer(arg0, aperture, offset, size) crashcatEngineMapCrashBuffer_IMPL(arg0, aperture, offset, size)
#endif //__nvoc_crashcat_engine_h_disabled
void crashcatEngineUnmapCrashBuffer_IMPL(struct CrashCatEngine *arg0, void *ptr);
#ifdef __nvoc_crashcat_engine_h_disabled
static inline void crashcatEngineUnmapCrashBuffer(struct CrashCatEngine *arg0, void *ptr) {
NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineUnmapCrashBuffer(arg0, ptr) crashcatEngineUnmapCrashBuffer_IMPL(arg0, ptr)
#endif //__nvoc_crashcat_engine_h_disabled
void crashcatEngineSyncCrashBuffer_IMPL(struct CrashCatEngine *arg0, void *ptr, NvU32 offset, NvU32 size);
#ifdef __nvoc_crashcat_engine_h_disabled
static inline void crashcatEngineSyncCrashBuffer(struct CrashCatEngine *arg0, void *ptr, NvU32 offset, NvU32 size) {
NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineSyncCrashBuffer(arg0, ptr, offset, size) crashcatEngineSyncCrashBuffer_IMPL(arg0, ptr, offset, size)
#endif //__nvoc_crashcat_engine_h_disabled
NV_STATUS crashcatEngineLoadWayfinder_IMPL(struct CrashCatEngine *arg0);
#ifdef __nvoc_crashcat_engine_h_disabled
static inline NV_STATUS crashcatEngineLoadWayfinder(struct CrashCatEngine *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineLoadWayfinder(arg0) crashcatEngineLoadWayfinder_IMPL(arg0)
#endif //__nvoc_crashcat_engine_h_disabled
#undef PRIVATE_FIELD
#ifndef NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#ifndef __nvoc_crashcat_engine_h_disabled
#undef crashcatEngineLoadWayfinder
NV_STATUS NVOC_PRIVATE_FUNCTION(crashcatEngineLoadWayfinder)(struct CrashCatEngine *arg0);
#endif //__nvoc_crashcat_engine_h_disabled
#endif // NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#define CRASHCAT_GET_ENGINE(pCrashCatObj) objFindAncestorOfType(CrashCatEngine, pCrashCatObj)
// Non-NVOC wrapper for handling variadic arguments
void crashcatEnginePrintf(struct CrashCatEngine *, NvBool, const char *, ...);
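//
// Illustrative usage sketch (editor's aside, not generated code): once a host engine object is
// constructed and a crash buffer has been located, a caller might register the buffer and drain
// any pending reports roughly as follows. The aperture/offset/size values and the report
// handling are placeholders; only functions declared above are called.
//
static inline void exampleDrainCrashCatEngine(struct CrashCatEngine *pEngine,
                                              NV_CRASHCAT_MEM_APERTURE aperture,
                                              NvU64 bufOffset, NvU64 bufSize)
{
    struct CrashCatReport *pReport;

    if (crashcatEngineRegisterCrashBuffer(pEngine, aperture, bufOffset, bufSize,
                                          NULL /* pEngPriv */) != NV_OK)
        return;

    while ((pReport = crashcatEngineGetNextCrashReport(pEngine)) != NULL)
    {
        // Decode/log the report here, then release it (e.g. with the NVOC objDelete()).
    }
}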
#endif // CRASHCAT_ENGINE_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_ENGINE_NVOC_H_

View File

@@ -0,0 +1,179 @@
#define NVOC_CRASHCAT_QUEUE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_queue_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xbaa900 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatQueue;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* );
void __nvoc_init_funcTable_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* );
NV_STATUS __nvoc_ctor_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* , CrashCatQueueConfig * arg_pQueueConfig);
void __nvoc_init_dataField_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* );
void __nvoc_dtor_CrashCatQueue(CrashCatQueue*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatQueue;
static const struct NVOC_RTTI __nvoc_rtti_CrashCatQueue_CrashCatQueue = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatQueue,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatQueue,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_CrashCatQueue_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(CrashCatQueue, __nvoc_base_Object),
};
static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatQueue = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_CrashCatQueue_CrashCatQueue,
&__nvoc_rtti_CrashCatQueue_Object,
},
};
const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatQueue =
{
/*classInfo=*/ {
/*size=*/ sizeof(CrashCatQueue),
/*classId=*/ classId(CrashCatQueue),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "CrashCatQueue",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CrashCatQueue,
/*pCastInfo=*/ &__nvoc_castinfo_CrashCatQueue,
/*pExportInfo=*/ &__nvoc_export_info_CrashCatQueue
};
const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatQueue =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_CrashCatQueue(CrashCatQueue *pThis) {
__nvoc_crashcatQueueDestruct(pThis);
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_dataField_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
CrashCatWayfinderHal *wayfinderHal = &pCrashcatWayfinder->wayfinderHal;
const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pCrashcatWayfinder);
PORT_UNREFERENCED_VARIABLE(wayfinderHal);
PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder, CrashCatQueueConfig * arg_pQueueConfig) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_CrashCatQueue_fail_Object;
__nvoc_init_dataField_CrashCatQueue(pThis, pCrashcatWayfinder);
status = __nvoc_crashcatQueueConstruct(pThis, arg_pQueueConfig);
if (status != NV_OK) goto __nvoc_ctor_CrashCatQueue_fail__init;
goto __nvoc_ctor_CrashCatQueue_exit; // Success
__nvoc_ctor_CrashCatQueue_fail__init:
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_CrashCatQueue_fail_Object:
__nvoc_ctor_CrashCatQueue_exit:
return status;
}
static void __nvoc_init_funcTable_CrashCatQueue_1(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
CrashCatWayfinderHal *wayfinderHal = &pCrashcatWayfinder->wayfinderHal;
const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pCrashcatWayfinder);
PORT_UNREFERENCED_VARIABLE(wayfinderHal);
PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}
void __nvoc_init_funcTable_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
__nvoc_init_funcTable_CrashCatQueue_1(pThis, pCrashcatWayfinder);
}
void __nvoc_init_Object(Object*);
void __nvoc_init_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
pThis->__nvoc_pbase_CrashCatQueue = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_funcTable_CrashCatQueue(pThis, pCrashcatWayfinder);
}
NV_STATUS __nvoc_objCreate_CrashCatQueue(CrashCatQueue **ppThis, Dynamic *pParent, NvU32 createFlags, CrashCatQueueConfig * arg_pQueueConfig) {
NV_STATUS status;
Object *pParentObj;
CrashCatQueue *pThis;
CrashCatWayfinder *pCrashcatWayfinder;
status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(CrashCatQueue), (void**)&pThis, (void**)ppThis);
if (status != NV_OK)
return status;
portMemSet(pThis, 0, sizeof(CrashCatQueue));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_CrashCatQueue);
pThis->__nvoc_base_Object.createFlags = createFlags;
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_Object.pParent = NULL;
}
if ((pCrashcatWayfinder = dynamicCast(pParent, CrashCatWayfinder)) == NULL)
pCrashcatWayfinder = objFindAncestorOfType(CrashCatWayfinder, pParent);
NV_ASSERT_OR_RETURN(pCrashcatWayfinder != NULL, NV_ERR_INVALID_ARGUMENT);
__nvoc_init_CrashCatQueue(pThis, pCrashcatWayfinder);
status = __nvoc_ctor_CrashCatQueue(pThis, pCrashcatWayfinder, arg_pQueueConfig);
if (status != NV_OK) goto __nvoc_objCreate_CrashCatQueue_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_CrashCatQueue_cleanup:
// do not call destructors here since the constructor already called them
if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
portMemSet(pThis, 0, sizeof(CrashCatQueue));
else
portMemFree(pThis);
// coverity[leaked_storage:FALSE]
return status;
}
NV_STATUS __nvoc_objCreateDynamic_CrashCatQueue(CrashCatQueue **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
CrashCatQueueConfig * arg_pQueueConfig = va_arg(args, CrashCatQueueConfig *);
status = __nvoc_objCreate_CrashCatQueue(ppThis, pParent, createFlags, arg_pQueueConfig);
return status;
}

View File

@@ -0,0 +1,147 @@
#ifndef _G_CRASHCAT_QUEUE_NVOC_H_
#define _G_CRASHCAT_QUEUE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_crashcat_queue_nvoc.h"
#ifndef CRASHCAT_QUEUE_H
#define CRASHCAT_QUEUE_H
#include "nvoc/object.h"
#include "nv-crashcat.h"
#include "crashcat/crashcat_wayfinder.h" // for CrashCatWayfinderHal spec
struct CrashCatEngine;
#ifndef __NVOC_CLASS_CrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_CrashCatEngine_TYPEDEF__
typedef struct CrashCatEngine CrashCatEngine;
#endif /* __NVOC_CLASS_CrashCatEngine_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatEngine
#define __nvoc_class_id_CrashCatEngine 0x654166
#endif /* __nvoc_class_id_CrashCatEngine */
struct CrashCatReport;
#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */
typedef struct
{
NV_CRASHCAT_MEM_APERTURE aperture;
NvU32 size;
NvU64 offset;
NvU32 putRegOffset;
NvU32 getRegOffset;
} CrashCatQueueConfig;
#ifdef NVOC_CRASHCAT_QUEUE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct CrashCatQueue {
const struct NVOC_RTTI *__nvoc_rtti;
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct CrashCatQueue *__nvoc_pbase_CrashCatQueue;
CrashCatQueueConfig PRIVATE_FIELD(config);
struct CrashCatEngine *PRIVATE_FIELD(pEngine);
void *PRIVATE_FIELD(pMapping);
};
#ifndef __NVOC_CLASS_CrashCatQueue_TYPEDEF__
#define __NVOC_CLASS_CrashCatQueue_TYPEDEF__
typedef struct CrashCatQueue CrashCatQueue;
#endif /* __NVOC_CLASS_CrashCatQueue_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatQueue
#define __nvoc_class_id_CrashCatQueue 0xbaa900
#endif /* __nvoc_class_id_CrashCatQueue */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatQueue;
#define __staticCast_CrashCatQueue(pThis) \
((pThis)->__nvoc_pbase_CrashCatQueue)
#ifdef __nvoc_crashcat_queue_h_disabled
#define __dynamicCast_CrashCatQueue(pThis) ((CrashCatQueue*)NULL)
#else //__nvoc_crashcat_queue_h_disabled
#define __dynamicCast_CrashCatQueue(pThis) \
((CrashCatQueue*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatQueue)))
#endif //__nvoc_crashcat_queue_h_disabled
NV_STATUS __nvoc_objCreateDynamic_CrashCatQueue(CrashCatQueue**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_CrashCatQueue(CrashCatQueue**, Dynamic*, NvU32, CrashCatQueueConfig * arg_pQueueConfig);
#define __objCreate_CrashCatQueue(ppNewObj, pParent, createFlags, arg_pQueueConfig) \
__nvoc_objCreate_CrashCatQueue((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pQueueConfig)
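//
// Illustrative construction sketch (editor's aside, not generated code): a wayfinder that has
// decoded the queue location from its descriptor might create the queue object as below. The
// pWayfinder parent and the contents of pDecodedConfig are placeholders for values discovered
// through the wayfinder protocol.
//
static inline NV_STATUS exampleCreateCrashCatQueue(struct CrashCatWayfinder *pWayfinder,
                                                   const CrashCatQueueConfig *pDecodedConfig,
                                                   struct CrashCatQueue **ppQueue)
{
    CrashCatQueueConfig queueConfig = *pDecodedConfig;  // aperture, size, offset, put/get reg offsets

    return __objCreate_CrashCatQueue(ppQueue, pWayfinder, 0 /* createFlags */, &queueConfig);
}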
struct CrashCatReport *crashcatQueueConsumeNextReport_V1(struct CrashCatQueue *arg0);
#ifdef __nvoc_crashcat_queue_h_disabled
static inline struct CrashCatReport *crashcatQueueConsumeNextReport(struct CrashCatQueue *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatQueue was disabled!");
return NULL;
}
#else //__nvoc_crashcat_queue_h_disabled
#define crashcatQueueConsumeNextReport(arg0) crashcatQueueConsumeNextReport_V1(arg0)
#endif //__nvoc_crashcat_queue_h_disabled
#define crashcatQueueConsumeNextReport_HAL(arg0) crashcatQueueConsumeNextReport(arg0)
NV_STATUS crashcatQueueConstruct_IMPL(struct CrashCatQueue *arg_, CrashCatQueueConfig *arg_pQueueConfig);
#define __nvoc_crashcatQueueConstruct(arg_, arg_pQueueConfig) crashcatQueueConstruct_IMPL(arg_, arg_pQueueConfig)
void crashcatQueueDestruct_IMPL(struct CrashCatQueue *arg0);
#define __nvoc_crashcatQueueDestruct(arg0) crashcatQueueDestruct_IMPL(arg0)
#undef PRIVATE_FIELD
#endif // CRASHCAT_QUEUE_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_QUEUE_NVOC_H_

View File

@@ -0,0 +1,218 @@
#define NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_report_nvoc.h"
void __nvoc_init_halspec_CrashCatReportHal(CrashCatReportHal *pCrashCatReportHal, NV_CRASHCAT_PACKET_FORMAT_VERSION version, CrashCatImplementer implementer)
{
// V1_GENERIC
if(version == 0x1 && implementer == 0x0)
{
pCrashCatReportHal->__nvoc_HalVarIdx = 0;
}
// V1_LIBOS2
else if(version == 0x1 && implementer == 0x4C49424F53322E30)
{
pCrashCatReportHal->__nvoc_HalVarIdx = 1;
}
// V1_LIBOS3
else if(version == 0x1 && implementer == 0x4C49424F53332E31)
{
pCrashCatReportHal->__nvoc_HalVarIdx = 2;
}
}
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xde4777 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatReport;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_CrashCatReport(CrashCatReport*,
NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer);
void __nvoc_init_funcTable_CrashCatReport(CrashCatReport*);
NV_STATUS __nvoc_ctor_CrashCatReport(CrashCatReport*, void ** arg_ppReportBytes, NvLength arg_bytesRemaining);
void __nvoc_init_dataField_CrashCatReport(CrashCatReport*);
void __nvoc_dtor_CrashCatReport(CrashCatReport*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatReport;
static const struct NVOC_RTTI __nvoc_rtti_CrashCatReport_CrashCatReport = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatReport,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatReport,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_CrashCatReport_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(CrashCatReport, __nvoc_base_Object),
};
static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatReport = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_CrashCatReport_CrashCatReport,
&__nvoc_rtti_CrashCatReport_Object,
},
};
const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatReport =
{
/*classInfo=*/ {
/*size=*/ sizeof(CrashCatReport),
/*classId=*/ classId(CrashCatReport),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "CrashCatReport",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CrashCatReport,
/*pCastInfo=*/ &__nvoc_castinfo_CrashCatReport,
/*pExportInfo=*/ &__nvoc_export_info_CrashCatReport
};
const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatReport =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_CrashCatReport(CrashCatReport *pThis) {
__nvoc_crashcatReportDestruct(pThis);
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_dataField_CrashCatReport(CrashCatReport *pThis) {
CrashCatReportHal *reportHal = &pThis->reportHal;
const unsigned long reportHal_HalVarIdx = (unsigned long)reportHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(reportHal);
PORT_UNREFERENCED_VARIABLE(reportHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_CrashCatReport(CrashCatReport *pThis, void ** arg_ppReportBytes, NvLength arg_bytesRemaining) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_CrashCatReport_fail_Object;
__nvoc_init_dataField_CrashCatReport(pThis);
status = __nvoc_crashcatReportConstruct(pThis, arg_ppReportBytes, arg_bytesRemaining);
if (status != NV_OK) goto __nvoc_ctor_CrashCatReport_fail__init;
goto __nvoc_ctor_CrashCatReport_exit; // Success
__nvoc_ctor_CrashCatReport_fail__init:
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_CrashCatReport_fail_Object:
__nvoc_ctor_CrashCatReport_exit:
return status;
}
static void __nvoc_init_funcTable_CrashCatReport_1(CrashCatReport *pThis) {
CrashCatReportHal *reportHal = &pThis->reportHal;
const unsigned long reportHal_HalVarIdx = (unsigned long)reportHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(reportHal);
PORT_UNREFERENCED_VARIABLE(reportHal_HalVarIdx);
// Hal function -- crashcatReportLogReporter
if (( ((reportHal_HalVarIdx >> 5) == 0UL) && ((1UL << (reportHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* CrashCatReportHal: V1_GENERIC */
{
pThis->__crashcatReportLogReporter__ = &crashcatReportLogReporter_V1_GENERIC;
}
else
{
pThis->__crashcatReportLogReporter__ = &crashcatReportLogReporter_V1_LIBOS2;
}
// Hal function -- crashcatReportLogSource
if (( ((reportHal_HalVarIdx >> 5) == 0UL) && ((1UL << (reportHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* CrashCatReportHal: V1_GENERIC */
{
pThis->__crashcatReportLogSource__ = &crashcatReportLogSource_V1_GENERIC;
}
else
{
pThis->__crashcatReportLogSource__ = &crashcatReportLogSource_V1_LIBOS2;
}
}
void __nvoc_init_funcTable_CrashCatReport(CrashCatReport *pThis) {
__nvoc_init_funcTable_CrashCatReport_1(pThis);
}
void __nvoc_init_Object(Object*);
void __nvoc_init_CrashCatReport(CrashCatReport *pThis,
NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer) {
pThis->__nvoc_pbase_CrashCatReport = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_halspec_CrashCatReportHal(&pThis->reportHal, CrashCatReportHal_version, CrashCatReportHal_implementer);
__nvoc_init_funcTable_CrashCatReport(pThis);
}
NV_STATUS __nvoc_objCreate_CrashCatReport(CrashCatReport **ppThis, Dynamic *pParent, NvU32 createFlags,
NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer, void ** arg_ppReportBytes, NvLength arg_bytesRemaining) {
NV_STATUS status;
Object *pParentObj;
CrashCatReport *pThis;
status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(CrashCatReport), (void**)&pThis, (void**)ppThis);
if (status != NV_OK)
return status;
portMemSet(pThis, 0, sizeof(CrashCatReport));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_CrashCatReport);
pThis->__nvoc_base_Object.createFlags = createFlags;
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_CrashCatReport(pThis, CrashCatReportHal_version, CrashCatReportHal_implementer);
status = __nvoc_ctor_CrashCatReport(pThis, arg_ppReportBytes, arg_bytesRemaining);
if (status != NV_OK) goto __nvoc_objCreate_CrashCatReport_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_CrashCatReport_cleanup:
// do not call destructors here since the constructor already called them
if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
portMemSet(pThis, 0, sizeof(CrashCatReport));
else
portMemFree(pThis);
// coverity[leaked_storage:FALSE]
return status;
}
NV_STATUS __nvoc_objCreateDynamic_CrashCatReport(CrashCatReport **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version = va_arg(args, NV_CRASHCAT_PACKET_FORMAT_VERSION);
CrashCatImplementer CrashCatReportHal_implementer = va_arg(args, CrashCatImplementer);
void ** arg_ppReportBytes = va_arg(args, void **);
NvLength arg_bytesRemaining = va_arg(args, NvLength);
status = __nvoc_objCreate_CrashCatReport(ppThis, pParent, createFlags, CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining);
return status;
}



@@ -0,0 +1,388 @@
#ifndef _G_CRASHCAT_REPORT_NVOC_H_
#define _G_CRASHCAT_REPORT_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_crashcat_report_nvoc.h"
#ifndef CRASHCAT_REPORT_H
#define CRASHCAT_REPORT_H
#include "nv-crashcat.h"
#include "nvoc/object.h"
struct CrashCatEngine;
#ifndef __NVOC_CLASS_CrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_CrashCatEngine_TYPEDEF__
typedef struct CrashCatEngine CrashCatEngine;
#endif /* __NVOC_CLASS_CrashCatEngine_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatEngine
#define __nvoc_class_id_CrashCatEngine 0x654166
#endif /* __nvoc_class_id_CrashCatEngine */
typedef NvU64 CrashCatImplementer;
#define CRASHCAT_IMPLEMENTER_UNSPECIFIED 0ull
#define CRASHCAT_IMPLEMENTER_LIBOS2 0x4C49424F53322E30ull // "LIBOS2.0"
#define CRASHCAT_IMPLEMENTER_LIBOS3 0x4C49424F53332E31ull // "LIBOS3.1"
struct CrashCatReportHal {
unsigned short __nvoc_HalVarIdx;
};
typedef struct CrashCatReportHal CrashCatReportHal;
void __nvoc_init_halspec_CrashCatReportHal(CrashCatReportHal*, NV_CRASHCAT_PACKET_FORMAT_VERSION, CrashCatImplementer);
#ifdef NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
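// PRIVATE_FIELD() exposes the member name as-is when the including translation unit defines
// NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED, and otherwise routes it through
// NVOC_PRIVATE_FIELD() so the field cannot be reached by its public name outside the class
// implementation.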
struct __nvoc_inner_struc_CrashCatReport_1__ {
NvCrashCatReport_V1 report;
NvCrashCatRiscv64CsrState_V1 riscv64CsrState;
NvCrashCatRiscv64GprState_V1 riscv64GprState;
NvCrashCatRiscv64Trace_V1 *pRiscv64Trace;
NvCrashCatIo32State_V1 *pIo32State;
};
struct CrashCatReport {
const struct NVOC_RTTI *__nvoc_rtti;
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct CrashCatReport *__nvoc_pbase_CrashCatReport;
void (*__crashcatReportLogReporter__)(struct CrashCatReport *);
void (*__crashcatReportLogSource__)(struct CrashCatReport *);
struct CrashCatReportHal reportHal;
struct CrashCatEngine *PRIVATE_FIELD(pEngine);
NvU32 PRIVATE_FIELD(validTags);
struct __nvoc_inner_struc_CrashCatReport_1__ PRIVATE_FIELD(v1);
};
#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatReport;
#define __staticCast_CrashCatReport(pThis) \
((pThis)->__nvoc_pbase_CrashCatReport)
#ifdef __nvoc_crashcat_report_h_disabled
#define __dynamicCast_CrashCatReport(pThis) ((CrashCatReport*)NULL)
#else //__nvoc_crashcat_report_h_disabled
#define __dynamicCast_CrashCatReport(pThis) \
((CrashCatReport*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatReport)))
#endif //__nvoc_crashcat_report_h_disabled
NV_STATUS __nvoc_objCreateDynamic_CrashCatReport(CrashCatReport**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_CrashCatReport(CrashCatReport**, Dynamic*, NvU32,
NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer, void ** arg_ppReportBytes, NvLength arg_bytesRemaining);
#define __objCreate_CrashCatReport(ppNewObj, pParent, createFlags, CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining) \
__nvoc_objCreate_CrashCatReport((ppNewObj), staticCast((pParent), Dynamic), (createFlags), CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining)
#define crashcatReportLogReporter(arg0) crashcatReportLogReporter_DISPATCH(arg0)
#define crashcatReportLogReporter_HAL(arg0) crashcatReportLogReporter_DISPATCH(arg0)
#define crashcatReportLogSource(arg0) crashcatReportLogSource_DISPATCH(arg0)
#define crashcatReportLogSource_HAL(arg0) crashcatReportLogSource_DISPATCH(arg0)
void crashcatReportDestruct_V1(struct CrashCatReport *arg0);
#define __nvoc_crashcatReportDestruct(arg0) crashcatReportDestruct_V1(arg0)
void *crashcatReportExtract_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtract(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtract(arg0, pReportBytes, bytesRemaining) crashcatReportExtract_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtract_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtract(arg0, pReportBytes, bytesRemaining)
void *crashcatReportExtractReport_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractReport(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractReport(arg0, pReportBytes, bytesRemaining) crashcatReportExtractReport_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractReport_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractReport(arg0, pReportBytes, bytesRemaining)
void *crashcatReportExtractRiscv64CsrState_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractRiscv64CsrState(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64CsrState(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64CsrState_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64CsrState_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64CsrState(arg0, pReportBytes, bytesRemaining)
void *crashcatReportExtractRiscv64GprState_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractRiscv64GprState(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64GprState(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64GprState_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64GprState_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64GprState(arg0, pReportBytes, bytesRemaining)
void *crashcatReportExtractRiscv64Trace_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractRiscv64Trace(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64Trace(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64Trace_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64Trace_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64Trace(arg0, pReportBytes, bytesRemaining)
void *crashcatReportExtractIo32State_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractIo32State(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractIo32State(arg0, pReportBytes, bytesRemaining) crashcatReportExtractIo32State_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractIo32State_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractIo32State(arg0, pReportBytes, bytesRemaining)
void crashcatReportLogRiscv64CsrState_V1(struct CrashCatReport *arg0);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogRiscv64CsrState(struct CrashCatReport *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64CsrState(arg0) crashcatReportLogRiscv64CsrState_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64CsrState_HAL(arg0) crashcatReportLogRiscv64CsrState(arg0)
void crashcatReportLogRiscv64GprState_V1(struct CrashCatReport *arg0);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogRiscv64GprState(struct CrashCatReport *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64GprState(arg0) crashcatReportLogRiscv64GprState_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64GprState_HAL(arg0) crashcatReportLogRiscv64GprState(arg0)
void crashcatReportLogRiscv64Trace_V1(struct CrashCatReport *arg0);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogRiscv64Trace(struct CrashCatReport *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64Trace(arg0) crashcatReportLogRiscv64Trace_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64Trace_HAL(arg0) crashcatReportLogRiscv64Trace(arg0)
void crashcatReportLogIo32State_V1(struct CrashCatReport *arg0);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogIo32State(struct CrashCatReport *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogIo32State(arg0) crashcatReportLogIo32State_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogIo32State_HAL(arg0) crashcatReportLogIo32State(arg0)
void crashcatReportLogReporter_V1_GENERIC(struct CrashCatReport *arg0);
void crashcatReportLogReporter_V1_LIBOS2(struct CrashCatReport *arg0);
static inline void crashcatReportLogReporter_DISPATCH(struct CrashCatReport *arg0) {
arg0->__crashcatReportLogReporter__(arg0);
}
void crashcatReportLogSource_V1_GENERIC(struct CrashCatReport *arg0);
void crashcatReportLogSource_V1_LIBOS2(struct CrashCatReport *arg0);
static inline void crashcatReportLogSource_DISPATCH(struct CrashCatReport *arg0) {
arg0->__crashcatReportLogSource__(arg0);
}
NV_STATUS crashcatReportConstruct_IMPL(struct CrashCatReport *arg_, void **arg_ppReportBytes, NvLength arg_bytesRemaining);
#define __nvoc_crashcatReportConstruct(arg_, arg_ppReportBytes, arg_bytesRemaining) crashcatReportConstruct_IMPL(arg_, arg_ppReportBytes, arg_bytesRemaining)
void crashcatReportLog_IMPL(struct CrashCatReport *arg0);
#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLog(struct CrashCatReport *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLog(arg0) crashcatReportLog_IMPL(arg0)
#endif //__nvoc_crashcat_report_h_disabled
#undef PRIVATE_FIELD
#ifndef NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#undef crashcatReportLogReporter
void NVOC_PRIVATE_FUNCTION(crashcatReportLogReporter)(struct CrashCatReport *arg0);
#undef crashcatReportLogReporter_HAL
void NVOC_PRIVATE_FUNCTION(crashcatReportLogReporter_HAL)(struct CrashCatReport *arg0);
#undef crashcatReportLogSource
void NVOC_PRIVATE_FUNCTION(crashcatReportLogSource)(struct CrashCatReport *arg0);
#undef crashcatReportLogSource_HAL
void NVOC_PRIVATE_FUNCTION(crashcatReportLogSource_HAL)(struct CrashCatReport *arg0);
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtract
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtract)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractReport
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractReport)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractRiscv64CsrState
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractRiscv64CsrState)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractRiscv64GprState
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractRiscv64GprState)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractRiscv64Trace
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractRiscv64Trace)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractIo32State
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractIo32State)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogRiscv64CsrState
void NVOC_PRIVATE_FUNCTION(crashcatReportLogRiscv64CsrState)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogRiscv64GprState
void NVOC_PRIVATE_FUNCTION(crashcatReportLogRiscv64GprState)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogRiscv64Trace
void NVOC_PRIVATE_FUNCTION(crashcatReportLogRiscv64Trace)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled
#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogIo32State
void NVOC_PRIVATE_FUNCTION(crashcatReportLogIo32State)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled
#endif // NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
// Utility to convert a cause code to a user-friendly string
const char *crashcatReportRiscvCauseToString(NvU64 xcause);
// Log indentation used for multi-line reports
#define CRASHCAT_LOG_INDENT " "
// Prefix used for multi-line reports
#if defined(NVRM)
#define CRASHCAT_LOG_PREFIX "NVRM: "
#else
#define CRASHCAT_LOG_PREFIX
#endif
#define CRASHCAT_REPORT_LOG_PACKET_TYPE(pReport, fmt, ...) \
crashcatEnginePrintf(pReport->pEngine, NV_FALSE, \
CRASHCAT_LOG_PREFIX CRASHCAT_LOG_INDENT fmt, ##__VA_ARGS__)
#define CRASHCAT_REPORT_LOG_DATA(pReport, fmt, ...) \
crashcatEnginePrintf(pReport->pEngine, NV_FALSE, \
CRASHCAT_LOG_PREFIX CRASHCAT_LOG_INDENT CRASHCAT_LOG_INDENT fmt, \
##__VA_ARGS__)
#endif // CRASHCAT_REPORT_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_REPORT_NVOC_H_
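For orientation, a minimal hypothetical sketch (not part of the generated sources) of how a client might create and log a report through the interfaces above; the helper name, the numeric cast standing in for the V1 packet format version, and the use of objDelete() for teardown are assumptions rather than anything introduced by this change.

// Illustrative only -- not part of g_crashcat_report_nvoc.h.
static void exampleConsumeReport(Dynamic *pParent, void **ppReportBytes, NvLength bytesRemaining)
{
    CrashCatReport *pReport = NULL;

    // Assumed: format version 1 pairs with the V1 HAL variants selected in the .c file above.
    NV_STATUS status = __objCreate_CrashCatReport(&pReport, pParent, 0,
                                                  (NV_CRASHCAT_PACKET_FORMAT_VERSION)1,
                                                  CRASHCAT_IMPLEMENTER_LIBOS2,
                                                  ppReportBytes, bytesRemaining);
    if (status == NV_OK)
    {
        crashcatReportLog(pReport); // logs the captured crash state via the HAL-selected loggers
        objDelete(pReport);         // assumed NVOC teardown entry point
    }
}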


@@ -0,0 +1,186 @@
#define NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_wayfinder_nvoc.h"
void __nvoc_init_halspec_CrashCatWayfinderHal(CrashCatWayfinderHal *pCrashCatWayfinderHal, NV_CRASHCAT_WAYFINDER_VERSION version)
{
// V1
if(version == 0x1)
{
pCrashCatWayfinderHal->__nvoc_HalVarIdx = 0;
}
}
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x085e32 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatWayfinder;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
void __nvoc_init_CrashCatWayfinder(CrashCatWayfinder*,
NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version);
void __nvoc_init_funcTable_CrashCatWayfinder(CrashCatWayfinder*);
NV_STATUS __nvoc_ctor_CrashCatWayfinder(CrashCatWayfinder*, NvU32 arg_wfl0);
void __nvoc_init_dataField_CrashCatWayfinder(CrashCatWayfinder*);
void __nvoc_dtor_CrashCatWayfinder(CrashCatWayfinder*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatWayfinder;
static const struct NVOC_RTTI __nvoc_rtti_CrashCatWayfinder_CrashCatWayfinder = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatWayfinder,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatWayfinder,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_CrashCatWayfinder_Object = {
/*pClassDef=*/ &__nvoc_class_def_Object,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(CrashCatWayfinder, __nvoc_base_Object),
};
static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatWayfinder = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_CrashCatWayfinder_CrashCatWayfinder,
&__nvoc_rtti_CrashCatWayfinder_Object,
},
};
const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatWayfinder =
{
/*classInfo=*/ {
/*size=*/ sizeof(CrashCatWayfinder),
/*classId=*/ classId(CrashCatWayfinder),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "CrashCatWayfinder",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CrashCatWayfinder,
/*pCastInfo=*/ &__nvoc_castinfo_CrashCatWayfinder,
/*pExportInfo=*/ &__nvoc_export_info_CrashCatWayfinder
};
const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatWayfinder =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_CrashCatWayfinder(CrashCatWayfinder *pThis) {
__nvoc_crashcatWayfinderDestruct(pThis);
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_dataField_CrashCatWayfinder(CrashCatWayfinder *pThis) {
CrashCatWayfinderHal *wayfinderHal = &pThis->wayfinderHal;
const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(wayfinderHal);
PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_CrashCatWayfinder(CrashCatWayfinder *pThis, NvU32 arg_wfl0) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
if (status != NV_OK) goto __nvoc_ctor_CrashCatWayfinder_fail_Object;
__nvoc_init_dataField_CrashCatWayfinder(pThis);
status = __nvoc_crashcatWayfinderConstruct(pThis, arg_wfl0);
if (status != NV_OK) goto __nvoc_ctor_CrashCatWayfinder_fail__init;
goto __nvoc_ctor_CrashCatWayfinder_exit; // Success
__nvoc_ctor_CrashCatWayfinder_fail__init:
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_CrashCatWayfinder_fail_Object:
__nvoc_ctor_CrashCatWayfinder_exit:
return status;
}
static void __nvoc_init_funcTable_CrashCatWayfinder_1(CrashCatWayfinder *pThis) {
CrashCatWayfinderHal *wayfinderHal = &pThis->wayfinderHal;
const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(wayfinderHal);
PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}
void __nvoc_init_funcTable_CrashCatWayfinder(CrashCatWayfinder *pThis) {
__nvoc_init_funcTable_CrashCatWayfinder_1(pThis);
}
void __nvoc_init_Object(Object*);
void __nvoc_init_CrashCatWayfinder(CrashCatWayfinder *pThis,
NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version) {
pThis->__nvoc_pbase_CrashCatWayfinder = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
__nvoc_init_Object(&pThis->__nvoc_base_Object);
__nvoc_init_halspec_CrashCatWayfinderHal(&pThis->wayfinderHal, CrashCatWayfinderHal_version);
__nvoc_init_funcTable_CrashCatWayfinder(pThis);
}
NV_STATUS __nvoc_objCreate_CrashCatWayfinder(CrashCatWayfinder **ppThis, Dynamic *pParent, NvU32 createFlags,
NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version, NvU32 arg_wfl0) {
NV_STATUS status;
Object *pParentObj;
CrashCatWayfinder *pThis;
status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(CrashCatWayfinder), (void**)&pThis, (void**)ppThis);
if (status != NV_OK)
return status;
portMemSet(pThis, 0, sizeof(CrashCatWayfinder));
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_CrashCatWayfinder);
pThis->__nvoc_base_Object.createFlags = createFlags;
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_Object);
}
else
{
pThis->__nvoc_base_Object.pParent = NULL;
}
__nvoc_init_CrashCatWayfinder(pThis, CrashCatWayfinderHal_version);
status = __nvoc_ctor_CrashCatWayfinder(pThis, arg_wfl0);
if (status != NV_OK) goto __nvoc_objCreate_CrashCatWayfinder_cleanup;
*ppThis = pThis;
return NV_OK;
__nvoc_objCreate_CrashCatWayfinder_cleanup:
// do not call destructors here since the constructor already called them
if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
portMemSet(pThis, 0, sizeof(CrashCatWayfinder));
else
portMemFree(pThis);
// coverity[leaked_storage:FALSE]
return status;
}
NV_STATUS __nvoc_objCreateDynamic_CrashCatWayfinder(CrashCatWayfinder **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
NV_STATUS status;
NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version = va_arg(args, NV_CRASHCAT_WAYFINDER_VERSION);
NvU32 arg_wfl0 = va_arg(args, NvU32);
status = __nvoc_objCreate_CrashCatWayfinder(ppThis, pParent, createFlags, CrashCatWayfinderHal_version, arg_wfl0);
return status;
}


@@ -0,0 +1,170 @@
#ifndef _G_CRASHCAT_WAYFINDER_NVOC_H_
#define _G_CRASHCAT_WAYFINDER_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_crashcat_wayfinder_nvoc.h"
#ifndef CRASHCAT_WAYFINDER_H
#define CRASHCAT_WAYFINDER_H
#include "nvoc/object.h"
#include "nv-crashcat.h"
struct CrashCatQueue;
#ifndef __NVOC_CLASS_CrashCatQueue_TYPEDEF__
#define __NVOC_CLASS_CrashCatQueue_TYPEDEF__
typedef struct CrashCatQueue CrashCatQueue;
#endif /* __NVOC_CLASS_CrashCatQueue_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatQueue
#define __nvoc_class_id_CrashCatQueue 0xbaa900
#endif /* __nvoc_class_id_CrashCatQueue */
struct CrashCatReport;
#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */
struct CrashCatWayfinderHal {
unsigned short __nvoc_HalVarIdx;
};
typedef struct CrashCatWayfinderHal CrashCatWayfinderHal;
void __nvoc_init_halspec_CrashCatWayfinderHal(CrashCatWayfinderHal*, NV_CRASHCAT_WAYFINDER_VERSION);
#ifdef NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct __nvoc_inner_struc_CrashCatWayfinder_1__ {
NvCrashCatWayfinderL0_V1 wfl0;
NvCrashCatWayfinderL1_V1 wfl1;
};
struct CrashCatWayfinder {
const struct NVOC_RTTI *__nvoc_rtti;
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct CrashCatWayfinder *__nvoc_pbase_CrashCatWayfinder;
struct CrashCatWayfinderHal wayfinderHal;
struct CrashCatQueue *PRIVATE_FIELD(pQueue);
struct __nvoc_inner_struc_CrashCatWayfinder_1__ PRIVATE_FIELD(v1);
};
#ifndef __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
#define __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
typedef struct CrashCatWayfinder CrashCatWayfinder;
#endif /* __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__ */
#ifndef __nvoc_class_id_CrashCatWayfinder
#define __nvoc_class_id_CrashCatWayfinder 0x085e32
#endif /* __nvoc_class_id_CrashCatWayfinder */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatWayfinder;
#define __staticCast_CrashCatWayfinder(pThis) \
((pThis)->__nvoc_pbase_CrashCatWayfinder)
#ifdef __nvoc_crashcat_wayfinder_h_disabled
#define __dynamicCast_CrashCatWayfinder(pThis) ((CrashCatWayfinder*)NULL)
#else //__nvoc_crashcat_wayfinder_h_disabled
#define __dynamicCast_CrashCatWayfinder(pThis) \
((CrashCatWayfinder*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatWayfinder)))
#endif //__nvoc_crashcat_wayfinder_h_disabled
NV_STATUS __nvoc_objCreateDynamic_CrashCatWayfinder(CrashCatWayfinder**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_CrashCatWayfinder(CrashCatWayfinder**, Dynamic*, NvU32,
NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version, NvU32 arg_wfl0);
#define __objCreate_CrashCatWayfinder(ppNewObj, pParent, createFlags, CrashCatWayfinderHal_version, arg_wfl0) \
__nvoc_objCreate_CrashCatWayfinder((ppNewObj), staticCast((pParent), Dynamic), (createFlags), CrashCatWayfinderHal_version, arg_wfl0)
struct CrashCatQueue *crashcatWayfinderGetReportQueue_V1(struct CrashCatWayfinder *arg0);
#ifdef __nvoc_crashcat_wayfinder_h_disabled
static inline struct CrashCatQueue *crashcatWayfinderGetReportQueue(struct CrashCatWayfinder *arg0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatWayfinder was disabled!");
return NULL;
}
#else //__nvoc_crashcat_wayfinder_h_disabled
#define crashcatWayfinderGetReportQueue(arg0) crashcatWayfinderGetReportQueue_V1(arg0)
#endif //__nvoc_crashcat_wayfinder_h_disabled
#define crashcatWayfinderGetReportQueue_HAL(arg0) crashcatWayfinderGetReportQueue(arg0)
void crashcatWayfinderSetWFL0_V1(struct CrashCatWayfinder *arg0, NvU32 wfl0);
#ifdef __nvoc_crashcat_wayfinder_h_disabled
static inline void crashcatWayfinderSetWFL0(struct CrashCatWayfinder *arg0, NvU32 wfl0) {
NV_ASSERT_FAILED_PRECOMP("CrashCatWayfinder was disabled!");
}
#else //__nvoc_crashcat_wayfinder_h_disabled
#define crashcatWayfinderSetWFL0(arg0, wfl0) crashcatWayfinderSetWFL0_V1(arg0, wfl0)
#endif //__nvoc_crashcat_wayfinder_h_disabled
#define crashcatWayfinderSetWFL0_HAL(arg0, wfl0) crashcatWayfinderSetWFL0(arg0, wfl0)
NV_STATUS crashcatWayfinderConstruct_IMPL(struct CrashCatWayfinder *arg_, NvU32 arg_wfl0);
#define __nvoc_crashcatWayfinderConstruct(arg_, arg_wfl0) crashcatWayfinderConstruct_IMPL(arg_, arg_wfl0)
void crashcatWayfinderDestruct_IMPL(struct CrashCatWayfinder *arg0);
#define __nvoc_crashcatWayfinderDestruct(arg0) crashcatWayfinderDestruct_IMPL(arg0)
#undef PRIVATE_FIELD
#ifndef NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#ifndef __nvoc_crashcat_wayfinder_h_disabled
#undef crashcatWayfinderSetWFL0
void NVOC_PRIVATE_FUNCTION(crashcatWayfinderSetWFL0)(struct CrashCatWayfinder *arg0, NvU32 wfl0);
#endif //__nvoc_crashcat_wayfinder_h_disabled
#endif // NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#endif // CRASHCAT_WAYFINDER_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_WAYFINDER_NVOC_H_
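A similar hypothetical sketch for the wayfinder; the 0x1 cast mirrors the version check in __nvoc_init_halspec_CrashCatWayfinderHal(), while the helper name and the objDelete() teardown are assumptions.

// Illustrative only -- not part of g_crashcat_wayfinder_nvoc.h.
static void exampleWayfinder(Dynamic *pParent, NvU32 wfl0)
{
    CrashCatWayfinder *pWayfinder = NULL;

    if (__objCreate_CrashCatWayfinder(&pWayfinder, pParent, 0,
                                      (NV_CRASHCAT_WAYFINDER_VERSION)0x1, wfl0) == NV_OK)
    {
        struct CrashCatQueue *pQueue = crashcatWayfinderGetReportQueue(pWayfinder);
        (void)pQueue;           // a real client would pull CrashCatReport packets from this queue
        objDelete(pWayfinder);  // assumed NVOC teardown entry point
    }
}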


@@ -266,6 +266,7 @@ void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
{
pThis->setProperty(pThis, PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF, ((NvBool)(0 == 0)));
}
pThis->setProperty(pThis, PDB_PROP_GPU_FASTPATH_SEQ_ENABLED, ((NvBool)(0 != 0)));
pThis->boardId = ~0;


@@ -992,6 +992,7 @@ struct OBJGPU {
NvBool PDB_PROP_GPU_SKIP_TABLE_CE_MAP;
NvBool PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF;
NvBool PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL;
NvBool PDB_PROP_GPU_FASTPATH_SEQ_ENABLED;
OS_GPU_INFO *pOsGpuInfo;
OS_RM_CAPS *pOsRmCaps;
NvU32 halImpl;
@@ -1268,6 +1269,8 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;
#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI
#define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_CAST
#define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_NAME PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL
#define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_CAST
#define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_NAME PDB_PROP_GPU_FASTPATH_SEQ_ENABLED
#define PDB_PROP_GPU_IS_UEFI_BASE_CAST
#define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI
#define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_CAST


@@ -625,40 +625,6 @@ static inline void intrClearLeafVector(OBJGPU *pGpu, struct Intr *pIntr, NvU32 v
#define intrClearLeafVector_HAL(pGpu, pIntr, vector, pThreadState) intrClearLeafVector(pGpu, pIntr, vector, pThreadState)
static inline void intrClearCpuLeafVector_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) {
return;
}
void intrClearCpuLeafVector_GH100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState);
#ifdef __nvoc_intr_h_disabled
static inline void intrClearCpuLeafVector(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) {
NV_ASSERT_FAILED_PRECOMP("Intr was disabled!");
}
#else //__nvoc_intr_h_disabled
#define intrClearCpuLeafVector(pGpu, pIntr, vector, pThreadState) intrClearCpuLeafVector_b3696a(pGpu, pIntr, vector, pThreadState)
#endif //__nvoc_intr_h_disabled
#define intrClearCpuLeafVector_HAL(pGpu, pIntr, vector, pThreadState) intrClearCpuLeafVector(pGpu, pIntr, vector, pThreadState)
static inline void intrWriteCpuRegLeaf_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) {
return;
}
void intrWriteCpuRegLeaf_GH100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2);
#ifdef __nvoc_intr_h_disabled
static inline void intrWriteCpuRegLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) {
NV_ASSERT_FAILED_PRECOMP("Intr was disabled!");
}
#else //__nvoc_intr_h_disabled
#define intrWriteCpuRegLeaf(pGpu, pIntr, arg0, arg1, arg2) intrWriteCpuRegLeaf_b3696a(pGpu, pIntr, arg0, arg1, arg2)
#endif //__nvoc_intr_h_disabled
#define intrWriteCpuRegLeaf_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteCpuRegLeaf(pGpu, pIntr, arg0, arg1, arg2)
NvBool intrIsVectorPending_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState);


@@ -0,0 +1,213 @@
#define NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kernel_crashcat_engine_nvoc.h"
#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xc37aef = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;
void __nvoc_init_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_init_dataField_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_dtor_KernelCrashCatEngine(KernelCrashCatEngine*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCrashCatEngine;
static const struct NVOC_RTTI __nvoc_rtti_KernelCrashCatEngine_KernelCrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelCrashCatEngine,
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelCrashCatEngine, __nvoc_base_CrashCatEngine),
};
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelCrashCatEngine = {
/*numRelatives=*/ 2,
/*relatives=*/ {
&__nvoc_rtti_KernelCrashCatEngine_KernelCrashCatEngine,
&__nvoc_rtti_KernelCrashCatEngine_CrashCatEngine,
},
};
// Not instantiable because it's not derived from class "Object"
// Not instantiable because it's an abstract class with the following pure virtual functions:
// kcrashcatEngineRegRead
// kcrashcatEngineRegWrite
// kcrashcatEngineMaskDmemAddr
const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine =
{
/*classInfo=*/ {
/*size=*/ sizeof(KernelCrashCatEngine),
/*classId=*/ classId(KernelCrashCatEngine),
/*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
/*name=*/ "KernelCrashCatEngine",
#endif
},
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
/*pCastInfo=*/ &__nvoc_castinfo_KernelCrashCatEngine,
/*pExportInfo=*/ &__nvoc_export_info_KernelCrashCatEngine
};
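// Each thunk below recovers the containing KernelCrashCatEngine from a base CrashCatEngine
// pointer by subtracting the RTTI-recorded base-class offset, then forwards the call to the
// matching kcrashcatEngine* implementation.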
static NvBool __nvoc_thunk_KernelCrashCatEngine_crashcatEngineConfigured(struct CrashCatEngine *arg0) {
return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset));
}
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnload(struct CrashCatEngine *arg0) {
kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset));
}
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineVprintf(struct CrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args) {
kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), bReportStart, fmt, args);
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriRead(struct CrashCatEngine *arg0, NvU32 offset) {
return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), offset);
}
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriWrite(struct CrashCatEngine *arg0, NvU32 offset, NvU32 data) {
kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), offset, data);
}
static void *__nvoc_thunk_KernelCrashCatEngine_crashcatEngineMapBufferDescriptor(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnmapBufferDescriptor(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineSyncBufferDescriptor(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), pBufDesc, offset, size);
}
static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetScratchOffsets(struct CrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), scratchGroupId);
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetWFL0Offset(struct CrashCatEngine *arg0) {
return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset));
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCrashCatEngine =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_CrashCatEngine(CrashCatEngine*);
void __nvoc_dtor_KernelCrashCatEngine(KernelCrashCatEngine *pThis) {
__nvoc_dtor_CrashCatEngine(&pThis->__nvoc_base_CrashCatEngine);
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_dataField_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_CrashCatEngine(CrashCatEngine* );
NV_STATUS __nvoc_ctor_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_CrashCatEngine(&pThis->__nvoc_base_CrashCatEngine);
if (status != NV_OK) goto __nvoc_ctor_KernelCrashCatEngine_fail_CrashCatEngine;
__nvoc_init_dataField_KernelCrashCatEngine(pThis, pRmhalspecowner);
goto __nvoc_ctor_KernelCrashCatEngine_exit; // Success
__nvoc_ctor_KernelCrashCatEngine_fail_CrashCatEngine:
__nvoc_ctor_KernelCrashCatEngine_exit:
return status;
}
static void __nvoc_init_funcTable_KernelCrashCatEngine_1(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
pThis->__kcrashcatEngineConfigured__ = &kcrashcatEngineConfigured_IMPL;
pThis->__kcrashcatEngineUnload__ = &kcrashcatEngineUnload_IMPL;
pThis->__kcrashcatEngineVprintf__ = &kcrashcatEngineVprintf_IMPL;
pThis->__kcrashcatEngineRegRead__ = NULL;
pThis->__kcrashcatEngineRegWrite__ = NULL;
pThis->__kcrashcatEngineMaskDmemAddr__ = NULL;
pThis->__kcrashcatEnginePriRead__ = &kcrashcatEnginePriRead_IMPL;
pThis->__kcrashcatEnginePriWrite__ = &kcrashcatEnginePriWrite_IMPL;
pThis->__kcrashcatEngineMapBufferDescriptor__ = &kcrashcatEngineMapBufferDescriptor_IMPL;
pThis->__kcrashcatEngineUnmapBufferDescriptor__ = &kcrashcatEngineUnmapBufferDescriptor_IMPL;
pThis->__kcrashcatEngineSyncBufferDescriptor__ = &kcrashcatEngineSyncBufferDescriptor_IMPL;
// Hal function -- kcrashcatEngineReadDmem
pThis->__kcrashcatEngineReadDmem__ = &kcrashcatEngineReadDmem_TU102;
pThis->__kcrashcatEngineReadEmem__ = &kcrashcatEngineReadEmem_2fced3;
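// kcrashcatEngineReadEmem_2fced3 is the generated inline stub (see the header) that fires
// NV_ASSERT_PRECOMP(0), i.e. EMEM reads are unimplemented at this level.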
// Hal function -- kcrashcatEngineGetScratchOffsets
pThis->__kcrashcatEngineGetScratchOffsets__ = &kcrashcatEngineGetScratchOffsets_TU102;
// Hal function -- kcrashcatEngineGetWFL0Offset
pThis->__kcrashcatEngineGetWFL0Offset__ = &kcrashcatEngineGetWFL0Offset_TU102;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineConfigured;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineUnload__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnload;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineVprintf;
pThis->__nvoc_base_CrashCatEngine.__crashcatEnginePriRead__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriRead;
pThis->__nvoc_base_CrashCatEngine.__crashcatEnginePriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriWrite;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineMapBufferDescriptor;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnmapBufferDescriptor;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineSyncBufferDescriptor;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetScratchOffsets;
pThis->__nvoc_base_CrashCatEngine.__crashcatEngineGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetWFL0Offset;
}
void __nvoc_init_funcTable_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
__nvoc_init_funcTable_KernelCrashCatEngine_1(pThis, pRmhalspecowner);
}
void __nvoc_init_CrashCatEngine(CrashCatEngine*);
void __nvoc_init_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
pThis->__nvoc_pbase_KernelCrashCatEngine = pThis;
pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_CrashCatEngine;
__nvoc_init_CrashCatEngine(&pThis->__nvoc_base_CrashCatEngine);
__nvoc_init_funcTable_KernelCrashCatEngine(pThis, pRmhalspecowner);
}


@@ -0,0 +1,274 @@
#ifndef _G_KERNEL_CRASHCAT_ENGINE_NVOC_H_
#define _G_KERNEL_CRASHCAT_ENGINE_NVOC_H_
#include "nvoc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_kernel_crashcat_engine_nvoc.h"
#ifndef KERNEL_CRASHCAT_ENGINE_H
#define KERNEL_CRASHCAT_ENGINE_H
#include "containers/map.h"
#include "core/core.h"
#include "crashcat/crashcat_engine.h"
#include "gpu/gpu_halspec.h"
#include "gpu/mem_mgr/mem_desc.h"
typedef struct KernelCrashCatEngineConfig
{
const char *pName; // Engine name passed to nvErrorLog_va() for crash reports
NvU32 allocQueueSize; // Size of the system memory buffer to allocate for the CrashCat queue
NvU32 errorId; // Error ID passed to nvErrorLog_va() for crash reports
NvU8 dmemPort; // DMEM port allocated for CrashCat usage
NvBool bEnable; // Enable CrashCat monitoring for the engine
} KernelCrashCatEngineConfig;
/*!
* Base implementation of CrashCatEngine in RM.
*/
#ifdef NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct KernelCrashCatEngine {
const struct NVOC_RTTI *__nvoc_rtti;
struct CrashCatEngine __nvoc_base_CrashCatEngine;
struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
NvBool (*__kcrashcatEngineConfigured__)(struct KernelCrashCatEngine *);
void (*__kcrashcatEngineUnload__)(struct KernelCrashCatEngine *);
void (*__kcrashcatEngineVprintf__)(struct KernelCrashCatEngine *, NvBool, const char *, va_list);
NvU32 (*__kcrashcatEngineRegRead__)(struct OBJGPU *, struct KernelCrashCatEngine *, NvU32);
void (*__kcrashcatEngineRegWrite__)(struct OBJGPU *, struct KernelCrashCatEngine *, NvU32, NvU32);
NvU32 (*__kcrashcatEngineMaskDmemAddr__)(struct OBJGPU *, struct KernelCrashCatEngine *, NvU32);
NvU32 (*__kcrashcatEnginePriRead__)(struct KernelCrashCatEngine *, NvU32);
void (*__kcrashcatEnginePriWrite__)(struct KernelCrashCatEngine *, NvU32, NvU32);
void *(*__kcrashcatEngineMapBufferDescriptor__)(struct KernelCrashCatEngine *, CrashCatBufferDescriptor *);
void (*__kcrashcatEngineUnmapBufferDescriptor__)(struct KernelCrashCatEngine *, CrashCatBufferDescriptor *);
void (*__kcrashcatEngineSyncBufferDescriptor__)(struct KernelCrashCatEngine *, CrashCatBufferDescriptor *, NvU32, NvU32);
void (*__kcrashcatEngineReadDmem__)(struct KernelCrashCatEngine *, NvU32, NvU32, void *);
void (*__kcrashcatEngineReadEmem__)(struct KernelCrashCatEngine *, NvU64, NvU64, void *);
const NvU32 *(*__kcrashcatEngineGetScratchOffsets__)(struct KernelCrashCatEngine *, NV_CRASHCAT_SCRATCH_GROUP_ID);
NvU32 (*__kcrashcatEngineGetWFL0Offset__)(struct KernelCrashCatEngine *);
NvBool PRIVATE_FIELD(bConfigured);
MEMORY_DESCRIPTOR *PRIVATE_FIELD(pQueueMemDesc);
const char *PRIVATE_FIELD(pName);
NvU32 PRIVATE_FIELD(errorId);
struct OBJGPU *PRIVATE_FIELD(pGpu);
NvU32 PRIVATE_FIELD(dmemPort);
char PRIVATE_FIELD(printBuffer)[512];
char PRIVATE_FIELD(fmtBuffer)[512];
};
#ifndef __NVOC_CLASS_KernelCrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_KernelCrashCatEngine_TYPEDEF__
typedef struct KernelCrashCatEngine KernelCrashCatEngine;
#endif /* __NVOC_CLASS_KernelCrashCatEngine_TYPEDEF__ */
#ifndef __nvoc_class_id_KernelCrashCatEngine
#define __nvoc_class_id_KernelCrashCatEngine 0xc37aef
#endif /* __nvoc_class_id_KernelCrashCatEngine */
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;
#define __staticCast_KernelCrashCatEngine(pThis) \
((pThis)->__nvoc_pbase_KernelCrashCatEngine)
#ifdef __nvoc_kernel_crashcat_engine_h_disabled
#define __dynamicCast_KernelCrashCatEngine(pThis) ((KernelCrashCatEngine*)NULL)
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define __dynamicCast_KernelCrashCatEngine(pThis) \
((KernelCrashCatEngine*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCrashCatEngine)))
#endif //__nvoc_kernel_crashcat_engine_h_disabled
NV_STATUS __nvoc_objCreateDynamic_KernelCrashCatEngine(KernelCrashCatEngine**, Dynamic*, NvU32, va_list);
NV_STATUS __nvoc_objCreate_KernelCrashCatEngine(KernelCrashCatEngine**, Dynamic*, NvU32);
#define __objCreate_KernelCrashCatEngine(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_KernelCrashCatEngine((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
#define kcrashcatEngineConfigured(arg0) kcrashcatEngineConfigured_DISPATCH(arg0)
#define kcrashcatEngineUnload(arg0) kcrashcatEngineUnload_DISPATCH(arg0)
#define kcrashcatEngineVprintf(arg0, bReportStart, fmt, args) kcrashcatEngineVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define kcrashcatEngineRegRead(pGpu, arg0, offset) kcrashcatEngineRegRead_DISPATCH(pGpu, arg0, offset)
#define kcrashcatEngineRegWrite(pGpu, arg0, offset, data) kcrashcatEngineRegWrite_DISPATCH(pGpu, arg0, offset, data)
#define kcrashcatEngineMaskDmemAddr(pGpu, arg0, addr) kcrashcatEngineMaskDmemAddr_DISPATCH(pGpu, arg0, addr)
#define kcrashcatEnginePriRead(arg0, offset) kcrashcatEnginePriRead_DISPATCH(arg0, offset)
#define kcrashcatEnginePriWrite(arg0, offset, data) kcrashcatEnginePriWrite_DISPATCH(arg0, offset, data)
#define kcrashcatEngineMapBufferDescriptor(arg0, pBufDesc) kcrashcatEngineMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kcrashcatEngineUnmapBufferDescriptor(arg0, pBufDesc) kcrashcatEngineUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kcrashcatEngineSyncBufferDescriptor(arg0, pBufDesc, offset, size) kcrashcatEngineSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define kcrashcatEngineReadDmem(arg0, offset, size, pBuf) kcrashcatEngineReadDmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineReadDmem_HAL(arg0, offset, size, pBuf) kcrashcatEngineReadDmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineReadEmem(arg0, offset, size, pBuf) kcrashcatEngineReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineReadEmem_HAL(arg0, offset, size, pBuf) kcrashcatEngineReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineGetScratchOffsets(arg0, scratchGroupId) kcrashcatEngineGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kcrashcatEngineGetScratchOffsets_HAL(arg0, scratchGroupId) kcrashcatEngineGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kcrashcatEngineGetWFL0Offset(arg0) kcrashcatEngineGetWFL0Offset_DISPATCH(arg0)
#define kcrashcatEngineGetWFL0Offset_HAL(arg0) kcrashcatEngineGetWFL0Offset_DISPATCH(arg0)
NvBool kcrashcatEngineConfigured_IMPL(struct KernelCrashCatEngine *arg0);
static inline NvBool kcrashcatEngineConfigured_DISPATCH(struct KernelCrashCatEngine *arg0) {
return arg0->__kcrashcatEngineConfigured__(arg0);
}
void kcrashcatEngineUnload_IMPL(struct KernelCrashCatEngine *arg0);
static inline void kcrashcatEngineUnload_DISPATCH(struct KernelCrashCatEngine *arg0) {
arg0->__kcrashcatEngineUnload__(arg0);
}
void kcrashcatEngineVprintf_IMPL(struct KernelCrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args);
static inline void kcrashcatEngineVprintf_DISPATCH(struct KernelCrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args) {
arg0->__kcrashcatEngineVprintf__(arg0, bReportStart, fmt, args);
}
static inline NvU32 kcrashcatEngineRegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelCrashCatEngine *arg0, NvU32 offset) {
return arg0->__kcrashcatEngineRegRead__(pGpu, arg0, offset);
}
static inline void kcrashcatEngineRegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 data) {
arg0->__kcrashcatEngineRegWrite__(pGpu, arg0, offset, data);
}
static inline NvU32 kcrashcatEngineMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelCrashCatEngine *arg0, NvU32 addr) {
return arg0->__kcrashcatEngineMaskDmemAddr__(pGpu, arg0, addr);
}
NvU32 kcrashcatEnginePriRead_IMPL(struct KernelCrashCatEngine *arg0, NvU32 offset);
static inline NvU32 kcrashcatEnginePriRead_DISPATCH(struct KernelCrashCatEngine *arg0, NvU32 offset) {
return arg0->__kcrashcatEnginePriRead__(arg0, offset);
}
void kcrashcatEnginePriWrite_IMPL(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 data);
static inline void kcrashcatEnginePriWrite_DISPATCH(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 data) {
arg0->__kcrashcatEnginePriWrite__(arg0, offset, data);
}
void *kcrashcatEngineMapBufferDescriptor_IMPL(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc);
static inline void *kcrashcatEngineMapBufferDescriptor_DISPATCH(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
return arg0->__kcrashcatEngineMapBufferDescriptor__(arg0, pBufDesc);
}
void kcrashcatEngineUnmapBufferDescriptor_IMPL(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc);
static inline void kcrashcatEngineUnmapBufferDescriptor_DISPATCH(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
arg0->__kcrashcatEngineUnmapBufferDescriptor__(arg0, pBufDesc);
}
void kcrashcatEngineSyncBufferDescriptor_IMPL(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size);
static inline void kcrashcatEngineSyncBufferDescriptor_DISPATCH(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
arg0->__kcrashcatEngineSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}
void kcrashcatEngineReadDmem_TU102(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 size, void *pBuf);
static inline void kcrashcatEngineReadDmem_DISPATCH(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 size, void *pBuf) {
arg0->__kcrashcatEngineReadDmem__(arg0, offset, size, pBuf);
}
static inline void kcrashcatEngineReadEmem_2fced3(struct KernelCrashCatEngine *arg0, NvU64 offset, NvU64 size, void *pBuf) {
NV_ASSERT_PRECOMP(0);
}
static inline void kcrashcatEngineReadEmem_DISPATCH(struct KernelCrashCatEngine *arg0, NvU64 offset, NvU64 size, void *pBuf) {
arg0->__kcrashcatEngineReadEmem__(arg0, offset, size, pBuf);
}
const NvU32 *kcrashcatEngineGetScratchOffsets_TU102(struct KernelCrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId);
static inline const NvU32 *kcrashcatEngineGetScratchOffsets_DISPATCH(struct KernelCrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return arg0->__kcrashcatEngineGetScratchOffsets__(arg0, scratchGroupId);
}
NvU32 kcrashcatEngineGetWFL0Offset_TU102(struct KernelCrashCatEngine *arg0);
static inline NvU32 kcrashcatEngineGetWFL0Offset_DISPATCH(struct KernelCrashCatEngine *arg0) {
return arg0->__kcrashcatEngineGetWFL0Offset__(arg0);
}
NV_STATUS kcrashcatEngineConfigure_IMPL(struct KernelCrashCatEngine *arg0, KernelCrashCatEngineConfig *pEngConfig);
#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline NV_STATUS kcrashcatEngineConfigure(struct KernelCrashCatEngine *arg0, KernelCrashCatEngineConfig *pEngConfig) {
NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineConfigure(arg0, pEngConfig) kcrashcatEngineConfigure_IMPL(arg0, pEngConfig)
#endif //__nvoc_kernel_crashcat_engine_h_disabled
MEMORY_DESCRIPTOR *kcrashcatEngineGetQueueMemDesc_IMPL(struct KernelCrashCatEngine *arg0);
#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline MEMORY_DESCRIPTOR *kcrashcatEngineGetQueueMemDesc(struct KernelCrashCatEngine *arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
return NULL;
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineGetQueueMemDesc(arg0) kcrashcatEngineGetQueueMemDesc_IMPL(arg0)
#endif //__nvoc_kernel_crashcat_engine_h_disabled
NV_STATUS kcrashcatEngineRegisterCrashBuffer_IMPL(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1);
#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline NV_STATUS kcrashcatEngineRegisterCrashBuffer(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1) {
NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineRegisterCrashBuffer(arg0, arg1) kcrashcatEngineRegisterCrashBuffer_IMPL(arg0, arg1)
#endif //__nvoc_kernel_crashcat_engine_h_disabled
void kcrashcatEngineUnregisterCrashBuffer_IMPL(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1);
#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline void kcrashcatEngineUnregisterCrashBuffer(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1) {
NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineUnregisterCrashBuffer(arg0, arg1) kcrashcatEngineUnregisterCrashBuffer_IMPL(arg0, arg1)
#endif //__nvoc_kernel_crashcat_engine_h_disabled
#undef PRIVATE_FIELD
#endif // KERNEL_CRASHCAT_ENGINE_H
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_KERNEL_CRASHCAT_ENGINE_NVOC_H_
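The header above routes every CrashCat engine virtual through a per-object function pointer plus an inline *_DISPATCH forwarder. Below is a minimal, self-contained sketch of that pattern; all names are illustrative stand-ins, not the real NVOC types or members.

/* Illustrative sketch of the _DISPATCH pattern (hypothetical names). */
typedef unsigned int NvU32Sketch;

struct MyEngine {
    /* stands in for a __kcrashcatEnginePriRead__-style virtual slot */
    NvU32Sketch (*priReadFn)(struct MyEngine *pThis, NvU32Sketch offset);
};

/* Concrete implementation, bound by init code analogous to __nvoc_init_funcTable_*. */
static NvU32Sketch myEnginePriRead_IMPL(struct MyEngine *pThis, NvU32Sketch offset)
{
    (void)pThis;
    return offset ^ 0xA5A5A5A5u;   /* stand-in for a register read */
}

/* Inline forwarder: the _DISPATCH wrapper just calls through the slot. */
static inline NvU32Sketch myEnginePriRead_DISPATCH(struct MyEngine *pThis, NvU32Sketch offset)
{
    return pThis->priReadFn(pThis, offset);
}

int main(void)
{
    struct MyEngine eng = { .priReadFn = &myEnginePriRead_IMPL };
    return (int)(myEnginePriRead_DISPATCH(&eng, 4u) & 0xFFu);
}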

View File

@@ -13,6 +13,10 @@ char __nvoc_class_id_uniqueness_check_0xb6b1af = 1;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;
void __nvoc_init_KernelFalcon(KernelFalcon*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelFalcon(KernelFalcon*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon*, RmHalspecOwner* );
@@ -26,10 +30,24 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelFalcon_KernelFalcon = {
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_KernelFalcon_CrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelFalcon, __nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelFalcon_KernelCrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelFalcon, __nvoc_base_KernelCrashCatEngine),
};
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelFalcon = {
/*numRelatives=*/ 1,
/*numRelatives=*/ 3,
/*relatives=*/ {
&__nvoc_rtti_KernelFalcon_KernelFalcon,
&__nvoc_rtti_KernelFalcon_KernelCrashCatEngine,
&__nvoc_rtti_KernelFalcon_CrashCatEngine,
},
};
@@ -51,13 +69,75 @@ const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon =
/*pExportInfo=*/ &__nvoc_export_info_KernelFalcon
};
static NvU32 __nvoc_thunk_KernelFalcon_kcrashcatEngineRegRead(struct OBJGPU *pGpu, struct KernelCrashCatEngine *pKernelFlcn, NvU32 offset) {
return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) - __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset);
}
static void __nvoc_thunk_KernelFalcon_kcrashcatEngineRegWrite(struct OBJGPU *pGpu, struct KernelCrashCatEngine *pKernelFlcn, NvU32 offset, NvU32 data) {
kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) - __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, data);
}
static NvU32 __nvoc_thunk_KernelFalcon_kcrashcatEngineMaskDmemAddr(struct OBJGPU *pGpu, struct KernelCrashCatEngine *pKernelFlcn, NvU32 addr) {
return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) - __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), addr);
}
static void __nvoc_thunk_KernelCrashCatEngine_kflcnReadEmem(struct KernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_kflcnGetWFL0Offset(struct KernelFalcon *arg0) {
return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset));
}
static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_kflcnGetScratchOffsets(struct KernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), scratchGroupId);
}
static void __nvoc_thunk_KernelCrashCatEngine_kflcnUnload(struct KernelFalcon *arg0) {
kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset));
}
static NvBool __nvoc_thunk_KernelCrashCatEngine_kflcnConfigured(struct KernelFalcon *arg0) {
return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset));
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_kflcnPriRead(struct KernelFalcon *arg0, NvU32 offset) {
return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset);
}
static void __nvoc_thunk_KernelCrashCatEngine_kflcnVprintf(struct KernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}
static void __nvoc_thunk_KernelCrashCatEngine_kflcnPriWrite(struct KernelFalcon *arg0, NvU32 offset, NvU32 data) {
kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, data);
}
static void __nvoc_thunk_KernelCrashCatEngine_kflcnSyncBufferDescriptor(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}
static void *__nvoc_thunk_KernelCrashCatEngine_kflcnMapBufferDescriptor(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_kflcnUnmapBufferDescriptor(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_kflcnReadDmem(struct KernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelFalcon =
{
/*numEntries=*/ 0,
/*pExportEntries=*/ 0
};
void __nvoc_dtor_KernelCrashCatEngine(KernelCrashCatEngine*);
void __nvoc_dtor_KernelFalcon(KernelFalcon *pThis) {
__nvoc_dtor_KernelCrashCatEngine(&pThis->__nvoc_base_KernelCrashCatEngine);
PORT_UNREFERENCED_VARIABLE(pThis);
}
@@ -74,11 +154,15 @@ void __nvoc_init_dataField_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRm
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_KernelCrashCatEngine(KernelCrashCatEngine* , RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_KernelCrashCatEngine(&pThis->__nvoc_base_KernelCrashCatEngine, pRmhalspecowner);
if (status != NV_OK) goto __nvoc_ctor_KernelFalcon_fail_KernelCrashCatEngine;
__nvoc_init_dataField_KernelFalcon(pThis, pRmhalspecowner);
goto __nvoc_ctor_KernelFalcon_exit; // Success
__nvoc_ctor_KernelFalcon_fail_KernelCrashCatEngine:
__nvoc_ctor_KernelFalcon_exit:
return status;
@@ -96,6 +180,12 @@ static void __nvoc_init_funcTable_KernelFalcon_1(KernelFalcon *pThis, RmHalspecO
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
// Hal function -- kflcnRegRead
pThis->__kflcnRegRead__ = &kflcnRegRead_TU102;
// Hal function -- kflcnRegWrite
pThis->__kflcnRegWrite__ = &kflcnRegWrite_TU102;
// Hal function -- kflcnIsRiscvActive
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */
{
@@ -188,14 +278,48 @@ static void __nvoc_init_funcTable_KernelFalcon_1(KernelFalcon *pThis, RmHalspecO
{
pThis->__kflcnMaskDmemAddr__ = &kflcnMaskDmemAddr_GA100;
}
pThis->__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineRegRead__ = &__nvoc_thunk_KernelFalcon_kcrashcatEngineRegRead;
pThis->__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineRegWrite__ = &__nvoc_thunk_KernelFalcon_kcrashcatEngineRegWrite;
pThis->__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineMaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_kcrashcatEngineMaskDmemAddr;
pThis->__kflcnReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnReadEmem;
pThis->__kflcnGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnGetWFL0Offset;
pThis->__kflcnGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnGetScratchOffsets;
pThis->__kflcnUnload__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnUnload;
pThis->__kflcnConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnConfigured;
pThis->__kflcnPriRead__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnPriRead;
pThis->__kflcnVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnVprintf;
pThis->__kflcnPriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnPriWrite;
pThis->__kflcnSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnSyncBufferDescriptor;
pThis->__kflcnMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnMapBufferDescriptor;
pThis->__kflcnUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnUnmapBufferDescriptor;
pThis->__kflcnReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnReadDmem;
}
void __nvoc_init_funcTable_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
__nvoc_init_funcTable_KernelFalcon_1(pThis, pRmhalspecowner);
}
void __nvoc_init_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_init_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
pThis->__nvoc_pbase_KernelFalcon = pThis;
pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelCrashCatEngine;
__nvoc_init_KernelCrashCatEngine(&pThis->__nvoc_base_KernelCrashCatEngine, pRmhalspecowner);
__nvoc_init_funcTable_KernelFalcon(pThis, pRmhalspecowner);
}
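The thunks in this generated source convert between a derived object and an embedded base object by adding or subtracting the RTTI byte offset. The following is a small, hedged illustration of that pointer adjustment using stand-in structs rather than KernelFalcon/KernelCrashCatEngine.

/* Hypothetical sketch of the thunk pointer arithmetic shown above:
 * the base is embedded at a fixed offset inside the derived struct. */
#include <stddef.h>
#include <stdio.h>

struct BaseSketch    { int baseField; };
struct DerivedSketch {
    int               derivedField;
    struct BaseSketch base;        /* embedded base, like __nvoc_base_KernelCrashCatEngine */
};

/* NV_OFFSETOF equivalent for this sketch. */
static const size_t kBaseOffset = offsetof(struct DerivedSketch, base);

/* Direction 1: a call arrives on the base pointer; recover the derived object. */
static int derivedMethodFromBase(struct BaseSketch *pBase)
{
    struct DerivedSketch *pDerived =
        (struct DerivedSketch *)((unsigned char *)pBase - kBaseOffset);
    return pDerived->derivedField;
}

/* Direction 2: a call arrives on the derived pointer; forward to the base. */
static int baseMethodFromDerived(struct DerivedSketch *pDerived)
{
    struct BaseSketch *pBase =
        (struct BaseSketch *)((unsigned char *)pDerived + kBaseOffset);
    return pBase->baseField;
}

int main(void)
{
    struct DerivedSketch d = { .derivedField = 7, .base = { .baseField = 3 } };
    printf("%d %d\n", derivedMethodFromBase(&d.base), baseMethodFromDerived(&d));
    return 0;
}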
@@ -205,6 +329,10 @@ char __nvoc_class_id_uniqueness_check_0xabcf08 = 1;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericKernelFalcon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService;
@@ -224,6 +352,18 @@ static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_GenericKernelFalco
/*offset=*/ 0,
};
static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_CrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GenericKernelFalcon, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};
static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(GenericKernelFalcon, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine),
};
static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_KernelFalcon = {
/*pClassDef=*/ &__nvoc_class_def_KernelFalcon,
/*dtor=*/ &__nvoc_destructFromBase,
@@ -243,12 +383,14 @@ static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_Object = {
};
static const struct NVOC_CASTINFO __nvoc_castinfo_GenericKernelFalcon = {
/*numRelatives=*/ 4,
/*numRelatives=*/ 6,
/*relatives=*/ {
&__nvoc_rtti_GenericKernelFalcon_GenericKernelFalcon,
&__nvoc_rtti_GenericKernelFalcon_Object,
&__nvoc_rtti_GenericKernelFalcon_IntrService,
&__nvoc_rtti_GenericKernelFalcon_KernelFalcon,
&__nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine,
&__nvoc_rtti_GenericKernelFalcon_CrashCatEngine,
},
};
@@ -279,14 +421,74 @@ static NV_STATUS __nvoc_thunk_GenericKernelFalcon_intrservServiceNotificationInt
return gkflcnServiceNotificationInterrupt(arg0, (struct GenericKernelFalcon *)(((unsigned char *)arg1) - __nvoc_rtti_GenericKernelFalcon_IntrService.offset), arg2);
}
static void __nvoc_thunk_KernelCrashCatEngine_gkflcnReadEmem(struct GenericKernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_gkflcnGetWFL0Offset(struct GenericKernelFalcon *arg0) {
return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset));
}
static void __nvoc_thunk_KernelCrashCatEngine_gkflcnUnload(struct GenericKernelFalcon *arg0) {
kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset));
}
static NvBool __nvoc_thunk_KernelCrashCatEngine_gkflcnConfigured(struct GenericKernelFalcon *arg0) {
return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset));
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_gkflcnPriRead(struct GenericKernelFalcon *arg0, NvU32 offset) {
return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset);
}
static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_gkflcnGetScratchOffsets(struct GenericKernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), scratchGroupId);
}
static void __nvoc_thunk_KernelFalcon_gkflcnRegWrite(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset), offset, data);
}
static NvU32 __nvoc_thunk_KernelFalcon_gkflcnMaskDmemAddr(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 addr) {
return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset), addr);
}
static void __nvoc_thunk_KernelCrashCatEngine_gkflcnVprintf(struct GenericKernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}
static NvBool __nvoc_thunk_IntrService_gkflcnClearInterrupt(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_GenericKernelFalcon_IntrService.offset), pParams);
}
static void __nvoc_thunk_KernelCrashCatEngine_gkflcnPriWrite(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 data) {
kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset, data);
}
static void *__nvoc_thunk_KernelCrashCatEngine_gkflcnMapBufferDescriptor(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_gkflcnSyncBufferDescriptor(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}
static NvU32 __nvoc_thunk_KernelFalcon_gkflcnRegRead(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset) {
return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset), offset);
}
static void __nvoc_thunk_KernelCrashCatEngine_gkflcnUnmapBufferDescriptor(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}
static NvU32 __nvoc_thunk_IntrService_gkflcnServiceInterrupt(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_GenericKernelFalcon_IntrService.offset), pParams);
}
static void __nvoc_thunk_KernelCrashCatEngine_gkflcnReadDmem(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericKernelFalcon =
{
/*numEntries=*/ 0,
@@ -351,9 +553,39 @@ static void __nvoc_init_funcTable_GenericKernelFalcon_1(GenericKernelFalcon *pTh
pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = &__nvoc_thunk_GenericKernelFalcon_intrservServiceNotificationInterrupt;
pThis->__gkflcnReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnReadEmem;
pThis->__gkflcnGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnGetWFL0Offset;
pThis->__gkflcnUnload__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnUnload;
pThis->__gkflcnConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnConfigured;
pThis->__gkflcnPriRead__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnPriRead;
pThis->__gkflcnGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnGetScratchOffsets;
pThis->__gkflcnRegWrite__ = &__nvoc_thunk_KernelFalcon_gkflcnRegWrite;
pThis->__gkflcnMaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_gkflcnMaskDmemAddr;
pThis->__gkflcnVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnVprintf;
pThis->__gkflcnClearInterrupt__ = &__nvoc_thunk_IntrService_gkflcnClearInterrupt;
pThis->__gkflcnPriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnPriWrite;
pThis->__gkflcnMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnMapBufferDescriptor;
pThis->__gkflcnSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnSyncBufferDescriptor;
pThis->__gkflcnRegRead__ = &__nvoc_thunk_KernelFalcon_gkflcnRegRead;
pThis->__gkflcnUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnUnmapBufferDescriptor;
pThis->__gkflcnServiceInterrupt__ = &__nvoc_thunk_IntrService_gkflcnServiceInterrupt;
pThis->__gkflcnReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnReadDmem;
}
void __nvoc_init_funcTable_GenericKernelFalcon(GenericKernelFalcon *pThis) {
@@ -365,6 +597,8 @@ void __nvoc_init_IntrService(IntrService*);
void __nvoc_init_Object(Object*);
void __nvoc_init_GenericKernelFalcon(GenericKernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
pThis->__nvoc_pbase_GenericKernelFalcon = pThis;
pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine;
pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon;
pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;

View File

@@ -41,6 +41,7 @@ extern "C" {
#include "core/core.h"
#include "gpu/falcon/falcon_common.h"
#include "gpu/falcon/kernel_crashcat_engine.h"
#include "gpu/intr/intr_service.h"
struct KernelChannel;
@@ -67,6 +68,8 @@ typedef struct KernelFalconEngineConfig {
NvU32 ctxAttr; // Memory attributes used for context buffers
NvU32 ctxBufferSize; // Context buffer size in bytes
NvU32 addrSpaceList; // index into ADDRLIST array in mem_desc.h
KernelCrashCatEngineConfig crashcatEngConfig;
} KernelFalconEngineConfig;
/*!
@@ -79,7 +82,12 @@ typedef struct KernelFalconEngineConfig {
#endif
struct KernelFalcon {
const struct NVOC_RTTI *__nvoc_rtti;
struct KernelCrashCatEngine __nvoc_base_KernelCrashCatEngine;
struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
struct KernelFalcon *__nvoc_pbase_KernelFalcon;
NvU32 (*__kflcnRegRead__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
void (*__kflcnRegWrite__)(struct OBJGPU *, struct KernelFalcon *, NvU32, NvU32);
NvBool (*__kflcnIsRiscvActive__)(struct OBJGPU *, struct KernelFalcon *);
void (*__kflcnRiscvProgramBcr__)(struct OBJGPU *, struct KernelFalcon *, NvBool);
void (*__kflcnSwitchToFalcon__)(struct OBJGPU *, struct KernelFalcon *);
@@ -90,6 +98,18 @@ struct KernelFalcon {
void (*__kflcnIntrRetrigger__)(struct OBJGPU *, struct KernelFalcon *);
NvU32 (*__kflcnMaskImemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
NvU32 (*__kflcnMaskDmemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
void (*__kflcnReadEmem__)(struct KernelFalcon *, NvU64, NvU64, void *);
NvU32 (*__kflcnGetWFL0Offset__)(struct KernelFalcon *);
const NvU32 *(*__kflcnGetScratchOffsets__)(struct KernelFalcon *, NV_CRASHCAT_SCRATCH_GROUP_ID);
void (*__kflcnUnload__)(struct KernelFalcon *);
NvBool (*__kflcnConfigured__)(struct KernelFalcon *);
NvU32 (*__kflcnPriRead__)(struct KernelFalcon *, NvU32);
void (*__kflcnVprintf__)(struct KernelFalcon *, NvBool, const char *, va_list);
void (*__kflcnPriWrite__)(struct KernelFalcon *, NvU32, NvU32);
void (*__kflcnSyncBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *, NvU32, NvU32);
void *(*__kflcnMapBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *);
void (*__kflcnUnmapBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *);
void (*__kflcnReadDmem__)(struct KernelFalcon *, NvU32, NvU32, void *);
NvU32 registerBase;
NvU32 riscvRegisterBase;
NvU32 fbifBase;
@@ -130,6 +150,10 @@ NV_STATUS __nvoc_objCreate_KernelFalcon(KernelFalcon**, Dynamic*, NvU32);
#define __objCreate_KernelFalcon(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_KernelFalcon((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
#define kflcnRegRead(pGpu, pKernelFlcn, offset) kflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kflcnRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kflcnRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kflcnRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kflcnIsRiscvActive(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn)
#define kflcnIsRiscvActive_HAL(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn)
#define kflcnRiscvProgramBcr(pGpu, pKernelFlcn, bBRFetch) kflcnRiscvProgramBcr_DISPATCH(pGpu, pKernelFlcn, bBRFetch)
@@ -149,33 +173,18 @@ NV_STATUS __nvoc_objCreate_KernelFalcon(KernelFalcon**, Dynamic*, NvU32);
#define kflcnMaskImemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskImemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnMaskDmemAddr(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnMaskDmemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
NvU32 kflcnRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);
#ifdef __nvoc_kernel_falcon_h_disabled
static inline NvU32 kflcnRegRead(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) {
NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
return 0;
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnRegRead(pGpu, pKernelFlcn, offset) kflcnRegRead_TU102(pGpu, pKernelFlcn, offset)
#endif //__nvoc_kernel_falcon_h_disabled
#define kflcnRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRegRead(pGpu, pKernelFlcn, offset)
void kflcnRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data);
#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnRegWrite(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_TU102(pGpu, pKernelFlcn, offset, data)
#endif //__nvoc_kernel_falcon_h_disabled
#define kflcnRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRegWrite(pGpu, pKernelFlcn, offset, data)
#define kflcnReadEmem(arg0, offset, size, pBuf) kflcnReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kflcnGetWFL0Offset(arg0) kflcnGetWFL0Offset_DISPATCH(arg0)
#define kflcnGetScratchOffsets(arg0, scratchGroupId) kflcnGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kflcnUnload(arg0) kflcnUnload_DISPATCH(arg0)
#define kflcnConfigured(arg0) kflcnConfigured_DISPATCH(arg0)
#define kflcnPriRead(arg0, offset) kflcnPriRead_DISPATCH(arg0, offset)
#define kflcnVprintf(arg0, bReportStart, fmt, args) kflcnVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define kflcnPriWrite(arg0, offset, data) kflcnPriWrite_DISPATCH(arg0, offset, data)
#define kflcnSyncBufferDescriptor(arg0, pBufDesc, offset, size) kflcnSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define kflcnMapBufferDescriptor(arg0, pBufDesc) kflcnMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kflcnUnmapBufferDescriptor(arg0, pBufDesc) kflcnUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kflcnReadDmem(arg0, offset, size, pBuf) kflcnReadDmem_DISPATCH(arg0, offset, size, pBuf)
NvU32 kflcnRiscvRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);
@@ -296,6 +305,18 @@ static inline NV_STATUS kflcnWaitForHalt(struct OBJGPU *pGpu, struct KernelFalco
#define kflcnWaitForHalt_HAL(pGpu, pKernelFlcn, timeoutUs, flags) kflcnWaitForHalt(pGpu, pKernelFlcn, timeoutUs, flags)
NvU32 kflcnRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);
static inline NvU32 kflcnRegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) {
return pKernelFlcn->__kflcnRegRead__(pGpu, pKernelFlcn, offset);
}
void kflcnRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data);
static inline void kflcnRegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
pKernelFlcn->__kflcnRegWrite__(pGpu, pKernelFlcn, offset, data);
}
NvBool kflcnIsRiscvActive_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);
NvBool kflcnIsRiscvActive_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);
@@ -380,6 +401,54 @@ static inline NvU32 kflcnMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct Kerne
return pKernelFlcn->__kflcnMaskDmemAddr__(pGpu, pKernelFlcn, addr);
}
static inline void kflcnReadEmem_DISPATCH(struct KernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
arg0->__kflcnReadEmem__(arg0, offset, size, pBuf);
}
static inline NvU32 kflcnGetWFL0Offset_DISPATCH(struct KernelFalcon *arg0) {
return arg0->__kflcnGetWFL0Offset__(arg0);
}
static inline const NvU32 *kflcnGetScratchOffsets_DISPATCH(struct KernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return arg0->__kflcnGetScratchOffsets__(arg0, scratchGroupId);
}
static inline void kflcnUnload_DISPATCH(struct KernelFalcon *arg0) {
arg0->__kflcnUnload__(arg0);
}
static inline NvBool kflcnConfigured_DISPATCH(struct KernelFalcon *arg0) {
return arg0->__kflcnConfigured__(arg0);
}
static inline NvU32 kflcnPriRead_DISPATCH(struct KernelFalcon *arg0, NvU32 offset) {
return arg0->__kflcnPriRead__(arg0, offset);
}
static inline void kflcnVprintf_DISPATCH(struct KernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
arg0->__kflcnVprintf__(arg0, bReportStart, fmt, args);
}
static inline void kflcnPriWrite_DISPATCH(struct KernelFalcon *arg0, NvU32 offset, NvU32 data) {
arg0->__kflcnPriWrite__(arg0, offset, data);
}
static inline void kflcnSyncBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
arg0->__kflcnSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}
static inline void *kflcnMapBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
return arg0->__kflcnMapBufferDescriptor__(arg0, pBufDesc);
}
static inline void kflcnUnmapBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
arg0->__kflcnUnmapBufferDescriptor__(arg0, pBufDesc);
}
static inline void kflcnReadDmem_DISPATCH(struct KernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
arg0->__kflcnReadDmem__(arg0, offset, size, pBuf);
}
void kflcnConfigureEngine_IMPL(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig);
#ifdef __nvoc_kernel_falcon_h_disabled
@@ -429,6 +498,8 @@ struct GenericKernelFalcon {
struct KernelFalcon __nvoc_base_KernelFalcon;
struct IntrService __nvoc_base_IntrService;
struct Object __nvoc_base_Object;
struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
struct KernelFalcon *__nvoc_pbase_KernelFalcon;
struct IntrService *__nvoc_pbase_IntrService;
struct Object *__nvoc_pbase_Object;
@@ -436,8 +507,23 @@ struct GenericKernelFalcon {
NV_STATUS (*__gkflcnResetHw__)(struct OBJGPU *, struct GenericKernelFalcon *);
void (*__gkflcnRegisterIntrService__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceRecord *);
NV_STATUS (*__gkflcnServiceNotificationInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceNotificationInterruptArguments *);
void (*__gkflcnReadEmem__)(struct GenericKernelFalcon *, NvU64, NvU64, void *);
NvU32 (*__gkflcnGetWFL0Offset__)(struct GenericKernelFalcon *);
void (*__gkflcnUnload__)(struct GenericKernelFalcon *);
NvBool (*__gkflcnConfigured__)(struct GenericKernelFalcon *);
NvU32 (*__gkflcnPriRead__)(struct GenericKernelFalcon *, NvU32);
const NvU32 *(*__gkflcnGetScratchOffsets__)(struct GenericKernelFalcon *, NV_CRASHCAT_SCRATCH_GROUP_ID);
void (*__gkflcnRegWrite__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32, NvU32);
NvU32 (*__gkflcnMaskDmemAddr__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32);
void (*__gkflcnVprintf__)(struct GenericKernelFalcon *, NvBool, const char *, va_list);
NvBool (*__gkflcnClearInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceClearInterruptArguments *);
void (*__gkflcnPriWrite__)(struct GenericKernelFalcon *, NvU32, NvU32);
void *(*__gkflcnMapBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *);
void (*__gkflcnSyncBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *, NvU32, NvU32);
NvU32 (*__gkflcnRegRead__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32);
void (*__gkflcnUnmapBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *);
NvU32 (*__gkflcnServiceInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceInterruptArguments *);
void (*__gkflcnReadDmem__)(struct GenericKernelFalcon *, NvU32, NvU32, void *);
};
#ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__
@@ -471,8 +557,23 @@ NV_STATUS __nvoc_objCreate_GenericKernelFalcon(GenericKernelFalcon**, Dynamic*,
#define gkflcnResetHw(pGpu, pGenKernFlcn) gkflcnResetHw_DISPATCH(pGpu, pGenKernFlcn)
#define gkflcnRegisterIntrService(arg0, arg1, arg2) gkflcnRegisterIntrService_DISPATCH(arg0, arg1, arg2)
#define gkflcnServiceNotificationInterrupt(arg0, arg1, arg2) gkflcnServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
#define gkflcnReadEmem(arg0, offset, size, pBuf) gkflcnReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define gkflcnGetWFL0Offset(arg0) gkflcnGetWFL0Offset_DISPATCH(arg0)
#define gkflcnUnload(arg0) gkflcnUnload_DISPATCH(arg0)
#define gkflcnConfigured(arg0) gkflcnConfigured_DISPATCH(arg0)
#define gkflcnPriRead(arg0, offset) gkflcnPriRead_DISPATCH(arg0, offset)
#define gkflcnGetScratchOffsets(arg0, scratchGroupId) gkflcnGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define gkflcnRegWrite(pGpu, pKernelFlcn, offset, data) gkflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define gkflcnMaskDmemAddr(pGpu, pKernelFlcn, addr) gkflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define gkflcnVprintf(arg0, bReportStart, fmt, args) gkflcnVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define gkflcnClearInterrupt(pGpu, pIntrService, pParams) gkflcnClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define gkflcnPriWrite(arg0, offset, data) gkflcnPriWrite_DISPATCH(arg0, offset, data)
#define gkflcnMapBufferDescriptor(arg0, pBufDesc) gkflcnMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define gkflcnSyncBufferDescriptor(arg0, pBufDesc, offset, size) gkflcnSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define gkflcnRegRead(pGpu, pKernelFlcn, offset) gkflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define gkflcnUnmapBufferDescriptor(arg0, pBufDesc) gkflcnUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define gkflcnServiceInterrupt(pGpu, pIntrService, pParams) gkflcnServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define gkflcnReadDmem(arg0, offset, size, pBuf) gkflcnReadDmem_DISPATCH(arg0, offset, size, pBuf)
NV_STATUS gkflcnResetHw_IMPL(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn);
static inline NV_STATUS gkflcnResetHw_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn) {
@@ -491,14 +592,74 @@ static inline NV_STATUS gkflcnServiceNotificationInterrupt_DISPATCH(struct OBJGP
return arg1->__gkflcnServiceNotificationInterrupt__(arg0, arg1, arg2);
}
static inline void gkflcnReadEmem_DISPATCH(struct GenericKernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
arg0->__gkflcnReadEmem__(arg0, offset, size, pBuf);
}
static inline NvU32 gkflcnGetWFL0Offset_DISPATCH(struct GenericKernelFalcon *arg0) {
return arg0->__gkflcnGetWFL0Offset__(arg0);
}
static inline void gkflcnUnload_DISPATCH(struct GenericKernelFalcon *arg0) {
arg0->__gkflcnUnload__(arg0);
}
static inline NvBool gkflcnConfigured_DISPATCH(struct GenericKernelFalcon *arg0) {
return arg0->__gkflcnConfigured__(arg0);
}
static inline NvU32 gkflcnPriRead_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset) {
return arg0->__gkflcnPriRead__(arg0, offset);
}
static inline const NvU32 *gkflcnGetScratchOffsets_DISPATCH(struct GenericKernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return arg0->__gkflcnGetScratchOffsets__(arg0, scratchGroupId);
}
static inline void gkflcnRegWrite_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
pKernelFlcn->__gkflcnRegWrite__(pGpu, pKernelFlcn, offset, data);
}
static inline NvU32 gkflcnMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 addr) {
return pKernelFlcn->__gkflcnMaskDmemAddr__(pGpu, pKernelFlcn, addr);
}
static inline void gkflcnVprintf_DISPATCH(struct GenericKernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
arg0->__gkflcnVprintf__(arg0, bReportStart, fmt, args);
}
static inline NvBool gkflcnClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return pIntrService->__gkflcnClearInterrupt__(pGpu, pIntrService, pParams);
}
static inline void gkflcnPriWrite_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 data) {
arg0->__gkflcnPriWrite__(arg0, offset, data);
}
static inline void *gkflcnMapBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
return arg0->__gkflcnMapBufferDescriptor__(arg0, pBufDesc);
}
static inline void gkflcnSyncBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
arg0->__gkflcnSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}
static inline NvU32 gkflcnRegRead_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset) {
return pKernelFlcn->__gkflcnRegRead__(pGpu, pKernelFlcn, offset);
}
static inline void gkflcnUnmapBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
arg0->__gkflcnUnmapBufferDescriptor__(arg0, pBufDesc);
}
static inline NvU32 gkflcnServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
return pIntrService->__gkflcnServiceInterrupt__(pGpu, pIntrService, pParams);
}
static inline void gkflcnReadDmem_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
arg0->__gkflcnReadDmem__(arg0, offset, size, pBuf);
}
NV_STATUS gkflcnConstruct_IMPL(struct GenericKernelFalcon *arg_pGenKernFlcn, struct OBJGPU *arg_pGpu, KernelFalconEngineConfig *arg_pFalconConfig);
#define __nvoc_gkflcnConstruct(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig) gkflcnConstruct_IMPL(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig)
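These headers also rely on the __nvoc_*_h_disabled guard: the same call site resolves either to the real _IMPL or to an assert-and-fail stub, chosen at compile time. Here is a compact sketch of that guard with hypothetical names, not the actual RM macros.

/* Hypothetical illustration of the *_h_disabled stub pattern. */
#include <stdio.h>

#define MY_ERR_NOT_SUPPORTED (-56)

int myClassConfigure_IMPL(int *pObj, int value);

#ifdef __my_class_h_disabled
/* Class compiled out: the call site becomes an assert-and-fail stub. */
static inline int myClassConfigure(int *pObj, int value) {
    (void)pObj; (void)value;
    fprintf(stderr, "MyClass was disabled!\n");   /* stand-in for NV_ASSERT_FAILED_PRECOMP */
    return MY_ERR_NOT_SUPPORTED;
}
#else
/* Class enabled: the call site maps straight to the implementation. */
#define myClassConfigure(pObj, value) myClassConfigure_IMPL(pObj, value)
#endif

int myClassConfigure_IMPL(int *pObj, int value) { *pObj = value; return 0; }

int main(void) {
    int obj = 0;
    return myClassConfigure(&obj, 42);   /* 0 when the class is enabled */
}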

View File

@@ -19,6 +19,10 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;
void __nvoc_init_KernelGsp(KernelGsp*, RmHalspecOwner* );
@@ -52,6 +56,18 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_IntrService = {
/*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_IntrService),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_CrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelCrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelFalcon = {
/*pClassDef=*/ &__nvoc_class_def_KernelFalcon,
/*dtor=*/ &__nvoc_destructFromBase,
@@ -59,10 +75,12 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelFalcon = {
};
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGsp = {
/*numRelatives=*/ 5,
/*numRelatives=*/ 7,
/*relatives=*/ {
&__nvoc_rtti_KernelGsp_KernelGsp,
&__nvoc_rtti_KernelGsp_KernelFalcon,
&__nvoc_rtti_KernelGsp_KernelCrashCatEngine,
&__nvoc_rtti_KernelGsp_CrashCatEngine,
&__nvoc_rtti_KernelGsp_IntrService,
&__nvoc_rtti_KernelGsp_OBJENGSTATE,
&__nvoc_rtti_KernelGsp_Object,
@@ -100,10 +118,70 @@ static NV_STATUS __nvoc_thunk_KernelGsp_kflcnResetHw(struct OBJGPU *pGpu, struct
return kgspResetHw(pGpu, (struct KernelGsp *)(((unsigned char *)pKernelGsp) - __nvoc_rtti_KernelGsp_KernelFalcon.offset));
}
static NvBool __nvoc_thunk_KernelCrashCatEngine_kgspConfigured(struct KernelGsp *arg0) {
return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset));
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_kgspPriRead(struct KernelGsp *arg0, NvU32 offset) {
return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset);
}
static void __nvoc_thunk_KernelFalcon_kgspRegWrite(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset, NvU32 data) {
kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelGsp_KernelFalcon.offset), offset, data);
}
static NvU32 __nvoc_thunk_KernelFalcon_kgspMaskDmemAddr(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 addr) {
return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelGsp_KernelFalcon.offset), addr);
}
static void __nvoc_thunk_OBJENGSTATE_kgspStateDestroy(POBJGPU pGpu, struct KernelGsp *pEngstate) {
engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}
static void __nvoc_thunk_KernelCrashCatEngine_kgspVprintf(struct KernelGsp *arg0, NvBool bReportStart, const char *fmt, va_list args) {
kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}
static NvBool __nvoc_thunk_IntrService_kgspClearInterrupt(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams);
}
static void __nvoc_thunk_KernelCrashCatEngine_kgspPriWrite(struct KernelGsp *arg0, NvU32 offset, NvU32 data) {
kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset, data);
}
static void *__nvoc_thunk_KernelCrashCatEngine_kgspMapBufferDescriptor(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_kgspSyncBufferDescriptor(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}
static NvU32 __nvoc_thunk_KernelFalcon_kgspRegRead(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset) {
return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelGsp_KernelFalcon.offset), offset);
}
static NvBool __nvoc_thunk_OBJENGSTATE_kgspIsPresent(POBJGPU pGpu, struct KernelGsp *pEngstate) {
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}
static void __nvoc_thunk_KernelCrashCatEngine_kgspReadEmem(struct KernelGsp *arg0, NvU64 offset, NvU64 size, void *pBuf) {
kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset, size, pBuf);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateLoad(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}
static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_kgspGetScratchOffsets(struct KernelGsp *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), scratchGroupId);
}
static void __nvoc_thunk_KernelCrashCatEngine_kgspUnload(struct KernelGsp *arg0) {
kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateUnload(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}
@@ -112,6 +190,10 @@ static NV_STATUS __nvoc_thunk_IntrService_kgspServiceNotificationInterrupt(struc
return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams);
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_kgspGetWFL0Offset(struct KernelGsp *arg0) {
return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateInitLocked(POBJGPU pGpu, struct KernelGsp *pEngstate) {
return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}
@@ -124,10 +206,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePostUnload(POBJGPU pGpu, stru
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}
static void __nvoc_thunk_OBJENGSTATE_kgspStateDestroy(POBJGPU pGpu, struct KernelGsp *pEngstate) {
engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePreUnload(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}
@@ -148,16 +226,16 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePreInitUnlocked(POBJGPU pGpu,
return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}
static NvBool __nvoc_thunk_IntrService_kgspClearInterrupt(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePostLoad(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}
static NvBool __nvoc_thunk_OBJENGSTATE_kgspIsPresent(POBJGPU pGpu, struct KernelGsp *pEngstate) {
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
static void __nvoc_thunk_KernelCrashCatEngine_kgspUnmapBufferDescriptor(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_kgspReadDmem(struct KernelGsp *arg0, NvU32 offset, NvU32 size, void *pBuf) {
kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset, size, pBuf);
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGsp =
@@ -709,20 +787,50 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
pThis->__nvoc_base_KernelFalcon.__kflcnResetHw__ = &__nvoc_thunk_KernelGsp_kflcnResetHw;
pThis->__kgspConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_kgspConfigured;
pThis->__kgspPriRead__ = &__nvoc_thunk_KernelCrashCatEngine_kgspPriRead;
pThis->__kgspRegWrite__ = &__nvoc_thunk_KernelFalcon_kgspRegWrite;
pThis->__kgspMaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_kgspMaskDmemAddr;
pThis->__kgspStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kgspStateDestroy;
pThis->__kgspVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_kgspVprintf;
pThis->__kgspClearInterrupt__ = &__nvoc_thunk_IntrService_kgspClearInterrupt;
pThis->__kgspPriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_kgspPriWrite;
pThis->__kgspMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kgspMapBufferDescriptor;
pThis->__kgspSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kgspSyncBufferDescriptor;
pThis->__kgspRegRead__ = &__nvoc_thunk_KernelFalcon_kgspRegRead;
pThis->__kgspIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kgspIsPresent;
pThis->__kgspReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_kgspReadEmem;
pThis->__kgspStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStateLoad;
pThis->__kgspGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_kgspGetScratchOffsets;
pThis->__kgspUnload__ = &__nvoc_thunk_KernelCrashCatEngine_kgspUnload;
pThis->__kgspStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStateUnload;
pThis->__kgspServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_kgspServiceNotificationInterrupt;
pThis->__kgspGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_kgspGetWFL0Offset;
pThis->__kgspStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStateInitLocked;
pThis->__kgspStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreLoad;
pThis->__kgspStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePostUnload;
pThis->__kgspStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kgspStateDestroy;
pThis->__kgspStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreUnload;
pThis->__kgspStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStateInitUnlocked;
@@ -733,11 +841,11 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
pThis->__kgspStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreInitUnlocked;
pThis->__kgspClearInterrupt__ = &__nvoc_thunk_IntrService_kgspClearInterrupt;
pThis->__kgspStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePostLoad;
pThis->__kgspIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kgspIsPresent;
pThis->__kgspUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kgspUnmapBufferDescriptor;
pThis->__kgspReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_kgspReadDmem;
}
void __nvoc_init_funcTable_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) {
@@ -752,6 +860,8 @@ void __nvoc_init_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) {
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService;
pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine;
pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon;
__nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
__nvoc_init_IntrService(&pThis->__nvoc_base_IntrService);

View File

@@ -253,6 +253,8 @@ struct KernelGsp {
struct Object *__nvoc_pbase_Object;
struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
struct IntrService *__nvoc_pbase_IntrService;
struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
struct KernelFalcon *__nvoc_pbase_KernelFalcon;
struct KernelGsp *__nvoc_pbase_KernelGsp;
NV_STATUS (*__kgspConstructEngine__)(struct OBJGPU *, struct KernelGsp *, ENGDESCRIPTOR);
@@ -294,21 +296,36 @@ struct KernelGsp {
NV_STATUS (*__kgspFreeVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32);
const char *(*__kgspGetSignatureSectionNamePrefix__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspSetupGspFmcArgs__)(struct OBJGPU *, struct KernelGsp *, GSP_FIRMWARE *);
NvBool (*__kgspConfigured__)(struct KernelGsp *);
NvU32 (*__kgspPriRead__)(struct KernelGsp *, NvU32);
void (*__kgspRegWrite__)(struct OBJGPU *, struct KernelGsp *, NvU32, NvU32);
NvU32 (*__kgspMaskDmemAddr__)(struct OBJGPU *, struct KernelGsp *, NvU32);
void (*__kgspStateDestroy__)(POBJGPU, struct KernelGsp *);
void (*__kgspVprintf__)(struct KernelGsp *, NvBool, const char *, va_list);
NvBool (*__kgspClearInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceClearInterruptArguments *);
void (*__kgspPriWrite__)(struct KernelGsp *, NvU32, NvU32);
void *(*__kgspMapBufferDescriptor__)(struct KernelGsp *, CrashCatBufferDescriptor *);
void (*__kgspSyncBufferDescriptor__)(struct KernelGsp *, CrashCatBufferDescriptor *, NvU32, NvU32);
NvU32 (*__kgspRegRead__)(struct OBJGPU *, struct KernelGsp *, NvU32);
NvBool (*__kgspIsPresent__)(POBJGPU, struct KernelGsp *);
void (*__kgspReadEmem__)(struct KernelGsp *, NvU64, NvU64, void *);
NV_STATUS (*__kgspStateLoad__)(POBJGPU, struct KernelGsp *, NvU32);
const NvU32 *(*__kgspGetScratchOffsets__)(struct KernelGsp *, NV_CRASHCAT_SCRATCH_GROUP_ID);
void (*__kgspUnload__)(struct KernelGsp *);
NV_STATUS (*__kgspStateUnload__)(POBJGPU, struct KernelGsp *, NvU32);
NV_STATUS (*__kgspServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceServiceNotificationInterruptArguments *);
NvU32 (*__kgspGetWFL0Offset__)(struct KernelGsp *);
NV_STATUS (*__kgspStateInitLocked__)(POBJGPU, struct KernelGsp *);
NV_STATUS (*__kgspStatePreLoad__)(POBJGPU, struct KernelGsp *, NvU32);
NV_STATUS (*__kgspStatePostUnload__)(POBJGPU, struct KernelGsp *, NvU32);
void (*__kgspStateDestroy__)(POBJGPU, struct KernelGsp *);
NV_STATUS (*__kgspStatePreUnload__)(POBJGPU, struct KernelGsp *, NvU32);
NV_STATUS (*__kgspStateInitUnlocked__)(POBJGPU, struct KernelGsp *);
void (*__kgspInitMissing__)(POBJGPU, struct KernelGsp *);
NV_STATUS (*__kgspStatePreInitLocked__)(POBJGPU, struct KernelGsp *);
NV_STATUS (*__kgspStatePreInitUnlocked__)(POBJGPU, struct KernelGsp *);
NvBool (*__kgspClearInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceClearInterruptArguments *);
NV_STATUS (*__kgspStatePostLoad__)(POBJGPU, struct KernelGsp *, NvU32);
NvBool (*__kgspIsPresent__)(POBJGPU, struct KernelGsp *);
void (*__kgspUnmapBufferDescriptor__)(struct KernelGsp *, CrashCatBufferDescriptor *);
void (*__kgspReadDmem__)(struct KernelGsp *, NvU32, NvU32, void *);
struct MESSAGE_QUEUE_COLLECTION *pMQCollection;
struct OBJRPC *pRpc;
struct OBJRPC *pLocklessRpc;
@@ -351,6 +368,7 @@ struct KernelGsp {
NvBool bInInit;
NvBool bInLockdown;
NvBool bPollingForRpcResponse;
NvBool bFatalError;
MEMORY_DESCRIPTOR *pMemDesc_simAccessBuf;
SimAccessBuffer *pSimAccessBuf;
NvP64 pSimAccessBufPriv;
@@ -470,21 +488,36 @@ NV_STATUS __nvoc_objCreate_KernelGsp(KernelGsp**, Dynamic*, NvU32);
#define kgspGetSignatureSectionNamePrefix_HAL(pGpu, pKernelGsp) kgspGetSignatureSectionNamePrefix_DISPATCH(pGpu, pKernelGsp)
#define kgspSetupGspFmcArgs(pGpu, pKernelGsp, pGspFw) kgspSetupGspFmcArgs_DISPATCH(pGpu, pKernelGsp, pGspFw)
#define kgspSetupGspFmcArgs_HAL(pGpu, pKernelGsp, pGspFw) kgspSetupGspFmcArgs_DISPATCH(pGpu, pKernelGsp, pGspFw)
#define kgspConfigured(arg0) kgspConfigured_DISPATCH(arg0)
#define kgspPriRead(arg0, offset) kgspPriRead_DISPATCH(arg0, offset)
#define kgspRegWrite(pGpu, pKernelFlcn, offset, data) kgspRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kgspMaskDmemAddr(pGpu, pKernelFlcn, addr) kgspMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kgspStateDestroy(pGpu, pEngstate) kgspStateDestroy_DISPATCH(pGpu, pEngstate)
#define kgspVprintf(arg0, bReportStart, fmt, args) kgspVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define kgspClearInterrupt(pGpu, pIntrService, pParams) kgspClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define kgspPriWrite(arg0, offset, data) kgspPriWrite_DISPATCH(arg0, offset, data)
#define kgspMapBufferDescriptor(arg0, pBufDesc) kgspMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kgspSyncBufferDescriptor(arg0, pBufDesc, offset, size) kgspSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define kgspRegRead(pGpu, pKernelFlcn, offset) kgspRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kgspIsPresent(pGpu, pEngstate) kgspIsPresent_DISPATCH(pGpu, pEngstate)
#define kgspReadEmem(arg0, offset, size, pBuf) kgspReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kgspStateLoad(pGpu, pEngstate, arg0) kgspStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgspGetScratchOffsets(arg0, scratchGroupId) kgspGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kgspUnload(arg0) kgspUnload_DISPATCH(arg0)
#define kgspStateUnload(pGpu, pEngstate, arg0) kgspStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgspServiceNotificationInterrupt(pGpu, pIntrService, pParams) kgspServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define kgspGetWFL0Offset(arg0) kgspGetWFL0Offset_DISPATCH(arg0)
#define kgspStateInitLocked(pGpu, pEngstate) kgspStateInitLocked_DISPATCH(pGpu, pEngstate)
#define kgspStatePreLoad(pGpu, pEngstate, arg0) kgspStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgspStatePostUnload(pGpu, pEngstate, arg0) kgspStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgspStateDestroy(pGpu, pEngstate) kgspStateDestroy_DISPATCH(pGpu, pEngstate)
#define kgspStatePreUnload(pGpu, pEngstate, arg0) kgspStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgspStateInitUnlocked(pGpu, pEngstate) kgspStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgspInitMissing(pGpu, pEngstate) kgspInitMissing_DISPATCH(pGpu, pEngstate)
#define kgspStatePreInitLocked(pGpu, pEngstate) kgspStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kgspStatePreInitUnlocked(pGpu, pEngstate) kgspStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgspClearInterrupt(pGpu, pIntrService, pParams) kgspClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define kgspStatePostLoad(pGpu, pEngstate, arg0) kgspStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgspIsPresent(pGpu, pEngstate) kgspIsPresent_DISPATCH(pGpu, pEngstate)
#define kgspUnmapBufferDescriptor(arg0, pBufDesc) kgspUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kgspReadDmem(arg0, offset, size, pBuf) kgspReadDmem_DISPATCH(arg0, offset, size, pBuf)
void kgspProgramLibosBootArgsAddr_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
@@ -512,12 +545,13 @@ static inline NV_STATUS kgspSetCmdQueueHead(struct OBJGPU *pGpu, struct KernelGs
#define kgspSetCmdQueueHead_HAL(pGpu, pKernelGsp, queueIdx, value) kgspSetCmdQueueHead(pGpu, pKernelGsp, queueIdx, value)
void kgspHealthCheck_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
NvBool kgspHealthCheck_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
#ifdef __nvoc_kernel_gsp_h_disabled
static inline void kgspHealthCheck(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
static inline NvBool kgspHealthCheck(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!");
return NV_FALSE;
}
#else //__nvoc_kernel_gsp_h_disabled
#define kgspHealthCheck(pGpu, pKernelGsp) kgspHealthCheck_TU102(pGpu, pKernelGsp)
@@ -984,10 +1018,70 @@ static inline NV_STATUS kgspSetupGspFmcArgs_DISPATCH(struct OBJGPU *pGpu, struct
return pKernelGsp->__kgspSetupGspFmcArgs__(pGpu, pKernelGsp, pGspFw);
}
static inline NvBool kgspConfigured_DISPATCH(struct KernelGsp *arg0) {
return arg0->__kgspConfigured__(arg0);
}
static inline NvU32 kgspPriRead_DISPATCH(struct KernelGsp *arg0, NvU32 offset) {
return arg0->__kgspPriRead__(arg0, offset);
}
static inline void kgspRegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset, NvU32 data) {
pKernelFlcn->__kgspRegWrite__(pGpu, pKernelFlcn, offset, data);
}
static inline NvU32 kgspMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 addr) {
return pKernelFlcn->__kgspMaskDmemAddr__(pGpu, pKernelFlcn, addr);
}
static inline void kgspStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
pEngstate->__kgspStateDestroy__(pGpu, pEngstate);
}
static inline void kgspVprintf_DISPATCH(struct KernelGsp *arg0, NvBool bReportStart, const char *fmt, va_list args) {
arg0->__kgspVprintf__(arg0, bReportStart, fmt, args);
}
static inline NvBool kgspClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return pIntrService->__kgspClearInterrupt__(pGpu, pIntrService, pParams);
}
static inline void kgspPriWrite_DISPATCH(struct KernelGsp *arg0, NvU32 offset, NvU32 data) {
arg0->__kgspPriWrite__(arg0, offset, data);
}
static inline void *kgspMapBufferDescriptor_DISPATCH(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
return arg0->__kgspMapBufferDescriptor__(arg0, pBufDesc);
}
static inline void kgspSyncBufferDescriptor_DISPATCH(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
arg0->__kgspSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}
static inline NvU32 kgspRegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset) {
return pKernelFlcn->__kgspRegRead__(pGpu, pKernelFlcn, offset);
}
static inline NvBool kgspIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
return pEngstate->__kgspIsPresent__(pGpu, pEngstate);
}
static inline void kgspReadEmem_DISPATCH(struct KernelGsp *arg0, NvU64 offset, NvU64 size, void *pBuf) {
arg0->__kgspReadEmem__(arg0, offset, size, pBuf);
}
static inline NV_STATUS kgspStateLoad_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return pEngstate->__kgspStateLoad__(pGpu, pEngstate, arg0);
}
static inline const NvU32 *kgspGetScratchOffsets_DISPATCH(struct KernelGsp *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return arg0->__kgspGetScratchOffsets__(arg0, scratchGroupId);
}
static inline void kgspUnload_DISPATCH(struct KernelGsp *arg0) {
arg0->__kgspUnload__(arg0);
}
static inline NV_STATUS kgspStateUnload_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return pEngstate->__kgspStateUnload__(pGpu, pEngstate, arg0);
}
@@ -996,6 +1090,10 @@ static inline NV_STATUS kgspServiceNotificationInterrupt_DISPATCH(struct OBJGPU
return pIntrService->__kgspServiceNotificationInterrupt__(pGpu, pIntrService, pParams);
}
static inline NvU32 kgspGetWFL0Offset_DISPATCH(struct KernelGsp *arg0) {
return arg0->__kgspGetWFL0Offset__(arg0);
}
static inline NV_STATUS kgspStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
return pEngstate->__kgspStateInitLocked__(pGpu, pEngstate);
}
@@ -1008,10 +1106,6 @@ static inline NV_STATUS kgspStatePostUnload_DISPATCH(POBJGPU pGpu, struct Kernel
return pEngstate->__kgspStatePostUnload__(pGpu, pEngstate, arg0);
}
static inline void kgspStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
pEngstate->__kgspStateDestroy__(pGpu, pEngstate);
}
static inline NV_STATUS kgspStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return pEngstate->__kgspStatePreUnload__(pGpu, pEngstate, arg0);
}
@@ -1032,16 +1126,16 @@ static inline NV_STATUS kgspStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct K
return pEngstate->__kgspStatePreInitUnlocked__(pGpu, pEngstate);
}
static inline NvBool kgspClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return pIntrService->__kgspClearInterrupt__(pGpu, pIntrService, pParams);
}
static inline NV_STATUS kgspStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
return pEngstate->__kgspStatePostLoad__(pGpu, pEngstate, arg0);
}
static inline NvBool kgspIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
return pEngstate->__kgspIsPresent__(pGpu, pEngstate);
static inline void kgspUnmapBufferDescriptor_DISPATCH(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
arg0->__kgspUnmapBufferDescriptor__(arg0, pBufDesc);
}
static inline void kgspReadDmem_DISPATCH(struct KernelGsp *arg0, NvU32 offset, NvU32 size, void *pBuf) {
arg0->__kgspReadDmem__(arg0, offset, size, pBuf);
}
void kgspDestruct_IMPL(struct KernelGsp *pKernelGsp);

View File

@@ -19,6 +19,10 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;
void __nvoc_init_KernelSec2(KernelSec2*, RmHalspecOwner* );
@@ -52,6 +56,18 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_IntrService = {
/*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_IntrService),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_CrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelCrashCatEngine = {
/*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
/*dtor=*/ &__nvoc_destructFromBase,
/*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine),
};
static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelFalcon = {
/*pClassDef=*/ &__nvoc_class_def_KernelFalcon,
/*dtor=*/ &__nvoc_destructFromBase,
@@ -59,10 +75,12 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelFalcon = {
};
static const struct NVOC_CASTINFO __nvoc_castinfo_KernelSec2 = {
/*numRelatives=*/ 5,
/*numRelatives=*/ 7,
/*relatives=*/ {
&__nvoc_rtti_KernelSec2_KernelSec2,
&__nvoc_rtti_KernelSec2_KernelFalcon,
&__nvoc_rtti_KernelSec2_KernelCrashCatEngine,
&__nvoc_rtti_KernelSec2_CrashCatEngine,
&__nvoc_rtti_KernelSec2_IntrService,
&__nvoc_rtti_KernelSec2_OBJENGSTATE,
&__nvoc_rtti_KernelSec2_Object,
@@ -104,10 +122,78 @@ static NV_STATUS __nvoc_thunk_KernelSec2_engstateStateLoad(struct OBJGPU *pGpu,
return ksec2StateLoad(pGpu, (struct KernelSec2 *)(((unsigned char *)pKernelSec2) - __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}
static NvBool __nvoc_thunk_KernelCrashCatEngine_ksec2Configured(struct KernelSec2 *arg0) {
return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset));
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_ksec2PriRead(struct KernelSec2 *arg0, NvU32 offset) {
return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset);
}
static void __nvoc_thunk_KernelFalcon_ksec2RegWrite(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset, NvU32 data) {
kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelSec2_KernelFalcon.offset), offset, data);
}
static NvU32 __nvoc_thunk_KernelFalcon_ksec2MaskDmemAddr(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 addr) {
return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelSec2_KernelFalcon.offset), addr);
}
static void __nvoc_thunk_OBJENGSTATE_ksec2StateDestroy(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}
static void __nvoc_thunk_KernelCrashCatEngine_ksec2Vprintf(struct KernelSec2 *arg0, NvBool bReportStart, const char *fmt, va_list args) {
kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}
static NvBool __nvoc_thunk_IntrService_ksec2ClearInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
}
static void __nvoc_thunk_KernelCrashCatEngine_ksec2PriWrite(struct KernelSec2 *arg0, NvU32 offset, NvU32 data) {
kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset, data);
}
static void *__nvoc_thunk_KernelCrashCatEngine_ksec2MapBufferDescriptor(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), pBufDesc);
}
static void __nvoc_thunk_KernelCrashCatEngine_ksec2SyncBufferDescriptor(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}
static NvU32 __nvoc_thunk_KernelFalcon_ksec2RegRead(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset) {
return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelSec2_KernelFalcon.offset), offset);
}
static NvBool __nvoc_thunk_OBJENGSTATE_ksec2IsPresent(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}
static NvU32 __nvoc_thunk_IntrService_ksec2ServiceInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
}
static void __nvoc_thunk_KernelCrashCatEngine_ksec2ReadEmem(struct KernelSec2 *arg0, NvU64 offset, NvU64 size, void *pBuf) {
kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset, size, pBuf);
}
static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_ksec2GetScratchOffsets(struct KernelSec2 *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), scratchGroupId);
}
static void __nvoc_thunk_KernelCrashCatEngine_ksec2Unload(struct KernelSec2 *arg0) {
kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateUnload(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}
static NvU32 __nvoc_thunk_KernelCrashCatEngine_ksec2GetWFL0Offset(struct KernelSec2 *arg0) {
return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateInitLocked(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}
@@ -120,10 +206,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePostUnload(POBJGPU pGpu, str
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}
static void __nvoc_thunk_OBJENGSTATE_ksec2StateDestroy(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreUnload(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}
@@ -144,20 +226,16 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreInitUnlocked(POBJGPU pGpu
return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}
static NvBool __nvoc_thunk_IntrService_ksec2ClearInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePostLoad(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}
static NvBool __nvoc_thunk_OBJENGSTATE_ksec2IsPresent(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
static void __nvoc_thunk_KernelCrashCatEngine_ksec2UnmapBufferDescriptor(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), pBufDesc);
}
static NvU32 __nvoc_thunk_IntrService_ksec2ServiceInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
static void __nvoc_thunk_KernelCrashCatEngine_ksec2ReadDmem(struct KernelSec2 *arg0, NvU32 offset, NvU32 size, void *pBuf) {
kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset, size, pBuf);
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSec2 =
@@ -313,16 +391,48 @@ static void __nvoc_init_funcTable_KernelSec2_1(KernelSec2 *pThis, RmHalspecOwner
pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelSec2_engstateStateLoad;
pThis->__ksec2Configured__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2Configured;
pThis->__ksec2PriRead__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2PriRead;
pThis->__ksec2RegWrite__ = &__nvoc_thunk_KernelFalcon_ksec2RegWrite;
pThis->__ksec2MaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_ksec2MaskDmemAddr;
pThis->__ksec2StateDestroy__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateDestroy;
pThis->__ksec2Vprintf__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2Vprintf;
pThis->__ksec2ClearInterrupt__ = &__nvoc_thunk_IntrService_ksec2ClearInterrupt;
pThis->__ksec2PriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2PriWrite;
pThis->__ksec2MapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2MapBufferDescriptor;
pThis->__ksec2SyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2SyncBufferDescriptor;
pThis->__ksec2RegRead__ = &__nvoc_thunk_KernelFalcon_ksec2RegRead;
pThis->__ksec2IsPresent__ = &__nvoc_thunk_OBJENGSTATE_ksec2IsPresent;
pThis->__ksec2ServiceInterrupt__ = &__nvoc_thunk_IntrService_ksec2ServiceInterrupt;
pThis->__ksec2ReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2ReadEmem;
pThis->__ksec2GetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2GetScratchOffsets;
pThis->__ksec2Unload__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2Unload;
pThis->__ksec2StateUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateUnload;
pThis->__ksec2GetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2GetWFL0Offset;
pThis->__ksec2StateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateInitLocked;
pThis->__ksec2StatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreLoad;
pThis->__ksec2StatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePostUnload;
pThis->__ksec2StateDestroy__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateDestroy;
pThis->__ksec2StatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreUnload;
pThis->__ksec2StateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateInitUnlocked;
@@ -333,13 +443,11 @@ static void __nvoc_init_funcTable_KernelSec2_1(KernelSec2 *pThis, RmHalspecOwner
pThis->__ksec2StatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreInitUnlocked;
pThis->__ksec2ClearInterrupt__ = &__nvoc_thunk_IntrService_ksec2ClearInterrupt;
pThis->__ksec2StatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePostLoad;
pThis->__ksec2IsPresent__ = &__nvoc_thunk_OBJENGSTATE_ksec2IsPresent;
pThis->__ksec2UnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2UnmapBufferDescriptor;
pThis->__ksec2ServiceInterrupt__ = &__nvoc_thunk_IntrService_ksec2ServiceInterrupt;
pThis->__ksec2ReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2ReadDmem;
}
void __nvoc_init_funcTable_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner) {
@@ -354,6 +462,8 @@ void __nvoc_init_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner)
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService;
pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine;
pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon;
__nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
__nvoc_init_IntrService(&pThis->__nvoc_base_IntrService);

View File

@@ -57,6 +57,8 @@ struct KernelSec2 {
struct Object *__nvoc_pbase_Object;
struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
struct IntrService *__nvoc_pbase_IntrService;
struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
struct KernelFalcon *__nvoc_pbase_KernelFalcon;
struct KernelSec2 *__nvoc_pbase_KernelSec2;
NV_STATUS (*__ksec2ConstructEngine__)(struct OBJGPU *, struct KernelSec2 *, ENGDESCRIPTOR);
@@ -69,20 +71,35 @@ struct KernelSec2 {
const BINDATA_ARCHIVE *(*__ksec2GetBinArchiveBlUcode__)(struct OBJGPU *, struct KernelSec2 *);
NV_STATUS (*__ksec2GetGenericBlUcode__)(struct OBJGPU *, struct KernelSec2 *, const RM_FLCN_BL_DESC **, const NvU8 **);
const BINDATA_ARCHIVE *(*__ksec2GetBinArchiveSecurescrubUcode__)(struct OBJGPU *, struct KernelSec2 *);
NvBool (*__ksec2Configured__)(struct KernelSec2 *);
NvU32 (*__ksec2PriRead__)(struct KernelSec2 *, NvU32);
void (*__ksec2RegWrite__)(struct OBJGPU *, struct KernelSec2 *, NvU32, NvU32);
NvU32 (*__ksec2MaskDmemAddr__)(struct OBJGPU *, struct KernelSec2 *, NvU32);
void (*__ksec2StateDestroy__)(POBJGPU, struct KernelSec2 *);
void (*__ksec2Vprintf__)(struct KernelSec2 *, NvBool, const char *, va_list);
NvBool (*__ksec2ClearInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceClearInterruptArguments *);
void (*__ksec2PriWrite__)(struct KernelSec2 *, NvU32, NvU32);
void *(*__ksec2MapBufferDescriptor__)(struct KernelSec2 *, CrashCatBufferDescriptor *);
void (*__ksec2SyncBufferDescriptor__)(struct KernelSec2 *, CrashCatBufferDescriptor *, NvU32, NvU32);
NvU32 (*__ksec2RegRead__)(struct OBJGPU *, struct KernelSec2 *, NvU32);
NvBool (*__ksec2IsPresent__)(POBJGPU, struct KernelSec2 *);
NvU32 (*__ksec2ServiceInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceServiceInterruptArguments *);
void (*__ksec2ReadEmem__)(struct KernelSec2 *, NvU64, NvU64, void *);
const NvU32 *(*__ksec2GetScratchOffsets__)(struct KernelSec2 *, NV_CRASHCAT_SCRATCH_GROUP_ID);
void (*__ksec2Unload__)(struct KernelSec2 *);
NV_STATUS (*__ksec2StateUnload__)(POBJGPU, struct KernelSec2 *, NvU32);
NvU32 (*__ksec2GetWFL0Offset__)(struct KernelSec2 *);
NV_STATUS (*__ksec2StateInitLocked__)(POBJGPU, struct KernelSec2 *);
NV_STATUS (*__ksec2StatePreLoad__)(POBJGPU, struct KernelSec2 *, NvU32);
NV_STATUS (*__ksec2StatePostUnload__)(POBJGPU, struct KernelSec2 *, NvU32);
void (*__ksec2StateDestroy__)(POBJGPU, struct KernelSec2 *);
NV_STATUS (*__ksec2StatePreUnload__)(POBJGPU, struct KernelSec2 *, NvU32);
NV_STATUS (*__ksec2StateInitUnlocked__)(POBJGPU, struct KernelSec2 *);
void (*__ksec2InitMissing__)(POBJGPU, struct KernelSec2 *);
NV_STATUS (*__ksec2StatePreInitLocked__)(POBJGPU, struct KernelSec2 *);
NV_STATUS (*__ksec2StatePreInitUnlocked__)(POBJGPU, struct KernelSec2 *);
NvBool (*__ksec2ClearInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceClearInterruptArguments *);
NV_STATUS (*__ksec2StatePostLoad__)(POBJGPU, struct KernelSec2 *, NvU32);
NvBool (*__ksec2IsPresent__)(POBJGPU, struct KernelSec2 *);
NvU32 (*__ksec2ServiceInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceServiceInterruptArguments *);
void (*__ksec2UnmapBufferDescriptor__)(struct KernelSec2 *, CrashCatBufferDescriptor *);
void (*__ksec2ReadDmem__)(struct KernelSec2 *, NvU32, NvU32, void *);
const RM_FLCN_BL_DESC *pGenericBlUcodeDesc;
const NvU8 *pGenericBlUcodeImg;
};
@@ -137,20 +154,35 @@ NV_STATUS __nvoc_objCreate_KernelSec2(KernelSec2**, Dynamic*, NvU32);
#define ksec2GetGenericBlUcode_HAL(pGpu, pKernelSec2, ppDesc, ppImg) ksec2GetGenericBlUcode_DISPATCH(pGpu, pKernelSec2, ppDesc, ppImg)
#define ksec2GetBinArchiveSecurescrubUcode(pGpu, pKernelSec2) ksec2GetBinArchiveSecurescrubUcode_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetBinArchiveSecurescrubUcode_HAL(pGpu, pKernelSec2) ksec2GetBinArchiveSecurescrubUcode_DISPATCH(pGpu, pKernelSec2)
#define ksec2Configured(arg0) ksec2Configured_DISPATCH(arg0)
#define ksec2PriRead(arg0, offset) ksec2PriRead_DISPATCH(arg0, offset)
#define ksec2RegWrite(pGpu, pKernelFlcn, offset, data) ksec2RegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define ksec2MaskDmemAddr(pGpu, pKernelFlcn, addr) ksec2MaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define ksec2StateDestroy(pGpu, pEngstate) ksec2StateDestroy_DISPATCH(pGpu, pEngstate)
#define ksec2Vprintf(arg0, bReportStart, fmt, args) ksec2Vprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define ksec2ClearInterrupt(pGpu, pIntrService, pParams) ksec2ClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define ksec2PriWrite(arg0, offset, data) ksec2PriWrite_DISPATCH(arg0, offset, data)
#define ksec2MapBufferDescriptor(arg0, pBufDesc) ksec2MapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define ksec2SyncBufferDescriptor(arg0, pBufDesc, offset, size) ksec2SyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define ksec2RegRead(pGpu, pKernelFlcn, offset) ksec2RegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define ksec2IsPresent(pGpu, pEngstate) ksec2IsPresent_DISPATCH(pGpu, pEngstate)
#define ksec2ServiceInterrupt(pGpu, pIntrService, pParams) ksec2ServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define ksec2ReadEmem(arg0, offset, size, pBuf) ksec2ReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define ksec2GetScratchOffsets(arg0, scratchGroupId) ksec2GetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define ksec2Unload(arg0) ksec2Unload_DISPATCH(arg0)
#define ksec2StateUnload(pGpu, pEngstate, arg0) ksec2StateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define ksec2GetWFL0Offset(arg0) ksec2GetWFL0Offset_DISPATCH(arg0)
#define ksec2StateInitLocked(pGpu, pEngstate) ksec2StateInitLocked_DISPATCH(pGpu, pEngstate)
#define ksec2StatePreLoad(pGpu, pEngstate, arg0) ksec2StatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define ksec2StatePostUnload(pGpu, pEngstate, arg0) ksec2StatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define ksec2StateDestroy(pGpu, pEngstate) ksec2StateDestroy_DISPATCH(pGpu, pEngstate)
#define ksec2StatePreUnload(pGpu, pEngstate, arg0) ksec2StatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define ksec2StateInitUnlocked(pGpu, pEngstate) ksec2StateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define ksec2InitMissing(pGpu, pEngstate) ksec2InitMissing_DISPATCH(pGpu, pEngstate)
#define ksec2StatePreInitLocked(pGpu, pEngstate) ksec2StatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define ksec2StatePreInitUnlocked(pGpu, pEngstate) ksec2StatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define ksec2ClearInterrupt(pGpu, pIntrService, pParams) ksec2ClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define ksec2StatePostLoad(pGpu, pEngstate, arg0) ksec2StatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define ksec2IsPresent(pGpu, pEngstate) ksec2IsPresent_DISPATCH(pGpu, pEngstate)
#define ksec2ServiceInterrupt(pGpu, pIntrService, pParams) ksec2ServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define ksec2UnmapBufferDescriptor(arg0, pBufDesc) ksec2UnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define ksec2ReadDmem(arg0, offset, size, pBuf) ksec2ReadDmem_DISPATCH(arg0, offset, size, pBuf)
NV_STATUS ksec2ConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg0);
static inline NV_STATUS ksec2ConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg0) {
@@ -235,10 +267,78 @@ static inline const BINDATA_ARCHIVE *ksec2GetBinArchiveSecurescrubUcode_DISPATCH
return pKernelSec2->__ksec2GetBinArchiveSecurescrubUcode__(pGpu, pKernelSec2);
}
static inline NvBool ksec2Configured_DISPATCH(struct KernelSec2 *arg0) {
return arg0->__ksec2Configured__(arg0);
}
static inline NvU32 ksec2PriRead_DISPATCH(struct KernelSec2 *arg0, NvU32 offset) {
return arg0->__ksec2PriRead__(arg0, offset);
}
static inline void ksec2RegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset, NvU32 data) {
pKernelFlcn->__ksec2RegWrite__(pGpu, pKernelFlcn, offset, data);
}
static inline NvU32 ksec2MaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 addr) {
return pKernelFlcn->__ksec2MaskDmemAddr__(pGpu, pKernelFlcn, addr);
}
static inline void ksec2StateDestroy_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
pEngstate->__ksec2StateDestroy__(pGpu, pEngstate);
}
static inline void ksec2Vprintf_DISPATCH(struct KernelSec2 *arg0, NvBool bReportStart, const char *fmt, va_list args) {
arg0->__ksec2Vprintf__(arg0, bReportStart, fmt, args);
}
static inline NvBool ksec2ClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return pIntrService->__ksec2ClearInterrupt__(pGpu, pIntrService, pParams);
}
static inline void ksec2PriWrite_DISPATCH(struct KernelSec2 *arg0, NvU32 offset, NvU32 data) {
arg0->__ksec2PriWrite__(arg0, offset, data);
}
static inline void *ksec2MapBufferDescriptor_DISPATCH(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
return arg0->__ksec2MapBufferDescriptor__(arg0, pBufDesc);
}
static inline void ksec2SyncBufferDescriptor_DISPATCH(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
arg0->__ksec2SyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}
static inline NvU32 ksec2RegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset) {
return pKernelFlcn->__ksec2RegRead__(pGpu, pKernelFlcn, offset);
}
static inline NvBool ksec2IsPresent_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
return pEngstate->__ksec2IsPresent__(pGpu, pEngstate);
}
static inline NvU32 ksec2ServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
return pIntrService->__ksec2ServiceInterrupt__(pGpu, pIntrService, pParams);
}
static inline void ksec2ReadEmem_DISPATCH(struct KernelSec2 *arg0, NvU64 offset, NvU64 size, void *pBuf) {
arg0->__ksec2ReadEmem__(arg0, offset, size, pBuf);
}
static inline const NvU32 *ksec2GetScratchOffsets_DISPATCH(struct KernelSec2 *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
return arg0->__ksec2GetScratchOffsets__(arg0, scratchGroupId);
}
static inline void ksec2Unload_DISPATCH(struct KernelSec2 *arg0) {
arg0->__ksec2Unload__(arg0);
}
static inline NV_STATUS ksec2StateUnload_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
return pEngstate->__ksec2StateUnload__(pGpu, pEngstate, arg0);
}
static inline NvU32 ksec2GetWFL0Offset_DISPATCH(struct KernelSec2 *arg0) {
return arg0->__ksec2GetWFL0Offset__(arg0);
}
static inline NV_STATUS ksec2StateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
return pEngstate->__ksec2StateInitLocked__(pGpu, pEngstate);
}
@@ -251,10 +351,6 @@ static inline NV_STATUS ksec2StatePostUnload_DISPATCH(POBJGPU pGpu, struct Kerne
return pEngstate->__ksec2StatePostUnload__(pGpu, pEngstate, arg0);
}
static inline void ksec2StateDestroy_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
pEngstate->__ksec2StateDestroy__(pGpu, pEngstate);
}
static inline NV_STATUS ksec2StatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
return pEngstate->__ksec2StatePreUnload__(pGpu, pEngstate, arg0);
}
@@ -275,20 +371,16 @@ static inline NV_STATUS ksec2StatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct
return pEngstate->__ksec2StatePreInitUnlocked__(pGpu, pEngstate);
}
static inline NvBool ksec2ClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
return pIntrService->__ksec2ClearInterrupt__(pGpu, pIntrService, pParams);
}
static inline NV_STATUS ksec2StatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
return pEngstate->__ksec2StatePostLoad__(pGpu, pEngstate, arg0);
}
static inline NvBool ksec2IsPresent_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
return pEngstate->__ksec2IsPresent__(pGpu, pEngstate);
static inline void ksec2UnmapBufferDescriptor_DISPATCH(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
arg0->__ksec2UnmapBufferDescriptor__(arg0, pBufDesc);
}
static inline NvU32 ksec2ServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
return pIntrService->__ksec2ServiceInterrupt__(pGpu, pIntrService, pParams);
static inline void ksec2ReadDmem_DISPATCH(struct KernelSec2 *arg0, NvU32 offset, NvU32 size, void *pBuf) {
arg0->__ksec2ReadDmem__(arg0, offset, size, pBuf);
}
void ksec2Destruct_IMPL(struct KernelSec2 *pKernelSec2);

View File

@@ -815,6 +815,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x20B6, 0x1492, 0x10de, "NVIDIA PG506-232" },
{ 0x20B7, 0x1532, 0x10de, "NVIDIA A30" },
{ 0x20B7, 0x1804, 0x10de, "NVIDIA A30" },
{ 0x20BD, 0x17f4, 0x10de, "NVIDIA A800-SXM4-40GB" },
{ 0x20F1, 0x145f, 0x10de, "NVIDIA A100-PCIE-40GB" },
{ 0x20F3, 0x179b, 0x10de, "NVIDIA A800-SXM4-80GB" },
{ 0x20F3, 0x179c, 0x10de, "NVIDIA A800-SXM4-80GB" },
@@ -826,6 +827,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x20F3, 0x17a2, 0x10de, "NVIDIA A800-SXM4-80GB" },
{ 0x20F5, 0x1799, 0x10de, "NVIDIA A800 80GB PCIe" },
{ 0x20F5, 0x179a, 0x10de, "NVIDIA A800 80GB PCIe LC" },
{ 0x20F6, 0x180a, 0x10de, "NVIDIA A800 40GB Active" },
{ 0x2182, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" },
{ 0x2184, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660" },
{ 0x2187, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 SUPER" },
@@ -885,6 +887,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2236, 0x1482, 0x10de, "NVIDIA A10" },
{ 0x2237, 0x152f, 0x10de, "NVIDIA A10G" },
{ 0x2238, 0x1677, 0x10de, "NVIDIA A10M" },
{ 0x2321, 0x1839, 0x10de, "NVIDIA H100 NVL" },
{ 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" },
{ 0x2324, 0x17a6, 0x10de, "NVIDIA H800" },
{ 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
@@ -892,6 +895,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
{ 0x233A, 0x183a, 0x10de, "NVIDIA H800 NVL" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
{ 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@@ -986,10 +990,13 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B2, 0x17fa, 0x17aa, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
{ 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },
{ 0x26B9, 0x1851, 0x10de, "NVIDIA L40S" },
{ 0x26B9, 0x18cf, 0x10de, "NVIDIA L40S" },
{ 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" },
{ 0x2717, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
{ 0x2730, 0x0000, 0x0000, "NVIDIA RTX 5000 Ada Generation Laptop GPU" },
{ 0x2757, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
{ 0x2770, 0x0000, 0x0000, "NVIDIA RTX 5000 Ada Generation Embedded GPU" },
{ 0x2782, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti" },
{ 0x2786, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070" },
{ 0x27A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
@@ -1006,6 +1013,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x27BA, 0x0000, 0x0000, "NVIDIA RTX 4000 Ada Generation Laptop GPU" },
{ 0x27BB, 0x0000, 0x0000, "NVIDIA RTX 3500 Ada Generation Laptop GPU" },
{ 0x27E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
{ 0x27FB, 0x0000, 0x0000, "NVIDIA RTX 3500 Ada Generation Embedded GPU" },
{ 0x2803, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Ti" },
{ 0x2805, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Ti" },
{ 0x2820, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" },
@@ -1017,6 +1025,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x28B8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Laptop GPU" },
{ 0x28E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" },
{ 0x28E1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
{ 0x28F8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Embedded GPU" },
{ 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" },
{ 0x13BD, 0x11cd, 0x10DE, "GRID M10-1B" },
{ 0x13BD, 0x11ce, 0x10DE, "GRID M10-0Q" },
@@ -1534,6 +1543,19 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x20F5, 0x17ca, 0x10DE, "GRID A800D-40C" },
{ 0x20F5, 0x17cb, 0x10DE, "GRID A800D-80C" },
{ 0x20F5, 0x183f, 0x10DE, "GRID A800D-1-20C" },
{ 0x20F6, 0x17cc, 0x10DE, "GRID A800-1-5CME" },
{ 0x20F6, 0x17cd, 0x10DE, "GRID A800-1-5C" },
{ 0x20F6, 0x17ce, 0x10DE, "GRID A800-2-10C" },
{ 0x20F6, 0x17cf, 0x10DE, "GRID A800-3-20C" },
{ 0x20F6, 0x17d0, 0x10DE, "GRID A800-4-20C" },
{ 0x20F6, 0x17d1, 0x10DE, "GRID A800-7-40C" },
{ 0x20F6, 0x17d2, 0x10DE, "GRID A800-4C" },
{ 0x20F6, 0x17d3, 0x10DE, "GRID A800-5C" },
{ 0x20F6, 0x17d4, 0x10DE, "GRID A800-8C" },
{ 0x20F6, 0x17d5, 0x10DE, "GRID A800-10C" },
{ 0x20F6, 0x17d6, 0x10DE, "GRID A800-20C" },
{ 0x20F6, 0x17d7, 0x10DE, "GRID A800-40C" },
{ 0x20F6, 0x1843, 0x10DE, "GRID A800-1-10C" },
{ 0x2230, 0x14fa, 0x10DE, "NVIDIA RTXA6000-1B" },
{ 0x2230, 0x14fb, 0x10DE, "NVIDIA RTXA6000-2B" },
{ 0x2230, 0x14fc, 0x10DE, "NVIDIA RTXA6000-1Q" },
@@ -1888,6 +1910,30 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B1, 0x172c, 0x10DE, "NVIDIA RTX6000-Ada-16C" },
{ 0x26B1, 0x172d, 0x10DE, "NVIDIA RTX6000-Ada-24C" },
{ 0x26B1, 0x172e, 0x10DE, "NVIDIA RTX6000-Ada-48C" },
{ 0x26B2, 0x1821, 0x10DE, "NVIDIA RTX5000-Ada-1B" },
{ 0x26B2, 0x1822, 0x10DE, "NVIDIA RTX5000-Ada-2B" },
{ 0x26B2, 0x1823, 0x10DE, "NVIDIA RTX5000-Ada-1Q" },
{ 0x26B2, 0x1824, 0x10DE, "NVIDIA RTX5000-Ada-2Q" },
{ 0x26B2, 0x1825, 0x10DE, "NVIDIA RTX5000-Ada-4Q" },
{ 0x26B2, 0x1826, 0x10DE, "NVIDIA RTX5000-Ada-8Q" },
{ 0x26B2, 0x1827, 0x10DE, "NVIDIA RTX5000-Ada-16Q" },
{ 0x26B2, 0x1828, 0x10DE, "NVIDIA RTX5000-Ada-32Q" },
{ 0x26B2, 0x1829, 0x10DE, "NVIDIA RTX5000-Ada-1A" },
{ 0x26B2, 0x182a, 0x10DE, "NVIDIA RTX5000-Ada-2A" },
{ 0x26B2, 0x182b, 0x10DE, "NVIDIA RTX5000-Ada-4A" },
{ 0x26B2, 0x182c, 0x10DE, "NVIDIA RTX5000-Ada-8A" },
{ 0x26B2, 0x182d, 0x10DE, "NVIDIA RTX5000-Ada-16A" },
{ 0x26B2, 0x182e, 0x10DE, "NVIDIA RTX5000-Ada-32A" },
{ 0x26B2, 0x182f, 0x10DE, "NVIDIA RTX5000-Ada-1" },
{ 0x26B2, 0x1830, 0x10DE, "NVIDIA RTX5000-Ada-2" },
{ 0x26B2, 0x1831, 0x10DE, "NVIDIA RTX5000-Ada-4" },
{ 0x26B2, 0x1832, 0x10DE, "NVIDIA RTX5000-Ada-8" },
{ 0x26B2, 0x1833, 0x10DE, "NVIDIA RTX5000-Ada-16" },
{ 0x26B2, 0x1834, 0x10DE, "NVIDIA RTX5000-Ada-32" },
{ 0x26B2, 0x1835, 0x10DE, "NVIDIA RTX5000-Ada-4C" },
{ 0x26B2, 0x1836, 0x10DE, "NVIDIA RTX5000-Ada-8C" },
{ 0x26B2, 0x1837, 0x10DE, "NVIDIA RTX5000-Ada-16C" },
{ 0x26B2, 0x1838, 0x10DE, "NVIDIA RTX5000-Ada-32C" },
{ 0x26B5, 0x176d, 0x10DE, "NVIDIA L40-1B" },
{ 0x26B5, 0x176e, 0x10DE, "NVIDIA L40-2B" },
{ 0x26B5, 0x176f, 0x10DE, "NVIDIA L40-1Q" },
@@ -1962,6 +2008,47 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B8, 0x176c, 0x10DE, "NVIDIA L40G-24C" },
{ 0x26B8, 0x181c, 0x10DE, "NVIDIA GeForce RTX 3060" },
{ 0x26B8, 0x181e, 0x10DE, "NVIDIA GeForce RTX 3050" },
{ 0x26B9, 0x1889, 0x10DE, "NVIDIA L40S-1B" },
{ 0x26B9, 0x188a, 0x10DE, "NVIDIA L40S-2B" },
{ 0x26B9, 0x188b, 0x10DE, "NVIDIA L40S-1Q" },
{ 0x26B9, 0x188c, 0x10DE, "NVIDIA L40S-2Q" },
{ 0x26B9, 0x188d, 0x10DE, "NVIDIA L40S-3Q" },
{ 0x26B9, 0x188e, 0x10DE, "NVIDIA L40S-4Q" },
{ 0x26B9, 0x188f, 0x10DE, "NVIDIA L40S-6Q" },
{ 0x26B9, 0x1890, 0x10DE, "NVIDIA L40S-8Q" },
{ 0x26B9, 0x1891, 0x10DE, "NVIDIA L40S-12Q" },
{ 0x26B9, 0x1892, 0x10DE, "NVIDIA L40S-16Q" },
{ 0x26B9, 0x1893, 0x10DE, "NVIDIA L40S-24Q" },
{ 0x26B9, 0x1894, 0x10DE, "NVIDIA L40S-48Q" },
{ 0x26B9, 0x1895, 0x10DE, "NVIDIA L40S-1A" },
{ 0x26B9, 0x1896, 0x10DE, "NVIDIA L40S-2A" },
{ 0x26B9, 0x1897, 0x10DE, "NVIDIA L40S-3A" },
{ 0x26B9, 0x1898, 0x10DE, "NVIDIA L40S-4A" },
{ 0x26B9, 0x1899, 0x10DE, "NVIDIA L40S-6A" },
{ 0x26B9, 0x189a, 0x10DE, "NVIDIA L40S-8A" },
{ 0x26B9, 0x189b, 0x10DE, "NVIDIA L40S-12A" },
{ 0x26B9, 0x189c, 0x10DE, "NVIDIA L40S-16A" },
{ 0x26B9, 0x189d, 0x10DE, "NVIDIA L40S-24A" },
{ 0x26B9, 0x189e, 0x10DE, "NVIDIA L40S-48A" },
{ 0x26B9, 0x189f, 0x10DE, "GeForce RTX 3050" },
{ 0x26B9, 0x18a0, 0x10DE, "GeForce RTX 3060" },
{ 0x26B9, 0x18a1, 0x10DE, "NVIDIA L40S-1" },
{ 0x26B9, 0x18a2, 0x10DE, "NVIDIA L40S-2" },
{ 0x26B9, 0x18a3, 0x10DE, "NVIDIA L40S-3" },
{ 0x26B9, 0x18a4, 0x10DE, "NVIDIA L40S-4" },
{ 0x26B9, 0x18a5, 0x10DE, "NVIDIA L40S-6" },
{ 0x26B9, 0x18a6, 0x10DE, "NVIDIA L40S-8" },
{ 0x26B9, 0x18a7, 0x10DE, "NVIDIA L40S-12" },
{ 0x26B9, 0x18a8, 0x10DE, "NVIDIA L40S-16" },
{ 0x26B9, 0x18a9, 0x10DE, "NVIDIA L40S-24" },
{ 0x26B9, 0x18aa, 0x10DE, "NVIDIA L40S-48" },
{ 0x26B9, 0x18ab, 0x10DE, "NVIDIA L40S-4C" },
{ 0x26B9, 0x18ac, 0x10DE, "NVIDIA L40S-6C" },
{ 0x26B9, 0x18ad, 0x10DE, "NVIDIA L40S-8C" },
{ 0x26B9, 0x18ae, 0x10DE, "NVIDIA L40S-12C" },
{ 0x26B9, 0x18af, 0x10DE, "NVIDIA L40S-16C" },
{ 0x26B9, 0x18b0, 0x10DE, "NVIDIA L40S-24C" },
{ 0x26B9, 0x18b1, 0x10DE, "NVIDIA L40S-48C" },
{ 0x27B8, 0x172f, 0x10DE, "NVIDIA L4-1B" },
{ 0x27B8, 0x1730, 0x10DE, "NVIDIA L4-2B" },
{ 0x27B8, 0x1731, 0x10DE, "NVIDIA L4-1Q" },

File diff suppressed because it is too large

View File

@@ -489,6 +489,7 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdOsUnixAudioDynamicPower__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *);
@@ -1082,6 +1083,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles(pSubdevice, pParams) subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdOsUnixGc6BlockerRefCnt(pSubdevice, pParams) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdOsUnixAllowDisallowGcoff(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdOsUnixAudioDynamicPower(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(pSubdevice, pParams)
@@ -3481,6 +3483,12 @@ static inline NV_STATUS subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_DISPATC
return pSubdevice->__subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams) {

View File

@@ -0,0 +1,3 @@
#include "g_kernel_crashcat_engine_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_crashcat_engine_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_crashcat_queue_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_crashcat_report_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_crashcat_wayfinder_nvoc.h"

View File

@@ -566,8 +566,11 @@ typedef struct UvmPlatformInfo_tag
// Out: ATS (Address Translation Services) is supported
NvBool atsSupported;
// Out: AMD SEV (Secure Encrypted Virtualization) is enabled
NvBool sevEnabled;
// Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
// is enabled in the VM, indicating that Confidential Computing must be
// also enabled in the GPU(s); these two security features are either both
// enabled, or both disabled.
NvBool confComputingEnabled;
} UvmPlatformInfo;
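//
// Illustrative sketch only (not part of this change): a consumer of
// UvmPlatformInfo enforcing the invariant documented above, namely that
// CPU-side trusted execution (SEV-SNP/TDX) and GPU Confidential Computing are
// either both enabled or both disabled. The helper name and the bGpuCcEnabled
// parameter are assumptions for the example.
//
static NV_STATUS exampleCheckConfComputeConsistency(const UvmPlatformInfo *pPlatformInfo,
                                                    NvBool bGpuCcEnabled)
{
    // A mismatch is an unsupported configuration, not a recoverable state
    if (pPlatformInfo->confComputingEnabled != bGpuCcEnabled)
        return NV_ERR_INVALID_STATE;
    return NV_OK;
}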
typedef struct UvmGpuClientInfo_tag

View File

@@ -154,6 +154,7 @@ static NV_STATUS _issueRpcAndWait(OBJGPU *pGpu, OBJRPC *pRpc)
// should not be called in broadcast mode
NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);
NV_CHECK(LEVEL_ERROR, rmDeviceGpuLockIsOwner(pGpu->gpuInstance));
if (bProfileRPC)
{

View File

@@ -88,6 +88,7 @@ confComputeConstructEngine_IMPL(OBJGPU *pGpu,
else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_CC_FEATURE_CAPABLE))
{
pConfCompute->setProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED, NV_TRUE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_FASTPATH_SEQ_ENABLED, NV_TRUE);
}
else
{

View File

@@ -235,7 +235,8 @@ confComputeApiCtrlCmdGetGpuCertificate_IMPL
pGpu = GPU_RES_GET_GPU(pSubdevice);
pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
if (pConfCompute != NULL)
if (pConfCompute != NULL && pConfCompute->pSpdm != NULL &&
pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED))
{
// Set max size of certificate buffers before calling SPDM.
pParams->certChainSize = NV_CONF_COMPUTE_CERT_CHAIN_MAX_SIZE;
@@ -271,7 +272,8 @@ confComputeApiCtrlCmdGetGpuAttestationReport_IMPL
pGpu = GPU_RES_GET_GPU(pSubdevice);
pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
if (pConfCompute != NULL)
if (pConfCompute != NULL && pConfCompute->pSpdm != NULL &&
pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED))
{
// Set max size of report buffers before calling SPDM.
pParams->attestationReportSize = NV_CONF_COMPUTE_GPU_ATTESTATION_REPORT_MAX_SIZE;

View File

@@ -0,0 +1,74 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "gpu/falcon/kernel_crashcat_engine.h"
#include "gpu/gpu.h"
#include "utils/nvprintf.h"
#include "published/turing/tu102/dev_falcon_v4.h"
void kcrashcatEngineReadDmem_TU102
(
KernelCrashCatEngine *pKernelCrashCatEng,
NvU32 offset,
NvU32 size,
void *pBuf
)
{
// This implementation only supports 32-bit-aligned accesses
NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, (offset & (sizeof(NvU32) - 1)) == 0);
NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, (size & (sizeof(NvU32) - 1)) == 0);
NvU8 port = pKernelCrashCatEng->dmemPort;
NvU32 dmemc = kcrashcatEngineMaskDmemAddr(pKernelCrashCatEng->pGpu, pKernelCrashCatEng, offset);
dmemc = FLD_SET_DRF(_PFALCON, _FALCON_DMEMC, _AINCR, _TRUE, dmemc);
kcrashcatEngineRegWrite(pKernelCrashCatEng->pGpu, pKernelCrashCatEng,
NV_PFALCON_FALCON_DMEMC(port), dmemc);
NvU32 *pWordBuf = (NvU32 *)pBuf;
for (NvU32 i = 0; i < (size >> 2); i++)
pWordBuf[i] = kcrashcatEngineRegRead(pKernelCrashCatEng->pGpu, pKernelCrashCatEng,
NV_PFALCON_FALCON_DMEMD(port));
}
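//
// Usage sketch only, assuming a caller with a word-aligned record at a known
// DMEM offset; the offset value and record layout below are illustrative, not
// taken from this change. kcrashcatEngineReadDmem_HAL() resolves to the TU102
// implementation above on supported chips.
//
static void exampleReadDmemRecord(KernelCrashCatEngine *pKernelCrashCatEng)
{
    NvU32 record[4];    // 16 bytes: both offset and size stay NvU32-aligned
    kcrashcatEngineReadDmem_HAL(pKernelCrashCatEng,
                                0x100,           // assumed DMEM byte offset
                                sizeof(record),  // multiple of sizeof(NvU32)
                                record);
}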
NvU32 kcrashcatEngineGetWFL0Offset_TU102(KernelCrashCatEngine *pKernelCrashCatEng)
{
return NV_PFALCON_FALCON_DEBUGINFO;
}
const NvU32 *kcrashcatEngineGetScratchOffsets_TU102
(
KernelCrashCatEngine *pKernelCrashCatEng,
NV_CRASHCAT_SCRATCH_GROUP_ID scratchId
)
{
NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE(scratchOffsetTable);
if (scratchId <= NV_CRASHCAT_SCRATCH_GROUP_ID_LAST)
return scratchOffsetTable[scratchId];
NV_PRINTF(LEVEL_ERROR, "unknown CrashCat scratch ID %u\n", scratchId);
return NULL;
}

View File

@@ -0,0 +1,328 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "gpu/falcon/kernel_crashcat_engine.h"
#include "gpu/gpu.h"
#include "core/printf.h"
#include "os/nv_memory_type.h"
NV_STATUS kcrashcatEngineConfigure_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
KernelCrashCatEngineConfig *pEngConfig
)
{
if (!pEngConfig->bEnable)
return NV_OK;
NV_CHECK_OR_RETURN(LEVEL_ERROR, pEngConfig->pName != NULL, NV_ERR_INVALID_ARGUMENT);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pEngConfig->errorId != 0, NV_ERR_INVALID_ARGUMENT);
pKernelCrashCatEng->bConfigured = NV_TRUE;
pKernelCrashCatEng->pName = pEngConfig->pName;
pKernelCrashCatEng->errorId = pEngConfig->errorId;
pKernelCrashCatEng->pGpu = ENG_GET_GPU(pKernelCrashCatEng);
pKernelCrashCatEng->dmemPort = pEngConfig->dmemPort;
if (pEngConfig->allocQueueSize > 0)
{
const NvU32 CRASHCAT_QUEUE_ALIGNMENT = 1u << 10;
pEngConfig->allocQueueSize = NV_ALIGN_UP(pEngConfig->allocQueueSize,
CRASHCAT_QUEUE_ALIGNMENT);
NV_STATUS status;
//
// The queue must be contiguous and 1KB aligned in both size and offset.
// Typically the queue will be a single page to satisfy these requirements.
//
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
memdescCreate(&pKernelCrashCatEng->pQueueMemDesc, pKernelCrashCatEng->pGpu,
pEngConfig->allocQueueSize, CRASHCAT_QUEUE_ALIGNMENT, NV_TRUE,
ADDR_SYSMEM, NV_MEMORY_CACHED, MEMDESC_FLAGS_NONE));
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
memdescAlloc(pKernelCrashCatEng->pQueueMemDesc),
memdescCleanup);
//
// After kcrashcatEngineRegisterCrashBuffer(), the CrashCat library should be able to map
// and access the queue buffer when it shows up in a wayfinder.
//
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
kcrashcatEngineRegisterCrashBuffer(pKernelCrashCatEng,
pKernelCrashCatEng->pQueueMemDesc),
memdescCleanup);
memdescCleanup:
if (status != NV_OK)
{
kcrashcatEngineUnload(pKernelCrashCatEng);
return status;
}
}
return NV_OK;
}
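//
// Minimal configuration sketch, assuming a Falcon-based engine enabling
// CrashCat during construction. The name, error ID, port, and queue size below
// are placeholders; only the KernelCrashCatEngineConfig fields consumed by
// kcrashcatEngineConfigure_IMPL() above are relied upon.
//
static NV_STATUS exampleEnableCrashCat(KernelCrashCatEngine *pKernelCrashCatEng)
{
    KernelCrashCatEngineConfig engConfig = {0};
    engConfig.bEnable        = NV_TRUE;
    engConfig.pName          = "EXAMPLE";  // prefixed to crash report lines
    engConfig.errorId        = 140;        // assumed nonzero Xid for the first report line
    engConfig.dmemPort       = 0;
    engConfig.allocQueueSize = 0x1000;     // rounded up to the 1KB queue alignment
    return kcrashcatEngineConfigure_IMPL(pKernelCrashCatEng, &engConfig);
}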
NvBool kcrashcatEngineConfigured_IMPL(KernelCrashCatEngine *pKernelCrashCatEng)
{
return pKernelCrashCatEng->bConfigured;
}
MEMORY_DESCRIPTOR *kcrashcatEngineGetQueueMemDesc_IMPL(KernelCrashCatEngine *pKernelCrashCatEng)
{
return pKernelCrashCatEng->pQueueMemDesc;
}
void kcrashcatEngineUnload_IMPL(KernelCrashCatEngine *pKernelCrashCatEng)
{
if (pKernelCrashCatEng->pQueueMemDesc != NULL)
{
kcrashcatEngineUnregisterCrashBuffer(pKernelCrashCatEng, pKernelCrashCatEng->pQueueMemDesc);
memdescFree(pKernelCrashCatEng->pQueueMemDesc);
memdescDestroy(pKernelCrashCatEng->pQueueMemDesc);
pKernelCrashCatEng->pQueueMemDesc = NULL;
}
crashcatEngineUnload_IMPL(staticCast(pKernelCrashCatEng, CrashCatEngine));
}
NvU32 kcrashcatEnginePriRead_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
NvU32 offset
)
{
return kcrashcatEngineRegRead(pKernelCrashCatEng->pGpu, pKernelCrashCatEng, offset);
}
void kcrashcatEnginePriWrite_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
NvU32 offset,
NvU32 data
)
{
kcrashcatEngineRegWrite(pKernelCrashCatEng->pGpu, pKernelCrashCatEng, offset, data);
}
void kcrashcatEngineVprintf_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
NvBool bReportStart,
const char *fmt,
va_list args
)
{
//
// The first line logs an Xid - subsequent crash report lines are printed via
// portDbgPrintString() so that they are in dmesg, but don't cause additional Xid "events".
//
if (bReportStart)
{
va_list argsCopy;
//
// Prefix the engine name to the format string.
// nvErrorLog() appends a newline, so we don't add one here.
//
nvDbgSnprintf(pKernelCrashCatEng->fmtBuffer, MAX_ERROR_STRING, "%s %s",
pKernelCrashCatEng->pName, fmt);
va_copy(argsCopy, args);
nvErrorLog(pKernelCrashCatEng->pGpu, pKernelCrashCatEng->errorId,
pKernelCrashCatEng->fmtBuffer, argsCopy);
va_end(argsCopy);
}
// portDbgPrintString/NVLOG_PRINTF don't add a newline, so add one here
const char *newline = "\n";
const NvLength fmtSize = portStringLength(fmt) + 1;
const NvLength newlineSize = 3; // Two chars plus terminating null
const NvLength newFmtSize = fmtSize + newlineSize - 1; // terminating null is shared
portMemCopy(pKernelCrashCatEng->fmtBuffer, MAX_ERROR_STRING, fmt, fmtSize);
portStringCat(pKernelCrashCatEng->fmtBuffer, newFmtSize, newline, newlineSize);
nvDbgVsnprintf(pKernelCrashCatEng->printBuffer, MAX_ERROR_STRING,
pKernelCrashCatEng->fmtBuffer, args);
// The report-starting line was already printed by nvErrorLog above
if (!bReportStart)
portDbgPrintString(pKernelCrashCatEng->printBuffer, MAX_ERROR_STRING);
//
// Also print the formatted string to NvLog - avoid direct NV_PRINTF calls so as not to
// duplicate output in dmesg.
//
NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR, pKernelCrashCatEng->printBuffer);
}
static NV_INLINE
NV_CRASHCAT_MEM_APERTURE _addressSpaceToCrashcatAperture(NV_ADDRESS_SPACE addrSpace)
{
switch (addrSpace)
{
case ADDR_SYSMEM: return NV_CRASHCAT_MEM_APERTURE_SYSGPA;
case ADDR_FBMEM: return NV_CRASHCAT_MEM_APERTURE_FBGPA;
default: NV_ASSERT_OR_RETURN(0, NV_CRASHCAT_MEM_APERTURE_UNKNOWN);
}
}
NV_STATUS kcrashcatEngineRegisterCrashBuffer_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
MEMORY_DESCRIPTOR *pMemDesc
)
{
return crashcatEngineRegisterCrashBuffer(staticCast(pKernelCrashCatEng, CrashCatEngine),
_addressSpaceToCrashcatAperture(memdescGetAddressSpace(pMemDesc)),
memdescGetPhysAddr(pMemDesc, AT_GPU, 0), memdescGetSize(pMemDesc),
pMemDesc);
}
void kcrashcatEngineUnregisterCrashBuffer_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
MEMORY_DESCRIPTOR *pMemDesc
)
{
crashcatEngineUnregisterCrashBuffer(staticCast(pKernelCrashCatEng, CrashCatEngine),
_addressSpaceToCrashcatAperture(memdescGetAddressSpace(pMemDesc)),
memdescGetPhysAddr(pMemDesc, AT_GPU, 0), memdescGetSize(pMemDesc));
}
static NV_INLINE NV_ADDRESS_SPACE _crashcatApertureToAddressSpace(NV_CRASHCAT_MEM_APERTURE aper)
{
switch (aper)
{
case NV_CRASHCAT_MEM_APERTURE_FBGPA: return ADDR_FBMEM;
case NV_CRASHCAT_MEM_APERTURE_SYSGPA: return ADDR_SYSMEM;
default: return ADDR_UNKNOWN;
}
}
static MEMORY_DESCRIPTOR *_kcrashcatEngineCreateBufferMemDesc
(
KernelCrashCatEngine *pKernelCrashCatEng,
CrashCatBufferDescriptor *pBufDesc
)
{
// Convert the buffer descriptor to a set of memdesc parameters
MEMORY_DESCRIPTOR *pMemDesc;
NV_STATUS status;
NV_ADDRESS_SPACE bufAddrSpace = _crashcatApertureToAddressSpace(pBufDesc->aperture);
NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR,
memdescCreate(&pMemDesc, pKernelCrashCatEng->pGpu, pBufDesc->size, 0,
NV_TRUE, bufAddrSpace, NV_MEMORY_CACHED, MEMDESC_FLAGS_NONE),
return NULL;);
memdescDescribe(pMemDesc, bufAddrSpace, pBufDesc->physOffset, pBufDesc->size);
return pMemDesc;
}
void *kcrashcatEngineMapBufferDescriptor_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
CrashCatBufferDescriptor *pBufDesc
)
{
MEMORY_DESCRIPTOR *pMemDesc;
if (pBufDesc->bRegistered)
pMemDesc = pBufDesc->pEngPriv;
else
pMemDesc = _kcrashcatEngineCreateBufferMemDesc(pKernelCrashCatEng, pBufDesc);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc != NULL, NULL);
NvP64 pBuf, pPriv;
NV_STATUS status;
// CrashCat buffers are read-only
NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR,
memdescMap(pMemDesc, 0, memdescGetSize(pMemDesc), NV_TRUE,
NV_PROTECT_READABLE, &pBuf, &pPriv),
{
if (pBufDesc->pEngPriv == NULL)
memdescDestroy(pMemDesc);
return NULL;
});
memdescSetKernelMapping(pMemDesc, pBuf);
memdescSetKernelMappingPriv(pMemDesc, pPriv);
pBufDesc->pEngPriv = pMemDesc;
return NvP64_VALUE(pBuf);
}
void kcrashcatEngineUnmapBufferDescriptor_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
CrashCatBufferDescriptor *pBufDesc
)
{
MEMORY_DESCRIPTOR *pMemDesc = pBufDesc->pEngPriv;
NvP64 pBuf = memdescGetKernelMapping(pMemDesc);
NvP64 pPriv = memdescGetKernelMappingPriv(pMemDesc);
memdescUnmap(pMemDesc, NV_TRUE, 0, pBuf, pPriv);
memdescSetKernelMapping(pMemDesc, NULL);
memdescSetKernelMappingPriv(pMemDesc, NULL);
if (!pBufDesc->bRegistered)
memdescDestroy(pMemDesc);
}
void kcrashcatEngineSyncBufferDescriptor_IMPL
(
KernelCrashCatEngine *pKernelCrashCatEng,
CrashCatBufferDescriptor *pBufDesc,
NvU32 offset,
NvU32 size
)
{
//
// The buffers which support the "sync" operation don't have a memdesc - they are accessed
// through ports, so we copy the data out into a local buffer instead of direct map.
//
NV_ASSERT_CHECKED(NvU64_HI32(pBufDesc->physOffset) == 0);
NV_ASSERT_CHECKED(NvU64_HI32(pBufDesc->size) == 0);
switch (pBufDesc->aperture)
{
case NV_CRASHCAT_MEM_APERTURE_DMEM:
kcrashcatEngineReadDmem_HAL(pKernelCrashCatEng,
NvU64_LO32(pBufDesc->physOffset) + offset,
size,
(void *)((NvUPtr)pBufDesc->pMapping + offset));
return;
case NV_CRASHCAT_MEM_APERTURE_EMEM:
kcrashcatEngineReadEmem_HAL(pKernelCrashCatEng,
NvU64_LO32(pBufDesc->physOffset) + offset,
size,
(void *)((NvUPtr)pBufDesc->pMapping + offset));
return;
default:
NV_ASSERT_CHECKED(0);
}
}

View File

@@ -36,7 +36,6 @@
#include "mem_mgr/ctx_buf_pool.h"
#include "rmapi/rmapi.h"
void kflcnConfigureEngine_IMPL(OBJGPU *pGpu, KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig)
{
pKernelFalcon->registerBase = pFalconConfig->registerBase;
@@ -50,6 +49,10 @@ void kflcnConfigureEngine_IMPL(OBJGPU *pGpu, KernelFalcon *pKernelFalcon, Kernel
pKernelFalcon->ctxBufferSize = pFalconConfig->ctxBufferSize;
pKernelFalcon->addrSpaceList = pFalconConfig->addrSpaceList;
// Configure CrashCat with caller arguments (disabled by default)
kcrashcatEngineConfigure(staticCast(pKernelFalcon, KernelCrashCatEngine),
&pFalconConfig->crashcatEngConfig);
NV_PRINTF(LEVEL_INFO, "for physEngDesc 0x%x\n", pKernelFalcon->physEngDesc);
}

View File

@@ -59,6 +59,7 @@
#include "platform/platform.h"
#include "platform/chipset/chipset.h"
#include "kernel/gpu/host_eng/host_eng.h"
#include "gpu/bif/kernel_bif.h"
#include "gpu/ce/kernel_ce.h"
#include "gpu/conf_compute/conf_compute.h"
#include "gpu/fifo/kernel_fifo.h"

View File

@@ -28,6 +28,8 @@
#include "gpu/gsp/kernel_gsp.h"
#include "gpu/bus/kern_bus.h"
#include "gpu/conf_compute/conf_compute.h"
#include "nverror.h"
#include "rmgspseq.h"
#include "vgpu/rpc.h"
@@ -61,6 +63,22 @@ kgspConfigureFalcon_GA102
falconConfig.bIsPmcDeviceEngine = NV_FALSE;
falconConfig.physEngDesc = ENG_GSP;
// Enable CrashCat monitoring
falconConfig.crashcatEngConfig.bEnable = NV_TRUE;
falconConfig.crashcatEngConfig.pName = MAKE_NV_PRINTF_STR("GSP");
falconConfig.crashcatEngConfig.errorId = GSP_ERROR;
ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
if (pCC != NULL && pCC->getProperty(pCC, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED))
{
// No CrashCat queue when CC is enabled, as it's not encrypted
falconConfig.crashcatEngConfig.allocQueueSize = 0;
}
else
{
falconConfig.crashcatEngConfig.allocQueueSize = RM_PAGE_SIZE;
}
kflcnConfigureEngine(pGpu, staticCast(pKernelGsp, KernelFalcon), &falconConfig);
}

View File

@@ -345,6 +345,16 @@ kgspCalculateFbLayout_GH100
pWprMeta->gspFwHeapVfPartitionCount =
pGpu->bVgpuGspPluginOffloadEnabled ? MAX_PARTITIONS_WITH_GFID : 0;
// CrashCat queue (if allocated in sysmem)
KernelCrashCatEngine *pKernelCrashCatEng = staticCast(pKernelGsp, KernelCrashCatEngine);
MEMORY_DESCRIPTOR *pCrashCatQueueMemDesc = kcrashcatEngineGetQueueMemDesc(pKernelCrashCatEng);
if (pCrashCatQueueMemDesc != NULL)
{
NV_ASSERT_CHECKED(memdescGetAddressSpace(pCrashCatQueueMemDesc) == ADDR_SYSMEM);
pWprMeta->sysmemAddrOfCrashReportQueue = memdescGetPhysAddr(pCrashCatQueueMemDesc, AT_GPU, 0);
pWprMeta->sizeOfCrashReportQueue = (NvU32)memdescGetSize(pCrashCatQueueMemDesc);
}
// Fill in the meta-metadata
pWprMeta->revision = GSP_FW_WPR_META_REVISION;
pWprMeta->magic = GSP_FW_WPR_META_MAGIC;

View File

@@ -37,6 +37,7 @@
#include "os/os.h"
#include "nverror.h"
#include "gsp/gsp_error.h"
#include "crashcat/crashcat_report.h"
#include "published/turing/tu102/dev_gsp.h"
#include "published/turing/tu102/dev_gsp_addendum.h"
@@ -76,6 +77,11 @@ kgspConfigureFalcon_TU102
falconConfig.bIsPmcDeviceEngine = NV_FALSE;
falconConfig.physEngDesc = ENG_GSP;
// Enable CrashCat monitoring
falconConfig.crashcatEngConfig.bEnable = NV_TRUE;
falconConfig.crashcatEngConfig.pName = MAKE_NV_PRINTF_STR("GSP");
falconConfig.crashcatEngConfig.errorId = GSP_ERROR;
kflcnConfigureEngine(pGpu, staticCast(pKernelGsp, KernelFalcon), &falconConfig);
}
@@ -612,6 +618,16 @@ kgspCalculateFbLayout_TU102
pWprMeta->sizeOfSignature = memdescGetSize(pKernelGsp->pSignatureMemdesc);
}
// CrashCat queue (if allocated in sysmem)
KernelCrashCatEngine *pKernelCrashCatEng = staticCast(pKernelGsp, KernelCrashCatEngine);
MEMORY_DESCRIPTOR *pCrashCatQueueMemDesc = kcrashcatEngineGetQueueMemDesc(pKernelCrashCatEng);
if (pCrashCatQueueMemDesc != NULL)
{
NV_ASSERT_CHECKED(memdescGetAddressSpace(pCrashCatQueueMemDesc) == ADDR_SYSMEM);
pWprMeta->sysmemAddrOfCrashReportQueue = memdescGetPhysAddr(pCrashCatQueueMemDesc, AT_GPU, 0);
pWprMeta->sizeOfCrashReportQueue = (NvU32)memdescGetSize(pCrashCatQueueMemDesc);
}
pWprMeta->bootCount = 0;
pWprMeta->verified = 0;
pWprMeta->revision = GSP_FW_WPR_META_REVISION;
@@ -752,42 +768,84 @@ kgspResetHw_TU102
return NV_OK;
}
void
NvBool
kgspHealthCheck_TU102
(
OBJGPU *pGpu,
KernelGsp *pKernelGsp
)
{
NvBool bHealthy = NV_TRUE;
// If enabled, CrashCat is the primary reporting interface for GSP issues
KernelCrashCatEngine *pKernelCrashCatEng = staticCast(pKernelGsp, KernelCrashCatEngine);
if (kcrashcatEngineConfigured(pKernelCrashCatEng))
{
CrashCatEngine *pCrashCatEng = staticCast(pKernelCrashCatEng, CrashCatEngine);
CrashCatReport *pReport;
while ((pReport = crashcatEngineGetNextCrashReport(pCrashCatEng)) != NULL)
{
bHealthy = NV_FALSE;
pKernelGsp->bFatalError = NV_TRUE;
NV_PRINTF(LEVEL_ERROR,
"****************************** GSP-CrashCat Report *******************************\n");
crashcatReportLog(pReport);
NV_PRINTF(LEVEL_ERROR,
"**********************************************************************************\n");
objDelete(pReport);
}
return bHealthy;
}
NvU32 mb0 = GPU_REG_RD32(pGpu, NV_PGSP_MAILBOX(0));
//
// Check for an error message in the GSP mailbox. Any error here is severe
// enough that it should be reported as an Xid. Clear the error so more can
// potentially be reported by GSP, if it was able to recover. In that case,
// it's possible that GSP will skip reporting some more errors that happened
// before the clear, and it will just update the "skipped" count.
// Check for an error message in the GSP mailbox. Any error reported here is
// almost certainly fatal.
//
if (FLD_TEST_DRF(_GSP, _ERROR, _TAG, _VAL, mb0))
{
NvU32 mb1 = GPU_REG_RD32(pGpu, NV_PGSP_MAILBOX(1));
NvU32 skipped = DRF_VAL(_GSP, _ERROR, _SKIPPED, mb0);
bHealthy = NV_FALSE;
pKernelGsp->bFatalError = NV_TRUE;
// Clear the mailbox
GPU_REG_WR32(pGpu, NV_PGSP_MAILBOX(0), 0);
NV_PRINTF(LEVEL_NOTICE,
NV_PRINTF(LEVEL_ERROR,
"********************************* GSP Failure **********************************\n");
nvErrorLog_va((void*)pGpu, GSP_ERROR,
"GSP Error: Task %d raised error code 0x%x for reason 0x%x at 0x%x (%d more errors skipped)",
"GSP Error: Task %d raised error code 0x%x for reason 0x%x at 0x%x. The GPU likely needs to be reset.",
DRF_VAL(_GSP, _ERROR, _TASK, mb0),
DRF_VAL(_GSP, _ERROR, _CODE, mb0),
DRF_VAL(_GSP, _ERROR, _REASON, mb0),
mb1,
DRF_VAL(_GSP, _ERROR, _SKIPPED, mb0));
mb1);
NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR, NV_PRINTF_ADD_PREFIX
("GSP Error: Task %d raised error code 0x%x for reason 0x%x at 0x%x"),
DRF_VAL(_GSP, _ERROR, _TASK, mb0),
DRF_VAL(_GSP, _ERROR, _CODE, mb0),
DRF_VAL(_GSP, _ERROR, _REASON, mb0),
mb1);
NV_PRINTF(LEVEL_NOTICE,
// Check if GSP had more errors to report (unlikely)
if (skipped)
{
NV_PRINTF(LEVEL_ERROR, "%d more errors skipped\n", skipped);
}
NV_PRINTF(LEVEL_ERROR,
"********************************************************************************\n");
}
return bHealthy;
}
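As a rough plain-C illustration of the mailbox decoding above: several error fields are packed into the first mailbox register and the faulting address rides in the second. The field positions and widths below are hypothetical stand-ins; the real layout comes from the NV_GSP_ERROR manual fields consumed via the DRF_* macros.
#include <stdint.h>
#include <stdio.h>
// Hypothetical field layout for illustration only.
#define EX_TASK_SHIFT    0
#define EX_TASK_MASK     0xFFu
#define EX_CODE_SHIFT    8
#define EX_CODE_MASK     0xFFu
#define EX_REASON_SHIFT  16
#define EX_REASON_MASK   0xFFu
int main(void)
{
    uint32_t mb0 = 0x00030201u; // pretend mailbox(0): packed task/code/reason
    uint32_t mb1 = 0xdeadbeefu; // pretend mailbox(1): faulting address
    uint32_t task   = (mb0 >> EX_TASK_SHIFT)   & EX_TASK_MASK;
    uint32_t code   = (mb0 >> EX_CODE_SHIFT)   & EX_CODE_MASK;
    uint32_t reason = (mb0 >> EX_REASON_SHIFT) & EX_REASON_MASK;
    printf("Task %u raised error code 0x%x for reason 0x%x at 0x%x\n",
           task, code, reason, mb1);
    return 0;
}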
/*!
@@ -803,7 +861,6 @@ kgspService_TU102
KernelGsp *pKernelGsp
)
{
NvU32 clearBits = 0;
NvU32 intrStatus;
KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon);
@@ -825,16 +882,17 @@ kgspService_TU102
if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _HALT, _TRUE))
{
clearBits |= DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _HALT, _SET);
//
// The _HALT is triggered by ucode as part of the CrashCat protocol to
// signal the host that some handling is required. Clear the interrupt
// before handling, so that once the GSP code continues, we won't miss
// a second _HALT interrupt for the next step.
//
kflcnRegWrite_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_IRQSCLR,
DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _HALT, _SET));
//
// Currently, GSP-RISCV triggers _HALT interrupt to RM when it finds
// itself running into a bad state. Triggering _HALT interrupt to RM
// provides RM a chance to handle it so we have better debugability
// into GSP-RISCV issues.
//
kgspDumpGspLogs(pKernelGsp, NV_FALSE);
kgspHealthCheck_HAL(pGpu, pKernelGsp);
(void)kgspHealthCheck_HAL(pGpu, pKernelGsp);
}
if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _SWGEN0, _TRUE))
{
@@ -854,9 +912,6 @@ kgspService_TU102
NV_CHECK_OR_RETURN(LEVEL_SILENT, !pKernelGsp->bInLockdown, 0);
}
// Clear any sources that were serviced and get the new status
kflcnRegWrite_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_IRQSCLR, clearBits);
kflcnIntrRetrigger_HAL(pGpu, pKernelFalcon);
intrStatus = kflcnReadIntrStatus_HAL(pGpu, pKernelFalcon);

View File

@@ -60,6 +60,7 @@
#include "kernel/gpu/pmu/kern_pmu.h"
#include "gpu/perf/kern_perf.h"
#include "core/locks.h"
#include "kernel/gpu/intr/intr.h"
#define RPC_STRUCTURES
#define RPC_GENERIC_UNION
@@ -185,6 +186,43 @@ _kgspGetActiveRpcDebugData
}
}
static NV_STATUS
_kgspRpcSanityCheck(OBJGPU *pGpu)
{
if (API_GPU_IN_RESET_SANITY_CHECK(pGpu))
{
NV_PRINTF(LEVEL_INFO,
"GPU in reset, skipping RPC\n");
return NV_ERR_GPU_IN_FULLCHIP_RESET;
}
if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu) ||
pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST))
{
NV_PRINTF(LEVEL_INFO,
"GPU lost, skipping RPC\n");
return NV_ERR_GPU_IS_LOST;
}
if (osIsGpuShutdown(pGpu))
{
NV_PRINTF(LEVEL_INFO,
"GPU shutdown, skipping RPC\n");
return NV_ERR_GPU_IS_LOST;
}
if (!gpuIsGpuFullPowerForPmResume(pGpu))
{
NV_PRINTF(LEVEL_INFO,
"GPU not full power, skipping RPC\n");
return NV_ERR_GPU_NOT_FULL_POWER;
}
if (!gpuCheckSysmemAccess(pGpu))
{
NV_PRINTF(LEVEL_INFO,
"GPU has no sysmem access, skipping RPC\n");
return NV_ERR_INVALID_ACCESS_TYPE;
}
return NV_OK;
}
/*!
* GSP client RM RPC send routine
*/
@@ -200,14 +238,7 @@ _kgspRpcSendMessage
NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance));
NV_ASSERT_OR_RETURN(!osIsGpuShutdown(pGpu), NV_ERR_GPU_IS_LOST);
// Skip queuing RPC if we are in the GPU reset path.
if (API_GPU_IN_RESET_SANITY_CHECK(pGpu))
{
NV_PRINTF(LEVEL_INFO, "Skip queuing RPC in the GPU reset path \n");
return NV_ERR_GPU_IS_LOST;
}
NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, _kgspRpcSanityCheck(pGpu));
nvStatus = GspMsgQueueSendCommand(pRpc->pMessageQueueInfo, pGpu);
if (nvStatus != NV_OK)
@@ -374,6 +405,18 @@ _kgspRpcRCTriggered
// check if there's a PCI-E error pending either in device status or in AER
krcCheckBusError_HAL(pGpu, pKernelRc);
//
// If we have received a special msg from GSP, ack back immediately that we
// are done writing notifiers, since we would have already processed the
// other RC msgs that trigger notifier writes before this one.
//
if (rpc_params->exceptType == ROBUST_CHANNEL_FAST_PATH_ERROR)
{
NV_RM_RPC_ECC_NOTIFIER_WRITE_ACK(pGpu, status);
NV_ASSERT_OK(status);
return status;
}
status = kfifoGetChidMgrFromType(pGpu, pKernelFifo,
ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
(NvU32)rmEngineType,
@@ -388,6 +431,16 @@ _kgspRpcRCTriggered
pKernelChannel != NULL,
NV_ERR_INVALID_CHANNEL);
// With CC enabled, CPU-RM needs to write error notifiers
if (gpuIsCCFeatureEnabled(pGpu))
{
NV_ASSERT_OK_OR_RETURN(krcErrorSetNotifier(pGpu, pKernelRc,
pKernelChannel,
rpc_params->exceptType,
rmEngineType,
rpc_params->scope));
}
return krcErrorSendEventNotifications_HAL(pGpu, pKernelRc,
pKernelChannel,
rmEngineType, // unused on kernel side
@@ -1246,7 +1299,9 @@ _kgspRpcDrainEvents
kgspDumpGspLogs(pKernelGsp, NV_FALSE);
}
kgspHealthCheck_HAL(pGpu, pKernelGsp);
// If GSP-RM has died, the GPU will need to be reset
if (!kgspHealthCheck_HAL(pGpu, pKernelGsp))
return NV_ERR_RESET_REQUIRED;
if (nvStatus == NV_WARN_NOTHING_TO_DO)
nvStatus = NV_OK;
@@ -1282,6 +1337,12 @@ _kgspLogXid119
_getRpcName(expectedFunc),
pRpc->rpcHistory[historyEntry].data[0],
pRpc->rpcHistory[historyEntry].data[1]);
NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR, NV_PRINTF_ADD_PREFIX
("Timeout waiting for RPC from GSP%d! Expected function %d (0x%x 0x%x)"),
gpuGetInstance(pGpu),
expectedFunc,
pRpc->rpcHistory[historyEntry].data[0],
pRpc->rpcHistory[historyEntry].data[1]);
if (pRpc->timeoutCount == 1)
{
@@ -1333,8 +1394,17 @@ _kgspRpcIncrementTimeoutCountAndRateLimitPrints
OBJRPC *pRpc
)
{
KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
pRpc->timeoutCount++;
if (pKernelGsp->bFatalError)
{
// in case of a fatal GSP error, don't bother printing RPC errors at all
pRpc->bQuietPrints = NV_TRUE;
return;
}
if ((pRpc->timeoutCount == (RPC_TIMEOUT_LIMIT_PRINT_RATE_THRESH + 1)) &&
(RPC_TIMEOUT_LIMIT_PRINT_RATE_SKIP > 0))
{
@@ -1369,7 +1439,8 @@ _kgspRpcRecvPoll
)
{
KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
NV_STATUS nvStatus;
NV_STATUS rpcStatus = NV_OK;
NV_STATUS timeoutStatus = NV_OK;
RMTIMEOUT timeout;
NvU32 timeoutUs;
NvU32 timeoutFlags;
@@ -1431,11 +1502,17 @@ _kgspRpcRecvPoll
for (;;)
{
nvStatus = _kgspRpcDrainEvents(pGpu, pKernelGsp, expectedFunc);
//
// Check for GPU timeout, save that information, and then verify if the RPC is completed.
// Otherwise if the CPU thread goes to sleep immediately after the RPC check, it may result in hitting a timeout.
//
timeoutStatus = gpuCheckTimeout(pGpu, &timeout);
switch (nvStatus) {
rpcStatus = _kgspRpcDrainEvents(pGpu, pKernelGsp, expectedFunc);
switch (rpcStatus) {
case NV_WARN_MORE_PROCESSING_REQUIRED:
nvStatus = NV_OK;
rpcStatus = NV_OK;
goto done;
case NV_OK:
// Check timeout and continue outer loop.
@@ -1444,11 +1521,12 @@ _kgspRpcRecvPoll
goto done;
}
osSpinLoop();
NV_CHECK_OK_OR_GOTO(rpcStatus, LEVEL_SILENT, _kgspRpcSanityCheck(pGpu), done);
nvStatus = gpuCheckTimeout(pGpu, &timeout);
if (nvStatus == NV_ERR_TIMEOUT)
if (timeoutStatus == NV_ERR_TIMEOUT)
{
rpcStatus = timeoutStatus;
_kgspRpcIncrementTimeoutCountAndRateLimitPrints(pGpu, pRpc);
if (!pRpc->bQuietPrints)
@@ -1458,12 +1536,15 @@ _kgspRpcRecvPoll
goto done;
}
if (osIsGpuShutdown(pGpu))
else if (timeoutStatus != NV_OK)
{
nvStatus = NV_ERR_GPU_IS_LOST;
NV_PRINTF(LEVEL_ERROR, "gpuCheckTimeout() returned unexpected error (0x%08x)\n",
timeoutStatus);
rpcStatus = timeoutStatus;
goto done;
}
osSpinLoop();
}
pRpc->timeoutCount = 0;
@@ -1477,7 +1558,7 @@ done:
threadStateResetTimeout(pGpu);
}
return nvStatus;
return rpcStatus;
}
/*!
@@ -2410,7 +2491,13 @@ kgspInitRm_IMPL
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "cannot bootstrap riscv/gsp: 0x%x\n", status);
kgspHealthCheck_HAL(pGpu, pKernelGsp);
//
// Ignore the return value - a crash report may have already been consumed;
// this is just here as a last attempt to report boot issues that might have
// escaped prior checks.
//
(void)kgspHealthCheck_HAL(pGpu, pKernelGsp);
goto done;
}

View File

@@ -405,6 +405,11 @@ NV_STATUS GspStatusQueueInit(OBJGPU *pGpu, MESSAGE_QUEUE_INFO **ppMQI)
break;
kgspDumpGspLogs(pKernelGsp, NV_FALSE);
if (!kgspHealthCheck_HAL(pGpu, pKernelGsp))
{
nvStatus = NV_ERR_RESET_REQUIRED;
break;
}
}
if (nRet < 0)

View File

@@ -166,6 +166,16 @@ subdeviceCtrlCmdMcServiceInterrupts_IMPL
RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
NV_STATUS status = NV_OK;
//
// Force kernel-RM to service interrupts from GSP-RM. This will allow
// kernel-RM to write notifiers and send an ack back to GSP.
// GSP waits for this ack before clearing fast path POSSIBLE_ERR interrupt.
//
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_FASTPATH_SEQ_ENABLED))
{
intrServiceStallSingle_HAL(pGpu, pIntr, MC_ENGINE_IDX_GSP, NV_TRUE);
}
NV_RM_RPC_CONTROL(pGpu, pRmCtrlParams->hClient, pRmCtrlParams->hObject, pRmCtrlParams->cmd,
pRmCtrlParams->pParams, pRmCtrlParams->paramsSize, status);
if (status != NV_OK)
@@ -1424,14 +1434,14 @@ _intrServiceStallExactList
if (bRequiresPossibleErrorNotifier)
{
//
// Notify CUDA there may be an error in ERR_CONT that they may miss because we're
// about to clear it out of the NV_CTRL tree backing ERR_CONT before the interrupt
// is serviced.
//
// info32 contains shadowed value of ERR_CONT
//
gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POSSIBLE_ERROR, NULL, 0, intrReadErrCont_HAL(pGpu, pIntr), 0);
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_FASTPATH_SEQ_ENABLED))
{
}
else
{
// info32 contains shadowed value of ERR_CONT
gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POSSIBLE_ERROR, NULL, 0, intrReadErrCont_HAL(pGpu, pIntr), 0);
}
}
for (iter = vectIterAll(pIntrTable); vectIterNext(&iter);)
@@ -1486,14 +1496,10 @@ _intrServiceStallExactList
if (bRequiresPossibleErrorNotifier)
{
//
// Notify CUDA there may be an error in ERR_CONT that they may miss because we're
// about to clear it out of the NV_CTRL tree backing ERR_CONT before the interrupt
// is serviced.
//
// info32 contains shadowed value of ERR_CONT
//
gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POSSIBLE_ERROR, NULL, 0, intrReadErrCont_HAL(pGpu, pIntr), 0);
{
// info32 contains shadowed value of ERR_CONT
gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POSSIBLE_ERROR, NULL, 0, intrReadErrCont_HAL(pGpu, pIntr), 0);
}
}
if (bIntrStuck)

View File

@@ -2542,6 +2542,29 @@ kmigmgrSetGPUInstanceInfo_IMPL
pKernelMIGGpuInstance->pMemoryPartitionHeap = pMemoryPartitionHeap;
pKernelMIGGpuInstance->partitionFlag = partitionFlag;
//
// Offloading of VGPU to GSP requires that the memRange in KERNEL_MIG_GPU_INSTANCE
// be populated, as the plugin will query only within GSP for GPU INSTANCE information.
// CPU-RM is the entity which actually calculates and allocates memory, so with
// VGPU offloaded, GSP-RM must be updated with the memRange info.
//
if (IS_GSP_CLIENT(pGpu) && !IS_VIRTUAL(pGpu) && IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu))
{
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS memParams;
memParams.swizzId = pKernelMIGGpuInstance->swizzId;
memParams.memAddrRange.lo = pKernelMIGGpuInstance->memRange.lo;
memParams.memAddrRange.hi = pKernelMIGGpuInstance->memRange.hi;
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE,
&memParams,
sizeof(memParams)));
}
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
kmigmgrGetProfileByPartitionFlag(pGpu, pKernelMIGManager, partitionFlag, &pKernelMIGGpuInstance->pProfile));

View File

@@ -886,7 +886,6 @@ spdmDeviceInit_GH100
// Ucode is responsible for setting DMA index.
pCcInitCtx->guestId = pSpdm->guestId;
pCcInitCtx->rmBufferSizeInByte = NV_RM_BUFFER_SIZE_IN_BYTE;
NvU64_ALIGN32_PACK(&pCcInitCtx->dmaAddr, &pIOContext->dmaAddr);
gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);

View File

@@ -131,14 +131,37 @@ _vidmemPmaAllocate
NV_STATUS status;
NvU64 sizeAlign = 0;
PMA_ALLOCATION_OPTIONS allocOptions = {0};
NvBool bContig = !FLD_TEST_DRF(OS32, _ATTR,
_PHYSICALITY, _NONCONTIGUOUS,
pAllocData->attr);
NvBool bContig;
NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
NvBool bCompressed = !FLD_TEST_DRF(OS32, _ATTR, _COMPR,
_NONE, pAllocData->attr);
KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
NvU32 gfid;
NvU32 pmaConfig = PMA_QUERY_NUMA_ENABLED;
status = pmaQueryConfigs(pPma, &pmaConfig);
NV_ASSERT(status == NV_OK);
//
// On NUMA platforms, contiguous memory is allocated from the kernel using
// page order, which can waste memory when the size is not naturally aligned
// to a page order (see the sketch after this block). Prefer non-contig when
// clients are okay with NON_CONTIG.
//
if ((status == NV_OK) && (pmaConfig & PMA_QUERY_NUMA_ENABLED))
{
bContig =
!FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY,
_ALLOW_NONCONTIGUOUS, pAllocData->attr) &&
!FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY,
_NONCONTIGUOUS, pAllocData->attr);
}
else
{
bContig = !FLD_TEST_DRF(OS32, _ATTR,
_PHYSICALITY, _NONCONTIGUOUS,
pAllocData->attr);
}
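A quick worked example of the waste the comment above guards against, assuming a hypothetical allocator that rounds contiguous requests up to a power-of-two page order; the numbers are illustrative, not the actual PMA behavior.
#include <stdint.h>
#include <stdio.h>
// Round a request up to the next power-of-two multiple of the base page size,
// the way an order-based buddy allocator satisfies contiguous allocations.
static uint64_t round_to_page_order(uint64_t size, uint64_t pageSize)
{
    uint64_t alloc = pageSize;
    while (alloc < size)
        alloc <<= 1; // next higher order
    return alloc;
}
int main(void)
{
    const uint64_t pageSize = 64 * 1024;   // 64KB base page
    const uint64_t request  = 5ull << 20;  // 5MB contiguous request
    uint64_t contig = round_to_page_order(request, pageSize);
    printf("contig alloc: %llu MB, wasted: %llu MB\n",
           (unsigned long long)(contig >> 20),
           (unsigned long long)((contig - request) >> 20));
    // A 5MB request rounds up to 8MB when forced contiguous, wasting 3MB;
    // a non-contiguous allocation can be satisfied with exactly 5MB of pages.
    return 0;
}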
// LOCK: acquire device lock
status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE,

View File

@@ -0,0 +1,362 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_engine.h"
#include "crashcat/crashcat_queue.h"
#include "crashcat/crashcat_wayfinder.h"
#include "utils/nvassert.h"
#include "nv-crashcat-decoder.h"
static NV_INLINE NvU64 _crashcatEngineComputeDescriptorKey(NV_CRASHCAT_MEM_APERTURE, NvU64);
static CrashCatBufferDescriptor *_crashcatEngineCreateBufferDescriptor(CrashCatEngine *,
NV_CRASHCAT_MEM_APERTURE,
NvU64, NvU64, void *);
static NV_INLINE void _crashcatEngineDestroyBufferDescriptor(CrashCatEngine *,
CrashCatBufferDescriptor *);
static void *_crashcatEngineMapBufferDescriptor(CrashCatEngine *, CrashCatBufferDescriptor *);
static void _crashcatEngineUnmapBufferDescriptor(CrashCatEngine *, CrashCatBufferDescriptor *);
NV_STATUS crashcatEngineLoadWayfinder_IMPL(CrashCatEngine *pCrashCatEng)
{
NvU32 wfl0Offset = crashcatEngineGetWFL0Offset(pCrashCatEng);
NvU32 wfl0 = crashcatEnginePriRead(pCrashCatEng, wfl0Offset);
// Has the L0 wayfinder been populated yet?
if (!crashcatWayfinderL0Valid(wfl0))
return NV_WARN_NOTHING_TO_DO;
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
objCreate(&pCrashCatEng->pWayfinder, pCrashCatEng, CrashCatWayfinder,
crashcatWayfinderL0Version(wfl0), // halspec args
wfl0)); // constructor args
return NV_OK;
}
CrashCatReport *crashcatEngineGetNextCrashReport_IMPL(CrashCatEngine *pCrashCatEng)
{
// Don't attempt to probe for reports if CrashCat is not configured
if (!crashcatEngineConfigured(pCrashCatEng))
return NULL;
// No reports if there's no wayfinder yet
if ((pCrashCatEng->pWayfinder == NULL) &&
(crashcatEngineLoadWayfinder(pCrashCatEng) != NV_OK))
return NULL;
CrashCatQueue *pQueue = crashcatWayfinderGetReportQueue_HAL(pCrashCatEng->pWayfinder);
if (pQueue != NULL)
return crashcatQueueConsumeNextReport_HAL(pQueue);
return NULL;
}
NV_STATUS crashcatEngineConstruct_IMPL
(
CrashCatEngine *pCrashCatEng
)
{
mapInitIntrusive(&pCrashCatEng->registeredCrashBuffers);
mapInitIntrusive(&pCrashCatEng->mappedCrashBuffers);
return NV_OK;
}
void crashcatEngineDestruct_IMPL
(
CrashCatEngine *pCrashCatEng
)
{
crashcatEngineUnload(pCrashCatEng);
// All buffers should be unmapped and unregistered before the destructor is called
NV_ASSERT(mapCount(&pCrashCatEng->mappedCrashBuffers) == 0);
NV_ASSERT(mapCount(&pCrashCatEng->registeredCrashBuffers) == 0);
mapDestroy(&pCrashCatEng->mappedCrashBuffers);
mapDestroy(&pCrashCatEng->registeredCrashBuffers);
}
void crashcatEngineUnload_IMPL
(
CrashCatEngine *pCrashCatEng
)
{
objDelete(pCrashCatEng->pWayfinder);
}
// Non-NVOC wrapper to handle variadic arguments
void crashcatEnginePrintf(CrashCatEngine *pCrashCatEng, NvBool bReportStart, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
// Dispatches virtual function with va_list
crashcatEngineVprintf(pCrashCatEng, bReportStart, fmt, args);
va_end(args);
}
static NV_INLINE NvU64 _crashcatEngineComputeDescriptorKey
(
NV_CRASHCAT_MEM_APERTURE aperture,
NvU64 offset
)
{
// Offset should be at least 8-byte aligned so that the aperture bits can be stuffed
NV_ASSERT_CHECKED((offset & (sizeof(NvU64) - 1)) == 0);
return (offset | (NvU64)aperture);
}
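A small standalone sketch of the key-packing trick noted above: because buffer offsets are at least 8-byte aligned, the low three bits of the offset are free to carry the aperture code, so each (aperture, offset) pair maps to a distinct 64-bit key. The enum values below are illustrative, not the real NV_CRASHCAT_MEM_APERTURE encoding.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
// Illustrative aperture codes; the real NV_CRASHCAT_MEM_APERTURE values differ.
enum aperture { AP_SYSGPA = 1, AP_FBGPA = 2, AP_DMEM = 3, AP_EMEM = 4 };
static uint64_t make_key(enum aperture ap, uint64_t offset)
{
    assert((offset & 7u) == 0);    // offset must be 8-byte aligned
    return offset | (uint64_t)ap;  // stuff the aperture into the free low bits
}
int main(void)
{
    // The same offset in different apertures yields distinct keys.
    printf("0x%llx\n", (unsigned long long)make_key(AP_SYSGPA, 0x1000));
    printf("0x%llx\n", (unsigned long long)make_key(AP_FBGPA,  0x1000));
    return 0;
}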
static CrashCatBufferDescriptor *_crashcatEngineCreateBufferDescriptor
(
CrashCatEngine *pCrashCatEng,
NV_CRASHCAT_MEM_APERTURE aperture,
NvU64 offset,
NvU64 size,
void *pEngPriv
)
{
CrashCatBufferDescriptor *pBufDesc = portMemAllocNonPaged(sizeof(*pBufDesc));
if (pBufDesc == NULL)
return NULL;
portMemSet(pBufDesc, 0, sizeof(*pBufDesc));
pBufDesc->aperture = aperture;
pBufDesc->physOffset = offset;
pBufDesc->size = size;
pBufDesc->pEngPriv = pEngPriv;
return pBufDesc;
}
static NV_INLINE void _crashcatEngineDestroyBufferDescriptor
(
CrashCatEngine *pCrashCatEng,
CrashCatBufferDescriptor *pBufDesc
)
{
portMemFree(pBufDesc);
}
static void *_crashcatEngineMapBufferDescriptor
(
CrashCatEngine *pCrashCatEng,
CrashCatBufferDescriptor *pBufDesc
)
{
void *ptr = NULL;
switch (pBufDesc->aperture)
{
case NV_CRASHCAT_MEM_APERTURE_SYSGPA:
case NV_CRASHCAT_MEM_APERTURE_FBGPA:
{
ptr = crashcatEngineMapBufferDescriptor(pCrashCatEng, pBufDesc);
NV_CHECK_OR_RETURN(LEVEL_ERROR, ptr != NULL, NULL);
break;
}
case NV_CRASHCAT_MEM_APERTURE_DMEM:
case NV_CRASHCAT_MEM_APERTURE_EMEM:
{
ptr = portMemAllocNonPaged(pBufDesc->size);
NV_CHECK_OR_RETURN(LEVEL_ERROR, ptr != NULL, NULL);
break;
}
default:
NV_PRINTF(LEVEL_WARNING,
"Unknown CrashCat aperture ID 0x%02x (offset = 0x%" NvU64_fmtx
", size = 0x%" NvU64_fmtx ")\n",
pBufDesc->aperture, pBufDesc->physOffset, pBufDesc->size);
break;
}
return ptr;
}
static void _crashcatEngineUnmapBufferDescriptor
(
CrashCatEngine *pCrashCatEng,
CrashCatBufferDescriptor *pBufDesc
)
{
switch (pBufDesc->aperture)
{
case NV_CRASHCAT_MEM_APERTURE_SYSGPA:
case NV_CRASHCAT_MEM_APERTURE_FBGPA:
crashcatEngineUnmapBufferDescriptor(pCrashCatEng, pBufDesc);
break;
case NV_CRASHCAT_MEM_APERTURE_DMEM:
case NV_CRASHCAT_MEM_APERTURE_EMEM:
portMemFree(pBufDesc->pMapping);
break;
default:
NV_PRINTF(LEVEL_WARNING,
"Unknown CrashCat aperture ID 0x%02x (offset = 0x%" NvU64_fmtx
", size = 0x%" NvU64_fmtx ")\n",
pBufDesc->aperture, pBufDesc->physOffset, pBufDesc->size);
break;
}
}
NV_STATUS crashcatEngineRegisterCrashBuffer_IMPL
(
CrashCatEngine *pCrashCatEng,
NV_CRASHCAT_MEM_APERTURE aperture,
NvU64 offset,
NvU64 size,
void *pEngPriv
)
{
NV_CHECK_OR_RETURN(LEVEL_INFO, (aperture == NV_CRASHCAT_MEM_APERTURE_SYSGPA) ||
(aperture == NV_CRASHCAT_MEM_APERTURE_FBGPA),
NV_ERR_INVALID_ARGUMENT);
NV_CHECK_OR_RETURN(LEVEL_INFO, size > 0, NV_ERR_INVALID_ARGUMENT);
NV_CHECK_OR_RETURN(LEVEL_INFO, pEngPriv != NULL, NV_ERR_INVALID_ARGUMENT);
// Create a crashcat buffer descriptor and register in the registeredCrashBuffers
CrashCatBufferDescriptor *pBufDesc = _crashcatEngineCreateBufferDescriptor(pCrashCatEng,
aperture,
offset, size,
pEngPriv);
if (pBufDesc == NULL)
return NV_ERR_NO_MEMORY;
pBufDesc->bRegistered = NV_TRUE;
NvU64 key = _crashcatEngineComputeDescriptorKey(aperture, offset);
if (!mapInsertExisting(&pCrashCatEng->registeredCrashBuffers, key, pBufDesc))
{
_crashcatEngineDestroyBufferDescriptor(pCrashCatEng, pBufDesc);
return NV_ERR_INSERT_DUPLICATE_NAME;
}
return NV_OK;
}
void crashcatEngineUnregisterCrashBuffer_IMPL
(
CrashCatEngine *pCrashCatEng,
NV_CRASHCAT_MEM_APERTURE aperture,
NvU64 offset,
NvU64 size
)
{
NvU64 key = _crashcatEngineComputeDescriptorKey(aperture, offset);
CrashCatBufferDescriptor *pBufDesc = mapFind(&pCrashCatEng->registeredCrashBuffers, key);
if (pBufDesc == NULL)
return;
NV_ASSERT_CHECKED(pBufDesc->size == size);
//
// CrashCat should be unloaded from the engine before unregistering the crash buffer.
// Unload will unmap all buffers.
//
NV_ASSERT_CHECKED(pBufDesc->pMapping == NULL);
mapRemove(&pCrashCatEng->registeredCrashBuffers, pBufDesc);
_crashcatEngineDestroyBufferDescriptor(pCrashCatEng, pBufDesc);
}
void *crashcatEngineMapCrashBuffer_IMPL
(
CrashCatEngine *pCrashCatEng,
NV_CRASHCAT_MEM_APERTURE aperture,
NvU64 offset,
NvU64 size
)
{
NvU64 key = _crashcatEngineComputeDescriptorKey(aperture, offset);
CrashCatBufferDescriptor *pBufDesc = mapFind(&pCrashCatEng->registeredCrashBuffers, key);
// Sysmem buffers always need to be pre-registered
if ((aperture != NV_CRASHCAT_MEM_APERTURE_SYSGPA) && (pBufDesc == NULL))
pBufDesc = _crashcatEngineCreateBufferDescriptor(pCrashCatEng, aperture,
offset, size, NULL);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pBufDesc != NULL, NULL);
NV_ASSERT_CHECKED(pBufDesc->size == size);
pBufDesc->pMapping = _crashcatEngineMapBufferDescriptor(pCrashCatEng, pBufDesc);
if ((pBufDesc->pMapping == NULL) ||
!mapInsertExisting(&pCrashCatEng->mappedCrashBuffers, (NvU64)pBufDesc->pMapping, pBufDesc))
{
if (pBufDesc->pMapping != NULL)
{
_crashcatEngineUnmapBufferDescriptor(pCrashCatEng, pBufDesc);
pBufDesc->pMapping = NULL;
}
//
// If this wasn't a registered buffer descriptor, it was created above, so destroy it
// before returning.
//
if (!pBufDesc->bRegistered)
_crashcatEngineDestroyBufferDescriptor(pCrashCatEng, pBufDesc);
// Nothing was mapped (or the map insert failed), so there is nothing to return
return NULL;
}
return pBufDesc->pMapping;
}
void crashcatEngineUnmapCrashBuffer_IMPL
(
CrashCatEngine *pCrashCatEng,
void *ptr
)
{
CrashCatBufferDescriptor *pBufDesc = mapFind(&pCrashCatEng->mappedCrashBuffers, (NvU64)ptr);
if (pBufDesc == NULL)
return;
NV_ASSERT_CHECKED(ptr == pBufDesc->pMapping);
mapRemove(&pCrashCatEng->mappedCrashBuffers, pBufDesc);
_crashcatEngineUnmapBufferDescriptor(pCrashCatEng, pBufDesc);
pBufDesc->pMapping = NULL;
// If this was not a registered buffer, destroy the buffer descriptor now
if (!pBufDesc->bRegistered)
_crashcatEngineDestroyBufferDescriptor(pCrashCatEng, pBufDesc);
}
void crashcatEngineSyncCrashBuffer_IMPL
(
CrashCatEngine *pCrashCatEng,
void *ptr,
NvU32 offset,
NvU32 size
)
{
CrashCatBufferDescriptor *pBufDesc = mapFind(&pCrashCatEng->mappedCrashBuffers, (NvU64)ptr);
NV_ASSERT_OR_RETURN_VOID(pBufDesc != NULL);
// Direct-map buffers don't require any sync
if ((pBufDesc->aperture == NV_CRASHCAT_MEM_APERTURE_SYSGPA) ||
(pBufDesc->aperture == NV_CRASHCAT_MEM_APERTURE_FBGPA))
return;
crashcatEngineSyncBufferDescriptor(pCrashCatEng, pBufDesc, offset, size);
}

View File

@@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_QUEUE_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_queue.h"
#include "crashcat/crashcat_engine.h"
NV_STATUS crashcatQueueConstruct_IMPL(CrashCatQueue *pQueue, CrashCatQueueConfig *pConfig)
{
pQueue->pEngine = CRASHCAT_GET_ENGINE(pQueue);
pQueue->config = *pConfig;
pQueue->pMapping = crashcatEngineMapCrashBuffer(pQueue->pEngine, pQueue->config.aperture,
pQueue->config.offset, pQueue->config.size);
NV_CHECK_OR_RETURN(LEVEL_WARNING, pQueue->pMapping != NULL, NV_ERR_INVALID_OBJECT_BUFFER);
return NV_OK;
}
void crashcatQueueDestruct_IMPL(CrashCatQueue *pQueue)
{
crashcatEngineUnmapCrashBuffer(pQueue->pEngine, pQueue->pMapping);
}

View File

@@ -0,0 +1,164 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_report.h"
#include "crashcat/crashcat_engine.h"
#include "utils/nvassert.h"
NV_STATUS crashcatReportConstruct_IMPL
(
CrashCatReport *pReport,
void **ppReportBytes,
NvLength numBytes
)
{
// Cache the CrashCatEngine pointer for quick access during logging
pReport->pEngine = CRASHCAT_GET_ENGINE(pReport);
// Extract the report bytes into our structured report record
void *pBufferStart = *ppReportBytes;
*ppReportBytes = crashcatReportExtract_HAL(pReport, pBufferStart, numBytes);
if (pBufferStart == *ppReportBytes)
{
NV_PRINTF(LEVEL_ERROR, "no report data extracted from %" NvUPtr_fmtu " bytes\n", numBytes);
return NV_ERR_INVALID_DATA;
}
return NV_OK;
}
void crashcatReportLog_IMPL(CrashCatReport *pReport)
{
NV_CRASHCAT_PACKET_TYPE packetType;
NV_ASSERT_OR_RETURN_VOID((pReport->validTags & NVBIT(NV_CRASHCAT_PACKET_TYPE_REPORT)) != 0);
// TODO: acquire mutex to prevent multi-line reports interleaving
crashcatReportLogSource_HAL(pReport);
crashcatReportLogReporter_HAL(pReport);
// Log additional data associated with the report
FOR_EACH_INDEX_IN_MASK(32, packetType,
pReport->validTags & ~NVBIT(NV_CRASHCAT_PACKET_TYPE_REPORT))
{
switch (packetType)
{
case NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE:
crashcatReportLogRiscv64CsrState_HAL(pReport);
break;
case NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE:
crashcatReportLogRiscv64GprState_HAL(pReport);
break;
case NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE:
crashcatReportLogRiscv64Trace_HAL(pReport);
break;
case NV_CRASHCAT_PACKET_TYPE_IO32_STATE:
crashcatReportLogIo32State_HAL(pReport);
break;
default:
NV_PRINTF(LEVEL_WARNING, "unrecognized packet type 0x%02x\n", packetType);
break;
}
}
FOR_EACH_INDEX_IN_MASK_END;
crashcatEnginePrintf(pReport->pEngine, NV_FALSE,
CRASHCAT_LOG_PREFIX "------------[ end crash report ]------------");
}
// xcause CSR format and codes are a backward-compatible part of the RISC-V standard
#define RISCV_CSR_XCAUSE_EXCODE 4:0
#define RISCV_CSR_XCAUSE_EXCODE_IAMA 0x00000000
#define RISCV_CSR_XCAUSE_EXCODE_IACC_FAULT 0x00000001
#define RISCV_CSR_XCAUSE_EXCODE_ILL 0x00000002
#define RISCV_CSR_XCAUSE_EXCODE_BKPT 0x00000003
#define RISCV_CSR_XCAUSE_EXCODE_LAMA 0x00000004
#define RISCV_CSR_XCAUSE_EXCODE_LACC_FAULT 0x00000005
#define RISCV_CSR_XCAUSE_EXCODE_SAMA 0x00000006
#define RISCV_CSR_XCAUSE_EXCODE_SACC_FAULT 0x00000007
#define RISCV_CSR_XCAUSE_EXCODE_UCALL 0x00000008
#define RISCV_CSR_XCAUSE_EXCODE_SCALL 0x00000009
#define RISCV_CSR_XCAUSE_EXCODE_MCALL 0x0000000b
#define RISCV_CSR_XCAUSE_EXCODE_IPAGE_FAULT 0x0000000c
#define RISCV_CSR_XCAUSE_EXCODE_LPAGE_FAULT 0x0000000d
#define RISCV_CSR_XCAUSE_EXCODE_SPAGE_FAULT 0x0000000f
#define RISCV_CSR_XCAUSE_EXCODE_U_SWINT 0x00000000
#define RISCV_CSR_XCAUSE_EXCODE_S_SWINT 0x00000001
#define RISCV_CSR_XCAUSE_EXCODE_M_SWINT 0x00000003
#define RISCV_CSR_XCAUSE_EXCODE_U_TINT 0x00000004
#define RISCV_CSR_XCAUSE_EXCODE_S_TINT 0x00000005
#define RISCV_CSR_XCAUSE_EXCODE_M_TINT 0x00000007
#define RISCV_CSR_XCAUSE_EXCODE_U_EINT 0x00000008
#define RISCV_CSR_XCAUSE_EXCODE_S_EINT 0x00000009
#define RISCV_CSR_XCAUSE_EXCODE_M_EINT 0x0000000b
#define RISCV_CSR_XCAUSE_INT 63:63
#define RISCV_CSR_XCAUSE_EXCODE_CASE(code, str) \
case RISCV_CSR_XCAUSE_EXCODE_ ## code: return MAKE_NV_PRINTF_STR(str)
const char *crashcatReportRiscvCauseToString(NvU64 xcause)
{
NvBool bIntr = (NvBool)REF_VAL64(RISCV_CSR_XCAUSE_INT, xcause);
NvU8 excode = (NvU8)REF_VAL64(RISCV_CSR_XCAUSE_EXCODE, xcause);
if (bIntr)
{
switch (excode)
{
RISCV_CSR_XCAUSE_EXCODE_CASE(U_SWINT, "user software interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(S_SWINT, "supervisor software interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(M_SWINT, "machine software interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(U_TINT, "user timer interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(S_TINT, "supervisor timer interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(M_TINT, "machine timer interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(U_EINT, "user external interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(S_EINT, "supervisor external interrupt");
RISCV_CSR_XCAUSE_EXCODE_CASE(M_EINT, "machine external interrupt");
default: return MAKE_NV_PRINTF_STR("unknown interrupt");
}
}
else
{
switch (excode)
{
RISCV_CSR_XCAUSE_EXCODE_CASE(IAMA, "instruction address misaligned");
RISCV_CSR_XCAUSE_EXCODE_CASE(IACC_FAULT, "instruction access fault");
RISCV_CSR_XCAUSE_EXCODE_CASE(ILL, "illegal instruction");
RISCV_CSR_XCAUSE_EXCODE_CASE(BKPT, "breakpoint");
RISCV_CSR_XCAUSE_EXCODE_CASE(LAMA, "load address misaligned");
RISCV_CSR_XCAUSE_EXCODE_CASE(LACC_FAULT, "load access fault");
RISCV_CSR_XCAUSE_EXCODE_CASE(SAMA, "store address misaligned");
RISCV_CSR_XCAUSE_EXCODE_CASE(SACC_FAULT, "store access fault");
RISCV_CSR_XCAUSE_EXCODE_CASE(UCALL, "environment call from U-mode");
RISCV_CSR_XCAUSE_EXCODE_CASE(SCALL, "environment call from S-mode");
RISCV_CSR_XCAUSE_EXCODE_CASE(MCALL, "environment call from M-mode");
RISCV_CSR_XCAUSE_EXCODE_CASE(IPAGE_FAULT, "instruction access page fault");
RISCV_CSR_XCAUSE_EXCODE_CASE(LPAGE_FAULT, "load access page fault");
RISCV_CSR_XCAUSE_EXCODE_CASE(SPAGE_FAULT, "store access page fault");
default: return MAKE_NV_PRINTF_STR("unknown exception");
}
}
}
#undef RISCV_CSR_XCAUSE_EXCODE_CASE

View File

@@ -0,0 +1,39 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_wayfinder.h"
#include "crashcat/crashcat_queue.h"
#include "crashcat/crashcat_report.h"
NV_STATUS crashcatWayfinderConstruct_IMPL(CrashCatWayfinder *pWayfinder, NvU32 wfl0)
{
crashcatWayfinderSetWFL0_HAL(pWayfinder, wfl0);
return NV_OK;
}
void crashcatWayfinderDestruct_IMPL(CrashCatWayfinder *pWayfinder)
{
objDelete(pWayfinder->pQueue);
}

View File

@@ -0,0 +1,146 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_QUEUE_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_queue.h"
#include "crashcat/crashcat_engine.h"
#include "crashcat/crashcat_report.h"
#include "utils/nvassert.h"
#include "nv-crashcat-decoder.h"
static NV_STATUS _getCrashCatReportHalspecArgs
(
void *pBuf,
NvU8 *pFormatVersion,
NvU64 *pImplementerSig
)
{
NvCrashCatPacketHeader_V1 header = *(NvCrashCatPacketHeader_V1 *)pBuf;
// Verify the report data looks sane
NV_CHECK_OR_RETURN(LEVEL_ERROR, crashcatPacketHeaderValid(header), NV_ERR_INVALID_DATA);
*pFormatVersion = crashcatPacketHeaderFormatVersion(header);
switch (*pFormatVersion)
{
default:
// Fall through for backward compatibility if version is not recognized
case NV_CRASHCAT_PACKET_FORMAT_VERSION_1:
{
NvCrashCatReport_V1 *pReport = (NvCrashCatReport_V1 *)pBuf;
*pImplementerSig = pReport->implementerSignature;
break;
}
}
return NV_OK;
}
CrashCatReport *crashcatQueueConsumeNextReport_V1(CrashCatQueue *pQueue)
{
const NvU32 size = pQueue->config.size;
const NvU32 put = crashcatEnginePriRead(pQueue->pEngine, pQueue->config.putRegOffset);
NvU32 get = crashcatEnginePriRead(pQueue->pEngine, pQueue->config.getRegOffset);
NV_CHECK_OR_RETURN(LEVEL_ERROR, get < size, NULL);
NV_CHECK_OR_RETURN(LEVEL_ERROR, put < size, NULL);
NV_CHECK_OR_RETURN(LEVEL_SILENT, put != get, NULL); // Nothing to read
NvU32 readSize;
void *pBuf;
NvBool bAllocated = NV_FALSE;
if (put > get)
{
// We can read directly from the mapping
readSize = put - get;
crashcatEngineSyncCrashBuffer(pQueue->pEngine, pQueue->pMapping, get, readSize);
pBuf = (void *)((NvUPtr)pQueue->pMapping + get);
}
else if (put == 0)
{
// Buffer just wrapped, but we can still read directly from the mapping
readSize = size - get;
crashcatEngineSyncCrashBuffer(pQueue->pEngine, pQueue->pMapping, get, readSize);
pBuf = (void *)((NvUPtr)pQueue->pMapping + get);
}
else
{
// Need to handle wraparound, allocate a temporary buffer to simplify decoding
NvU32 preWrapSize = size - get;
NvU32 postWrapSize = put;
readSize = preWrapSize + postWrapSize;
pBuf = portMemAllocNonPaged(readSize);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pBuf != NULL, NULL);
bAllocated = NV_TRUE;
crashcatEngineSyncCrashBuffer(pQueue->pEngine, pQueue->pMapping, get, preWrapSize);
portMemCopy(pBuf, preWrapSize,
(void *)((NvUPtr)pQueue->pMapping + get), preWrapSize);
crashcatEngineSyncCrashBuffer(pQueue->pEngine, pQueue->pMapping, 0, postWrapSize);
portMemCopy((void *)((NvUPtr)pBuf + preWrapSize), postWrapSize,
pQueue->pMapping, postWrapSize);
}
//
// To create the CrashCatReport object, we pass the implementer signature as a halspec arg.
// The implementer signature location is technically report-version-specific, so we need a
// little adapter logic to get the right one before the report is created.
//
CrashCatReport *pReport = NULL;
void *pReadBuf = pBuf;
NV_STATUS status = NV_ERR_INVALID_DATA;
NvU8 reportFormatVersion;
NvU64 reportImplementer;
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
_getCrashCatReportHalspecArgs(pBuf, &reportFormatVersion, &reportImplementer),
updateGetPointer);
NV_CHECK_OK(status, LEVEL_ERROR,
objCreate(&pReport, pQueue, CrashCatReport,
reportFormatVersion, reportImplementer,
&pReadBuf, readSize));
updateGetPointer:
//
// Update the get pointer based on how many bytes were read, or skip it all if there was an
// extraction failure, so that we don't flood the logs with repeated failed extraction attempts.
// TODO: log raw data somewhere so the failure can be analyzed.
//
{
NvU64 diff = (NvU64)pReadBuf - (NvU64)pBuf;
if ((status != NV_OK) || (diff == 0))
diff = readSize;
NV_ASSERT_CHECKED(NvU64_HI32(diff) == 0);
get = (get + NvU64_LO32(diff)) % size;
crashcatEnginePriWrite(pQueue->pEngine, pQueue->config.getRegOffset, get);
}
if (bAllocated)
portMemFree(pBuf);
return pReport;
}
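A minimal sketch of the get/put ring-buffer consumption pattern used above: read a single contiguous span directly when possible, otherwise stitch the pre-wrap and post-wrap spans together into a temporary buffer. Plain C with illustrative names only.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
// Copy the bytes between get and put out of a circular buffer of 'size' bytes.
// Returns a malloc'd copy (caller frees) and sets *pOutLen; NULL if empty.
static uint8_t *ring_consume(const uint8_t *ring, uint32_t size,
                             uint32_t get, uint32_t put, uint32_t *pOutLen)
{
    if (get == put)
        return NULL; // nothing to read
    uint32_t len = (put > get) ? (put - get) : (size - get + put);
    uint8_t *out = malloc(len);
    if (out == NULL)
        return NULL;
    if (put > get)
    {
        memcpy(out, ring + get, len);      // single contiguous span
    }
    else
    {
        uint32_t preWrap = size - get;     // tail of the buffer
        memcpy(out, ring + get, preWrap);
        memcpy(out + preWrap, ring, put);  // wrapped head of the buffer
    }
    *pOutLen = len;
    return out;
}
int main(void)
{
    uint8_t ring[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    uint32_t len = 0;
    uint8_t *data = ring_consume(ring, sizeof(ring), 6, 2, &len); // wraps
    // len == 4: bytes 6, 7 followed by 0, 1
    free(data);
    return 0;
}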

View File

@@ -0,0 +1,458 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_report.h"
#include "crashcat/crashcat_engine.h"
#include "utils/nvassert.h"
#include "nv-crashcat-decoder.h"
static NV_INLINE NvUPtr ptrDiff(void *pStart, void *pEnd)
{
return (NvUPtr)pEnd - (NvUPtr)pStart;
}
static NV_INLINE void *advancePtr(void *pStart, void *pEnd, NvUPtr *pBytesRemaining)
{
*pBytesRemaining -= ptrDiff(pStart, pEnd);
return pEnd;
}
static NV_INLINE NvCrashCatPacketHeader_V1 crashcatReadPacketHeader(void *pHdr)
{
return *(NvCrashCatPacketHeader_V1 *)pHdr;
}
void crashcatReportDestruct_V1(CrashCatReport *pReport)
{
portMemFree(pReport->v1.pRiscv64Trace);
portMemFree(pReport->v1.pIo32State);
}
void *crashcatReportExtract_V1
(
CrashCatReport *pReport,
void *pReportBytes,
NvLength bytesRemaining
)
{
// Check for the base report first
void *pStart = pReportBytes;
void *pEnd = crashcatReportExtractReport_HAL(pReport, pStart, bytesRemaining);
// If nothing was extracted, there's nothing more to do
if (pEnd == pStart)
return pStart;
NV_ASSERT_OR_RETURN(pStart < pEnd, pStart);
//
// Pull out additional report data until we've exhausted the buffer or the start of the next
// report is found.
//
for (pStart = advancePtr(pStart, pEnd, &bytesRemaining);
bytesRemaining > 0;
pStart = advancePtr(pStart, pEnd, &bytesRemaining))
{
NvCrashCatPacketHeader_V1 hdr = crashcatReadPacketHeader(pStart);
// Not a valid header, nothing else to do here
if (!crashcatPacketHeaderValid(hdr))
{
NV_PRINTF(LEVEL_WARNING,
"Invalid packet header (0x%" NvU64_fmtx ") found, skipping %" NvUPtr_fmtu
" bytes remaining\n", hdr, bytesRemaining);
pEnd = (void *)((NvUPtr)pStart + bytesRemaining);
continue;
}
NV_CRASHCAT_PACKET_TYPE packetType = crashcatPacketHeaderV1Type(hdr);
// This is the next report (which may have a different format version)
if (packetType == NV_CRASHCAT_PACKET_TYPE_REPORT)
break;
switch (packetType)
{
case NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE:
pEnd = crashcatReportExtractRiscv64CsrState_HAL(pReport, pStart, bytesRemaining);
break;
case NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE:
pEnd = crashcatReportExtractRiscv64GprState_HAL(pReport, pStart, bytesRemaining);
break;
case NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE:
pEnd = crashcatReportExtractRiscv64Trace_HAL(pReport, pStart, bytesRemaining);
break;
case NV_CRASHCAT_PACKET_TYPE_IO32_STATE:
pEnd = crashcatReportExtractIo32State_HAL(pReport, pStart, bytesRemaining);
break;
default:
NV_PRINTF(LEVEL_WARNING, "Unsupported V1 packet type 0x%02x\n", packetType);
break;
}
// Did packet extraction fail?
if (pEnd == pStart)
{
NvLength dataSize = crashcatPacketHeaderPayloadSize(hdr) +
sizeof(NvCrashCatPacketHeader_V1);
NV_PRINTF(LEVEL_WARNING,
"Unable to extract packet type 0x%02x (%" NvUPtr_fmtu " bytes), skipping\n",
packetType, dataSize);
pEnd = (void *)((NvUPtr)pStart + dataSize);
continue;
}
}
return pStart;
}
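A compact plain-C sketch of the packet-walking loop above: each packet starts with a header encoding a type and a payload size, and the walker advances by whole packets, stopping at the next report header or when the buffer runs out. The header layout here is invented for illustration; the real one is defined by nv-crashcat-decoder.h.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
// Invented 64-bit header layout: low byte = type, next 4 bytes = payload size.
#define PKT_TYPE(h)     ((uint8_t)((h) & 0xFF))
#define PKT_PAYLOAD(h)  ((uint32_t)(((h) >> 8) & 0xFFFFFFFFu))
#define PKT_TYPE_REPORT 0x01
static const uint8_t *walk_packets(const uint8_t *buf, size_t len)
{
    while (len >= sizeof(uint64_t))
    {
        uint64_t hdr;
        memcpy(&hdr, buf, sizeof(hdr));
        if (PKT_TYPE(hdr) == PKT_TYPE_REPORT)
            break; // start of the next report; stop here
        size_t pktSize = sizeof(hdr) + PKT_PAYLOAD(hdr);
        if (pktSize > len)
            break; // truncated packet; give up
        printf("packet type 0x%02x, %zu bytes\n", PKT_TYPE(hdr), pktSize);
        buf += pktSize;
        len -= pktSize;
    }
    return buf; // first unconsumed byte
}
int main(void)
{
    // Two packets: type 0x02 with a 4-byte payload, then a report header.
    uint8_t buf[24] = {0};
    uint64_t p1 = 0x02u | ((uint64_t)4 << 8);
    uint64_t p2 = PKT_TYPE_REPORT;
    memcpy(buf, &p1, sizeof(p1));
    memcpy(buf + 12, &p2, sizeof(p2));
    walk_packets(buf, sizeof(buf));
    return 0;
}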
void *crashcatReportExtractReport_V1
(
CrashCatReport *pReport,
void *pReportBytes,
NvLength bytesRemaining
)
{
if (bytesRemaining < sizeof(NvCrashCatReport_V1))
{
NV_PRINTF(LEVEL_WARNING,
"Not enough data (%" NvUPtr_fmtu " bytes) to decode NvCrashCatReport_V1\n",
bytesRemaining);
return pReportBytes;
}
NvCrashCatPacketHeader_V1 hdr = crashcatReadPacketHeader(pReportBytes);
const NvLength expPayloadSize = sizeof(pReport->v1.report)
- sizeof(pReport->v1.report.header);
if (!crashcatPacketHeaderValid(hdr) ||
(crashcatPacketHeaderFormatVersion(hdr) != NV_CRASHCAT_PACKET_FORMAT_VERSION_1) ||
(crashcatPacketHeaderPayloadSize(hdr) != expPayloadSize))
{
NV_PRINTF(LEVEL_WARNING, "Invalid V1 report header 0x%" NvU64_fmtx "\n", hdr);
return pReportBytes;
}
pReport->v1.report = *(NvCrashCatReport_V1 *)pReportBytes;
pReport->validTags |= NVBIT(NV_CRASHCAT_PACKET_TYPE_REPORT);
return (void *)((NvUPtr)pReportBytes + sizeof(NvCrashCatReport_V1));
}
void *crashcatReportExtractRiscv64CsrState_V1
(
CrashCatReport *pReport,
void *pReportBytes,
NvLength bytesRemaining
)
{
if (bytesRemaining < sizeof(NvCrashCatRiscv64CsrState_V1))
{
NV_PRINTF(LEVEL_WARNING,
"Not enough data (%" NvUPtr_fmtu " bytes) to decode NvCrashCatRiscv64CsrState_V1\n",
bytesRemaining);
return pReportBytes;
}
NvCrashCatPacketHeader_V1 hdr = crashcatReadPacketHeader(pReportBytes);
const NvLength expPayloadSize = sizeof(pReport->v1.riscv64CsrState)
- sizeof(pReport->v1.riscv64CsrState.header);
if (!crashcatPacketHeaderValid(hdr) ||
(crashcatPacketHeaderFormatVersion(hdr) != NV_CRASHCAT_PACKET_FORMAT_VERSION_1) ||
(crashcatPacketHeaderPayloadSize(hdr) != expPayloadSize))
{
NV_PRINTF(LEVEL_WARNING, "Invalid V1 RISCV CSR state header 0x%" NvU64_fmtx "\n", hdr);
return pReportBytes;
}
pReport->v1.riscv64CsrState = *(NvCrashCatRiscv64CsrState_V1 *)pReportBytes;
pReport->validTags |= NVBIT(NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE);
return (void *)((NvUPtr)pReportBytes + sizeof(NvCrashCatRiscv64CsrState_V1));
}
void *crashcatReportExtractRiscv64GprState_V1
(
CrashCatReport *pReport,
void *pReportBytes,
NvLength bytesRemaining
)
{
if (bytesRemaining < sizeof(NvCrashCatRiscv64GprState_V1))
{
NV_PRINTF(LEVEL_WARNING,
"Not enough data (%" NvUPtr_fmtu " bytes) to decode NvCrashCatRiscv64GprState_V1\n",
bytesRemaining);
return pReportBytes;
}
NvCrashCatPacketHeader_V1 hdr = crashcatReadPacketHeader(pReportBytes);
const NvLength expPayloadSize = sizeof(pReport->v1.riscv64GprState)
- sizeof(pReport->v1.riscv64GprState.header);
if (!crashcatPacketHeaderValid(hdr) ||
(crashcatPacketHeaderFormatVersion(hdr) != NV_CRASHCAT_PACKET_FORMAT_VERSION_1) ||
(crashcatPacketHeaderPayloadSize(hdr) != expPayloadSize))
{
NV_PRINTF(LEVEL_WARNING, "Invalid V1 RISCV GPR state header 0x%" NvU64_fmtx "\n", hdr);
return pReportBytes;
}
pReport->v1.riscv64GprState = *(NvCrashCatRiscv64GprState_V1 *)pReportBytes;
pReport->validTags |= NVBIT(NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE);
return (void *)((NvUPtr)pReportBytes + sizeof(NvCrashCatRiscv64GprState_V1));
}
void *crashcatReportExtractRiscv64Trace_V1
(
CrashCatReport *pReport,
void *pReportBytes,
NvLength bytesRemaining
)
{
if (bytesRemaining < sizeof(NvCrashCatPacketHeader_V1))
{
NV_PRINTF(LEVEL_WARNING,
"Not enough data (%" NvUPtr_fmtu " bytes) to decode NvCrashCatRiscv64Trace_V1\n",
bytesRemaining);
return pReportBytes;
}
NvCrashCatPacketHeader_V1 hdr = crashcatReadPacketHeader(pReportBytes);
if (!crashcatPacketHeaderValid(hdr) ||
(crashcatPacketHeaderFormatVersion(hdr) != NV_CRASHCAT_PACKET_FORMAT_VERSION_1))
{
NV_PRINTF(LEVEL_WARNING, "Invalid V1 stack trace header 0x%" NvU64_fmtx "\n", hdr);
return pReportBytes;
}
// Allocate the buffer for the stack trace
const NvU32 payloadSize = crashcatPacketHeaderPayloadSize(hdr);
const NvLength sizeBytes = sizeof(NvCrashCatRiscv64Trace_V1) + payloadSize;
NV_CHECK_OR_RETURN(LEVEL_ERROR, payloadSize > 0, pReportBytes);
pReport->v1.pRiscv64Trace = portMemAllocNonPaged(sizeBytes);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pReport->v1.pRiscv64Trace != NULL, pReportBytes);
portMemCopy(pReport->v1.pRiscv64Trace, sizeBytes, pReportBytes, sizeBytes);
pReport->validTags |= NVBIT(NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE);
return (void *)((NvUPtr)pReportBytes + sizeBytes);
}
void *crashcatReportExtractIo32State_V1
(
CrashCatReport *pReport,
void *pReportBytes,
NvLength bytesRemaining
)
{
if (bytesRemaining < sizeof(NvCrashCatPacketHeader_V1))
{
NV_PRINTF(LEVEL_WARNING,
"Not enough data (%" NvUPtr_fmtu " bytes) to decode NvCrashCatIo32State_V1\n",
bytesRemaining);
return pReportBytes;
}
//
// TODO: support multiple IO32_STATE packets. This implementation assumes there will be only
// one, and always keeps the first one.
//
if ((pReport->validTags & NVBIT(NV_CRASHCAT_PACKET_TYPE_IO32_STATE)) != 0)
{
NV_PRINTF(LEVEL_WARNING, "Report already contains valid reg32 data, skipping\n");
return pReportBytes;
}
NvCrashCatPacketHeader_V1 hdr = crashcatReadPacketHeader(pReportBytes);
if (!crashcatPacketHeaderValid(hdr) ||
(crashcatPacketHeaderFormatVersion(hdr) != NV_CRASHCAT_PACKET_FORMAT_VERSION_1))
{
NV_PRINTF(LEVEL_WARNING, "Invalid V1 reg32 state header 0x%" NvU64_fmtx "\n", hdr);
return pReportBytes;
}
// Allocate the buffer for the register state.
const NvU32 payloadSize = crashcatPacketHeaderPayloadSize(hdr);
const NvLength sizeBytes = sizeof(NvCrashCatIo32State_V1) + payloadSize;
NV_CHECK_OR_RETURN(LEVEL_ERROR, payloadSize > 0, pReportBytes);
pReport->v1.pIo32State = portMemAllocNonPaged(sizeBytes);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pReport->v1.pIo32State != NULL, pReportBytes);
portMemCopy(pReport->v1.pIo32State, sizeBytes, pReportBytes, sizeBytes);
pReport->validTags |= NVBIT(NV_CRASHCAT_PACKET_TYPE_IO32_STATE);
return (void *)((NvUPtr)pReportBytes + sizeBytes);
}
void crashcatReportLogRiscv64CsrState_V1(CrashCatReport *pReport)
{
NvCrashCatRiscv64CsrState_V1 *pCsrStateV1 = &pReport->v1.riscv64CsrState;
NV_CRASHCAT_RISCV_MODE mode = crashcatRiscv64CsrStateV1Mode(pCsrStateV1);
//
// TODO: this equates M-mode with NVRISCV 1.x (e.g., mbadaddr vs stval). This happens to be
// correct as of this writing because there isn't an M-mode implementer on NVRISCV 2.x+,
// but we should have a way of distinguishing in the protocol.
//
CRASHCAT_REPORT_LOG_PACKET_TYPE(pReport, "RISC-V CSR State:");
switch (mode)
{
case NV_CRASHCAT_RISCV_MODE_M:
CRASHCAT_REPORT_LOG_DATA(pReport,
"mstatus:0x%016" NvU64_fmtx " mscratch:0x%016" NvU64_fmtx " mie:0x%016" NvU64_fmtx " mip:0x%016" NvU64_fmtx,
pCsrStateV1->xstatus, pCsrStateV1->xscratch, pCsrStateV1->xie, pCsrStateV1->xip);
CRASHCAT_REPORT_LOG_DATA(pReport,
" mepc:0x%016" NvU64_fmtx " mbadaddr:0x%016" NvU64_fmtx " mcause:0x%016" NvU64_fmtx,
pCsrStateV1->xepc, pCsrStateV1->xtval, pCsrStateV1->xcause);
break;
case NV_CRASHCAT_RISCV_MODE_S:
CRASHCAT_REPORT_LOG_DATA(pReport,
"sstatus:0x%016" NvU64_fmtx " sscratch:0x%016" NvU64_fmtx " sie:0x%016" NvU64_fmtx " sip:0x%016" NvU64_fmtx,
pCsrStateV1->xstatus, pCsrStateV1->xscratch, pCsrStateV1->xie, pCsrStateV1->xip);
CRASHCAT_REPORT_LOG_DATA(pReport,
" sepc:0x%016" NvU64_fmtx " stval:0x%016" NvU64_fmtx " scause:0x%016" NvU64_fmtx,
pCsrStateV1->xepc, pCsrStateV1->xtval, pCsrStateV1->xcause);
break;
default:
// We don't expect CSR dumps from other modes
NV_PRINTF(LEVEL_WARNING, "Unexpected RISC-V mode (%u) CSR dump\n", mode);
CRASHCAT_REPORT_LOG_DATA(pReport,
"xstatus:0x%016" NvU64_fmtx " xscratch:0x%016" NvU64_fmtx " xie:0x%016" NvU64_fmtx " xip:0x%016" NvU64_fmtx,
pCsrStateV1->xstatus, pCsrStateV1->xscratch, pCsrStateV1->xie, pCsrStateV1->xip);
CRASHCAT_REPORT_LOG_DATA(pReport,
" xepc:0x%016" NvU64_fmtx " xtval:0x%016" NvU64_fmtx " xcause:0x%016" NvU64_fmtx,
pCsrStateV1->xepc, pCsrStateV1->xtval, pCsrStateV1->xcause);
break;
}
}
void crashcatReportLogRiscv64GprState_V1(CrashCatReport *pReport)
{
NvCrashCatRiscv64GprState_V1 *pGprStateV1 = &pReport->v1.riscv64GprState;
// TODO: log the mode of the GPR state
CRASHCAT_REPORT_LOG_PACKET_TYPE(pReport, "RISC-V GPR State:");
CRASHCAT_REPORT_LOG_DATA(pReport,
"ra:0x%016" NvU64_fmtx " sp:0x%016" NvU64_fmtx " gp:0x%016" NvU64_fmtx " tp:0x%016" NvU64_fmtx,
pGprStateV1->ra, pGprStateV1->sp, pGprStateV1->gp, pGprStateV1->tp);
CRASHCAT_REPORT_LOG_DATA(pReport,
"a0:0x%016" NvU64_fmtx " a1:0x%016" NvU64_fmtx " a2:0x%016" NvU64_fmtx " a3:0x%016" NvU64_fmtx,
pGprStateV1->a0, pGprStateV1->a1, pGprStateV1->a2, pGprStateV1->a3);
CRASHCAT_REPORT_LOG_DATA(pReport,
"a4:0x%016" NvU64_fmtx " a5:0x%016" NvU64_fmtx " a6:0x%016" NvU64_fmtx " a7:0x%016" NvU64_fmtx,
pGprStateV1->a4, pGprStateV1->a5, pGprStateV1->a6, pGprStateV1->a7);
CRASHCAT_REPORT_LOG_DATA(pReport,
"s0:0x%016" NvU64_fmtx " s1:0x%016" NvU64_fmtx " s2:0x%016" NvU64_fmtx " s3:0x%016" NvU64_fmtx,
pGprStateV1->s0, pGprStateV1->s1, pGprStateV1->s2, pGprStateV1->s3);
CRASHCAT_REPORT_LOG_DATA(pReport,
"s4:0x%016" NvU64_fmtx " s5:0x%016" NvU64_fmtx " s6:0x%016" NvU64_fmtx " s7:0x%016" NvU64_fmtx,
pGprStateV1->s4, pGprStateV1->s5, pGprStateV1->s6, pGprStateV1->s7);
CRASHCAT_REPORT_LOG_DATA(pReport,
"s8:0x%016" NvU64_fmtx " s9:0x%016" NvU64_fmtx " s10:0x%016" NvU64_fmtx " s11:0x%016" NvU64_fmtx,
pGprStateV1->s8, pGprStateV1->s9, pGprStateV1->s10, pGprStateV1->s11);
CRASHCAT_REPORT_LOG_DATA(pReport,
"t0:0x%016" NvU64_fmtx " t1:0x%016" NvU64_fmtx " t2:0x%016" NvU64_fmtx " t3:0x%016" NvU64_fmtx,
pGprStateV1->t0, pGprStateV1->t1, pGprStateV1->t2, pGprStateV1->t3);
CRASHCAT_REPORT_LOG_DATA(pReport,
"t4:0x%016" NvU64_fmtx " t5:0x%016" NvU64_fmtx " t6:0x%016" NvU64_fmtx,
pGprStateV1->t4, pGprStateV1->t5, pGprStateV1->t6);
}
static NV_INLINE const char *crashcatRiscv64TraceTypeToString_V1(NV_CRASHCAT_TRACE_TYPE traceType)
{
switch (traceType)
{
case NV_CRASHCAT_TRACE_TYPE_STACK: return MAKE_NV_PRINTF_STR("Stack");
case NV_CRASHCAT_TRACE_TYPE_NVRVTB: return MAKE_NV_PRINTF_STR("PC");
default: return MAKE_NV_PRINTF_STR("Address");
}
}
void crashcatReportLogRiscv64Trace_V1(CrashCatReport *pReport)
{
NvCrashCatRiscv64Trace_V1 *pTraceV1 = pReport->v1.pRiscv64Trace;
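    // Each trace entry is a 64-bit address, so the payload holds payloadSize / 8 entries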
NvU16 entries = crashcatPacketHeaderPayloadSize(pTraceV1->header) >> 3;
NV_CRASHCAT_TRACE_TYPE traceType = crashcatRiscv64TraceV1Type(pTraceV1);
// TODO: log the mode of the trace using implementer terminology (e.g., kernel/task)
CRASHCAT_REPORT_LOG_PACKET_TYPE(pReport, "%s Trace:",
crashcatRiscv64TraceTypeToString_V1(traceType));
for (NvU16 i = 0; i < entries; i++)
CRASHCAT_REPORT_LOG_DATA(pReport, "0x%" NvU64_fmtx, pTraceV1->addr[i]);
}
static NV_INLINE const char *crashcatIo32ApertureToString_V1(NV_CRASHCAT_IO_APERTURE aperture)
{
switch (aperture)
{
case NV_CRASHCAT_IO_APERTURE_INTIO: return MAKE_NV_PRINTF_STR("Local I/O");
case NV_CRASHCAT_IO_APERTURE_EXTIO: return MAKE_NV_PRINTF_STR("External I/O");
default: return MAKE_NV_PRINTF_STR("Additional");
}
}
void crashcatReportLogIo32State_V1(CrashCatReport *pReport)
{
NvCrashCatIo32State_V1 *pIo32StateV1 = pReport->v1.pIo32State;
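    // Each entry is an 8-byte (offset, value) register pair, so the payload holds payloadSize / 8 entries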
NvU16 entries = crashcatPacketHeaderPayloadSize(pIo32StateV1->header) >> 3;
NV_CRASHCAT_IO_APERTURE aperture = crashcatIo32StateV1Aperture(pIo32StateV1);
CRASHCAT_REPORT_LOG_PACKET_TYPE(pReport, "%s Register State:",
crashcatIo32ApertureToString_V1(aperture));
// Print 4 registers on a line to save space
const NvU8 REGS_PER_LINE = 4;
NvU16 idx;
for (idx = 0; idx < (entries / REGS_PER_LINE) * REGS_PER_LINE; idx += REGS_PER_LINE)
CRASHCAT_REPORT_LOG_DATA(pReport,
"0x%08x:0x%08x 0x%08x:0x%08x 0x%08x:0x%08x 0x%08x:0x%08x",
pIo32StateV1->regs[idx + 0].offset, pIo32StateV1->regs[idx + 0].value,
pIo32StateV1->regs[idx + 1].offset, pIo32StateV1->regs[idx + 1].value,
pIo32StateV1->regs[idx + 2].offset, pIo32StateV1->regs[idx + 2].value,
pIo32StateV1->regs[idx + 3].offset, pIo32StateV1->regs[idx + 3].value);
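    // Log any remaining 1-3 registers on a final, shorter line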
switch (entries - idx)
{
case 3:
CRASHCAT_REPORT_LOG_DATA(pReport, "0x%08x:0x%08x 0x%08x:0x%08x 0x%08x:0x%08x",
pIo32StateV1->regs[idx + 0].offset, pIo32StateV1->regs[idx + 0].value,
pIo32StateV1->regs[idx + 1].offset, pIo32StateV1->regs[idx + 1].value,
pIo32StateV1->regs[idx + 2].offset, pIo32StateV1->regs[idx + 2].value);
break;
case 2:
CRASHCAT_REPORT_LOG_DATA(pReport, "0x%08x:0x%08x 0x%08x:0x%08x",
pIo32StateV1->regs[idx + 0].offset, pIo32StateV1->regs[idx + 0].value,
pIo32StateV1->regs[idx + 1].offset, pIo32StateV1->regs[idx + 1].value);
break;
case 1:
CRASHCAT_REPORT_LOG_DATA(pReport, "0x%08x:0x%08x",
pIo32StateV1->regs[idx + 0].offset, pIo32StateV1->regs[idx + 0].value);
break;
default:
break;
}
}

View File

@@ -0,0 +1,125 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_wayfinder.h"
#include "crashcat/crashcat_engine.h"
#include "crashcat/crashcat_queue.h"
#include "crashcat/crashcat_report.h"
#include "utils/nvassert.h"
#include "nv-crashcat-decoder.h"
void crashcatWayfinderSetWFL0_V1(CrashCatWayfinder *pWayfinder, NvU32 wfl0)
{
pWayfinder->v1.wfl0 = wfl0;
}
CrashCatQueue *crashcatWayfinderGetReportQueue_V1(CrashCatWayfinder *pWayfinder)
{
if (pWayfinder->pQueue != NULL)
return pWayfinder->pQueue;
//
// If we've already decoded WFL1 but still have no queue, queue control object creation
// failed and is unlikely to succeed on a retry, so bail out early rather than spam the logs.
//
if (pWayfinder->v1.wfl1 != 0)
return NULL;
CrashCatEngine *pEngine = CRASHCAT_GET_ENGINE(pWayfinder);
NV_CRASHCAT_SCRATCH_GROUP_ID wfl1Location =
crashcatWayfinderL0V1Wfl1Location(pWayfinder->v1.wfl0);
// Read the L1 wayfinder to locate the queue
const NvU32 *pScratchOffsets = crashcatEngineGetScratchOffsets(pEngine, wfl1Location);
if (pScratchOffsets == NULL)
{
NV_PRINTF(LEVEL_ERROR, "invalid WFL1 scratch location %u\n", wfl1Location);
return NULL;
}
//
// In NV_CRASHCAT_WAYFINDER_VERSION_1, the WFL1 contains two 32-bit values specifying the queue
// location, so only two registers need to be read. If the scratch group contains four
// registers, the other two are used for the queue control.
//
if ((pScratchOffsets[0] == 0) || (pScratchOffsets[1] == 0))
{
NV_PRINTF(LEVEL_ERROR, "insufficiently-sized L1 wayfinder scratch location %u\n",
wfl1Location);
return NULL;
}
// Have we already decoded WFL1?
if (pWayfinder->v1.wfl1 == 0)
{
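        // WFL1 is published as two 32-bit scratch registers: index 0 holds the low word,
        // index 1 the high word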
pWayfinder->v1.wfl1 =
((NvU64)crashcatEnginePriRead(pEngine, pScratchOffsets[1]) << 32) |
crashcatEnginePriRead(pEngine, pScratchOffsets[0]);
}
CrashCatQueueConfig queueConfig;
queueConfig.aperture = crashcatWayfinderL1V1QueueAperture(pWayfinder->v1.wfl1);
queueConfig.size = crashcatWayfinderL1V1QueueSize(pWayfinder->v1.wfl1);
queueConfig.offset = crashcatWayfinderL1V1QueueOffset(pWayfinder->v1.wfl1);
if ((pScratchOffsets[2] != 0) && (pScratchOffsets[3] != 0))
{
//
// If the scratch group has at least 4 32-bit registers, the 3rd and 4th are used for queue
// put and get, respectively.
//
queueConfig.putRegOffset = pScratchOffsets[2];
queueConfig.getRegOffset = pScratchOffsets[3];
}
else
{
//
// If there are only 2 scratch registers in the group, the WFL1 is erased and the registers
// are reused for queue put and get, respectively.
//
queueConfig.putRegOffset = pScratchOffsets[0];
queueConfig.getRegOffset = pScratchOffsets[1];
crashcatEnginePriWrite(pEngine, queueConfig.getRegOffset, 0);
crashcatEnginePriWrite(pEngine, queueConfig.putRegOffset, 0);
//
// The producer watches WFL0 and waits for the _WFL1_LOCATION bits to be set to _NONE
// before it updates the queue put pointer.
//
NvU32 wfl0Offset = crashcatEngineGetWFL0Offset(pEngine);
NvU32 wfl0 = FLD_SET_DRF64(_CRASHCAT, _WAYFINDER_L0_V1, _WFL1_LOCATION, _NONE,
pWayfinder->v1.wfl0);
crashcatEnginePriWrite(pEngine, wfl0Offset, wfl0);
}
// Create the queue control object
NV_STATUS status;
NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR,
objCreate(&pWayfinder->pQueue, pWayfinder, CrashCatQueue, &queueConfig),
return NULL);
return pWayfinder->pQueue;
}

View File

@@ -0,0 +1,77 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_report.h"
#include "crashcat/crashcat_engine.h"
#include "nv-crashcat-decoder.h"
static inline char crashcatReportModeToChar_GENERIC(NV_CRASHCAT_RISCV_MODE mode)
{
switch (mode)
{
case NV_CRASHCAT_RISCV_MODE_M: return 'M';
case NV_CRASHCAT_RISCV_MODE_S: return 'S';
case NV_CRASHCAT_RISCV_MODE_U: return 'U';
default: return '?';
}
}
static inline const char *crashcatReportCauseTypeToString_GENERIC(NV_CRASHCAT_CAUSE_TYPE causeType)
{
switch (causeType)
{
case NV_CRASHCAT_CAUSE_TYPE_EXCEPTION: return MAKE_NV_PRINTF_STR("exception");
case NV_CRASHCAT_CAUSE_TYPE_PANIC: return MAKE_NV_PRINTF_STR("panic");
case NV_CRASHCAT_CAUSE_TYPE_TIMEOUT: return MAKE_NV_PRINTF_STR("timeout");
default: return MAKE_NV_PRINTF_STR("unknown failure");
}
}
void crashcatReportLogSource_V1_GENERIC(CrashCatReport *pReport)
{
NvCrashCatReport_V1 *pReportV1 = &pReport->v1.report;
NvCrashCatNvriscvPartition partition = crashcatReportV1SourcePartition(pReportV1);
NvCrashCatNvriscvUcodeId ucodeId = crashcatReportV1SourceUcodeId(pReportV1);
NV_CRASHCAT_RISCV_MODE riscvMode = crashcatReportV1SourceMode(pReportV1);
NV_CRASHCAT_CAUSE_TYPE causeType = crashcatReportV1SourceCauseType(pReportV1);
crashcatEnginePrintf(pReport->pEngine, NV_TRUE,
"%s in partition:%u ucode:%u [%c-mode] @ pc:0x%" NvU64_fmtx ", data:0x%" NvU64_fmtx,
crashcatReportCauseTypeToString_GENERIC(causeType), partition, ucodeId,
crashcatReportModeToChar_GENERIC(riscvMode), pReportV1->sourcePc, pReportV1->sourceData);
}
void crashcatReportLogReporter_V1_GENERIC(CrashCatReport *pReport)
{
NvCrashCatReport_V1 *pReportV1 = &pReport->v1.report;
NvCrashCatNvriscvPartition partition = crashcatReportV1ReporterPartition(pReportV1);
NvCrashCatNvriscvUcodeId ucodeId = crashcatReportV1ReporterUcodeId(pReportV1);
NV_CRASHCAT_RISCV_MODE riscvMode = crashcatReportV1ReporterMode(pReportV1);
crashcatEnginePrintf(pReport->pEngine, NV_FALSE,
"Reported by partition:%u ucode:%u [%c-mode] version:%u @ %u",
partition, ucodeId, crashcatReportModeToChar_GENERIC(riscvMode),
crashcatReportV1ReporterVersion(pReportV1),
crashcatReportV1ReporterTimestamp(pReportV1));
}

View File

@@ -0,0 +1,135 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#include "crashcat/crashcat_report.h"
#include "crashcat/crashcat_engine.h"
#include "libos_v2_crashcat.h"
#include "utils/nvprintf.h"
#include "nv-crashcat-decoder.h"
static inline const char *crashcatReportModeToString_LIBOS2(NV_CRASHCAT_RISCV_MODE mode)
{
switch (mode)
{
case NV_CRASHCAT_RISCV_MODE_M:
case NV_CRASHCAT_RISCV_MODE_S:
return MAKE_NV_PRINTF_STR("kernel");
case NV_CRASHCAT_RISCV_MODE_U:
return MAKE_NV_PRINTF_STR("task");
default:
return MAKE_NV_PRINTF_STR("unspecified");
}
}
static inline const char *crashcatReportPanicReasonToString_LIBOS2(LibosPanicReason reason)
{
#define LIBOS_PANIC_REASON_CASE(reason, str) \
case LibosPanicReason ## reason: return MAKE_NV_PRINTF_STR(str)
switch (reason)
{
LIBOS_PANIC_REASON_CASE(UnrecoverableTaskCrash, "unrecoverable task crash");
LIBOS_PANIC_REASON_CASE(UnhandledState, "unhandled state");
LIBOS_PANIC_REASON_CASE(InvalidConfiguration, "invalid configuration");
LIBOS_PANIC_REASON_CASE(FatalHardwareError, "fatal hardware error");
LIBOS_PANIC_REASON_CASE(InsufficientResources, "insufficient resources");
LIBOS_PANIC_REASON_CASE(Timeout, "timeout");
LIBOS_PANIC_REASON_CASE(EnvCallFailed, "environment call failed");
LIBOS_PANIC_REASON_CASE(SspStackCheckFailed, "stack smashing detected");
LIBOS_PANIC_REASON_CASE(AsanMemoryError, "asan memory error detected");
LIBOS_PANIC_REASON_CASE(Test, "test");
LIBOS_PANIC_REASON_CASE(ProgrammingError, "programming error");
LIBOS_PANIC_REASON_CASE(DebugAssertionFailed, "debug assertion failed");
default: return MAKE_NV_PRINTF_STR("unknown error");
}
}
void crashcatReportLogSource_V1_LIBOS2(CrashCatReport *pReport)
{
NvCrashCatReport_V1 *pReportV1 = &pReport->v1.report;
NvU8 taskId = crashcatReportV1SourceLibos2TaskId(pReportV1);
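    // Log helper: appends ", task:<id>" to the message unless the source task is unspecified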
#define CRASHCAT_LOG_LIBOS2_SOURCE(fmt, ...) \
if (taskId == NV_CRASHCAT_REPORT_V1_SOURCE_ID_LIBOS2_TASK_ID_UNSPECIFIED) \
        crashcatEnginePrintf(pReport->pEngine, NV_TRUE, fmt, __VA_ARGS__); \
else \
crashcatEnginePrintf(pReport->pEngine, NV_TRUE, fmt ", task:%u", __VA_ARGS__, taskId)
const char *pModeStr = crashcatReportModeToString_LIBOS2(crashcatReportV1SourceMode(pReportV1));
switch (crashcatReportV1SourceCauseType(pReportV1))
{
case NV_CRASHCAT_CAUSE_TYPE_EXCEPTION:
{
// Kernel or task unhandled exception - the sourceData is the xcause value
NvU64 xcause = pReportV1->sourceData;
CRASHCAT_LOG_LIBOS2_SOURCE(
"%s exception: %s (cause:0x%" NvU64_fmtx ") @ pc:0x%" NvU64_fmtx,
pModeStr, crashcatReportRiscvCauseToString(xcause), xcause,
pReportV1->sourcePc);
break;
}
case NV_CRASHCAT_CAUSE_TYPE_TIMEOUT:
{
// Task timeout (no way for libos2 to self-report kernel timeouts)
CRASHCAT_LOG_LIBOS2_SOURCE(
"%s timeout @ pc:0x%" NvU64_fmtx, pModeStr, pReportV1->sourcePc);
break;
}
case NV_CRASHCAT_CAUSE_TYPE_PANIC:
{
// Kernel or task panic
LibosPanicReason reason = crashcatReportV1SourceCauseLibos2Reason(pReportV1);
CRASHCAT_LOG_LIBOS2_SOURCE(
"%s panic: %s (%u) @ pc:0x%" NvU64_fmtx ", aux:0x%" NvU64_fmtx,
pModeStr, crashcatReportPanicReasonToString_LIBOS2(reason),
reason, pReportV1->sourcePc, pReportV1->sourceData);
break;
}
}
}
void crashcatReportLogReporter_V1_LIBOS2(CrashCatReport *pReport)
{
NvCrashCatReport_V1 *pReportV1 = &pReport->v1.report;
NvU8 taskId = crashcatReportV1ReporterLibos2TaskId(pReportV1);
if (taskId == NV_CRASHCAT_REPORT_V1_REPORTER_ID_LIBOS2_TASK_ID_UNSPECIFIED)
{
crashcatEnginePrintf(pReport->pEngine, NV_FALSE,
"Reported by libos kernel v%u.%u [%u] @ %u",
crashcatReportV1ReporterVersionLibos2Major(pReportV1),
crashcatReportV1ReporterVersionLibos2Minor(pReportV1),
crashcatReportV1ReporterVersionLibos2Cl(pReportV1),
crashcatReportV1ReporterTimestamp(pReportV1));
}
else
{
crashcatEnginePrintf(pReport->pEngine, NV_FALSE,
"Reported by libos task:%u v%u.%u [%u] @ ts:%u",
taskId, crashcatReportV1ReporterVersionLibos2Major(pReportV1),
crashcatReportV1ReporterVersionLibos2Minor(pReportV1),
crashcatReportV1ReporterVersionLibos2Cl(pReportV1),
crashcatReportV1ReporterTimestamp(pReportV1));
}
}

View File

@@ -17,6 +17,10 @@ SRCS += generated/g_conf_compute_api_nvoc.c
SRCS += generated/g_conf_compute_nvoc.c
SRCS += generated/g_console_mem_nvoc.c
SRCS += generated/g_context_dma_nvoc.c
SRCS += generated/g_crashcat_engine_nvoc.c
SRCS += generated/g_crashcat_queue_nvoc.c
SRCS += generated/g_crashcat_report_nvoc.c
SRCS += generated/g_crashcat_wayfinder_nvoc.c
SRCS += generated/g_dbgbuffer_nvoc.c
SRCS += generated/g_deferred_api_nvoc.c
SRCS += generated/g_device_nvoc.c
@@ -84,6 +88,7 @@ SRCS += generated/g_kernel_ce_nvoc.c
SRCS += generated/g_kernel_channel_group_api_nvoc.c
SRCS += generated/g_kernel_channel_group_nvoc.c
SRCS += generated/g_kernel_channel_nvoc.c
SRCS += generated/g_kernel_crashcat_engine_nvoc.c
SRCS += generated/g_kernel_ctxshare_nvoc.c
SRCS += generated/g_kernel_falcon_nvoc.c
SRCS += generated/g_kernel_fifo_nvoc.c
@@ -388,7 +393,9 @@ SRCS += src/kernel/gpu/external_device/gsync_api.c
SRCS += src/kernel/gpu/external_device/kern_external_device.c
SRCS += src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga100.c
SRCS += src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga102.c
SRCS += src/kernel/gpu/falcon/arch/turing/kernel_crashcat_engine_tu102.c
SRCS += src/kernel/gpu/falcon/arch/turing/kernel_falcon_tu102.c
SRCS += src/kernel/gpu/falcon/kernel_crashcat_engine.c
SRCS += src/kernel/gpu/falcon/kernel_falcon.c
SRCS += src/kernel/gpu/falcon/kernel_falcon_ctrl.c
SRCS += src/kernel/gpu/fifo/arch/ampere/kernel_channel_ga100.c
@@ -740,6 +747,15 @@ SRCS += src/libraries/containers/multimap.c
SRCS += src/libraries/containers/queue.c
SRCS += src/libraries/containers/ringbuf.c
SRCS += src/libraries/containers/vector.c
SRCS += src/libraries/crashcat/crashcat_engine.c
SRCS += src/libraries/crashcat/crashcat_queue.c
SRCS += src/libraries/crashcat/crashcat_report.c
SRCS += src/libraries/crashcat/crashcat_wayfinder.c
SRCS += src/libraries/crashcat/v1/crashcat_queue_v1.c
SRCS += src/libraries/crashcat/v1/crashcat_report_v1.c
SRCS += src/libraries/crashcat/v1/crashcat_wayfinder_v1.c
SRCS += src/libraries/crashcat/v1/impl/crashcat_report_v1_generic.c
SRCS += src/libraries/crashcat/v1/impl/crashcat_report_v1_libos2.c
SRCS += src/libraries/eventbuffer/eventbufferproducer.c
SRCS += src/libraries/fnv_hash/fnv_hash.c
SRCS += src/libraries/ioaccess/ioaccess.c