580.94.10

russellcnv
2025-11-17 09:42:27 -08:00
parent e2dbb3d99c
commit 5237658d5c
53 changed files with 701 additions and 274 deletions

View File

@@ -43,18 +43,18 @@
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r580/VK580_65-182"
#define NV_BUILD_CHANGELIST_NUM (36741708)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r580/VK580_65-186"
#define NV_BUILD_CHANGELIST_NUM (36888175)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r580/VK580_65-182"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36741708)
#define NV_BUILD_NAME "rel/gpu_drv/r580/VK580_65-186"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36888175)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "VK580_65-9"
#define NV_BUILD_CHANGELIST_NUM (36741708)
#define NV_BUILD_BRANCH_VERSION "VK580_65-12"
#define NV_BUILD_CHANGELIST_NUM (36887028)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "581.71"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36741708)
#define NV_BUILD_NAME "581.90"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36887028)
#define NV_BUILD_BRANCH_BASE_VERSION R580
#endif
// End buildmeister python edited section

View File

@@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "580.94.06"
#define NV_VERSION_STRING "580.94.10"
#else

View File

@@ -175,6 +175,7 @@ static NVHDMIPKT_RESULT SetFRLLinkRate(NVHDMIPKT_CLASS *pThis,
const NvU32 subDevice,
const NvU32 displayId,
const NvBool bFakeLt,
const NvBool bDoNotSkipLt,
const NvBool bLinkAssessmentOnly,
const NvU32 frlRate)
{
@@ -184,6 +185,7 @@ static NVHDMIPKT_RESULT SetFRLLinkRate(NVHDMIPKT_CLASS *pThis,
params.displayId = displayId;
params.data = frlRate;
params.bFakeLt = bFakeLt;
params.bDoNotSkipLt = bDoNotSkipLt;
params.bLinkAssessmentOnly = bLinkAssessmentOnly;
#if NVHDMIPKT_RM_CALLS_INTERNAL
@@ -275,14 +277,16 @@ performLinkTraningToAssessFRLLink(NVHDMIPKT_CLASS *pThis,
{
// If the display is active and the maximum link rate matches the link
// rate required for the current mode timings, avoid marking the set
// link configuration call as an assessment only. This prevents
// re-training after the assessment.
// link configuration call as an assessment only. This allows us to
// re-train the existing link now instead of after the assessment.
// In addition, do not allow link training to be skipped to ensure
// we successfully recover an existing FRL config.
const NvBool bLinkAssessmentOnly =
bIsDisplayActive ? (nv0073currFRLRate != maxFRLRate) : NV_TRUE;
if (SetFRLLinkRate(pThis, subDevice, displayId,
NV_FALSE /* bFakeLt */, bLinkAssessmentOnly,
maxFRLRate) == NVHDMIPKT_SUCCESS)
NV_FALSE /* bFakeLt */, NV_TRUE /* bDoNotSkipLt */,
bLinkAssessmentOnly, maxFRLRate) == NVHDMIPKT_SUCCESS)
{
break;
}
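
A minimal sketch of how the two flags combine for an already-active display, restating the reasoning in the comment above (variable names such as bIsDisplayActive, nv0073currFRLRate, and maxFRLRate are taken from the surrounding diff context):

// Sketch only: choosing the flags for an active display per the comment above.
const NvBool bLinkAssessmentOnly =
    bIsDisplayActive ? (nv0073currFRLRate != maxFRLRate) // rates match: retrain in place
                     : NV_TRUE;                          // inactive display: assess only
// bDoNotSkipLt is forced to NV_TRUE so link training cannot be skipped and an
// existing FRL configuration is genuinely recovered.
SetFRLLinkRate(pThis, subDevice, displayId,
               NV_FALSE /* bFakeLt */, NV_TRUE /* bDoNotSkipLt */,
               bLinkAssessmentOnly, maxFRLRate);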
@@ -299,11 +303,13 @@ performLinkTraningToAssessFRLLink(NVHDMIPKT_CLASS *pThis,
if (SetFRLLinkRate(pThis, subDevice, displayId,
bFakeLt, NV_FALSE /* bLinkAssessmentOnly */,
NV_FALSE /* bDoNotSkipLt */,
currFRLRate) != NVHDMIPKT_SUCCESS)
{
if (!bFakeLt) {
if (SetFRLLinkRate(pThis, subDevice, displayId,
NV_TRUE, NV_FALSE /* bLinkAssessmentOnly */,
NV_FALSE /* bDoNotSkipLt */,
currFRLRate) != NVHDMIPKT_SUCCESS) {
NvHdmiPkt_Assert(0);
}
@@ -1130,6 +1136,19 @@ hdmiQueryFRLConfigC671(NVHDMIPKT_CLASS *pThis,
NvU32 bppMinX16Itr, bppMaxX16Itr;
NvBool bHasPreCalcFRLData = NV_FALSE;
NvBool forceFRLRateDSC = pClientCtrl->forceFRLRate;
HDMI_FRL_DATA_RATE requestedFRLRate = pClientCtrl->frlRate;
#if defined(NVHDMIPKT_NVKMS)
NvU32 rr = (pVidTransInfo->pTiming->pclk * (NvU64)10000) /
(pVidTransInfo->pTiming->HTotal * (NvU64)pVidTransInfo->pTiming->VTotal);
if (!pVidTransInfo->pTiming->interlaced && (rr >= 480)) {
forceFRLRateDSC = NV_TRUE;
requestedFRLRate = dscMaxFRLRate;
}
#endif
// DSC_All_bpp = 1:
// The lower the compression ratio, the better the pixel quality; hence a high bppTarget value is ideal
// DSC_All_bpp = 1 allows us the flexibility to use a bppTarget setting different from the primary compressed format
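
A rough worked example of the refresh-rate check added above, under the assumption that pTiming->pclk is stored in 10 kHz units (so pclk * 10000 is the pixel clock in Hz); the numbers are illustrative only:

// Illustrative only: a 1920x1080 timing with HTotal = 2000, VTotal = 1125 at a
// 1.125 GHz pixel clock, i.e. pclk = 112500 in the assumed 10 kHz units.
NvU32 rr = (NvU32)(((NvU64)112500 * 10000) / ((NvU64)2000 * 1125)); // = 500 Hz
// rr >= 480 and the timing is progressive, so the code above forces DSC at
// dscMaxFRLRate for this mode.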
@@ -1237,16 +1256,16 @@ hdmiQueryFRLConfigC671(NVHDMIPKT_CLASS *pThis,
frlParams.compressionInfo.hSlices = NV_UNSIGNED_DIV_CEIL(pVidTransInfo->pTiming->HVisible, pClientCtrl->sliceWidth);
}
if (pClientCtrl->forceFRLRate)
if (forceFRLRateDSC)
{
if (pClientCtrl->frlRate > dscMaxFRLRate)
if (requestedFRLRate > dscMaxFRLRate)
{
result = NVHDMIPKT_FAIL;
goto frlQuery_fail;
}
minFRLRateItr = pClientCtrl->frlRate;
maxFRLRateItr = pClientCtrl->frlRate;
minFRLRateItr = requestedFRLRate;
maxFRLRateItr = requestedFRLRate;
}
if (pClientCtrl->forceBppx16)
@@ -1419,6 +1438,7 @@ hdmiSetFRLConfigC671(NVHDMIPKT_CLASS *pThis,
{
return SetFRLLinkRate(pThis, subDevice, displayId, bFakeLt,
NV_FALSE /* bLinkAssessmentOnly */,
NV_FALSE /* bDoNotSkipLt */,
translateFRLRateToNv0073SetHdmiFrlConfig(pFRLConfig->frlRate));
}
@@ -1432,6 +1452,7 @@ hdmiClearFRLConfigC671(NVHDMIPKT_CLASS *pThis,
{
return SetFRLLinkRate(pThis, subDevice, displayId,
NV_FALSE, NV_FALSE /* bLinkAssessmentOnly */,
NV_FALSE /* bDoNotSkipLt */,
NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE);
}

View File

@@ -84,6 +84,7 @@ typedef struct
#define NVLINK_INBAND_GPU_PROBE_CAPS_ATS_SUPPORT NVBIT(3)
#define NVLINK_INBAND_GPU_PROBE_CAPS_LINK_RETRAIN_SUPPORT NVBIT(4)
#define NVLINK_INBAND_GPU_PROBE_CAPS_HEALTH_SUMMARY NVBIT(6)
#define NVLINK_INBAND_GPU_PROBE_CAPS_MC_RETRY NVBIT(8)
/* Add more caps as needed in the future */

View File

@@ -1377,6 +1377,7 @@ typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS {
NvU32 displayId;
NvU32 data;
NvBool bFakeLt;
NvBool bDoNotSkipLt;
NvBool bLtSkipped;
NvBool bLinkAssessmentOnly;
} NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS;

View File

@@ -165,7 +165,8 @@
#define ROBUST_CHANNEL_UNUSED_ERROR_170 (170)
#define UNCORRECTABLE_DRAM_ERROR (171)
#define UNCORRECTABLE_SRAM_ERROR (172)
#define ROBUST_CHANNEL_LAST_ERROR (172)
#define C2C_FATAL_LINK_FAILURE (173)
#define ROBUST_CHANNEL_LAST_ERROR (173)
// Indexed CE reference
#define ROBUST_CHANNEL_CE_ERROR(x) \

View File

@@ -165,6 +165,7 @@ NV_STATUS_CODE(NV_ERR_FABRIC_STATE_OUT_OF_SYNC, 0x00000087, "NVLink fabri
NV_STATUS_CODE(NV_ERR_BUFFER_FULL, 0x00000088, "Buffer is full")
NV_STATUS_CODE(NV_ERR_BUFFER_EMPTY, 0x00000089, "Buffer is empty")
NV_STATUS_CODE(NV_ERR_MC_FLA_OFFSET_TABLE_FULL, 0x0000008A, "Multicast FLA offset table has no available slots")
NV_STATUS_CODE(NV_ERR_DMA_XFER_FAILED, 0x0000008B, "DMA transfer failed")
// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")

View File

@@ -621,25 +621,6 @@ ENTRY(0x2238, 0x16B8, 0x10de, "NVIDIA A10M-10C"),
ENTRY(0x2238, 0x16B9, 0x10de, "NVIDIA A10M-20C"),
ENTRY(0x2238, 0x16E6, 0x10de, "NVIDIA A10M-1"),
ENTRY(0x2238, 0x2208, 0x10de, "NVIDIA A10M-3B"),
ENTRY(0x230E, 0x20F5, 0x10de, "NVIDIA H20L-1-15CME"),
ENTRY(0x230E, 0x20F6, 0x10de, "NVIDIA H20L-1-15C"),
ENTRY(0x230E, 0x20F7, 0x10de, "NVIDIA H20L-1-30C"),
ENTRY(0x230E, 0x20F8, 0x10de, "NVIDIA H20L-2-30C"),
ENTRY(0x230E, 0x20F9, 0x10de, "NVIDIA H20L-3-60C"),
ENTRY(0x230E, 0x20FA, 0x10de, "NVIDIA H20L-4-60C"),
ENTRY(0x230E, 0x20FB, 0x10de, "NVIDIA H20L-7-120C"),
ENTRY(0x230E, 0x20FC, 0x10de, "NVIDIA H20L-4C"),
ENTRY(0x230E, 0x20FD, 0x10de, "NVIDIA H20L-5C"),
ENTRY(0x230E, 0x20FE, 0x10de, "NVIDIA H20L-6C"),
ENTRY(0x230E, 0x20FF, 0x10de, "NVIDIA H20L-8C"),
ENTRY(0x230E, 0x2100, 0x10de, "NVIDIA H20L-10C"),
ENTRY(0x230E, 0x2101, 0x10de, "NVIDIA H20L-12C"),
ENTRY(0x230E, 0x2102, 0x10de, "NVIDIA H20L-15C"),
ENTRY(0x230E, 0x2103, 0x10de, "NVIDIA H20L-20C"),
ENTRY(0x230E, 0x2104, 0x10de, "NVIDIA H20L-30C"),
ENTRY(0x230E, 0x2105, 0x10de, "NVIDIA H20L-40C"),
ENTRY(0x230E, 0x2106, 0x10de, "NVIDIA H20L-60C"),
ENTRY(0x230E, 0x2107, 0x10de, "NVIDIA H20L-120C"),
ENTRY(0x2321, 0x1853, 0x10de, "NVIDIA H100L-1-12CME"),
ENTRY(0x2321, 0x1854, 0x10de, "NVIDIA H100L-1-12C"),
ENTRY(0x2321, 0x1855, 0x10de, "NVIDIA H100L-1-24C"),

View File

@@ -17,7 +17,6 @@ static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
{ 0x20B7, 0x1804, 0x20B7, 0x1532 },
{ 0x20B9, 0x157F, 0x20B7, 0x1532 },
{ 0x20FD, 0x17F8, 0x20F5, 0x0 },
{ 0x230E, 0x20DF, 0x230E, 0x20DF },
{ 0x2324, 0x17A8, 0x2324, 0x17A6 },
{ 0x2329, 0x198C, 0x2329, 0x198B },
{ 0x232C, 0x2064, 0x232C, 0x2063 },
@@ -122,13 +121,6 @@ static const struct {
{0x20F610DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1094}, // GRID A800-4-20C
{0x20F610DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1095}, // GRID A800-7-40C
{0x20F610DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1091}, // GRID A800-1-10C
{0x230E10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1499}, // NVIDIA H20L-1-15CME
{0x230E10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1500}, // NVIDIA H20L-1-15C
{0x230E10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1501}, // NVIDIA H20L-1-30C
{0x230E10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU , 1502}, // NVIDIA H20L-2-30C
{0x230E10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1503}, // NVIDIA H20L-3-60C
{0x230E10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1504}, // NVIDIA H20L-4-60C
{0x230E10DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1505}, // NVIDIA H20L-7-120C
{0x232110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1061}, // NVIDIA H100L-1-12CME
{0x232110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1062}, // NVIDIA H100L-1-12C
{0x232110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1063}, // NVIDIA H100L-1-24C

View File

@@ -68,8 +68,8 @@ typedef void (*msgqFcnBarrier)(void);
// Function to access backend memory (if it's not memory mapped).
// Keep in mind that when using it, pointers given by peek can't be trusted.
// Should return 0 on success.
typedef int (*msgqFcnBackendRw)(void *pDest, const void *pSrc, unsigned size,
unsigned flags, void *pArg);
typedef unsigned (*msgqFcnBackendRw)(void *pDest, const void *pSrc, unsigned size,
unsigned flags, void *pArg);
/**
* @brief Return size of metadata (that must be allocated)

View File

@@ -104,35 +104,45 @@ msgqSetBarrier(msgqHandle handle, msgqFcnBarrier fcn)
/*
* Helper functions to access indirect backend.
*/
sysSHARED_CODE static void
// TODO: Make these functions return NV_STATUS instead of int wherever possible.
sysSHARED_CODE static int
_backendRead32(msgqMetadata *pQueue, volatile const void *pAddr, NvU32 *pVal, unsigned flags)
{
if (pQueue->fcnBackendRw != NULL)
{
pQueue->fcnBackendRw(pVal, (const void *)pAddr, sizeof(*pVal),
flags | FCN_FLAG_BACKEND_ACCESS_READ,
pQueue->fcnBackendRwArg);
int status = pQueue->fcnBackendRw(pVal, (const void *)pAddr, sizeof(*pVal),
flags | FCN_FLAG_BACKEND_ACCESS_READ,
pQueue->fcnBackendRwArg);
if (status != 0)
{
return -1;
}
}
else
{
*pVal = *(volatile const NvU32*)pAddr;
}
return 0;
}
sysSHARED_CODE static void
sysSHARED_CODE static int
_backendWrite32(msgqMetadata *pQueue, volatile void *pAddr, NvU32 *pVal, unsigned flags)
{
if (pQueue->fcnBackendRw != NULL)
{
pQueue->fcnBackendRw((void*)pAddr, pVal, sizeof(*pVal),
flags | FCN_FLAG_BACKEND_ACCESS_WRITE,
pQueue->fcnBackendRwArg);
int status = pQueue->fcnBackendRw((void*)pAddr, pVal, sizeof(*pVal),
flags | FCN_FLAG_BACKEND_ACCESS_WRITE,
pQueue->fcnBackendRwArg);
if (status != 0)
{
return -1;
}
}
else
{
*(volatile NvU32*)pAddr = *pVal;
}
return 0;
}
/**
@@ -142,7 +152,7 @@ _backendWrite32(msgqMetadata *pQueue, volatile void *pAddr, NvU32 *pVal, unsigne
sysSHARED_CODE static void
msgqRiscvDefaultBarrier(void)
{
asm volatile("fence iorw,iorw");
__asm__ volatile("fence iorw,iorw");
}
#endif
@@ -188,6 +198,7 @@ msgqTxCreate
{
msgqMetadata *pQueue = (msgqMetadata*)handle;
msgqTxHeader *pTx;
int status;
if ((pQueue == NULL) || pQueue->txLinked)
{
@@ -282,10 +293,15 @@ msgqTxCreate
// Indirect access to backend
if (pQueue->fcnBackendRw != NULL)
{
pQueue->fcnBackendRw(pTx, &pQueue->tx, sizeof *pTx,
FCN_FLAG_BACKEND_ACCESS_WRITE | FCN_FLAG_BACKEND_QUEUE_TX,
pQueue->fcnBackendRwArg);
} else
status = pQueue->fcnBackendRw(pTx, &pQueue->tx, sizeof *pTx,
FCN_FLAG_BACKEND_ACCESS_WRITE | FCN_FLAG_BACKEND_QUEUE_TX,
pQueue->fcnBackendRwArg);
if (status != 0)
{
return -1;
}
}
else
{
memcpy(pTx, &pQueue->tx, sizeof *pTx);
}
@@ -315,6 +331,7 @@ sysSHARED_CODE int
msgqRxLink(msgqHandle handle, const void *pBackingStore, unsigned size, unsigned msgSize)
{
msgqMetadata *pQueue = (msgqMetadata*)handle;
int status;
if ((pQueue == NULL) || pQueue->rxLinked)
{
@@ -347,10 +364,14 @@ msgqRxLink(msgqHandle handle, const void *pBackingStore, unsigned size, unsigned
// copy their metadata
if (pQueue->fcnBackendRw != NULL)
{
pQueue->fcnBackendRw(&pQueue->rx, (const void *)pQueue->pTheirTxHdr,
sizeof pQueue->rx,
FCN_FLAG_BACKEND_ACCESS_READ | FCN_FLAG_BACKEND_QUEUE_RX,
pQueue->fcnBackendRwArg);
status = pQueue->fcnBackendRw(&pQueue->rx, (const void *)pQueue->pTheirTxHdr,
sizeof pQueue->rx,
FCN_FLAG_BACKEND_ACCESS_READ | FCN_FLAG_BACKEND_QUEUE_RX,
pQueue->fcnBackendRwArg);
if (status != 0)
{
return -11;
}
}
else
{
@@ -413,8 +434,13 @@ msgqRxLink(msgqHandle handle, const void *pBackingStore, unsigned size, unsigned
}
pQueue->rxReadPtr = 0;
_backendWrite32(pQueue, pQueue->pReadOutgoing, &pQueue->rxReadPtr,
pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_TX : FCN_FLAG_BACKEND_QUEUE_RX);
status = _backendWrite32(pQueue, pQueue->pReadOutgoing, &pQueue->rxReadPtr,
pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_TX : FCN_FLAG_BACKEND_QUEUE_RX);
if (status != 0)
{
return -12;
}
if (pQueue->fcnFlush != NULL)
{
pQueue->fcnFlush(pQueue->pReadOutgoing, sizeof(NvU32));
@@ -451,8 +477,12 @@ msgqTxGetFreeSpace(msgqHandle handle)
return 0;
}
_backendRead32(pQueue, pQueue->pReadIncoming, &pQueue->txReadPtr,
pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_RX : FCN_FLAG_BACKEND_QUEUE_TX);
if (_backendRead32(pQueue, pQueue->pReadIncoming, &pQueue->txReadPtr,
pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_RX : FCN_FLAG_BACKEND_QUEUE_TX) != 0)
{
return 0;
}
if (pQueue->txReadPtr >= pQueue->tx.msgCount)
{
return 0;
@@ -505,6 +535,7 @@ sysSHARED_CODE int
msgqTxSubmitBuffers(msgqHandle handle, unsigned n)
{
msgqMetadata *pQueue = (msgqMetadata*)handle;
int status;
if ((pQueue == NULL) || !pQueue->txLinked)
{
@@ -531,8 +562,19 @@ msgqTxSubmitBuffers(msgqHandle handle, unsigned n)
pQueue->tx.writePtr -= pQueue->tx.msgCount;
}
_backendWrite32(pQueue, pQueue->pWriteOutgoing,
&pQueue->tx.writePtr, FCN_FLAG_BACKEND_QUEUE_TX);
status = _backendWrite32(pQueue, pQueue->pWriteOutgoing,
&pQueue->tx.writePtr, FCN_FLAG_BACKEND_QUEUE_TX);
if (status != 0)
{
// restore write pointer
if (pQueue->tx.writePtr < n)
{
pQueue->tx.writePtr += pQueue->tx.msgCount;
}
pQueue->tx.writePtr -= n;
return -2;
}
// Adjust cached value for number of free elements.
pQueue->txFree -= n;
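
A worked instance of the rollback arithmetic above, with illustrative values:

// Hypothetical values: msgCount = 16, writePtr was 14 before the submit, n = 3.
// Advance:  14 + 3 = 17 >= 16, so writePtr wraps to 17 - 16 = 1.
// Rollback: 1 < 3, so writePtr becomes (1 + 16) - 3 = 14, the pre-submit value.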
@@ -606,7 +648,11 @@ msgqRxGetReadAvailable(msgqHandle handle)
return 0;
}
_backendRead32(pQueue, pQueue->pWriteIncoming, &pQueue->rx.writePtr, FCN_FLAG_BACKEND_QUEUE_RX);
if (_backendRead32(pQueue, pQueue->pWriteIncoming, &pQueue->rx.writePtr, FCN_FLAG_BACKEND_QUEUE_RX) != 0)
{
return 0;
}
if (pQueue->rx.writePtr >= pQueue->rx.msgCount)
{
return 0;
@@ -659,6 +705,7 @@ sysSHARED_CODE int
msgqRxMarkConsumed(msgqHandle handle, unsigned n)
{
msgqMetadata *pQueue = (msgqMetadata*)handle;
int status;
if ((pQueue == NULL) || !pQueue->rxLinked)
{
@@ -679,8 +726,19 @@ msgqRxMarkConsumed(msgqHandle handle, unsigned n)
}
// Copy to backend
_backendWrite32(pQueue, pQueue->pReadOutgoing, &pQueue->rxReadPtr,
pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_TX : FCN_FLAG_BACKEND_QUEUE_RX);
status = _backendWrite32(pQueue, pQueue->pReadOutgoing, &pQueue->rxReadPtr,
pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_TX : FCN_FLAG_BACKEND_QUEUE_RX);
if (status != 0)
{
// restore read pointer
if (pQueue->rxReadPtr < n)
{
pQueue->rxReadPtr += pQueue->rx.msgCount;
}
pQueue->rxReadPtr -= n;
return -2;
}
// Adjust cached value for number of available elements.
pQueue->rxAvail -= n;