530.41.03

Andy Ritger
2023-03-23 11:00:12 -07:00
parent 4397463e73
commit 6dd092ddb7
63 changed files with 848 additions and 149 deletions

View File

@@ -931,6 +931,11 @@ NV_STATUS osAllocPagesInternal(
if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE)))
nv->force_dma32_alloc = NV_TRUE;
if (NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount) > NV_U32_MAX)
{
status = NV_ERR_INVALID_LIMIT;
}
else
{
status = nv_alloc_pages(
NV_GET_NV_STATE(pGpu),

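Note: the hunk above guards a 64-bit RM page count against truncation before it reaches a 32-bit OS allocation path. A minimal standalone sketch of the same guard, with hypothetical names and the RM-to-OS page conversion already applied:

    #include <stdint.h>

    #define MY_U32_MAX 0xFFFFFFFFu   /* stand-in for NV_U32_MAX */

    /* Hypothetical analogue of the check added to osAllocPagesInternal():
     * refuse any request whose OS-page count cannot fit in 32 bits. */
    static int alloc_pages_checked(uint64_t os_page_count)
    {
        if (os_page_count > MY_U32_MAX)
            return -1;  /* maps to NV_ERR_INVALID_LIMIT in the real code */
        /* ... proceed with a 32-bit page count ... */
        return 0;
    }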
View File

@@ -167,12 +167,25 @@ const NvU8 * RmGetGpuUuidRaw(
)
{
NV_STATUS rmStatus;
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
OBJGPU *pGpu = NULL;
NvU32 gidFlags;
NvBool isApiLockTaken = NV_FALSE;
if (pNv->nv_uuid_cache.valid)
goto done;
return pNv->nv_uuid_cache.uuid;
if (!rmapiLockIsOwner())
{
rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU);
if (rmStatus != NV_OK)
{
return NULL;
}
isApiLockTaken = NV_TRUE;
}
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
//
// PBI is not present in simulation and the loop inside
@@ -193,7 +206,7 @@ const NvU8 * RmGetGpuUuidRaw(
rmStatus = gpumgrSetUuid(pNv->gpu_id, pNv->nv_uuid_cache.uuid);
if (rmStatus != NV_OK)
{
return NULL;
goto err;
}
pNv->nv_uuid_cache.valid = NV_TRUE;
@@ -209,45 +222,35 @@ const NvU8 * RmGetGpuUuidRaw(
gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1)
| DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY);
if (!rmapiLockIsOwner())
{
rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU);
if (rmStatus != NV_OK)
{
return NULL;
}
isApiLockTaken = NV_TRUE;
}
if (pGpu == NULL)
{
if (isApiLockTaken == NV_TRUE)
{
rmapiLockRelease();
}
return NULL;
}
if (!pGpu)
goto err;
rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags);
if (isApiLockTaken == NV_TRUE)
{
rmapiLockRelease();
}
if (rmStatus != NV_OK)
return NULL;
goto err;
if (!pGpu->gpuUuid.isInitialized)
return NULL;
goto err;
// copy the uuid from the OBJGPU uuid cache
os_mem_copy(pNv->nv_uuid_cache.uuid, pGpu->gpuUuid.uuid, GPU_UUID_LEN);
pNv->nv_uuid_cache.valid = NV_TRUE;
done:
if (isApiLockTaken)
{
rmapiLockRelease();
}
return pNv->nv_uuid_cache.uuid;
err:
if (isApiLockTaken)
{
rmapiLockRelease();
}
return NULL;
}
static NV_STATUS RmGpuUuidRawToString(

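Note: the refactor above takes the RMAPI read lock before touching OBJGPU and funnels every exit through the done/err labels so the lock is released exactly once. A hedged single-exit sketch of that pattern, with stand-in stubs for the lock primitives:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for rmapiLockIsOwner() / rmapiLockAcquire() /
     * rmapiLockRelease(); the real code uses the RMAPI read lock. */
    static bool g_lock_held;
    static bool lock_is_owner(void) { return g_lock_held; }
    static int  lock_take(void)     { g_lock_held = true; return 0; }
    static void lock_release(void)  { g_lock_held = false; }
    static const unsigned char *do_work(void) { return NULL; }

    /* Single-exit sketch: take the lock only if we do not already own it,
     * and release it on every path out. */
    static const unsigned char *get_cached_value(void)
    {
        bool lock_taken = false;
        const unsigned char *result;

        if (!lock_is_owner())
        {
            if (lock_take() != 0)
                return NULL;          /* nothing to undo yet */
            lock_taken = true;
        }

        result = do_work();           /* may fail and return NULL */

        if (lock_taken)
            lock_release();           /* exactly one release */
        return result;
    }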
View File

@@ -344,6 +344,9 @@ typedef struct MEMORY_DESCRIPTOR
// Serve as a head node in a list of submemdescs
MEMORY_DESCRIPTOR_LIST *pSubMemDescList;
// Reserved for RM exclusive use
NvBool bRmExclusiveUse;
// If strung in an intrusive linked list
ListNode node;
@@ -653,6 +656,8 @@ NvBool memdescGetCustomHeap(PMEMORY_DESCRIPTOR);
// Temporary function for 64-bit pageSize transition
NvU64 memdescGetPageSize64(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation);
NvBool memdescAcquireRmExclusiveUse(MEMORY_DESCRIPTOR *pMemDesc);
/*!
* @brief Get PTE kind
*

View File

@@ -481,7 +481,6 @@ struct MemoryManager {
NvU32 zbcSurfaces;
NvU64 overrideInitHeapMin;
NvU64 overrideHeapMax;
NvU64 fbOverrideStartKb;
NvU64 rsvdMemorySizeIncrement;
struct OBJFBSR *pFbsr[8];
struct OBJFBSR *pActiveFbsr;

View File

@@ -884,9 +884,13 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2236, 0x1482, 0x10de, "NVIDIA A10" },
{ 0x2237, 0x152f, 0x10de, "NVIDIA A10G" },
{ 0x2238, 0x1677, 0x10de, "NVIDIA A10M" },
{ 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" },
{ 0x2324, 0x17a6, 0x10de, "NVIDIA H800" },
{ 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
{ 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
{ 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@@ -973,11 +977,18 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B1, 0x16a1, 0x10de, "NVIDIA RTX 6000 Ada Generation" },
{ 0x26B1, 0x16a1, 0x17aa, "NVIDIA RTX 6000 Ada Generation" },
{ 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
{ 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },
{ 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" },
{ 0x2717, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
{ 0x2757, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
{ 0x2782, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti" },
{ 0x27A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
{ 0x27B0, 0x16fa, 0x1028, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B0, 0x16fa, 0x103c, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B0, 0x16fa, 0x10de, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B0, 0x16fa, 0x17aa, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B8, 0x16ca, 0x10de, "NVIDIA L4" },
{ 0x27B8, 0x16ee, 0x10de, "NVIDIA L4" },
{ 0x27E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
{ 0x2820, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" },
{ 0x2860, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" },
@@ -1236,6 +1247,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x1E37, 0x148a, 0x10DE, "GRID RTX T10-2" },
{ 0x1E37, 0x148b, 0x10DE, "GRID RTX T10-1" },
{ 0x1E37, 0x148c, 0x10DE, "GRID RTX T10-0" },
{ 0x1E37, 0x180d, 0x10DE, "NVIDIA GeForce GTX 1060" },
{ 0x1E37, 0x1820, 0x10DE, "GeForce RTX 2080" },
{ 0x1E78, 0x13f7, 0x10DE, "GRID RTX6000P-1B" },
{ 0x1E78, 0x13f8, 0x10DE, "GRID RTX6000P-2B" },
{ 0x1E78, 0x13f9, 0x10DE, "GRID RTX6000P-1Q" },
@@ -1523,6 +1536,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2237, 0x1631, 0x10DE, "NVIDIA A10G-8Q" },
{ 0x2237, 0x1632, 0x10DE, "NVIDIA A10G-12Q" },
{ 0x2237, 0x1633, 0x10DE, "NVIDIA A10G-24Q" },
{ 0x2237, 0x1810, 0x10DE, "NVIDIA GeForce RTX 3050" },
{ 0x2237, 0x1811, 0x10DE, "NVIDIA GeForce RTX 3060" },
{ 0x2238, 0x16a3, 0x10DE, "NVIDIA A10M-1B" },
{ 0x2238, 0x16a4, 0x10DE, "NVIDIA A10M-2B" },
{ 0x2238, 0x16a5, 0x10DE, "NVIDIA A10M-1Q" },
@@ -1636,6 +1651,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B5, 0x1791, 0x10DE, "NVIDIA L40-16C" },
{ 0x26B5, 0x1792, 0x10DE, "NVIDIA L40-24C" },
{ 0x26B5, 0x1793, 0x10DE, "NVIDIA L40-48C" },
{ 0x26B5, 0x1818, 0x10DE, "NVIDIA GeForce RTX 3060" },
{ 0x26B5, 0x181a, 0x10DE, "NVIDIA GeForce RTX 3050" },
{ 0x26B8, 0x174e, 0x10DE, "NVIDIA L40G-1B" },
{ 0x26B8, 0x174f, 0x10DE, "NVIDIA L40G-2B" },
{ 0x26B8, 0x1750, 0x10DE, "NVIDIA L40G-1Q" },
@@ -1659,6 +1676,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B8, 0x176a, 0x10DE, "NVIDIA L40G-8C" },
{ 0x26B8, 0x176b, 0x10DE, "NVIDIA L40G-12C" },
{ 0x26B8, 0x176c, 0x10DE, "NVIDIA L40G-24C" },
{ 0x26B8, 0x181c, 0x10DE, "NVIDIA GeForce RTX 3060" },
{ 0x26B8, 0x181e, 0x10DE, "NVIDIA GeForce RTX 3050" },
{ 0x27B8, 0x172f, 0x10DE, "NVIDIA L4-1B" },
{ 0x27B8, 0x1730, 0x10DE, "NVIDIA L4-2B" },
{ 0x27B8, 0x1731, 0x10DE, "NVIDIA L4-1Q" },

View File

@@ -115,6 +115,13 @@ struct ProfilerBase {
void (*__profilerBaseControlSerialization_Epilogue__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__profilerBaseMap__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__profilerBaseAccessCallback__)(struct ProfilerBase *, struct RsClient *, void *, RsAccessRight);
NvU32 maxPmaChannels;
NvU32 pmaVchIdx;
NvBool bLegacyHwpm;
struct RsResourceRef **ppBytesAvailable;
struct RsResourceRef **ppStreamBuffers;
struct RsResourceRef *pBoundCntBuf;
struct RsResourceRef *pBoundPmaBuf;
};
#ifndef __NVOC_CLASS_ProfilerBase_TYPEDEF__

View File

@@ -572,10 +572,15 @@ RmPhysAddr dmaPageArrayGetPhysAddr(DMA_PAGE_ARRAY *pPageArray, NvU32 pageIndex);
//
// hal.dmaAllocVASpace() flags
//
#define DMA_ALLOC_VASPACE_NONE 0
#define DMA_VA_LIMIT_49B NVBIT(0)
#define DMA_VA_LIMIT_57B NVBIT(1)
#define DMA_ALLOC_VASPACE_SIZE_ALIGNED NVBIT(9)
#define DMA_ALLOC_VASPACE_NONE 0
#define DMA_VA_LIMIT_49B NVBIT(0)
#define DMA_VA_LIMIT_57B NVBIT(1)
#define DMA_ALLOC_VASPACE_SIZE_ALIGNED NVBIT(9)
//
// Bug 3610538: For unlinked SLI, clients want to restrict internal buffers
// to the internal VA range, so that SLI vaspaces can mirror each other.
//
#define DMA_ALLOC_VASPACE_USE_RM_INTERNAL_VALIMITS NVBIT(10)
//
// Internal device allocation flags

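Note: these flags are single-bit masks built with NVBIT(n). A short sketch of how a caller composes and tests them (BIT and the flag names below are illustrative stand-ins, not the RM definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))   /* stand-in for NVBIT(n) */

    #define VA_LIMIT_49B            BIT(0)
    #define VA_LIMIT_57B            BIT(1)
    #define VASPACE_SIZE_ALIGNED    BIT(9)
    #define VASPACE_RM_INTERNAL     BIT(10)

    int main(void)
    {
        uint32_t flags = VA_LIMIT_49B | VASPACE_RM_INTERNAL;

        if (flags & VASPACE_RM_INTERNAL)
            printf("restrict allocations to the RM-internal VA window\n");
        return 0;
    }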
View File

@@ -167,6 +167,11 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
{
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get display mask from input buffer
// display mask is 4 bytes long and available at byte 1
@@ -181,27 +186,54 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
// get acpi id
acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask);
outDataSize = sizeof(NvU32);
outStatus = osCallACPI_MXMX(pGpu, acpiId, pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUON:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_GPUON(pGpu, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUOFF:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_GPUOFF(pGpu, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUSTA:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_GPUSTA(pGpu, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
@@ -214,11 +246,17 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_MXDS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
@@ -231,11 +269,17 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = osCallACPI_MXDS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
@@ -249,10 +293,16 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
sizeof(NvU32));
outStatus = osCallACPI_MXDM(pGpu, acpiId, (NvU32*) pInOutData);
outDataSize = sizeof(NvU32);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
// get acpi id from input buffer
portMemCopy(&acpiId,
@@ -260,22 +310,34 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = osCallACPI_MXID(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
portMemCopy(&acpiId,
sizeof(NvU32),
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_LRST(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
portMemCopy(&acpiId,
sizeof(NvU32),
@@ -290,6 +352,11 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
@@ -305,11 +372,17 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
// get acpi id
acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask);
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_MXMX(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
// acpi id is 4 bytes long and available at byte 4
@@ -324,20 +397,35 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
// get acpi id
acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask);
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_DOS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_ROM:
{
NvU32 *pBuffer = (NvU32*) pInOutData;
if ((inOutDataSize < (2 * sizeof(NvU32))) || (inOutDataSize < pBuffer[1]))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = pBuffer[1];
outStatus = osCallACPI_NVHG_ROM(pGpu, (NvU32*) pInOutData, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DCS:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
// get display mask from input buffer
portMemCopy(&acpiId, sizeof(NvU32), pInOutData, sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_DCS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}

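Note: every case above now applies the same guard: inOutDataSize must cover the field's offset plus 4 bytes before portMemCopy reads it. A hedged sketch of that check (read_u32_field and its names are illustrative, not RM APIs):

    #include <stdint.h>
    #include <string.h>

    static int read_u32_field(const void *buf, uint32_t buf_size,
                              uint32_t field_offset, uint32_t *out)
    {
        /* field_offset is a small compile-time constant in the real code,
         * so field_offset + sizeof(uint32_t) cannot overflow. */
        if (buf_size < field_offset + sizeof(uint32_t))
            return -1;  /* caller maps this to NV_ERR_INVALID_ARGUMENT */

        memcpy(out, (const uint8_t *)buf + field_offset, sizeof(uint32_t));
        return 0;
    }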
View File

@@ -1477,6 +1477,11 @@ kgraphicsMapCtxBuffer_IMPL
NvU32 updateFlags = bIsReadOnly ? (DMA_UPDATE_VASPACE_FLAGS_READ_ONLY |
DMA_UPDATE_VASPACE_FLAGS_SHADER_READ_ONLY) : DMA_UPDATE_VASPACE_FLAGS_NONE;
if (pGVAS->flags & VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS)
{
allocFlags |= DMA_ALLOC_VASPACE_USE_RM_INTERNAL_VALIMITS;
}
if (kgraphicsIsPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics))
{
status = dmaMapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, pMemDesc, &vaddr,
@@ -2153,6 +2158,9 @@ deviceCtrlCmdKGrGetCaps_IMPL
return NV_ERR_NOT_SUPPORTED;
}
NV_CHECK_OR_RETURN(LEVEL_ERROR, pGrCaps != NULL, NV_ERR_INVALID_ARGUMENT);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pParams->capsTblSize == NV0080_CTRL_GR_CAPS_TBL_SIZE, NV_ERR_INVALID_ARGUMENT);
SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
{
KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu);

View File

@@ -42,6 +42,40 @@ profilerBaseCtrlCmdFreePmaStream_IMPL
portMemSet(&internalParams, 0, sizeof(NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS));
internalParams.pmaChannelIdx = pParams->pmaChannelIdx;
{
RsResourceRef *pCountRef = NULL;
RsResourceRef *pBufferRef = NULL;
if (pProfiler->maxPmaChannels <= pParams->pmaChannelIdx)
{
goto err;
}
pCountRef = pProfiler->ppBytesAvailable[pParams->pmaChannelIdx];
pProfiler->ppBytesAvailable[pParams->pmaChannelIdx] = NULL;
pBufferRef = pProfiler->ppStreamBuffers[pParams->pmaChannelIdx];
pProfiler->ppStreamBuffers[pParams->pmaChannelIdx] = NULL;
if (pProfiler->pBoundCntBuf == pCountRef && pProfiler->pBoundPmaBuf == pBufferRef)
{
Memory *pCntMem = dynamicCast(pCountRef->pResource, Memory);
Memory *pBufMem = dynamicCast(pBufferRef->pResource, Memory);
pProfiler->pBoundCntBuf = NULL;
pProfiler->pBoundPmaBuf = NULL;
pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
}
if (pCountRef != NULL)
{
refRemoveDependant(pCountRef, RES_GET_REF(pProfiler));
}
if (pBufferRef != NULL)
{
refRemoveDependant(pBufferRef, RES_GET_REF(pProfiler));
}
}
err:
return pRmApi->Control(pRmApi,
RES_GET_CLIENT_HANDLE(pProfiler),
@@ -61,10 +95,52 @@ profilerBaseCtrlCmdBindPmResources_IMPL
NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfiler);
NvHandle hObject = RES_GET_HANDLE(pProfiler);
NV_STATUS status = NV_OK;
RsResourceRef *pCntRef = NULL;
RsResourceRef *pBufRef = NULL;
Memory *pCntMem = NULL;
Memory *pBufMem = NULL;
NV_CHECK_OR_GOTO(LEVEL_INFO,
!pProfiler->bLegacyHwpm && pProfiler->maxPmaChannels != 0, physical_control);
if (pProfiler->maxPmaChannels <= pProfiler->pmaVchIdx)
{
return NV_ERR_INVALID_ARGUMENT;
}
pCntRef = pProfiler->ppBytesAvailable[pProfiler->pmaVchIdx];
pBufRef = pProfiler->ppStreamBuffers[pProfiler->pmaVchIdx];
NV_CHECK_OR_GOTO(LEVEL_INFO,
pCntRef != NULL && pBufRef != NULL, physical_control);
pCntMem = dynamicCast(pCntRef->pResource, Memory);
pBufMem = dynamicCast(pBufRef->pResource, Memory);
NV_ASSERT_OR_RETURN(pCntMem != NULL && pBufMem != NULL, NV_ERR_INVALID_STATE);
if (!memdescAcquireRmExclusiveUse(pCntMem->pMemDesc) ||
!memdescAcquireRmExclusiveUse(pBufMem->pMemDesc))
{
pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
return NV_ERR_INVALID_ARGUMENT;
}
pProfiler->pBoundCntBuf = pCntRef;
pProfiler->pBoundPmaBuf = pBufRef;
physical_control:
status = pRmApi->Control(pRmApi, hClient, hObject,
NVB0CC_CTRL_CMD_INTERNAL_BIND_PM_RESOURCES,
NULL, 0);
if (status != NV_OK && pCntMem != NULL && pBufMem != NULL)
{
pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
pProfiler->pBoundCntBuf = NULL;
pProfiler->pBoundPmaBuf = NULL;
}
return status;
}
@@ -78,6 +154,31 @@ profilerBaseCtrlCmdUnbindPmResources_IMPL
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfiler);
NvHandle hObject = RES_GET_HANDLE(pProfiler);
RsResourceRef *pCntRef = NULL;
RsResourceRef *pBufRef = NULL;
pCntRef = pProfiler->pBoundCntBuf;
pBufRef = pProfiler->pBoundPmaBuf;
if (pCntRef != NULL)
{
Memory *pCntMem = dynamicCast(pCntRef->pResource, Memory);
if (pCntMem != NULL)
{
pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
}
pProfiler->pBoundCntBuf = NULL;
}
if (pBufRef != NULL)
{
Memory *pBufMem = dynamicCast(pBufRef->pResource, Memory);
if (pBufMem != NULL)
{
pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE;
}
pProfiler->pBoundPmaBuf = NULL;
}
return pRmApi->Control(pRmApi, hClient, hObject,
NVB0CC_CTRL_CMD_INTERNAL_UNBIND_PM_RESOURCES,
@@ -96,6 +197,7 @@ profilerBaseCtrlCmdReserveHwpmLegacy_IMPL
NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfiler);
NvHandle hObject = RES_GET_HANDLE(pProfiler);
pProfiler->bLegacyHwpm = NV_TRUE;
return pRmApi->Control(pRmApi, hClient, hObject,
NVB0CC_CTRL_CMD_INTERNAL_RESERVE_HWPM_LEGACY,
pParams, sizeof(*pParams));
@@ -117,6 +219,7 @@ profilerBaseCtrlCmdAllocPmaStream_IMPL
NvBool bMemPmaBufferRegistered = NV_FALSE;
NvBool bMemPmaBytesAvailableRegistered = NV_FALSE;
NVB0CC_CTRL_INTERNAL_ALLOC_PMA_STREAM_PARAMS internalParams;
RsResourceRef *pMemoryRef = NULL;
//
// REGISTER MEMDESCs TO GSP
// These are no-ops with BareMetal/No GSP
@@ -150,6 +253,32 @@ profilerBaseCtrlCmdAllocPmaStream_IMPL
&internalParams, sizeof(internalParams)), fail);
pParams->pmaChannelIdx = internalParams.pmaChannelIdx;
if (pProfiler->ppBytesAvailable == NULL)
{
NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS maxPmaParams;
portMemSet(&maxPmaParams, 0, sizeof(NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS));
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
pRmApi->Control(pRmApi, hClient, hObject,
NVB0CC_CTRL_CMD_INTERNAL_GET_MAX_PMAS,
&maxPmaParams, sizeof(maxPmaParams)), fail);
pProfiler->maxPmaChannels = maxPmaParams.maxPmaChannels;
pProfiler->ppBytesAvailable = (RsResourceRef**)portMemAllocNonPaged(maxPmaParams.maxPmaChannels * sizeof(RsResourceRef*));
pProfiler->ppStreamBuffers = (RsResourceRef**)portMemAllocNonPaged(maxPmaParams.maxPmaChannels * sizeof(RsResourceRef*));
}
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
serverutilGetResourceRef(hClient, pParams->hMemPmaBytesAvailable, &pMemoryRef), fail);
pProfiler->ppBytesAvailable[pParams->pmaChannelIdx] = pMemoryRef;
refAddDependant(pMemoryRef, RES_GET_REF(pProfiler));
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
serverutilGetResourceRef(hClient, pParams->hMemPmaBuffer, &pMemoryRef), fail);
pProfiler->ppStreamBuffers[pParams->pmaChannelIdx] = pMemoryRef;
refAddDependant(pMemoryRef, RES_GET_REF(pProfiler));
// Copy output params to external struct.
pProfiler->pmaVchIdx = pParams->pmaChannelIdx;
pProfiler->bLegacyHwpm = NV_FALSE;
// Copy output params to external struct.
pParams->pmaBufferVA = internalParams.pmaBufferVA;

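Note: the profiler now sizes two per-channel tables of resource refs lazily, on the first PMA stream allocation, and records the bound pair so bind/unbind can drop the exclusive-use flag later. A hedged sketch of the lazy table growth (Ref stands in for RsResourceRef, calloc for portMemAllocNonPaged; the NULL check is added here for the sketch):

    #include <stdlib.h>

    typedef struct Ref Ref;

    typedef struct {
        unsigned max_channels;
        Ref **bytes_available;   /* one slot per PMA channel */
        Ref **stream_buffers;
    } Profiler;

    static int profiler_ensure_tables(Profiler *p, unsigned max_channels)
    {
        if (p->bytes_available != NULL)
            return 0;                        /* already sized */

        p->max_channels    = max_channels;
        p->bytes_available = calloc(max_channels, sizeof(Ref *));
        p->stream_buffers  = calloc(max_channels, sizeof(Ref *));
        if (p->bytes_available == NULL || p->stream_buffers == NULL)
        {
            free(p->bytes_available);
            free(p->stream_buffers);
            p->bytes_available = p->stream_buffers = NULL;
            return -1;                       /* allocation failure */
        }
        return 0;
    }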
View File

@@ -629,13 +629,13 @@ dmaAllocMapping_GM107
NvU64 vaAlign = NV_MAX(pLocals->pageSize, compAlign);
NvU64 vaSize = RM_ALIGN_UP(pLocals->mapLength, vaAlign);
NvU64 pageSizeLockMask = 0;
pGVAS = dynamicCast(pVAS, OBJGVASPACE);
if (FLD_TEST_DRF(OS46, _FLAGS, _PAGE_SIZE, _BOTH, flags))
{
vaAlign = NV_MAX(vaAlign, pLocals->vaspaceBigPageSize);
vaSize = RM_ALIGN_UP(pLocals->mapLength, vaAlign);
}
//
// Third party code path, nvidia_p2p_get_pages, expects the BAR1 VA to
// always be aligned at 64K.
@@ -665,6 +665,14 @@ dmaAllocMapping_GM107
goto cleanup;
}
}
if (pGVAS != NULL && gvaspaceIsInternalVaRestricted(pGVAS))
{
if ((pLocals->vaRangeLo >= pGVAS->vaStartInternal && pLocals->vaRangeLo <= pGVAS->vaLimitInternal) ||
(pLocals->vaRangeHi <= pGVAS->vaLimitInternal && pLocals->vaRangeHi >= pGVAS->vaStartInternal))
{
return NV_ERR_INVALID_PARAMETER;
}
}
}
else if (pDma->getProperty(pDma, PDB_PROP_DMA_RESTRICT_VA_RANGE))
{
@@ -690,7 +698,6 @@ dmaAllocMapping_GM107
// Clients can pass an allocation flag to the device or VA space constructor
// so that mappings and allocations will fail without an explicit address.
//
pGVAS = dynamicCast(pVAS, OBJGVASPACE);
if (pGVAS != NULL)
{
if ((pGVAS->flags & VASPACE_FLAGS_REQUIRE_FIXED_OFFSET) &&
@@ -700,6 +707,18 @@ dmaAllocMapping_GM107
NV_PRINTF(LEVEL_ERROR, "The VA space requires all allocations to specify a fixed address\n");
goto cleanup;
}
//
// Bug 3610538: clients can allocate GPU VA during mapping for ctx dma.
// But if clients enable RM to map internal buffers in a reserved VA
// range for unlinked SLI on Linux, we want to tag these allocations as
// "client allocated", so that they land outside the RM-internal region.
//
if (gvaspaceIsInternalVaRestricted(pGVAS))
{
allocFlags.bClientAllocation = NV_TRUE;
}
}
status = vaspaceAlloc(pVAS, vaSize, vaAlign, pLocals->vaRangeLo, pLocals->vaRangeHi,
@@ -2210,9 +2229,20 @@ dmaMapBuffer_GM107
vaAlign = NV_MAX(vaAlign, temp);
}
// Set this first in case we ignore DMA_ALLOC_VASPACE_USE_RM_INTERNAL_VALIMITS next
rangeLo = vaspaceGetVaStart(pVAS);
rangeHi = vaspaceGetVaLimit(pVAS);
if (flagsForAlloc & DMA_ALLOC_VASPACE_USE_RM_INTERNAL_VALIMITS)
{
OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE);
if (pGVAS)
{
rangeLo = pGVAS->vaStartInternal;
rangeHi = pGVAS->vaLimitInternal;
}
}
// If trying to conserve 32bit address space, map RM buffers at 4GB+
if (pDma->getProperty(pDma, PDB_PROP_DMA_ENFORCE_32BIT_POINTER) &&
(pVASpaceHeap->free > NVBIT64(32)))

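Note: in dmaMapBuffer_GM107 the range defaults to the full VA span and is then narrowed to the RM-internal window when DMA_ALLOC_VASPACE_USE_RM_INTERNAL_VALIMITS is set. A hedged sketch of that selection, with illustrative names:

    #include <stdint.h>

    #define USE_INTERNAL_LIMITS (1u << 10)

    typedef struct {
        uint64_t va_start, va_limit;                   /* full span      */
        uint64_t va_start_internal, va_limit_internal; /* RM-only window */
    } VaSpace;

    static void pick_range(const VaSpace *vas, uint32_t flags,
                           uint64_t *lo, uint64_t *hi)
    {
        *lo = vas->va_start;     /* set first, in case the flag is ignored */
        *hi = vas->va_limit;

        if (flags & USE_INTERNAL_LIMITS)
        {
            *lo = vas->va_start_internal;
            *hi = vas->va_limit_internal;
        }
    }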
View File

@@ -4735,8 +4735,11 @@ NV_STATUS heapResize_IMPL
if (resizeBy < 0) // Shrink the allocation
{
NvS64 newSize;
NV_ASSERT_OR_RETURN(pBlockLast->owner == NVOS32_BLOCK_TYPE_FREE, NV_ERR_NO_MEMORY);
NV_ASSERT_OR_RETURN((pBlockLast->end - pBlockLast->begin + resizeBy > 0), NV_ERR_INVALID_LIMIT);
NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddS64(pBlockLast->end - pBlockLast->begin, resizeBy, &newSize) &&
(newSize > 0), NV_ERR_INVALID_LIMIT);
pBlockLast->end += resizeBy;
}
else // Grow the allocation

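Note: the shrink path replaces a raw signed addition with an overflow-checked one. A hypothetical analogue of portSafeAddS64 built on a GCC/Clang builtin, plus usage mirroring the hunk:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns false instead of wrapping when a + b overflows int64_t. */
    static bool safe_add_s64(int64_t a, int64_t b, int64_t *out)
    {
        return !__builtin_add_overflow(a, b, out);
    }

    /* Shrink only if the new size stays positive, computed without
     * undefined signed overflow (NV_ERR_INVALID_LIMIT analogue on -1). */
    static int shrink_block(int64_t cur_size, int64_t resize_by)
    {
        int64_t new_size;
        if (!safe_add_s64(cur_size, resize_by, &new_size) || new_size <= 0)
            return -1;
        return 0;
    }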
View File

@@ -2317,6 +2317,28 @@ memdescFillPages
}
}
/*!
* @brief Acquire exclusive use for memdesc for RM.
*
* @param[inout] pMemDesc Memory descriptor
*
* @returns Boolean indicating whether we successfully acquired the memdesc for exclusive use
*/
NvBool
memdescAcquireRmExclusiveUse
(
MEMORY_DESCRIPTOR *pMemDesc
)
{
NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pParentDescriptor == NULL &&
!pMemDesc->bRmExclusiveUse &&
pMemDesc->DupCount == 1,
NV_FALSE);
pMemDesc->bRmExclusiveUse = NV_TRUE;
return NV_TRUE;
}
//
// SubMemory per subdevice chart: (MD - Memory Descriptor, SD - subdevice)
//
@@ -2451,6 +2473,7 @@ memdescCreateSubMem
pMemDescNew->bUsingSuballocator = pMemDesc->bUsingSuballocator;
pMemDescNew->_pParentDescriptor = pMemDesc;
pMemDesc->childDescriptorCnt++;
pMemDescNew->bRmExclusiveUse = pMemDesc->bRmExclusiveUse;
pMemDescNew->subMemOffset = Offset;

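Note: acquisition succeeds only for a top-level, non-duplicated descriptor, and the flag is inherited by submemdescs so a child cannot be handed back to clients while RM holds the parent. A hedged sketch of the acquire/release handshake (MemDesc is a stand-in carrying only the fields the check uses):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct MemDesc {
        struct MemDesc *parent;
        unsigned        dup_count;
        bool            rm_exclusive_use;
    } MemDesc;

    static bool memdesc_acquire_exclusive(MemDesc *md)
    {
        /* Only a top-level descriptor with a single reference qualifies. */
        if (md->parent != NULL || md->rm_exclusive_use || md->dup_count != 1)
            return false;
        md->rm_exclusive_use = true;
        return true;
    }

    static void memdesc_release_exclusive(MemDesc *md)
    {
        md->rm_exclusive_use = false;   /* as done on unbind/free above */
    }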
View File

@@ -2638,6 +2638,8 @@ memmgrPmaRegisterRegions_IMPL
NvU32 blackListCount;
NvU64 base, size;
NV_STATUS status = NV_OK;
const MEMORY_SYSTEM_STATIC_CONFIG *pMemsysConfig =
kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu));
blackListCount = pHeap->blackListAddresses.count;
base = pHeap->base;
@@ -2764,9 +2766,9 @@ memmgrPmaRegisterRegions_IMPL
_pmaInitFailed:
portMemFree(pBlacklistPages);
if ((status == NV_OK) && (pMemoryManager->fbOverrideStartKb != 0))
if ((status == NV_OK) && (pMemsysConfig->fbOverrideStartKb != 0))
{
NvU64 allocSize = NV_ALIGN_UP(((NvU64)pMemoryManager->fbOverrideStartKb << 10), PMA_GRANULARITY);
NvU64 allocSize = NV_ALIGN_UP(((NvU64)pMemsysConfig->fbOverrideStartKb << 10), PMA_GRANULARITY);
NvU32 numPages = (NvU32)(allocSize >> PMA_PAGE_SHIFT);
PMA_ALLOCATION_OPTIONS allocOptions = {0};
@@ -2785,7 +2787,6 @@ _pmaInitFailed:
portMemFree(pPages);
}
}
if (status != NV_OK)
{
if (memmgrIsPmaInitialized(pMemoryManager))

View File

@@ -350,6 +350,18 @@ vaspaceapiConstruct_IMPL
}
}
//
// Bug 3610538: For unlinked SLI, clients want to restrict internal buffers
// to the internal VA range. Set the internal VA range to match what we use
// for Windows.
//
if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_VA_INTERNAL_LIMIT)
{
vaStartInternal = SPLIT_VAS_SERVER_RM_MANAGED_VA_START;
vaLimitInternal = SPLIT_VAS_SERVER_RM_MANAGED_VA_START +
SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE - 1;
}
// Finally call the factory
status = vmmCreateVaspace(pVmm, pParams->externalClassId,
pNvVASpaceAllocParams->index,
@@ -619,6 +631,10 @@ static NV_STATUS translateAllocFlagsToVASpaceFlags(NvU32 allocFlags, NvU32 *tran
{
flags |= VASPACE_FLAGS_REQUIRE_FIXED_OFFSET;
}
if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_VA_INTERNAL_LIMIT)
{
flags |= VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS;
}
flags |= VASPACE_FLAGS_ENABLE_VMM;
// Validate the flag combinations

View File

@@ -308,13 +308,19 @@ _fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_I
}
else
{
const MEMORY_SYSTEM_STATIC_CONFIG *pMemsysConfig =
kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
NV_ASSERT(0 == NvU64_HI32(pMemoryManager->Ram.fbTotalMemSizeMb << 10));
data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbTotalMemSizeMb << 10), (pMemoryManager->Ram.fbOverrideSizeMb << 10)));
data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbTotalMemSizeMb << 10),
(pMemoryManager->Ram.fbOverrideSizeMb << 10))
- pMemsysConfig->fbOverrideStartKb);
break;
}
}
case NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE:
{
const MEMORY_SYSTEM_STATIC_CONFIG *pMemsysConfig =
kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
if (pMemoryPartitionHeap != NULL)
{
NvU32 heapSizeKb;
@@ -336,11 +342,15 @@ _fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_I
break;
}
NV_ASSERT(0 == NvU64_HI32(pMemoryManager->Ram.fbTotalMemSizeMb << 10));
data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbTotalMemSizeMb << 10), (pMemoryManager->Ram.fbOverrideSizeMb << 10)));
data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbTotalMemSizeMb << 10),
(pMemoryManager->Ram.fbOverrideSizeMb << 10))
- pMemsysConfig->fbOverrideStartKb);
break;
}
case NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE:
{
const MEMORY_SYSTEM_STATIC_CONFIG *pMemsysConfig =
kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
if (pMemoryPartitionHeap != NULL)
{
NvU32 heapSizeKb;
@@ -362,11 +372,15 @@ _fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_I
break;
}
NV_ASSERT(0 == NvU64_HI32(pMemoryManager->Ram.fbUsableMemSize >> 10));
data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbUsableMemSize >> 10), (pMemoryManager->Ram.fbOverrideSizeMb << 10)));
data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbUsableMemSize >> 10 ),
(pMemoryManager->Ram.fbOverrideSizeMb << 10))
- pMemsysConfig->fbOverrideStartKb);
break;
}
case NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE:
{
const MEMORY_SYSTEM_STATIC_CONFIG *pMemsysConfig =
kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
if (bIsPmaEnabled)
{
pmaGetTotalMemory(&pHeap->pmaObject, &bytesTotal);
@@ -381,6 +395,7 @@ _fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_I
NV_ASSERT(NvU64_HI32(size >> 10) == 0);
data = NvU64_LO32(size >> 10);
}
data -= pMemsysConfig->fbOverrideStartKb;
break;
}
case NV2080_CTRL_FB_INFO_INDEX_HEAP_START:
@@ -400,13 +415,23 @@ _fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_I
}
else
{
//
// Returns start of heap in kbytes. This is zero unless
// VGA display memory is reserved.
//
heapGetBase(pHeap, &heapBase);
data = NvU64_LO32(heapBase >> 10);
NV_ASSERT(((NvU64) data << 10ULL) == heapBase);
const MEMORY_SYSTEM_STATIC_CONFIG *pMemsysConfig =
kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
if (pMemsysConfig->fbOverrideStartKb != 0)
{
data = NvU64_LO32(pMemsysConfig->fbOverrideStartKb);
NV_ASSERT(((NvU64) data << 10ULL) == pMemsysConfig->fbOverrideStartKb);
}
else
{
//
// Returns start of heap in kbytes. This is zero unless
// VGA display memory is reserved.
//
heapGetBase(pHeap, &heapBase);
data = NvU64_LO32(heapBase >> 10);
NV_ASSERT(((NvU64) data << 10ULL) == heapBase);
}
}
break;
}
@@ -487,6 +512,8 @@ _fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_I
case NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE:
{
const MEMORY_SYSTEM_STATIC_CONFIG *pMemsysConfig =
kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
if (bIsPmaEnabled)
{
NvU32 heapSizeKb;
@@ -512,6 +539,7 @@ _fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_I
if (data > heapSizeKb)
data = heapSizeKb;
}
data -= pMemsysConfig->fbOverrideStartKb;
break;
}
case NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT:

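Note: each FB size query above now subtracts the fbOverrideStartKb carve-out from the reported KiB value. A hedged sketch of that adjustment (names illustrative; assumes the carve-out never exceeds the reported size, as the real code does):

    #include <stdint.h>

    static uint32_t reported_size_kb(uint64_t total_mb, uint64_t override_mb,
                                     uint64_t carve_out_start_kb)
    {
        uint64_t kb = total_mb << 10;
        if (override_mb != 0 && (override_mb << 10) < kb)
            kb = override_mb << 10;          /* min(actual, override) */
        return (uint32_t)(kb - carve_out_start_kb);
    }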
View File

@@ -665,7 +665,8 @@ gvaspaceConstruct__IMPL
// By default allocations will be routed within RM internal va range.
pGVAS->bRMInternalRestrictedVaRange = NV_TRUE;
status = _gvaspaceReserveRange(pGVAS, pVAS->vasLimit + 1, pGVAS->vaLimitMax);
if (pVAS->vasLimit != pGVAS->vaLimitMax)
status = _gvaspaceReserveRange(pGVAS, pVAS->vasLimit + 1, pGVAS->vaLimitMax);
}
else
{
@@ -1178,7 +1179,6 @@ _gvaspaceGpuStateConstruct
if (flags & VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS)
{
NV_ASSERT_OR_RETURN(vaLimitInternal <= vaLimitMax, NV_ERR_INVALID_ARGUMENT);
NV_ASSERT_OR_RETURN(vaLimitInternal <= vaLimit, NV_ERR_INVALID_ARGUMENT);
NV_ASSERT_OR_RETURN(vaStartInternal <= vaLimitInternal, NV_ERR_INVALID_ARGUMENT);
NV_ASSERT_OR_RETURN(vaStartInternal >= vaStartMin, NV_ERR_INVALID_ARGUMENT);

View File

@@ -310,7 +310,8 @@ continue_alloc_object:
pPteArray = memdescGetPteArray(pMemDesc, AT_GPU);
if (!portSafeMulU32(sizeof(NvU64), pAllocParams->pageCount, &result))
if ((pAllocParams->pageCount > pMemDesc->PageCount) ||
!portSafeMulU32(sizeof(NvU64), pAllocParams->pageCount, &result))
{
memdescDestroy(pMemDesc);
return NV_ERR_INVALID_ARGUMENT;

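Note: the hunk above adds a bound check on the caller-supplied page count alongside the existing overflow-safe multiply. A hypothetical analogue of portSafeMulU32, plus usage mirroring the hunk:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns false when a * b overflows uint32_t. */
    static bool safe_mul_u32(uint32_t a, uint32_t b, uint32_t *out)
    {
        return !__builtin_mul_overflow(a, b, out);
    }

    /* The caller's page count must not exceed the descriptor's real page
     * count, and the PTE copy size must be computable without overflow
     * (NV_ERR_INVALID_ARGUMENT analogue on -1). */
    static int validate_copy(uint32_t caller_pages, uint64_t desc_pages)
    {
        uint32_t bytes;
        if (caller_pages > desc_pages ||
            !safe_mul_u32(sizeof(uint64_t), caller_pages, &bytes))
            return -1;
        return 0;
    }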
View File

@@ -1351,7 +1351,12 @@ static NV_STATUS getNbsiObjFromCache
// return the full table size
*pTotalObjSize = tempGlobSize;
rtnObjSizeWithOffset = *pTotalObjSize - rtnObjOffset;
if (!portSafeSubU32(*pTotalObjSize, rtnObjOffset, &rtnObjSizeWithOffset))
{
// Failed argument validation.
status = NV_ERR_INVALID_OFFSET;
}
else
{
if (*pRtnObjSize >= rtnObjSizeWithOffset)
{
@@ -2884,7 +2889,7 @@ NV_STATUS getNbsiObjByType
pRtnObjSize,
pTotalObjSize,
pRtnGlobStatus);
if (status == NV_OK)
if (status != NV_ERR_GENERIC)
{
// It's in the cache, it may or may not fit.
return status;
@@ -3054,7 +3059,12 @@ NV_STATUS getNbsiObjByType
// return the full table size
*pTotalObjSize = testObjSize;
rtnObjSizeWithOffset = *pTotalObjSize - wantedRtnObjOffset;
if (!portSafeSubU32(*pTotalObjSize, wantedRtnObjOffset, &rtnObjSizeWithOffset))
{
// Failed argument validation.
status = NV_ERR_INVALID_OFFSET;
}
else
{
if (*pRtnObjSize >= rtnObjSizeWithOffset)
{

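Note: both NBSI hunks replace a raw subtraction with an underflow-checked one, rejecting offsets that lie past the object. A hypothetical analogue of portSafeSubU32 with usage mirroring the hunks:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns false if a - b would underflow, i.e. b > a. */
    static bool safe_sub_u32(uint32_t a, uint32_t b, uint32_t *out)
    {
        if (b > a)
            return false;
        *out = a - b;
        return true;
    }

    /* Derive the remaining object size from a caller-chosen offset only
     * when the offset is in range (NV_ERR_INVALID_OFFSET analogue on -1). */
    static int remaining_size(uint32_t total_obj_size, uint32_t rtn_obj_offset,
                              uint32_t *remaining)
    {
        if (!safe_sub_u32(total_obj_size, rtn_obj_offset, remaining))
            return -1;
        return 0;
    }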
View File

@@ -62,10 +62,15 @@ static void
_logAssertCount(void)
{
static NvU32 assertCount = 0;
NV00DE_SHARED_DATA *pSharedData = gpushareddataWriteStart(g_pGpu);
NV00DE_SHARED_DATA *pSharedData;
if (g_pGpu == NULL)
{
return;
}
pSharedData = gpushareddataWriteStart(g_pGpu);
pSharedData->gspAssertCount = ++assertCount;
gpushareddataWriteFinish(g_pGpu);
}