525.85.05

This commit is contained in:
Andy Ritger
2023-01-19 10:41:59 -08:00
parent dac2350c7f
commit 811073c51e
90 changed files with 1937 additions and 668 deletions

View File

@@ -1,65 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef EFI_CONSOLE_H
#define EFI_CONSOLE_H
#include "gpu/disp/kern_disp_max.h"
struct OBJGPU;
/*
 * Snapshot of the EFI/UEFI console display state, captured so the RM can
 * restore the firmware console configuration (e.g. across driver teardown
 * or GPU reset). Populated by RmSaveEFIDisplayState() and consumed by
 * RmRestoreEFIDisplayState() below.
 */
typedef struct
{
// NV_TRUE once a display state snapshot has been saved into this struct.
NvBool isDispStateSave;
// Active display ID per head; array bound comes from kern_disp_max.h.
NvU32 activeDisplayId[OBJ_MAX_HEADS];
// Per-DFP (digital flat panel) link state for each active output.
struct
{
NvU32 displayId;
// SOR crossbar routing for this DFP.
struct {
NvU32 index;
NvU32 subLinkMask;
} sorXBar;
// DisplayPort link configuration. NOTE(review): linkBw/laneCount
// presumably mirror the DPCD LINK_BW_SET / LANE_COUNT_SET values and
// linkCtl the link-control register — confirm against the save path.
struct {
NvU32 linkBw;
NvU32 laneCount;
NvU32 linkCtl;
} displayPort;
} activeDfpState[OBJ_MAX_DFPS];
// Number of valid entries in activeDfpState[].
NvU32 numDfps;
// Saved display core channel state.
struct {
NvU32 coreChannelClass;
// Size of the buffer at pCache. NOTE(review): units (bytes vs NvU32
// words) are not visible here — confirm at the allocation site.
NvU32 cacheSize;
// Heap-allocated cache of core channel methods; owner frees it in
// RmFreePrivateState() via portMemFree().
NvU32 *pCache;
} display;
} nv_efi_t;
/* Capture the current EFI console display state for pGpu. */
void RmSaveEFIDisplayState (OBJGPU *pGpu);
/* Restore the previously saved EFI console display state for pGpu. */
void RmRestoreEFIDisplayState (OBJGPU *pGpu);
#endif // EFI_CONSOLE_H

View File

@@ -30,8 +30,6 @@
#include <gpu/disp/kern_disp_max.h>
#include <gpu/disp/kern_disp_type.h>
#include <efi-console.h>
#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d))
#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d))
#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d))
@@ -331,8 +329,6 @@ typedef struct
nv_vga_t vga;
nv_efi_t efi;
NvU32 flags;
NvU32 status;

View File

@@ -88,12 +88,12 @@ struct OS_RM_CAPS
nv_cap_t **caps;
};
NvBool osIsRaisedIRQL()
NvBool osIsRaisedIRQL(void)
{
return (!os_semaphore_may_sleep());
}
NvBool osIsISR()
NvBool osIsISR(void)
{
return os_is_isr();
}
@@ -1783,7 +1783,7 @@ NV_STATUS osPackageRegistry(
return RmPackageRegistry(nv, pRegTable, pSize);
}
NvU32 osGetCpuCount()
NvU32 osGetCpuCount(void)
{
return os_get_cpu_count(); // Total number of logical CPUs.
}
@@ -1834,7 +1834,7 @@ void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *p
return;
}
void osFlushLog()
void osFlushLog(void)
{
// Not implemented
}
@@ -2671,7 +2671,7 @@ NV_STATUS osGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLocksRelease)
return NV_SEMA_RELEASE_FAILED;
}
void osSyncWithRmDestroy()
void osSyncWithRmDestroy(void)
{
}
@@ -3511,7 +3511,7 @@ osGetGpuRailVoltageInfo
* @return pointer to the security token.
*/
PSECURITY_TOKEN
osGetSecurityToken()
osGetSecurityToken(void)
{
NV_STATUS rmStatus;
TOKEN_USER *pTokenUser;
@@ -4177,7 +4177,7 @@ osWaitForIbmnpuRsync
}
NvU32
osGetPageSize()
osGetPageSize(void)
{
return os_page_size;
}

View File

@@ -1213,8 +1213,6 @@ void RmClearPrivateState(
void *pVbiosCopy = NULL;
void *pRegistryCopy = NULL;
NvU32 vbiosSize;
NvU32 *pEfiDisplayCache;
NvU32 efiDisplayCacheSize;
nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
nv_dynamic_power_t dynamicPowerCopy;
NvU32 x = 0;
@@ -1234,8 +1232,6 @@ void RmClearPrivateState(
pVbiosCopy = nvp->pVbiosCopy;
vbiosSize = nvp->vbiosSize;
pRegistryCopy = nvp->pRegistry;
pEfiDisplayCache = nvp->efi.display.pCache;
efiDisplayCacheSize = nvp->efi.display.cacheSize;
dynamicPowerCopy = nvp->dynamic_power;
pmc_boot_0 = nvp->pmc_boot_0;
pmc_boot_42 = nvp->pmc_boot_42;
@@ -1251,8 +1247,6 @@ void RmClearPrivateState(
nvp->pVbiosCopy = pVbiosCopy;
nvp->vbiosSize = vbiosSize;
nvp->pRegistry = pRegistryCopy;
nvp->efi.display.pCache = pEfiDisplayCache;
nvp->efi.display.cacheSize = efiDisplayCacheSize;
nvp->dynamic_power = dynamicPowerCopy;
nvp->pmc_boot_0 = pmc_boot_0;
nvp->pmc_boot_42 = pmc_boot_42;
@@ -1280,7 +1274,6 @@ void RmFreePrivateState(
if (nvp != NULL)
{
portMemFree(nvp->pVbiosCopy);
portMemFree(nvp->efi.display.pCache);
os_free_mem(nvp);
}

View File

@@ -192,21 +192,6 @@ static NvBool __nvoc_thunk_RmResource_deviceAccessCallback(struct Device *pResou
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[] =
{
{ /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdBifReset_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*flags=*/ 0x204u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800102u,
/*paramSize=*/ sizeof(NV0080_CTRL_BIF_RESET_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdBifReset"
#endif
},
{ /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -221,7 +206,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBifGetDmaBaseSysmemAddr"
#endif
},
{ /* [2] */
{ /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -236,7 +221,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBifAspmFeatureSupported"
#endif
},
{ /* [3] */
{ /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -251,7 +236,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBifAspmCyaUpdate"
#endif
},
{ /* [4] */
{ /* [3] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -266,7 +251,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetClasslist"
#endif
},
{ /* [5] */
{ /* [4] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -281,7 +266,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetNumSubdevices"
#endif
},
{ /* [6] */
{ /* [5] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -296,7 +281,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuModifyGpuSwStatePersistence"
#endif
},
{ /* [7] */
{ /* [6] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -311,7 +296,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuQueryGpuSwStatePersistence"
#endif
},
{ /* [8] */
{ /* [7] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -326,7 +311,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetVirtualizationMode"
#endif
},
{ /* [9] */
{ /* [8] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -341,7 +326,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetSparseTextureComputeMode"
#endif
},
{ /* [10] */
{ /* [9] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -356,7 +341,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuSetSparseTextureComputeMode"
#endif
},
{ /* [11] */
{ /* [10] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -371,7 +356,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetVgxCaps"
#endif
},
{ /* [12] */
{ /* [11] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -386,7 +371,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetSriovCaps"
#endif
},
{ /* [13] */
{ /* [12] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -401,7 +386,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetClasslistV2"
#endif
},
{ /* [14] */
{ /* [13] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -416,7 +401,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetFindSubDeviceHandle"
#endif
},
{ /* [15] */
{ /* [14] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -431,7 +416,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetBrandCaps"
#endif
},
{ /* [16] */
{ /* [15] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -446,7 +431,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuSetVgpuVfBar1Size"
#endif
},
{ /* [17] */
{ /* [16] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -461,7 +446,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetCaps"
#endif
},
{ /* [18] */
{ /* [17] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -476,7 +461,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetInfo"
#endif
},
{ /* [19] */
{ /* [18] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -491,7 +476,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetTpcPartitionMode"
#endif
},
{ /* [20] */
{ /* [19] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -506,7 +491,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrSetTpcPartitionMode"
#endif
},
{ /* [21] */
{ /* [20] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -521,7 +506,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetCapsV2"
#endif
},
{ /* [22] */
{ /* [21] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -536,7 +521,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetInfoV2"
#endif
},
{ /* [23] */
{ /* [22] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -551,7 +536,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFbGetCaps"
#endif
},
{ /* [24] */
{ /* [23] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -566,7 +551,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFbGetCompbitStoreInfo"
#endif
},
{ /* [25] */
{ /* [24] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -581,7 +566,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFbGetCapsV2"
#endif
},
{ /* [26] */
{ /* [25] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -596,7 +581,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCaps"
#endif
},
{ /* [27] */
{ /* [26] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -611,7 +596,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCapsV2"
#endif
},
{ /* [28] */
{ /* [27] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -626,7 +611,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCaps"
#endif
},
{ /* [29] */
{ /* [28] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -641,7 +626,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartSelectedChannels"
#endif
},
{ /* [30] */
{ /* [29] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -656,7 +641,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetEngineContextProperties"
#endif
},
{ /* [31] */
{ /* [30] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -671,7 +656,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetChannelList"
#endif
},
{ /* [32] */
{ /* [31] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -686,7 +671,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetLatencyBufferSize"
#endif
},
{ /* [33] */
{ /* [32] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -701,7 +686,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoSetChannelProperties"
#endif
},
{ /* [34] */
{ /* [33] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -716,7 +701,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStopRunlist"
#endif
},
{ /* [35] */
{ /* [34] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -731,7 +716,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartRunlist"
#endif
},
{ /* [36] */
{ /* [35] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -746,7 +731,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCapsV2"
#endif
},
{ /* [37] */
{ /* [36] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -761,7 +746,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoIdleChannels"
#endif
},
{ /* [38] */
{ /* [37] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -776,7 +761,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPteInfo"
#endif
},
{ /* [39] */
{ /* [38] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -791,7 +776,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaFlush"
#endif
},
{ /* [40] */
{ /* [39] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -806,7 +791,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaAdvSchedGetVaCaps"
#endif
},
{ /* [41] */
{ /* [40] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -821,7 +806,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPdeInfo"
#endif
},
{ /* [42] */
{ /* [41] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -836,7 +821,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPteInfo"
#endif
},
{ /* [43] */
{ /* [42] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -851,7 +836,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaInvalidateTLB"
#endif
},
{ /* [44] */
{ /* [43] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -866,7 +851,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetCaps"
#endif
},
{ /* [45] */
{ /* [44] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -881,7 +866,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetVASpaceSize"
#endif
},
{ /* [46] */
{ /* [45] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -896,7 +881,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUpdatePde2"
#endif
},
{ /* [47] */
{ /* [46] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -911,7 +896,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaEnablePrivilegedRange"
#endif
},
{ /* [48] */
{ /* [47] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c0000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -926,7 +911,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetDefaultVASpace"
#endif
},
{ /* [49] */
{ /* [48] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -941,7 +926,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPageDirectory"
#endif
},
{ /* [50] */
{ /* [49] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -956,7 +941,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUnsetPageDirectory"
#endif
},
{ /* [51] */
{ /* [50] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -971,7 +956,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdMsencGetCaps"
#endif
},
{ /* [52] */
{ /* [51] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -986,7 +971,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBspGetCapsV2"
#endif
},
{ /* [53] */
{ /* [52] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1001,7 +986,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
#endif
},
{ /* [54] */
{ /* [53] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1016,7 +1001,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
#endif
},
{ /* [55] */
{ /* [54] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1031,7 +1016,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdNvjpgGetCapsV2"
#endif
},
{ /* [56] */
{ /* [55] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1046,7 +1031,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfCudaLimitDisable"
#endif
},
{ /* [57] */
{ /* [56] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1061,7 +1046,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount"
#endif
},
{ /* [58] */
{ /* [57] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1081,7 +1066,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
const struct NVOC_EXPORT_INFO __nvoc_export_info_Device =
{
/*numEntries=*/ 59,
/*numEntries=*/ 58,
/*pExportEntries=*/ __nvoc_exported_method_def_Device
};
@@ -1122,10 +1107,6 @@ static void __nvoc_init_funcTable_Device_1(Device *pThis) {
pThis->__deviceInternalControlForward__ = &deviceInternalControlForward_IMPL;
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
pThis->__deviceCtrlCmdBifReset__ = &deviceCtrlCmdBifReset_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
pThis->__deviceCtrlCmdBifGetDmaBaseSysmemAddr__ = &deviceCtrlCmdBifGetDmaBaseSysmemAddr_IMPL;
#endif

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -83,7 +83,6 @@ struct Device {
struct Device *__nvoc_pbase_Device;
NV_STATUS (*__deviceControl__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__deviceInternalControlForward__)(struct Device *, NvU32, void *, NvU32);
NV_STATUS (*__deviceCtrlCmdBifReset__)(struct Device *, NV0080_CTRL_BIF_RESET_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifGetDmaBaseSysmemAddr__)(struct Device *, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifAspmFeatureSupported__)(struct Device *, NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifAspmCyaUpdate__)(struct Device *, NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS *);
@@ -213,7 +212,6 @@ NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT
#define deviceControl(pResource, pCallContext, pParams) deviceControl_DISPATCH(pResource, pCallContext, pParams)
#define deviceInternalControlForward(pDevice, command, pParams, size) deviceInternalControlForward_DISPATCH(pDevice, command, pParams, size)
#define deviceCtrlCmdBifReset(pDevice, pBifResetParams) deviceCtrlCmdBifReset_DISPATCH(pDevice, pBifResetParams)
#define deviceCtrlCmdBifGetDmaBaseSysmemAddr(pDevice, pBifDmaBaseSysmemParams) deviceCtrlCmdBifGetDmaBaseSysmemAddr_DISPATCH(pDevice, pBifDmaBaseSysmemParams)
#define deviceCtrlCmdBifAspmFeatureSupported(pDevice, pBifAspmParams) deviceCtrlCmdBifAspmFeatureSupported_DISPATCH(pDevice, pBifAspmParams)
#define deviceCtrlCmdBifAspmCyaUpdate(pDevice, pBifAspmCyaParams) deviceCtrlCmdBifAspmCyaUpdate_DISPATCH(pDevice, pBifAspmCyaParams)
@@ -305,12 +303,6 @@ static inline NV_STATUS deviceInternalControlForward_DISPATCH(struct Device *pDe
return pDevice->__deviceInternalControlForward__(pDevice, command, pParams, size);
}
NV_STATUS deviceCtrlCmdBifReset_IMPL(struct Device *pDevice, NV0080_CTRL_BIF_RESET_PARAMS *pBifResetParams);
static inline NV_STATUS deviceCtrlCmdBifReset_DISPATCH(struct Device *pDevice, NV0080_CTRL_BIF_RESET_PARAMS *pBifResetParams) {
return pDevice->__deviceCtrlCmdBifReset__(pDevice, pBifResetParams);
}
NV_STATUS deviceCtrlCmdBifGetDmaBaseSysmemAddr_IMPL(struct Device *pDevice, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *pBifDmaBaseSysmemParams);
static inline NV_STATUS deviceCtrlCmdBifGetDmaBaseSysmemAddr_DISPATCH(struct Device *pDevice, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *pBifDmaBaseSysmemParams) {

View File

@@ -533,11 +533,11 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
// Hal function -- gpuClearFbhubPoisonIntrForBug2924523
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{
pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_GA100_KERNEL;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_56cd7a;
}

View File

@@ -78,6 +78,10 @@ static NV_STATUS __nvoc_thunk_KernelBus_engstateStateInitLocked(OBJGPU *pGpu, st
return kbusStateInitLocked(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_KernelBus_engstateStatePreLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) {
return kbusStatePreLoad(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_KernelBus_engstateStateLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) {
return kbusStateLoad(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
@@ -102,10 +106,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState(POBJGPU pGpu
return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePreLoad(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePostUnload(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
@@ -278,6 +278,12 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusStateInitLocked__ = &kbusStateInitLocked_IMPL;
// Hal function -- kbusStatePreLoad
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
pThis->__kbusStatePreLoad__ = &kbusStatePreLoad_56cd7a;
}
// Hal function -- kbusStateLoad
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
@@ -300,12 +306,9 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
}
// Hal function -- kbusStateUnload
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{
pThis->__kbusStateUnload__ = &kbusStateUnload_GM107;
}
pThis->__kbusStateUnload__ = &kbusStateUnload_GM107;
}
// Hal function -- kbusStateDestroy
@@ -550,6 +553,21 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusAllocateFlaVaspace__ = &kbusAllocateFlaVaspace_395e98;
}
// Hal function -- kbusGetFlaRange
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */
{
pThis->__kbusGetFlaRange__ = &kbusGetFlaRange_GA100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kbusGetFlaRange__ = &kbusGetFlaRange_GH100;
}
// default
else
{
pThis->__kbusGetFlaRange__ = &kbusGetFlaRange_395e98;
}
// Hal function -- kbusAllocateLegacyFlaVaspace
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{
@@ -816,6 +834,8 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelBus_engstateStateInitLocked;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreLoad__ = &__nvoc_thunk_KernelBus_engstateStatePreLoad;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelBus_engstateStateLoad;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_KernelBus_engstateStatePostLoad;
@@ -828,8 +848,6 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState;
pThis->__kbusStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePreLoad;
pThis->__kbusStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePostUnload;
pThis->__kbusStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked;

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -297,6 +297,7 @@ struct KernelBus {
NV_STATUS (*__kbusConstructEngine__)(OBJGPU *, struct KernelBus *, ENGDESCRIPTOR);
NV_STATUS (*__kbusStatePreInitLocked__)(OBJGPU *, struct KernelBus *);
NV_STATUS (*__kbusStateInitLocked__)(OBJGPU *, struct KernelBus *);
NV_STATUS (*__kbusStatePreLoad__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStateLoad__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePostLoad__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePreUnload__)(OBJGPU *, struct KernelBus *, NvU32);
@@ -323,6 +324,7 @@ struct KernelBus {
NV_STATUS (*__kbusCheckFlaSupportedAndInit__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusDetermineFlaRangeAndAllocate__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusAllocateFlaVaspace__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusGetFlaRange__)(OBJGPU *, struct KernelBus *, NvU64 *, NvU64 *, NvBool);
NV_STATUS (*__kbusAllocateLegacyFlaVaspace__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusAllocateHostManagedFlaVaspace__)(OBJGPU *, struct KernelBus *, NvHandle, NvHandle, NvHandle, NvHandle, NvU64, NvU64, NvU32);
void (*__kbusDestroyFla__)(OBJGPU *, struct KernelBus *);
@@ -347,7 +349,6 @@ struct KernelBus {
void (*__kbusUnmapCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, PMEMORY_DESCRIPTOR);
void (*__kbusTeardownCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, NvBool);
NV_STATUS (*__kbusReconcileTunableState__)(POBJGPU, struct KernelBus *, void *);
NV_STATUS (*__kbusStatePreLoad__)(POBJGPU, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePostUnload__)(POBJGPU, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStateInitUnlocked__)(POBJGPU, struct KernelBus *);
void (*__kbusInitMissing__)(POBJGPU, struct KernelBus *);
@@ -451,6 +452,8 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusStatePreInitLocked(pGpu, pKernelBus) kbusStatePreInitLocked_DISPATCH(pGpu, pKernelBus)
#define kbusStatePreInitLocked_HAL(pGpu, pKernelBus) kbusStatePreInitLocked_DISPATCH(pGpu, pKernelBus)
#define kbusStateInitLocked(pGpu, pKernelBus) kbusStateInitLocked_DISPATCH(pGpu, pKernelBus)
#define kbusStatePreLoad(pGpu, pKernelBus, arg0) kbusStatePreLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStatePreLoad_HAL(pGpu, pKernelBus, arg0) kbusStatePreLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStateLoad(pGpu, pKernelBus, arg0) kbusStateLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStateLoad_HAL(pGpu, pKernelBus, arg0) kbusStateLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStatePostLoad(pGpu, pKernelBus, arg0) kbusStatePostLoad_DISPATCH(pGpu, pKernelBus, arg0)
@@ -505,6 +508,8 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusDetermineFlaRangeAndAllocate_HAL(pGpu, pKernelBus, base, size) kbusDetermineFlaRangeAndAllocate_DISPATCH(pGpu, pKernelBus, base, size)
#define kbusAllocateFlaVaspace(pGpu, pKernelBus, arg0, arg1) kbusAllocateFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusAllocateFlaVaspace_HAL(pGpu, pKernelBus, arg0, arg1) kbusAllocateFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusGetFlaRange(pGpu, pKernelBus, arg0, arg1, arg2) kbusGetFlaRange_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2)
#define kbusGetFlaRange_HAL(pGpu, pKernelBus, arg0, arg1, arg2) kbusGetFlaRange_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2)
#define kbusAllocateLegacyFlaVaspace(pGpu, pKernelBus, arg0, arg1) kbusAllocateLegacyFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusAllocateLegacyFlaVaspace_HAL(pGpu, pKernelBus, arg0, arg1) kbusAllocateLegacyFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusAllocateHostManagedFlaVaspace(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6) kbusAllocateHostManagedFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6)
@@ -552,7 +557,6 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusTeardownCoherentCpuMapping(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusTeardownCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusReconcileTunableState(pGpu, pEngstate, pTunableState) kbusReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kbusStatePreLoad(pGpu, pEngstate, arg0) kbusStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kbusStatePostUnload(pGpu, pEngstate, arg0) kbusStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kbusStateInitUnlocked(pGpu, pEngstate) kbusStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kbusInitMissing(pGpu, pEngstate) kbusInitMissing_DISPATCH(pGpu, pEngstate)
@@ -1601,6 +1605,14 @@ static inline NV_STATUS kbusStateInitLocked_DISPATCH(OBJGPU *pGpu, struct Kernel
return pKernelBus->__kbusStateInitLocked__(pGpu, pKernelBus);
}
static inline NV_STATUS kbusStatePreLoad_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
return NV_OK;
}
static inline NV_STATUS kbusStatePreLoad_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
return pKernelBus->__kbusStatePreLoad__(pGpu, pKernelBus, arg0);
}
NV_STATUS kbusStateLoad_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0);
static inline NV_STATUS kbusStateLoad_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
@@ -1837,6 +1849,18 @@ static inline NV_STATUS kbusAllocateFlaVaspace_DISPATCH(OBJGPU *pGpu, struct Ker
return pKernelBus->__kbusAllocateFlaVaspace__(pGpu, pKernelBus, arg0, arg1);
}
NV_STATUS kbusGetFlaRange_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2);
NV_STATUS kbusGetFlaRange_GH100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2);
static inline NV_STATUS kbusGetFlaRange_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS kbusGetFlaRange_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2) {
return pKernelBus->__kbusGetFlaRange__(pGpu, pKernelBus, arg0, arg1, arg2);
}
NV_STATUS kbusAllocateLegacyFlaVaspace_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1);
static inline NV_STATUS kbusAllocateLegacyFlaVaspace_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1) {
@@ -2069,10 +2093,6 @@ static inline NV_STATUS kbusReconcileTunableState_DISPATCH(POBJGPU pGpu, struct
return pEngstate->__kbusReconcileTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS kbusStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return pEngstate->__kbusStatePreLoad__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS kbusStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return pEngstate->__kbusStatePostUnload__(pGpu, pEngstate, arg0);
}

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a

View File

@@ -149,7 +149,7 @@ struct KernelHostVgpuDeviceApi {
NV_STATUS (*__kernelhostvgpudeviceapiMap__)(struct KernelHostVgpuDeviceApi *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NvBool (*__kernelhostvgpudeviceapiAccessCallback__)(struct KernelHostVgpuDeviceApi *, struct RsClient *, void *, RsAccessRight);
struct KernelHostVgpuDeviceShr *pShared;
NvU32 notifyActions[6];
NvU32 notifyActions[7];
};
#ifndef __NVOC_CLASS_KernelHostVgpuDeviceApi_TYPEDEF__

View File

@@ -884,7 +884,11 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2236, 0x1482, 0x10de, "NVIDIA A10" },
{ 0x2237, 0x152f, 0x10de, "NVIDIA A10G" },
{ 0x2238, 0x1677, 0x10de, "NVIDIA A10M" },
{ 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2336, 0x16c2, 0x10de, "NVIDIA H100 80GB HBM2e" },
{ 0x2336, 0x16c7, 0x10de, "NVIDIA H100 80GB HBM2e" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
{ 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@@ -948,6 +952,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25A7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25A9, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25AA, 0x0000, 0x0000, "NVIDIA GeForce MX570 A" },
{ 0x25AD, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25B6, 0x14a9, 0x10de, "NVIDIA A16" },
{ 0x25B6, 0x157e, 0x10de, "NVIDIA A2" },
{ 0x25B8, 0x0000, 0x0000, "NVIDIA RTX A2000 Laptop GPU" },
@@ -957,6 +962,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
{ 0x25E2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
{ 0x25E5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
{ 0x25ED, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25F9, 0x0000, 0x0000, "NVIDIA RTX A1000 Embedded GPU" },
{ 0x25FA, 0x0000, 0x0000, "NVIDIA RTX A2000 Embedded GPU" },
{ 0x25FB, 0x0000, 0x0000, "NVIDIA RTX A500 Embedded GPU" },
@@ -1700,20 +1706,20 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" },
{ 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" },
{ 0x2238, 0x16e6, 0x10DE, "NVIDIA A10M-1" },
{ 0x2322, 0x17e2, 0x10DE, "NVIDIA H800-1-10CME" },
{ 0x2322, 0x17e3, 0x10DE, "NVIDIA H800-1-10C" },
{ 0x2322, 0x17e4, 0x10DE, "NVIDIA H800-2-20C" },
{ 0x2322, 0x17e5, 0x10DE, "NVIDIA H800-3-40C" },
{ 0x2322, 0x17e6, 0x10DE, "NVIDIA H800-4-40C" },
{ 0x2322, 0x17e7, 0x10DE, "NVIDIA H800-7-80C" },
{ 0x2322, 0x17e8, 0x10DE, "NVIDIA H800-4C" },
{ 0x2322, 0x17e9, 0x10DE, "NVIDIA H800-5C" },
{ 0x2322, 0x17ea, 0x10DE, "NVIDIA H800-8C" },
{ 0x2322, 0x17eb, 0x10DE, "NVIDIA H800-10C" },
{ 0x2322, 0x17ec, 0x10DE, "NVIDIA H800-16C" },
{ 0x2322, 0x17ed, 0x10DE, "NVIDIA H800-20C" },
{ 0x2322, 0x17ee, 0x10DE, "NVIDIA H800-40C" },
{ 0x2322, 0x17ef, 0x10DE, "NVIDIA H800-80C" },
{ 0x2322, 0x17e2, 0x10DE, "NVIDIA GPU-2322-17E2" },
{ 0x2322, 0x17e3, 0x10DE, "NVIDIA GPU-2322-17E3" },
{ 0x2322, 0x17e4, 0x10DE, "NVIDIA GPU-2322-17E4" },
{ 0x2322, 0x17e5, 0x10DE, "NVIDIA GPU-2322-17E5" },
{ 0x2322, 0x17e6, 0x10DE, "NVIDIA GPU-2322-17E6" },
{ 0x2322, 0x17e7, 0x10DE, "NVIDIA GPU-2322-17E7" },
{ 0x2322, 0x17e8, 0x10DE, "NVIDIA GPU-2322-17E8" },
{ 0x2322, 0x17e9, 0x10DE, "NVIDIA GPU-2322-17E9" },
{ 0x2322, 0x17ea, 0x10DE, "NVIDIA GPU-2322-17EA" },
{ 0x2322, 0x17eb, 0x10DE, "NVIDIA GPU-2322-17EB" },
{ 0x2322, 0x17ec, 0x10DE, "NVIDIA GPU-2322-17EC" },
{ 0x2322, 0x17ed, 0x10DE, "NVIDIA GPU-2322-17ED" },
{ 0x2322, 0x17ee, 0x10DE, "NVIDIA GPU-2322-17EE" },
{ 0x2322, 0x17ef, 0x10DE, "NVIDIA GPU-2322-17EF" },
{ 0x2331, 0x16d3, 0x10DE, "NVIDIA H100-1-10C" },
{ 0x2331, 0x16d4, 0x10DE, "NVIDIA H100-2-20C" },
{ 0x2331, 0x16d5, 0x10DE, "NVIDIA H100-3-40C" },
@@ -1761,45 +1767,45 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25B6, 0x1655, 0x10DE, "NVIDIA A2-4C" },
{ 0x25B6, 0x1656, 0x10DE, "NVIDIA A2-8C" },
{ 0x25B6, 0x1657, 0x10DE, "NVIDIA A2-16C" },
{ 0x26B1, 0x1708, 0x10DE, "NVIDIA RTX 6000 Ada-1B" },
{ 0x26B1, 0x1709, 0x10DE, "NVIDIA RTX 6000 Ada-2B" },
{ 0x26B1, 0x170a, 0x10DE, "NVIDIA RTX 6000 Ada-1Q" },
{ 0x26B1, 0x170b, 0x10DE, "NVIDIA RTX 6000 Ada-2Q" },
{ 0x26B1, 0x170c, 0x10DE, "NVIDIA RTX 6000 Ada-3Q" },
{ 0x26B1, 0x170d, 0x10DE, "NVIDIA RTX 6000 Ada-4Q" },
{ 0x26B1, 0x170e, 0x10DE, "NVIDIA RTX 6000 Ada-6Q" },
{ 0x26B1, 0x170f, 0x10DE, "NVIDIA RTX 6000 Ada-8Q" },
{ 0x26B1, 0x1710, 0x10DE, "NVIDIA RTX 6000 Ada-12Q" },
{ 0x26B1, 0x1711, 0x10DE, "NVIDIA RTX 6000 Ada-16Q" },
{ 0x26B1, 0x1712, 0x10DE, "NVIDIA RTX 6000 Ada-24Q" },
{ 0x26B1, 0x1713, 0x10DE, "NVIDIA RTX 6000 Ada-48Q" },
{ 0x26B1, 0x1714, 0x10DE, "NVIDIA RTX 6000 Ada-1A" },
{ 0x26B1, 0x1715, 0x10DE, "NVIDIA RTX 6000 Ada-2A" },
{ 0x26B1, 0x1716, 0x10DE, "NVIDIA RTX 6000 Ada-3A" },
{ 0x26B1, 0x1717, 0x10DE, "NVIDIA RTX 6000 Ada-4A" },
{ 0x26B1, 0x1718, 0x10DE, "NVIDIA RTX 6000 Ada-6A" },
{ 0x26B1, 0x1719, 0x10DE, "NVIDIA RTX 6000 Ada-8A" },
{ 0x26B1, 0x171a, 0x10DE, "NVIDIA RTX 6000 Ada-12A" },
{ 0x26B1, 0x171b, 0x10DE, "NVIDIA RTX 6000 Ada-16A" },
{ 0x26B1, 0x171c, 0x10DE, "NVIDIA RTX 6000 Ada-24A" },
{ 0x26B1, 0x171d, 0x10DE, "NVIDIA RTX 6000 Ada-48A" },
{ 0x26B1, 0x171e, 0x10DE, "NVIDIA RTX 6000 Ada-1" },
{ 0x26B1, 0x171f, 0x10DE, "NVIDIA RTX 6000 Ada-2" },
{ 0x26B1, 0x1720, 0x10DE, "NVIDIA RTX 6000 Ada-3" },
{ 0x26B1, 0x1721, 0x10DE, "NVIDIA RTX 6000 Ada-4" },
{ 0x26B1, 0x1722, 0x10DE, "NVIDIA RTX 6000 Ada-6" },
{ 0x26B1, 0x1723, 0x10DE, "NVIDIA RTX 6000 Ada-8" },
{ 0x26B1, 0x1724, 0x10DE, "NVIDIA RTX 6000 Ada-12" },
{ 0x26B1, 0x1725, 0x10DE, "NVIDIA RTX 6000 Ada-16" },
{ 0x26B1, 0x1726, 0x10DE, "NVIDIA RTX 6000 Ada-24" },
{ 0x26B1, 0x1727, 0x10DE, "NVIDIA RTX 6000 Ada-48" },
{ 0x26B1, 0x1728, 0x10DE, "NVIDIA RTX 6000 Ada-4C" },
{ 0x26B1, 0x1729, 0x10DE, "NVIDIA RTX 6000 Ada-6C" },
{ 0x26B1, 0x172a, 0x10DE, "NVIDIA RTX 6000 Ada-8C" },
{ 0x26B1, 0x172b, 0x10DE, "NVIDIA RTX 6000 Ada-12C" },
{ 0x26B1, 0x172c, 0x10DE, "NVIDIA RTX 6000 Ada-16C" },
{ 0x26B1, 0x172d, 0x10DE, "NVIDIA RTX 6000 Ada-24C" },
{ 0x26B1, 0x172e, 0x10DE, "NVIDIA RTX 6000 Ada-48C" },
{ 0x26B1, 0x1708, 0x10DE, "NVIDIA RTX6000-Ada-1B" },
{ 0x26B1, 0x1709, 0x10DE, "NVIDIA RTX6000-Ada-2B" },
{ 0x26B1, 0x170a, 0x10DE, "NVIDIA RTX6000-Ada-1Q" },
{ 0x26B1, 0x170b, 0x10DE, "NVIDIA RTX6000-Ada-2Q" },
{ 0x26B1, 0x170c, 0x10DE, "NVIDIA RTX6000-Ada-3Q" },
{ 0x26B1, 0x170d, 0x10DE, "NVIDIA RTX6000-Ada-4Q" },
{ 0x26B1, 0x170e, 0x10DE, "NVIDIA RTX6000-Ada-6Q" },
{ 0x26B1, 0x170f, 0x10DE, "NVIDIA RTX6000-Ada-8Q" },
{ 0x26B1, 0x1710, 0x10DE, "NVIDIA RTX6000-Ada-12Q" },
{ 0x26B1, 0x1711, 0x10DE, "NVIDIA RTX6000-Ada-16Q" },
{ 0x26B1, 0x1712, 0x10DE, "NVIDIA RTX6000-Ada-24Q" },
{ 0x26B1, 0x1713, 0x10DE, "NVIDIA RTX6000-Ada-48Q" },
{ 0x26B1, 0x1714, 0x10DE, "NVIDIA RTX6000-Ada-1A" },
{ 0x26B1, 0x1715, 0x10DE, "NVIDIA RTX6000-Ada-2A" },
{ 0x26B1, 0x1716, 0x10DE, "NVIDIA RTX6000-Ada-3A" },
{ 0x26B1, 0x1717, 0x10DE, "NVIDIA RTX6000-Ada-4A" },
{ 0x26B1, 0x1718, 0x10DE, "NVIDIA RTX6000-Ada-6A" },
{ 0x26B1, 0x1719, 0x10DE, "NVIDIA RTX6000-Ada-8A" },
{ 0x26B1, 0x171a, 0x10DE, "NVIDIA RTX6000-Ada-12A" },
{ 0x26B1, 0x171b, 0x10DE, "NVIDIA RTX6000-Ada-16A" },
{ 0x26B1, 0x171c, 0x10DE, "NVIDIA RTX6000-Ada-24A" },
{ 0x26B1, 0x171d, 0x10DE, "NVIDIA RTX6000-Ada-48A" },
{ 0x26B1, 0x171e, 0x10DE, "NVIDIA RTX6000-Ada-1" },
{ 0x26B1, 0x171f, 0x10DE, "NVIDIA RTX6000-Ada-2" },
{ 0x26B1, 0x1720, 0x10DE, "NVIDIA RTX6000-Ada-3" },
{ 0x26B1, 0x1721, 0x10DE, "NVIDIA RTX6000-Ada-4" },
{ 0x26B1, 0x1722, 0x10DE, "NVIDIA RTX6000-Ada-6" },
{ 0x26B1, 0x1723, 0x10DE, "NVIDIA RTX6000-Ada-8" },
{ 0x26B1, 0x1724, 0x10DE, "NVIDIA RTX6000-Ada-12" },
{ 0x26B1, 0x1725, 0x10DE, "NVIDIA RTX6000-Ada-16" },
{ 0x26B1, 0x1726, 0x10DE, "NVIDIA RTX6000-Ada-24" },
{ 0x26B1, 0x1727, 0x10DE, "NVIDIA RTX6000-Ada-48" },
{ 0x26B1, 0x1728, 0x10DE, "NVIDIA RTX6000-Ada-4C" },
{ 0x26B1, 0x1729, 0x10DE, "NVIDIA RTX6000-Ada-6C" },
{ 0x26B1, 0x172a, 0x10DE, "NVIDIA RTX6000-Ada-8C" },
{ 0x26B1, 0x172b, 0x10DE, "NVIDIA RTX6000-Ada-12C" },
{ 0x26B1, 0x172c, 0x10DE, "NVIDIA RTX6000-Ada-16C" },
{ 0x26B1, 0x172d, 0x10DE, "NVIDIA RTX6000-Ada-24C" },
{ 0x26B1, 0x172e, 0x10DE, "NVIDIA RTX6000-Ada-48C" },
{ 0x26B5, 0x176d, 0x10DE, "NVIDIA L40-1B" },
{ 0x26B5, 0x176e, 0x10DE, "NVIDIA L40-2B" },
{ 0x26B5, 0x176f, 0x10DE, "NVIDIA L40-1Q" },
@@ -1870,37 +1876,37 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B8, 0x176a, 0x10DE, "NVIDIA L40G-8C" },
{ 0x26B8, 0x176b, 0x10DE, "NVIDIA L40G-12C" },
{ 0x26B8, 0x176c, 0x10DE, "NVIDIA L40G-24C" },
{ 0x27B8, 0x172f, 0x10DE, "NVIDIA GPU 27B8-172F" },
{ 0x27B8, 0x1730, 0x10DE, "NVIDIA GPU 27B8-1730" },
{ 0x27B8, 0x1731, 0x10DE, "NVIDIA GPU 27B8-1731" },
{ 0x27B8, 0x1732, 0x10DE, "NVIDIA GPU 27B8-1732" },
{ 0x27B8, 0x1733, 0x10DE, "NVIDIA GPU 27B8-1733" },
{ 0x27B8, 0x1734, 0x10DE, "NVIDIA GPU 27B8-1734" },
{ 0x27B8, 0x1735, 0x10DE, "NVIDIA GPU 27B8-1735" },
{ 0x27B8, 0x1736, 0x10DE, "NVIDIA GPU 27B8-1736" },
{ 0x27B8, 0x1737, 0x10DE, "NVIDIA GPU 27B8-1737" },
{ 0x27B8, 0x1738, 0x10DE, "NVIDIA GPU 27B8-1738" },
{ 0x27B8, 0x1739, 0x10DE, "NVIDIA GPU 27B8-1739" },
{ 0x27B8, 0x173a, 0x10DE, "NVIDIA GPU 27B8-173A" },
{ 0x27B8, 0x173b, 0x10DE, "NVIDIA GPU 27B8-173B" },
{ 0x27B8, 0x173c, 0x10DE, "NVIDIA GPU 27B8-173C" },
{ 0x27B8, 0x173d, 0x10DE, "NVIDIA GPU 27B8-173D" },
{ 0x27B8, 0x173e, 0x10DE, "NVIDIA GPU 27B8-173E" },
{ 0x27B8, 0x173f, 0x10DE, "NVIDIA GPU 27B8-173F" },
{ 0x27B8, 0x1740, 0x10DE, "NVIDIA GPU 27B8-1740" },
{ 0x27B8, 0x1741, 0x10DE, "NVIDIA GPU 27B8-1741" },
{ 0x27B8, 0x1742, 0x10DE, "NVIDIA GPU 27B8-1742" },
{ 0x27B8, 0x1743, 0x10DE, "NVIDIA GPU 27B8-1743" },
{ 0x27B8, 0x1744, 0x10DE, "NVIDIA GPU 27B8-1744" },
{ 0x27B8, 0x1745, 0x10DE, "NVIDIA GPU 27B8-1745" },
{ 0x27B8, 0x1746, 0x10DE, "NVIDIA GPU 27B8-1746" },
{ 0x27B8, 0x1747, 0x10DE, "NVIDIA GPU 27B8-1747" },
{ 0x27B8, 0x1748, 0x10DE, "NVIDIA GPU 27B8-1748" },
{ 0x27B8, 0x1749, 0x10DE, "NVIDIA GPU 27B8-1749" },
{ 0x27B8, 0x174a, 0x10DE, "NVIDIA GPU 27B8-174A" },
{ 0x27B8, 0x174b, 0x10DE, "NVIDIA GPU 27B8-174B" },
{ 0x27B8, 0x174c, 0x10DE, "NVIDIA GPU 27B8-174C" },
{ 0x27B8, 0x174d, 0x10DE, "NVIDIA GPU 27B8-174D" },
{ 0x27B8, 0x172f, 0x10DE, "NVIDIA GPU-27B8-172F" },
{ 0x27B8, 0x1730, 0x10DE, "NVIDIA GPU-27B8-1730" },
{ 0x27B8, 0x1731, 0x10DE, "NVIDIA GPU-27B8-1731" },
{ 0x27B8, 0x1732, 0x10DE, "NVIDIA GPU-27B8-1732" },
{ 0x27B8, 0x1733, 0x10DE, "NVIDIA GPU-27B8-1733" },
{ 0x27B8, 0x1734, 0x10DE, "NVIDIA GPU-27B8-1734" },
{ 0x27B8, 0x1735, 0x10DE, "NVIDIA GPU-27B8-1735" },
{ 0x27B8, 0x1736, 0x10DE, "NVIDIA GPU-27B8-1736" },
{ 0x27B8, 0x1737, 0x10DE, "NVIDIA GPU-27B8-1737" },
{ 0x27B8, 0x1738, 0x10DE, "NVIDIA GPU-27B8-1738" },
{ 0x27B8, 0x1739, 0x10DE, "NVIDIA GPU-27B8-1739" },
{ 0x27B8, 0x173a, 0x10DE, "NVIDIA GPU-27B8-173A" },
{ 0x27B8, 0x173b, 0x10DE, "NVIDIA GPU-27B8-173B" },
{ 0x27B8, 0x173c, 0x10DE, "NVIDIA GPU-27B8-173C" },
{ 0x27B8, 0x173d, 0x10DE, "NVIDIA GPU-27B8-173D" },
{ 0x27B8, 0x173e, 0x10DE, "NVIDIA GPU-27B8-173E" },
{ 0x27B8, 0x173f, 0x10DE, "NVIDIA GPU-27B8-173F" },
{ 0x27B8, 0x1740, 0x10DE, "NVIDIA GPU-27B8-1740" },
{ 0x27B8, 0x1741, 0x10DE, "NVIDIA GPU-27B8-1741" },
{ 0x27B8, 0x1742, 0x10DE, "NVIDIA GPU-27B8-1742" },
{ 0x27B8, 0x1743, 0x10DE, "NVIDIA GPU-27B8-1743" },
{ 0x27B8, 0x1744, 0x10DE, "NVIDIA GPU-27B8-1744" },
{ 0x27B8, 0x1745, 0x10DE, "NVIDIA GPU-27B8-1745" },
{ 0x27B8, 0x1746, 0x10DE, "NVIDIA GPU-27B8-1746" },
{ 0x27B8, 0x1747, 0x10DE, "NVIDIA GPU-27B8-1747" },
{ 0x27B8, 0x1748, 0x10DE, "NVIDIA GPU-27B8-1748" },
{ 0x27B8, 0x1749, 0x10DE, "NVIDIA GPU-27B8-1749" },
{ 0x27B8, 0x174a, 0x10DE, "NVIDIA GPU-27B8-174A" },
{ 0x27B8, 0x174b, 0x10DE, "NVIDIA GPU-27B8-174B" },
{ 0x27B8, 0x174c, 0x10DE, "NVIDIA GPU-27B8-174C" },
{ 0x27B8, 0x174d, 0x10DE, "NVIDIA GPU-27B8-174D" },
};
#endif // G_NV_NAME_RELEASED_H

View File

@@ -6503,18 +6503,33 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkL1Threshold_IMPL,
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSetL1Threshold_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*flags=*/ 0x204u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x2080303eu,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS),
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkL1Threshold"
/*func=*/ "subdeviceCtrlCmdNvlinkSetL1Threshold"
#endif
},
{ /* [419] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetL1Threshold_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x2080303fu,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkGetL1Threshold"
#endif
},
{ /* [420] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1240u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6522,14 +6537,14 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1240u)
/*flags=*/ 0x1240u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x2080303fu,
/*methodId=*/ 0x20803040u,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkInbandSendData"
#endif
},
{ /* [420] */
{ /* [421] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6537,14 +6552,14 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20803041u,
/*methodId=*/ 0x20803042u,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkDirectConnectCheck"
#endif
},
{ /* [421] */
{ /* [422] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6552,14 +6567,14 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*flags=*/ 0x200u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20803042u,
/*methodId=*/ 0x20803043u,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkPostFaultUp"
#endif
},
{ /* [422] */
{ /* [423] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6574,7 +6589,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetDmemUsage"
#endif
},
{ /* [423] */
{ /* [424] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6589,7 +6604,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetEngineArch"
#endif
},
{ /* [424] */
{ /* [425] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6604,7 +6619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerQueueInfo"
#endif
},
{ /* [425] */
{ /* [426] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6619,7 +6634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlGet"
#endif
},
{ /* [426] */
{ /* [427] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6634,7 +6649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlSet"
#endif
},
{ /* [427] */
{ /* [428] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6649,7 +6664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferInfo"
#endif
},
{ /* [428] */
{ /* [429] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6664,7 +6679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferSize"
#endif
},
{ /* [429] */
{ /* [430] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6679,7 +6694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters"
#endif
},
{ /* [430] */
{ /* [431] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6694,7 +6709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaRange"
#endif
},
{ /* [431] */
{ /* [432] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6709,7 +6724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock"
#endif
},
{ /* [432] */
{ /* [433] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6724,7 +6739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetRange"
#endif
},
{ /* [433] */
{ /* [434] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6739,7 +6754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats"
#endif
},
{ /* [434] */
{ /* [435] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6754,7 +6769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetFeatures"
#endif
},
{ /* [435] */
{ /* [436] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6769,7 +6784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo"
#endif
},
{ /* [436] */
{ /* [437] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6784,7 +6799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt"
#endif
},
{ /* [437] */
{ /* [438] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6799,7 +6814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff"
#endif
},
{ /* [438] */
{ /* [439] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6814,7 +6829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower"
#endif
},
{ /* [439] */
{ /* [440] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6829,7 +6844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus"
#endif
},
{ /* [440] */
{ /* [441] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6844,7 +6859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus"
#endif
},
{ /* [441] */
{ /* [442] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6859,7 +6874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalBootloadGspVgpuPluginTask"
#endif
},
{ /* [442] */
{ /* [443] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6874,7 +6889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalShutdownGspVgpuPluginTask"
#endif
},
{ /* [443] */
{ /* [444] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6889,7 +6904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalPgpuAddVgpuType"
#endif
},
{ /* [444] */
{ /* [445] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6904,7 +6919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalEnumerateVgpuPerPgpu"
#endif
},
{ /* [445] */
{ /* [446] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6919,7 +6934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalClearGuestVmInfo"
#endif
},
{ /* [446] */
{ /* [447] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6934,7 +6949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetVgpuFbUsage"
#endif
},
{ /* [447] */
{ /* [448] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6949,7 +6964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuEncoderCapacity"
#endif
},
{ /* [448] */
{ /* [449] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6964,7 +6979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalCleanupGspVgpuPluginResources"
#endif
},
{ /* [449] */
{ /* [450] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6979,7 +6994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuFsEncoding"
#endif
},
{ /* [450] */
{ /* [451] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -6994,7 +7009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuMigrationSupport"
#endif
},
{ /* [451] */
{ /* [452] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -7009,7 +7024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuMgrConfig"
#endif
},
{ /* [452] */
{ /* [453] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -7024,7 +7039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask"
#endif
},
{ /* [453] */
{ /* [454] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -7044,7 +7059,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice =
{
/*numEntries=*/ 454,
/*numEntries=*/ 455,
/*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
};
@@ -7489,13 +7504,17 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
pThis->__subdeviceCtrlCmdNvlinkL1Threshold__ = &subdeviceCtrlCmdNvlinkL1Threshold_IMPL;
pThis->__subdeviceCtrlCmdNvlinkSetL1Threshold__ = &subdeviceCtrlCmdNvlinkSetL1Threshold_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdNvlinkDirectConnectCheck__ = &subdeviceCtrlCmdNvlinkDirectConnectCheck_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdNvlinkGetL1Threshold__ = &subdeviceCtrlCmdNvlinkGetL1Threshold_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdI2cReadBuffer__ = &subdeviceCtrlCmdI2cReadBuffer_IMPL;
#endif
@@ -8151,10 +8170,6 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
pThis->__subdeviceCtrlCmdGpuSetComputeModeRules__ = &subdeviceCtrlCmdGpuSetComputeModeRules_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuQueryComputeModeRules__ = &subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL;
#endif
}
static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
@@ -8165,6 +8180,10 @@ static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuQueryComputeModeRules__ = &subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuAcquireComputeModeReservation__ = &subdeviceCtrlCmdGpuAcquireComputeModeReservation_IMPL;
#endif

View File

@@ -215,8 +215,9 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdNvlinkInbandSendData__)(struct Subdevice *, NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkPostFaultUp__)(struct Subdevice *, NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkEomControl__)(struct Subdevice *, NV2080_CTRL_NVLINK_EOM_CONTROL_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkL1Threshold__)(struct Subdevice *, NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkSetL1Threshold__)(struct Subdevice *, NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkDirectConnectCheck__)(struct Subdevice *, NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkGetL1Threshold__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cReadBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cWriteBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cReadReg__)(struct Subdevice *, NV2080_CTRL_I2C_RW_REG_PARAMS *);
@@ -755,8 +756,9 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdNvlinkInbandSendData(pSubdevice, pParams) subdeviceCtrlCmdNvlinkInbandSendData_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkPostFaultUp(pSubdevice, pParams) subdeviceCtrlCmdNvlinkPostFaultUp_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkEomControl(pSubdevice, pParams) subdeviceCtrlCmdNvlinkEomControl_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkL1Threshold(pSubdevice, pParams) subdeviceCtrlCmdNvlinkL1Threshold_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkSetL1Threshold(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSetL1Threshold_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkDirectConnectCheck(pSubdevice, pParams) subdeviceCtrlCmdNvlinkDirectConnectCheck_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkGetL1Threshold(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetL1Threshold_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdI2cReadBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadBuffer_DISPATCH(pSubdevice, pI2cParams)
#define subdeviceCtrlCmdI2cWriteBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cWriteBuffer_DISPATCH(pSubdevice, pI2cParams)
#define subdeviceCtrlCmdI2cReadReg(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadReg_DISPATCH(pSubdevice, pI2cParams)
@@ -1736,10 +1738,10 @@ static inline NV_STATUS subdeviceCtrlCmdNvlinkEomControl_DISPATCH(struct Subdevi
return pSubdevice->__subdeviceCtrlCmdNvlinkEomControl__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdNvlinkL1Threshold_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS *pParams);
NV_STATUS subdeviceCtrlCmdNvlinkSetL1Threshold_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdNvlinkL1Threshold_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdNvlinkL1Threshold__(pSubdevice, pParams);
static inline NV_STATUS subdeviceCtrlCmdNvlinkSetL1Threshold_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdNvlinkSetL1Threshold__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdNvlinkDirectConnectCheck_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS *pParams);
@@ -1748,6 +1750,12 @@ static inline NV_STATUS subdeviceCtrlCmdNvlinkDirectConnectCheck_DISPATCH(struct
return pSubdevice->__subdeviceCtrlCmdNvlinkDirectConnectCheck__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdNvlinkGetL1Threshold_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdNvlinkGetL1Threshold_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdNvlinkGetL1Threshold__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdI2cReadBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams);
static inline NV_STATUS subdeviceCtrlCmdI2cReadBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams) {

View File

@@ -320,7 +320,7 @@ typedef struct
#else // PORT_MEM_TRACK_USE_CALLERINFO
#define PORT_MEM_CALLERINFO_PARAM
#define PORT_MEM_CALLERINFO_TYPE_PARAM
#define PORT_MEM_CALLERINFO_TYPE_PARAM void
#define PORT_MEM_CALLERINFO_COMMA_PARAM
#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM
#define PORT_MEM_CALLINFO_FUNC(f) f

View File

@@ -138,7 +138,7 @@ PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n)
#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU32 lo;
NvU32 hi;
@@ -148,7 +148,7 @@ PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
#define portUtilExReadTimestampCounter_SUPPORTED 1
#elif NVCPU_IS_AARCH64 && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU64 ts = 0;
__asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (ts));
@@ -157,7 +157,7 @@ PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
#define portUtilExReadTimestampCounter_SUPPORTED 1
#elif NVCPU_IS_PPC64LE && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU64 ts;
__asm__ __volatile__ ("mfspr %0,268" : "=r"(ts));
@@ -166,7 +166,7 @@ PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
#define portUtilExReadTimestampCounter_SUPPORTED 1
#elif NVCPU_IS_PPC && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU32 lo, hi, tmp;
__asm__ __volatile__ (

View File

@@ -26,6 +26,226 @@
#include "ctrl/ctrl0000/ctrl0000system.h"
/*
* Definitions for the static params table.
*/
/*!
* Layout of SysDev 2x data used for static config
*/
#define NVPCF_SYSDEV_STATIC_TABLE_VERSION_2X (0x20)
#define NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_SIZE_03 (0x03U)
#define NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_FMT_SIZE_03 ("3b")
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_SIZE_01 (0x01U)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_FMT_SIZE_01 ("1b")
/*!
* Static system dev header table, unpacked
*/
typedef struct
{
/*
* System device Table Version.
*/
NvU32 version;
/*
* Size of device Table Header in bytes .
*/
NvU32 headerSize;
/*
* Size of common entry in bytes.
*/
NvU32 commonSize;
} SYSDEV_STATIC_TABLE_HEADER_2X;
/*!
* Static system dev common entry
*/
typedef struct
{
NvU32 param0;
} SYSDEV_STATIC_TABLE_COMMON_2X;
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE 3:0
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_INTEL (0x00000000)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_AMD (0x00000001)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_NVIDIA (0x00000002)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_GPU_TYPE 7:4
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_GPU_TYPE_NVIDIA (0x00000000)
/*!
* Layout of Controller 2x data used for static config
*/
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_20 (0x20)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_21 (0x21)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_22 (0x22)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_23 (0x23)
// format for 2.0 and 2.1
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05 (0x05U)
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_FMT_SIZE_05 ("5b")
#define NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_SIZE_02 (0x02U)
#define NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_FMT_SIZE_02 ("1w")
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_SIZE_0F (0x0FU)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FMT_SIZE_0F ("1b1w3d")
/*!
* Static system controller header table v2.0/2.1, unpacked
*/
typedef struct
{
/*
* System controller Table Version.
*/
NvU32 version;
/*
* Size of controller Table Header in bytes .
*/
NvU32 headerSize;
/*
* Size of controller Table Common/Global Entry in bytes.
*/
NvU32 commonSize;
/*
* Size of controller Table Entry in bytes.
*/
NvU32 entrySize;
/*
* Number of controller Entries
*/
NvU32 entryCount;
} CONTROLLER_STATIC_TABLE_HEADER_V20;
/*!
* Static system controller common/global entry v2.0/2.1, unpacked
*/
typedef struct
{
/*
* Base sampling period in ms
*/
NvU32 samplingPeriodms;
} CONTROLLER_STATIC_TABLE_COMMON_V20;
/*!
* Static system controller entry v2.0/2.1, unpacked
*/
typedef struct
{
/*
* System controller entry type specific flag (Flags0).
*/
NvU32 flags0;
/*
* Sampling Multiplier.
*/
NvU32 samplingMulti;
/*
* System controller entry filter parameters.
*/
NvU32 filterParams;
/*
* System controller entry Usage-Specific Parameter (Param0).
*/
NvU32 param0;
/*
* System controller entry Usage-Specific Parameter (Param1).
*/
NvU32 param1;
} CONTROLLER_STATIC_TABLE_ENTRY_V20;
// FLAGS0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS 3:0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_DISABLED (0x00000000)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_PPAB (0x00000001)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_CTGP (0x00000002)
// Filter
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTER_TYPE 7:0
// filterType = EWMA
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTERPARAM_EWMA_WEIGHT 15:8
// filterType = MAX, others
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTERPARAM_WINDOW_SIZE 15:8
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTER_RESERVED 31:16
// Param0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM0_QBOOST_INCREASE_GAIN 15:0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM0_QBOOST_DECREASE_GAIN 31:16
// Param1
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM1_QBOOST_DC_SUPPORT 0:0
// format for 2.2
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_SIZE_04 (0x04U)
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_FMT_SIZE_04 ("4b")
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_SIZE_05 (0x05U)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FMT_SIZE_05 ("1b1d")
/*!
* Static system controller header table v2.2, unpacked
*/
typedef struct
{
/*
* System controller Table Version.
*/
NvU32 version;
/*
* Size of controller Table Header in bytes .
*/
NvU32 headerSize;
/*
* Size of controller Table Entry in bytes.
*/
NvU32 entrySize;
/*
* Number of controller Entries
*/
NvU32 entryCount;
} CONTROLLER_STATIC_TABLE_HEADER_V22;
/*!
* Static system controller entry v2.2, unpacked
*/
typedef struct
{
/*
* System controller entry type specific flag (Flags0).
*/
NvU32 flags0;
/*
* System controller entry Usage-Specific Parameter (Param0).
*/
NvU32 param0;
} CONTROLLER_STATIC_TABLE_ENTRY_V22;
// FLAGS0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS 3:0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_DISABLED (0x00000000)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_PPAB (0x00000001)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_CTGP (0x00000002)
// Param0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_PARAM0_QBOOST_DC_SUPPORT 0:0
/*
* Definitions for the dynamic params table.
*/
@@ -70,8 +290,14 @@ typedef struct
/*!
* Config DSM NVPCF 2x version specific defines
*/
/*
* Definitions for the dynamic params table.
*/
#define NVPCF_DYNAMIC_PARAMS_20_VERSION (0x20)
#define NVPCF_DYNAMIC_PARAMS_21_VERSION (0x21)
#define NVPCF_DYNAMIC_PARAMS_22_VERSION (0x22)
#define NVPCF_DYNAMIC_PARAMS_23_VERSION (0x23)
#define NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05 (0x05U)
#define NVPCF_DYNAMIC_PARAMS_2X_HEADER_FMT_SIZE_05 ("5b")
#define NVPCF_DYNAMIC_PARAMS_2X_COMMON_SIZE_10 (0x10U)

View File

@@ -135,6 +135,7 @@
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR 0xFF100005 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR 0xFF100006 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE 0xFF100007 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_CUDA_PROFILING_DISABLED 0xFF100008 /* RW--V */
/* RPC-specific code in result for incomplete request */
#define NV_VGPU_MSG_RESULT_RPC_PENDING 0xFFFFFFFF /* RW--V */
/* shared union field */

View File

@@ -92,11 +92,11 @@ nvlogInit(void *pData)
return NV_OK;
}
void nvlogUpdate() {
void nvlogUpdate(void) {
}
NV_STATUS
nvlogDestroy()
nvlogDestroy(void)
{
NvU32 i;

View File

@@ -59,7 +59,7 @@ enum {
// nvDbgBreakpointEnabled - Returns true if triggering a breakpoint is allowed
//
NvBool osDbgBreakpointEnabled(void);
NvBool nvDbgBreakpointEnabled()
NvBool nvDbgBreakpointEnabled(void)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
if (pSys != NULL)

View File

@@ -704,6 +704,7 @@ _kbifInitRegistryOverrides
NV_PRINTF(LEVEL_INFO, "allow peermapping reg key = %d\n", data32);
pKernelBif->peerMappingOverride = !!data32;
}
}
/*!

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -1348,3 +1348,30 @@ kbusSetupUnbindFla_GA100
return status;
}
NV_STATUS
kbusGetFlaRange_GA100
(
OBJGPU *pGpu,
KernelBus *pKernelBus,
NvU64 *ucFlaBase,
NvU64 *ucFlaSize,
NvBool bIsConntectedToNvswitch
)
{
if (gpuIsSriovEnabled(pGpu) && bIsConntectedToNvswitch)
{
if (pKernelBus->flaInfo.bFlaRangeRegistered)
{
*ucFlaBase = pKernelBus->flaInfo.base;
*ucFlaSize = pKernelBus->flaInfo.size;
}
}
else // direct connected system
{
*ucFlaSize = gpuGetFlaVasSize_HAL(pGpu, NV_FALSE);
*ucFlaBase = pGpu->gpuInstance * (*ucFlaSize);
}
return NV_OK;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -1595,3 +1595,36 @@ kbusSetupUnbindFla_GH100
return status;
}
NV_STATUS
kbusGetFlaRange_GH100
(
OBJGPU *pGpu,
KernelBus *pKernelBus,
NvU64 *ucFlaBase,
NvU64 *ucFlaSize,
NvBool bIsConntectedToNvswitch
)
{
if (!GPU_IS_NVSWITCH_DETECTED(pGpu))
{
*ucFlaSize = gpuGetFlaVasSize_HAL(pGpu, NV_FALSE);
*ucFlaBase = pGpu->gpuInstance * (*ucFlaSize);
}
else
{
FABRIC_VASPACE *pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE);
NvU64 ucFlaLimit;
if (pFabricVAS == NULL)
return NV_ERR_INVALID_STATE;
ucFlaLimit = fabricvaspaceGetUCFlaLimit(pFabricVAS);
if (ucFlaLimit == 0)
return NV_ERR_INVALID_STATE;
*ucFlaBase = fabricvaspaceGetUCFlaStart(pFabricVAS);
*ucFlaSize = ucFlaLimit - *ucFlaBase + 1;
}
return NV_OK;
}

View File

@@ -460,6 +460,9 @@ kbusStateUnload_GM107
NV_STATUS status = NV_OK;
KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
if (IS_VIRTUAL(pGpu) && !(flags & GPU_STATE_FLAGS_PRESERVING))
return NV_OK;
if ((pKernelBif != NULL)
&&
(!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) ||
@@ -476,9 +479,12 @@ kbusStateUnload_GM107
{
if (!IS_GPU_GC6_STATE_ENTERING(pGpu))
{
status = kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, GPU_GFID_PF);
// Do not use BAR2 physical mode for bootstrapping BAR2 across S/R.
pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE;
status = kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, GPU_GFID_PF);
if (!IS_VIRTUAL_WITH_SRIOV(pGpu))
{
// Do not use BAR2 physical mode for bootstrapping BAR2 across S/R.
pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE;
}
}
}
else

View File

@@ -1242,3 +1242,12 @@ kbusIsGpuP2pAlive_IMPL
{
return (pKernelBus->totalP2pObjectsAliveRefCount > 0);
}
/**
* @brief Setup VF BAR2 during hibernate resume
*
* @param[in] pGpu
* @param[in] pKernelBus
* @param[in] flags
*/

View File

@@ -145,6 +145,8 @@ kchannelConstruct_IMPL
NvBool bTsgAllocated = NV_FALSE;
NvHandle hChanGrp = NV01_NULL_OBJECT;
RsResourceRef *pDeviceRef = NULL;
RsResourceRef *pVASpaceRef = NULL;
KernelGraphicsContext *pKernelGraphicsContext = NULL;
NvBool bMIGInUse;
KernelChannelGroup *pKernelChannelGroup = NULL;
NvU32 chID = ~0;
@@ -694,7 +696,7 @@ kchannelConstruct_IMPL
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id %d for hClient %d hKernelChannel %d \n",
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
chID, hClient, pResourceRef->hResource);
DBG_BREAKPOINT();
goto cleanup;
@@ -729,7 +731,7 @@ kchannelConstruct_IMPL
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id %d for hClient %d hKernelChannel %d \n",
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
chID, hClient, pResourceRef->hResource);
chID = ~0;
DBG_BREAKPOINT();
@@ -852,8 +854,6 @@ kchannelConstruct_IMPL
// We depend on VASpace if it was provided
if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
{
RsResourceRef *pVASpaceRef = NULL;
NV_ASSERT_OK_OR_GOTO(status, clientGetResourceRef(pRsClient, pChannelGpfifoParams->hVASpace, &pVASpaceRef), cleanup);
NV_ASSERT_OR_ELSE(pVASpaceRef != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup);
@@ -875,8 +875,6 @@ kchannelConstruct_IMPL
pKernelChannel->hKernelGraphicsContext = pKernelChannelGroupApi->hKernelGraphicsContext;
if (pKernelChannel->hKernelGraphicsContext != NV01_NULL_OBJECT)
{
KernelGraphicsContext *pKernelGraphicsContext;
NV_ASSERT_OK_OR_GOTO(status,
kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext),
cleanup);
@@ -920,6 +918,24 @@ cleanup:
_kchannelCleanupNotifyActions(pKernelChannel);
}
// Remove any dependencies we may have added; we don't want our destructor called when freeing anything below
if (pKernelGraphicsContext != NULL)
{
refRemoveDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef);
}
if (pKernelChannel->pKernelCtxShareApi != NULL)
{
refRemoveDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef);
}
if (pVASpaceRef != NULL)
{
refRemoveDependant(pVASpaceRef, pResourceRef);
}
if (bTsgAllocated)
{
refRemoveDependant(pChanGrpRef, pResourceRef);
}
if (bAddedToGroup)
{
kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);

View File

@@ -1058,6 +1058,7 @@ kfspSendBootCommands_GH100
}
pCotPayload = portMemAllocNonPaged(sizeof(NVDM_PAYLOAD_COT));
NV_CHECK_OR_RETURN(LEVEL_ERROR, pCotPayload != NULL, NV_ERR_NO_MEMORY);
portMemSet(pCotPayload, 0, sizeof(NVDM_PAYLOAD_COT));
frtsSize = NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K << 12;

View File

@@ -367,6 +367,7 @@ kfspReadMessage
}
pPacketBuffer = portMemAllocNonPaged(kfspGetRmChannelSize_HAL(pGpu, pKernelFsp));
NV_CHECK_OR_RETURN(LEVEL_ERROR, pPacketBuffer != NULL, NV_ERR_NO_MEMORY);
while ((packetState != MCTP_PACKET_STATE_END) && (packetState != MCTP_PACKET_STATE_SINGLE_PACKET))
{
@@ -483,6 +484,7 @@ kfspSendPacket_IMPL
// Pad to align size to 4-bytes boundary since EMEMC increments by DWORDS
paddedSize = NV_ALIGN_UP(packetSize, sizeof(NvU32));
pBuffer = portMemAllocNonPaged(paddedSize);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pBuffer != NULL, NV_ERR_NO_MEMORY);
portMemSet(pBuffer, 0, paddedSize);
portMemCopy(pBuffer, paddedSize, pPacket, paddedSize);
@@ -537,6 +539,7 @@ kfspSendAndReadMessage_IMPL
// Allocate buffer of same size as channel
fspEmemRmChannelSize = kfspGetRmChannelSize_HAL(pGpu, pKernelFsp);
pBuffer = portMemAllocNonPaged(fspEmemRmChannelSize);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pBuffer != NULL, NV_ERR_NO_MEMORY);
portMemSet(pBuffer, 0, fspEmemRmChannelSize);
//

View File

@@ -2181,13 +2181,15 @@ gpuStateLoad_IMPL
return status;
}
// It is a no-op on baremetal and inside non SRIOV guest.
rmStatus = gpuCreateDefaultClientShare_HAL(pGpu);
if (rmStatus != NV_OK)
if (!(flags & GPU_STATE_FLAGS_PRESERVING))
{
return rmStatus;
// It is a no-op on baremetal and inside non SRIOV guest.
rmStatus = gpuCreateDefaultClientShare_HAL(pGpu);
if (rmStatus != NV_OK)
{
return rmStatus;
}
}
NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
rmStatus = gpuStatePreLoad(pGpu, flags);
@@ -2823,7 +2825,8 @@ gpuStateUnload_IMPL
rmStatus = gpuStatePostUnload(pGpu, flags);
NV_ASSERT_OK(rmStatus);
gpuDestroyDefaultClientShare_HAL(pGpu);
if(!(flags & GPU_STATE_FLAGS_PRESERVING))
gpuDestroyDefaultClientShare_HAL(pGpu);
// De-init SRIOV
gpuDeinitSriov_HAL(pGpu);

View File

@@ -502,6 +502,7 @@ gpuNotifySubDeviceEvent_IMPL
for (i = 0; i < pGpu->numSubdeviceBackReferences; i++)
{
Subdevice *pSubdevice = pGpu->pSubdeviceBackReferences[i];
NV_ASSERT_OR_RETURN_VOID(pSubdevice != NULL);
INotifier *pNotifier = staticCast(pSubdevice, INotifier);
GPU_RES_SET_THREAD_BC_STATE(pSubdevice);

View File

@@ -1139,7 +1139,7 @@ _kgspRpcRecvPoll
KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
NV_STATUS nvStatus;
RMTIMEOUT timeout;
NvU32 timeoutUs = GPU_TIMEOUT_DEFAULT;
NvU32 timeoutUs;
NvBool bSlowGspRpc = IS_EMULATION(pGpu) || IS_SIMULATION(pGpu);
//
@@ -1172,12 +1172,20 @@ _kgspRpcRecvPoll
}
else
{
// We should only ever timeout this when GSP is in really bad state, so if it just
// happens to timeout on default timeout it should be OK for us to give it a little
// more time - make this timeout 1.5 of the default to allow some leeway.
NvU32 defaultus = pGpu->timeoutData.defaultus;
timeoutUs = defaultus + defaultus / 2;
if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu))
{
// Ensure at least 3.1s for vGPU-GSP before adding leeway (Bug 3928607)
timeoutUs = NV_MAX(3100 * 1000, defaultus) + (defaultus / 2);
}
else
{
// We should only ever timeout this when GSP is in really bad state, so if it just
// happens to timeout on default timeout it should be OK for us to give it a little
// more time - make this timeout 1.5 of the default to allow some leeway.
timeoutUs = defaultus + defaultus / 2;
}
}
NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance));

View File

@@ -124,7 +124,7 @@ kmigmgrMakeCIReference_IMPL
/*! @brief create a Ref referencing no GI/CI */
MIG_INSTANCE_REF
kmigmgrMakeNoMIGReference_IMPL()
kmigmgrMakeNoMIGReference_IMPL(void)
{
MIG_INSTANCE_REF ref = { NULL, NULL };
return ref;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a

View File

@@ -178,7 +178,7 @@ initAPIOSFunctionPointers(OBJOS *pOS)
//
// Function to find the maximum number of cores in the system
//
NvU32 osGetMaximumCoreCount()
NvU32 osGetMaximumCoreCount(void)
{
//
// Windows provides an API to query this that supports CPU hotadd that our
@@ -599,7 +599,7 @@ osMemGetFilter(NvUPtr address)
* full call stack that is much helpful for debugging.
*/
void osPagedSegmentAccessCheck()
void osPagedSegmentAccessCheck(void)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJOS *pOS = SYS_GET_OS(pSys);

View File

@@ -3364,7 +3364,7 @@ void initNbsiObject(NBSI_OBJ *pNbsiObj)
}
}
NBSI_OBJ *getNbsiObject()
NBSI_OBJ *getNbsiObject(void)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJPFM *pPfm = SYS_GET_PFM(pSys);

View File

@@ -69,6 +69,26 @@
#include "gpu/gsp/kernel_gsp.h"
#include "power/gpu_boost_mgr.h"
#define CONFIG_2X_BUFF_SIZE_MIN (2)
//
// Controller Table v2.2 has removed some params, set them using these
// default values instead
//
// EWMA retention weight (232/256) results in tau being 10x the sampling period
//
#define CONTROLLER_GRP_DEFAULT_BASE_SAMPLING_PERIOD_MS (100)
#define CONTROLLER_GRP_DEFAULT_SAMPLING_MULTIPLIER (1)
#define CONTROLLER_GRP_DEFAULT_EWMA_WEIGHT (232)
#define CONTROLLER_GRP_DEFAULT_INCREASE_GAIN_UFXP4_12 (3686)
#define CONTROLLER_GRP_DEFAULT_DECREASE_GAIN_UFXP4_12 (4096)
/*!
* Define the filter types.
*/
#define NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA (0)
#define NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_MOVING_MAX (1)
NV_STATUS
cliresConstruct_IMPL
(
@@ -1899,6 +1919,580 @@ cliresCtrlCmdGpuAcctGetAccountingPids_IMPL
}
/*!
* Helper to build config data from unpacked table data,
* static config v2.0/2.1.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildConfig_StaticTable_v20
(
CONTROLLER_STATIC_TABLE_ENTRY_V20 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->samplingMulti =
(NvU16)pEntry->samplingMulti;
pParams->filterType =
(NvU8)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTER, _TYPE,
pEntry->filterParams);
pParams->filterReserved =
(NvU16)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTER, _RESERVED,
pEntry->filterParams);
// Get the filter param based on filter type
switch (pParams->filterType)
{
case NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA:
{
pParams->filterParam.weight =
(NvU8)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTERPARAM, _EWMA_WEIGHT,
pEntry->filterParams);
break;
}
case NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_MOVING_MAX:
default:
{
pParams->filterParam.windowSize =
(NvU8)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTERPARAM, _WINDOW_SIZE,
pEntry->filterParams);
break;
}
}
}
/*!
* Helper to build Qboost's config data from unpacked table data,
* static config v2.0/2.1.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildQboostConfig_StaticTable_v20
(
CONTROLLER_STATIC_TABLE_ENTRY_V20 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->bIsBoostController = NV_TRUE;
// Type-specific param0
pParams->incRatio =
(NvUFXP4_12)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _PARAM0, _QBOOST_INCREASE_GAIN,
pEntry->param0);
pParams->decRatio =
(NvUFXP4_12)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _PARAM0, _QBOOST_DECREASE_GAIN,
pEntry->param0);
// Type-specific param1
pParams->bSupportBatt =
(NvBool)(DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _PARAM1, _QBOOST_DC_SUPPORT,
pEntry->param1));
}
/*!
* Helper to build config data from unpacked table data,
* static config v2.2.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildConfig_StaticTable_v22
(
CONTROLLER_STATIC_TABLE_ENTRY_V22 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->samplingMulti = CONTROLLER_GRP_DEFAULT_SAMPLING_MULTIPLIER;
pParams->filterType = NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA;
pParams->filterParam.weight = CONTROLLER_GRP_DEFAULT_EWMA_WEIGHT;
}
/*!
* Helper to build Qboost's config data from unpacked table data,
* static config v2.2.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildQboostConfig_StaticTable_v22
(
CONTROLLER_STATIC_TABLE_ENTRY_V22 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->bIsBoostController = NV_TRUE;
// Use increase gain of 90%, decrease gain of 100%
pParams->incRatio = CONTROLLER_GRP_DEFAULT_INCREASE_GAIN_UFXP4_12;
pParams->decRatio = CONTROLLER_GRP_DEFAULT_DECREASE_GAIN_UFXP4_12;
// Type-specific param0
pParams->bSupportBatt =
(NvBool)(DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V22, _PARAM0, _QBOOST_DC_SUPPORT,
pEntry->param0));
}
/*!
* Helper to build CTGP controller's config data from unpacked table data,
* static config 2x version. Re-uses struct types from normal Qboost
* controller.
*
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildCtgpConfig_StaticTable_2x
(
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
//
// Sampling period only really only affects the delay in handling
// CTGP changes, so just set sampling period multiplier to 1
//
// Force EWMA filter type with weight 0, since currently the reading
// and filtering of CPU power is still done
//
pParams->samplingMulti = CONTROLLER_GRP_DEFAULT_SAMPLING_MULTIPLIER;
pParams->filterType = NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA;
pParams->filterParam.weight = 0;
// Inform apps that there is no Dynamic Boost support
pParams->bIsBoostController = NV_FALSE;
pParams->incRatio = 0;
pParams->decRatio = 0;
pParams->bSupportBatt = NV_FALSE;
}
/*!
* Attempts to parse the static controller table, as v2.0 or v2.1.
*
* @param[in] pData Pointer to start (header) of the table
* @param[in] dataSize Size of entire table, including header
* @param[out] pEntryCount Number of controller entries found
* @param[out] pParams Structure to fill parsed info
*
* @return NV_OK
* Table was successfully parsed; caller should remember to free object array
* @return NV_ERR_NOT_SUPPORTED
* Failed to detect correct table version, no output
* @return Other errors
* NV_ERR_INVALID_DATA or errors propogated up from functions called
*/
static NV_STATUS
_controllerParseStaticTable_v20
(
NvU8 *pData,
NvU32 dataSize,
NvU8 *pEntryCount,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
const char *pHeaderFmt = NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_FMT_SIZE_05;
const char *pCommonFmt = NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_FMT_SIZE_02;
const char *pEntryFmt = NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FMT_SIZE_0F;
NvU32 loop = 0;
NV_STATUS status = NV_OK;
CONTROLLER_STATIC_TABLE_HEADER_V20 header = { 0 };
CONTROLLER_STATIC_TABLE_COMMON_V20 common = { 0 };
// Check if we can safely parse the header
if (dataSize < NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05)
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_v20_exit;
}
// Unpack the table header
configReadStructure(pData, &header, 0, pHeaderFmt);
switch (header.version)
{
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_20:
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_21:
{
NvU32 expectedSize;
// check rest of header
if ((header.headerSize != NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05)
|| (header.commonSize != NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_SIZE_02)
|| (header.entrySize != NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_SIZE_0F))
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
// must have at least one entry
if (header.entryCount == 0)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
// check data size
expectedSize = header.headerSize + header.commonSize
+ (header.entryCount * header.entrySize);
if (expectedSize != dataSize)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
break;
}
default:
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_v20_exit;
}
}
// Unpack the common data, base sampling period cannot be 0
configReadStructure(pData, &common, header.headerSize, pCommonFmt);
if (common.samplingPeriodms == 0)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
pParams->samplingPeriodmS = (NvU16)common.samplingPeriodms;
// Parse each entry
for (loop = 0; loop < header.entryCount; loop++)
{
CONTROLLER_STATIC_TABLE_ENTRY_V20 entry = { 0 };
NvU32 offset = header.headerSize + header.commonSize +
(loop * NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_SIZE_0F);
// Unpack the controller entry
configReadStructure(pData, &entry, offset, pEntryFmt);
switch (DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FLAGS0, _CLASS,
entry.flags0))
{
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_PPAB:
{
_controllerBuildConfig_StaticTable_v20(&entry, pParams);
_controllerBuildQboostConfig_StaticTable_v20(&entry, pParams);
break;
}
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_CTGP:
{
_controllerBuildCtgpConfig_StaticTable_2x(pParams);
break;
}
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_DISABLED:
default:
{
}
}
}
pParams->version = (NvU8)header.version;
*pEntryCount = (NvU8)header.entryCount;
_controllerParseStaticTable_v20_exit:
return status;
}
/*!
* Attempts to parse the static controller table, as v2.2.
*
* @param[in] pData Pointer to start (header) of the table
* @param[in] dataSize Size of entire table, including header
* @param[out] pEntryCount Number of controller entries found
* @param[out] pParams Structure to fill parsed info
*
* @return NV_OK
* Table was successfully parsed; caller should remember to free object array
* @return NV_ERR_NOT_SUPPORTED
* Failed to detect correct table version, no output
* @return Other errors
 *     NV_ERR_INVALID_DATA or errors propagated up from functions called
*/
static NV_STATUS
_controllerParseStaticTable_v22
(
    NvU8 *pData,
    NvU32 dataSize,
    NvU8 *pEntryCount,
    NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
    const char *pHeaderFmt = NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_FMT_SIZE_04;
    const char *pEntryFmt  = NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FMT_SIZE_05;
    CONTROLLER_STATIC_TABLE_HEADER_V22 header = { 0 };
    NV_STATUS   status = NV_OK;
    NvU32       entryIdx;
    NvU32       expectedSize;

    // The buffer must be large enough to hold at least the table header.
    if (dataSize < NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_SIZE_04)
    {
        status = NV_ERR_NOT_SUPPORTED;
        goto _controllerParseStaticTable_v22_exit;
    }

    // Unpack the table header
    configReadStructure(pData, &header, 0, pHeaderFmt);

    // Only table versions 2.2 and 2.3 share this layout.
    if ((header.version != NVPCF_CONTROLLER_STATIC_TABLE_VERSION_22) &&
        (header.version != NVPCF_CONTROLLER_STATIC_TABLE_VERSION_23))
    {
        status = NV_ERR_NOT_SUPPORTED;
        goto _controllerParseStaticTable_v22_exit;
    }

    // Validate the advertised sizes and require at least one entry.
    if ((header.headerSize != NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_SIZE_04) ||
        (header.entrySize != NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_SIZE_05) ||
        (header.entryCount == 0))
    {
        status = NV_ERR_INVALID_DATA;
        goto _controllerParseStaticTable_v22_exit;
    }

    // The size implied by the header must match the buffer size exactly.
    expectedSize = header.headerSize + (header.entryCount * header.entrySize);
    if (expectedSize != dataSize)
    {
        status = NV_ERR_INVALID_DATA;
        goto _controllerParseStaticTable_v22_exit;
    }

    // Walk the entries and apply each controller's configuration.
    for (entryIdx = 0; entryIdx < header.entryCount; entryIdx++)
    {
        CONTROLLER_STATIC_TABLE_ENTRY_V22 entry = { 0 };
        NvU32 entryOffset = header.headerSize +
            (entryIdx * NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_SIZE_05);

        // Unpack the controller entry
        configReadStructure(pData, &entry, entryOffset, pEntryFmt);

        switch (DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V22, _FLAGS0, _CLASS,
                entry.flags0))
        {
            case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_PPAB:
            {
                _controllerBuildConfig_StaticTable_v22(&entry, pParams);
                _controllerBuildQboostConfig_StaticTable_v22(&entry, pParams);
                break;
            }
            case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_CTGP:
            {
                _controllerBuildCtgpConfig_StaticTable_2x(pParams);
                break;
            }
            case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_DISABLED:
            default:
            {
                // Disabled or unrecognized controller classes are skipped.
                break;
            }
        }
    }

    pParams->version          = (NvU8)header.version;
    pParams->samplingPeriodmS = CONTROLLER_GRP_DEFAULT_BASE_SAMPLING_PERIOD_MS;
    *pEntryCount              = (NvU8)header.entryCount;

_controllerParseStaticTable_v22_exit:
    return status;
}
/*!
 * Parses the system-device (sysdev) static table at the start of the buffer
 * and reports where the controller table begins.
 *
 * @param[in]  pData                  Pointer to start of the config buffer
 * @param[in]  dataSize               Pointer to buffer size (currently unused)
 * @param[out] controllerTableOffset  Byte offset of the controller table
 * @param[out] pParams                Filled with cpuType/gpuType from the table
 *
 * @return NV_OK on success, NV_ERR_INVALID_DATA on header mismatch.
 */
static NV_STATUS
_sysDeviceParseStaticTable_2x
(
    NvU8 *pData,
    NvU32 *dataSize,
    NvU32 *controllerTableOffset,
    NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
    NV_STATUS status = NV_OK;
    SYSDEV_STATIC_TABLE_HEADER_2X header = { 0 };
    SYSDEV_STATIC_TABLE_COMMON_2X commonData = { 0 };
    const char *pSzSysDevHeaderFmt =
        NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_FMT_SIZE_03;
    const char *pSzCommonFmt =
        NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_FMT_SIZE_01;

    // The sysdev table sits at offset 0 of the buffer; unpack its header.
    configReadStructure(pData, &header, 0, pSzSysDevHeaderFmt);

    // Reject anything but the expected 2x version and structure sizes.
    if ((header.version != NVPCF_SYSDEV_STATIC_TABLE_VERSION_2X) ||
        (header.headerSize != NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_SIZE_03) ||
        (header.commonSize != NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_SIZE_01))
    {
        NV_PRINTF(LEVEL_ERROR, "NVPCF: %s: Unsupported header\n",
            __FUNCTION__);
        status = NV_ERR_INVALID_DATA;
        goto _sysDeviceParseStaticTable_2x_exit;
    }

    // The controller table follows the sysdev header and common block.
    *controllerTableOffset = header.headerSize + header.commonSize;

    // Unpack the common block and extract the CPU/GPU type fields.
    configReadStructure(pData, &commonData, header.headerSize, pSzCommonFmt);

    pParams->cpuType = (DRF_VAL(PCF_SYSDEV_STATIC_TABLE_COMMON_2X, _PARAM0, _CPU_TYPE,
        commonData.param0));
    pParams->gpuType = (DRF_VAL(PCF_SYSDEV_STATIC_TABLE_COMMON_2X, _PARAM0, _GPU_TYPE,
        commonData.param0));

_sysDeviceParseStaticTable_2x_exit:
    return status;
}
/*!
 * Parses the 2x static config buffer: first the sysdev table at the start of
 * the buffer, then the controller table that follows it (trying the v2.2
 * layout first, falling back to v2.0).
 *
 * @param[in]  pData     Pointer to start of the config buffer
 * @param[in]  dataSize  Size of the buffer in bytes (checksum byte excluded)
 * @param[out] pParams   Structure to fill with parsed info
 *
 * @return NV_OK on success, NV_ERR_NOT_SUPPORTED / NV_ERR_INVALID_DATA on
 *         malformed or unrecognized tables.
 */
static NV_STATUS
_controllerParseStaticTable_2x
(
    NvU8 *pData,
    NvU32 dataSize,
    NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
    NvU32 controllerTableOffset = 0;
    NvU8 entryCount = 0;
    NV_STATUS status = NV_OK;

    // Make sure we can safely parse the sysdev header
    if (dataSize < NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_SIZE_03)
    {
        status = NV_ERR_NOT_SUPPORTED;
        goto _controllerParseStaticTable_exit;
    }

    //
    // Propagate sysdev parse failures. Previously the return value was
    // ignored, so an invalid sysdev header left controllerTableOffset at 0
    // and the entire buffer was then misparsed as a controller table.
    //
    status = _sysDeviceParseStaticTable_2x(pData, &dataSize, &controllerTableOffset, pParams);
    if (status != NV_OK)
    {
        goto _controllerParseStaticTable_exit;
    }

    // Make sure data size is at least the controller table offset
    if (dataSize < controllerTableOffset)
    {
        status = NV_ERR_NOT_SUPPORTED;
        goto _controllerParseStaticTable_exit;
    }

    // Advance past the sysdev table to the controller table.
    pData += controllerTableOffset;
    dataSize -= controllerTableOffset;

    // Try the newer v2.2 layout first, then fall back to v2.0.
    status = _controllerParseStaticTable_v22(pData,
                dataSize,
                &entryCount,
                pParams);
    if (status == NV_ERR_NOT_SUPPORTED)
    {
        status = _controllerParseStaticTable_v20(pData,
                    dataSize,
                    &entryCount,
                    pParams);
    }

_controllerParseStaticTable_exit:
    return status;
}
/*!
* Helper function to validate the config static data that can be
* received from various sources, using one byte two's complement
* checksum. And match is against the last byte the original
* checksum byte is stored in the data.
*
 * @param[in] pData        NvU8 data buffer pointer
* @param[in] pDataSize NvU32 pointer to the data size in bytes
*
* @return NV_OK
* Checksum successfully matched.
*
* @return NV_ERR_INVALID_POINTER
 *     Invalid input data buffer pointer.
 *
* @return NV_ERR_INVALID_DATA
* Checksum failure or wrong data size.
*/
static NV_STATUS
_validateConfigStaticTable_2x
(
    NvU8 *pData,
    NvU16 *pDataSize
)
{
    NV_STATUS status = NV_OK;
    NvU8 checkSum;
    //
    // Loop counter widened from NvU8 to NvU16: an NvU8 counter wraps (and the
    // loop never terminates) if *pDataSize ever exceeds 256. The accepted
    // maximum is NVPCF0100_CTRL_CONFIG_2X_BUFF_SIZE_MAX, whose value is
    // defined elsewhere, so size the counter to the full NvU16 range.
    //
    NvU16 idx;

    NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_POINTER);
    NV_ASSERT_OR_RETURN(pDataSize != NULL, NV_ERR_INVALID_POINTER);

    //
    // Check data size length for static2x data. Must be min 2 bytes
    // (CONFIG_2X_BUFF_SIZE_MIN) including 1 byte for checksum. The
    // max allowed for static2x is CONFIG_2X_BUFF_SIZE_MAX.
    //
    if ((*pDataSize < CONFIG_2X_BUFF_SIZE_MIN) ||
        (*pDataSize > NVPCF0100_CTRL_CONFIG_2X_BUFF_SIZE_MAX))
    {
        status = NV_ERR_INVALID_DATA;
        goto validateConfigStaticTable_2x_exit;
    }

    // One-byte two's-complement checksum over all bytes except the last.
    checkSum = 0;
    for (idx = 0; idx < (*pDataSize - 1); idx++)
    {
        checkSum += pData[idx];
    }
    checkSum = (~checkSum) + 1;

    // Match with the original checksum stored in the final byte.
    if (checkSum != pData[*pDataSize - 1])
    {
        status = NV_ERR_INVALID_DATA;
        goto validateConfigStaticTable_2x_exit;
    }

validateConfigStaticTable_2x_exit:
    return status;
}
NV_STATUS
cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
(
@@ -2026,7 +2620,7 @@ cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
portMemSet(&header, 0, sizeof(header));
header.version = NVPCF_DYNAMIC_PARAMS_20_VERSION;
header.version = pParams->version;
header.headerSize = NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05;
header.commonSize = NVPCF_DYNAMIC_PARAMS_2X_COMMON_SIZE_10;
header.entrySize = NVPCF_DYNAMIC_PARAMS_2X_ENTRY_SIZE_1C;
@@ -2073,6 +2667,12 @@ cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
// Unpack the header part
configReadStructure(pData, (void *)&headerOut, 0, pSzHeaderFmt);
if (headerOut.version != pParams->version)
{
status = NV_ERR_INVALID_DATA;
goto nvpcf2xGetDynamicParams_exit;
}
if ((headerOut.headerSize != NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05) ||
(headerOut.commonSize != NVPCF_DYNAMIC_PARAMS_2X_COMMON_SIZE_10) ||
(headerOut.entrySize != NVPCF_DYNAMIC_PARAMS_2X_ENTRY_SIZE_1C))
@@ -2127,8 +2727,51 @@ cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
nvpcf2xGetDynamicParams_exit:
portMemFree(pData);
break;
}
case NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CASE:
{
NvU8 *pData = NULL;
NvU16 dataSize = NVPCF0100_CTRL_CONFIG_2X_BUFF_SIZE_MAX;
pData = portMemAllocNonPaged(dataSize);
if ((rc = pOS->osCallACPI_DSM(pGpu,
ACPI_DSM_FUNCTION_NVPCF_2X,
NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CONFIG_TABLES,
(NvU32 *)pData,
&dataSize)) != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
"Unable to retrieve NVPCF Static data. Possibly not supported by SBIOS"
"rc = %x\n", rc);
status = NV_ERR_NOT_SUPPORTED;
goto nvpcf2xGetStaticParams_exit;
}
status = _validateConfigStaticTable_2x(pData, &dataSize);
if (NV_OK != status)
{
NV_PRINTF(LEVEL_WARNING, "Config Static Data checksum failed\n");
status = NV_ERR_NOT_SUPPORTED;
goto nvpcf2xGetStaticParams_exit;
}
// Subtract 1 byte for the checksum
dataSize--;
status = _controllerParseStaticTable_2x(pData, dataSize, pParams);
if (NV_OK != status)
{
status = NV_ERR_NOT_SUPPORTED;
}
nvpcf2xGetStaticParams_exit:
portMemFree(pData);
break;
}
default:
{
NV_PRINTF(LEVEL_INFO, "Inavlid NVPCF subFunc : 0x%x\n", pParams->subFunc);

View File

@@ -662,6 +662,12 @@ NV_STATUS serverControl_ValidateCookie
OBJGPU *pGpu;
CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
if (pCallContext == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Calling context is NULL!\n");
return NV_ERR_INVALID_PARAMETER;
}
if (RMCFG_FEATURE_PLATFORM_GSP)
{
pGpu = gpumgrGetSomeGpu();

View File

@@ -125,7 +125,7 @@ NvBool rmapiCmdIsCacheable(NvU32 cmd, NvBool bAllowInternal)
return rmapiControlIsCacheable(flags, accessRight, bAllowInternal);
}
NV_STATUS rmapiControlCacheInit()
NV_STATUS rmapiControlCacheInit(void)
{
#if defined(DEBUG)
RmapiControlCache.mode = NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY;
@@ -492,4 +492,4 @@ void rmapiControlCacheSetMode(NvU32 mode)
NvU32 rmapiControlCacheGetMode(void)
{
return RmapiControlCache.mode;
}
}

View File

@@ -40,7 +40,7 @@ static PORT_STATE portState;
/// @todo Add better way to initialize all modules
NV_STATUS portInitialize()
NV_STATUS portInitialize(void)
{
if (PORT_INC(portState.initCount) == 1)
{
@@ -66,7 +66,7 @@ NV_STATUS portInitialize()
return NV_OK;
}
void portShutdown()
void portShutdown(void)
{
if (PORT_DEC(portState.initCount) == 0)
{
@@ -92,7 +92,7 @@ void portShutdown()
}
}
NvBool portIsInitialized()
NvBool portIsInitialized(void)
{
return portState.initCount > 0;
}

View File

@@ -53,7 +53,7 @@ struct PORT_CRYPTO_PRNG
};
PORT_CRYPTO_PRNG *portCryptoDefaultGenerator;
void portCryptoInitialize()
void portCryptoInitialize(void)
{
NvU64 seed;
#if defined(PORT_CRYPTO_PRNG_SEED)
@@ -73,7 +73,7 @@ void portCryptoInitialize()
portCryptoPseudoRandomSetSeed(seed);
}
void portCryptoShutdown()
void portCryptoShutdown(void)
{
portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator);
portCryptoDefaultGenerator = NULL;
@@ -174,12 +174,12 @@ void portCryptoPseudoRandomSetSeed(NvU64 seed)
portCryptoDefaultGenerator = portCryptoPseudoRandomGeneratorCreate(seed);
}
NvU32 portCryptoPseudoRandomGetU32()
NvU32 portCryptoPseudoRandomGetU32(void)
{
return portCryptoPseudoRandomGeneratorGetU32(portCryptoDefaultGenerator);
}
NvU64 portCryptoPseudoRandomGetU64()
NvU64 portCryptoPseudoRandomGetU64(void)
{
return portCryptoPseudoRandomGeneratorGetU64(portCryptoDefaultGenerator);
}

View File

@@ -34,45 +34,15 @@
#error "DEBUG module must be present for memory tracking"
#endif
#if !PORT_IS_MODULE_SUPPORTED(atomic)
#error "ATOMIC module must be present for memory tracking"
#endif
#if PORT_MEM_TRACK_USE_LIMIT
#include "os/os.h"
#define PORT_MEM_LIMIT_MAX_PIDS 32
#endif
#if NVOS_IS_LIBOS
#define PORT_MEM_THREAD_SAFE_ALLOCATIONS 0
#else
#define PORT_MEM_THREAD_SAFE_ALLOCATIONS 1
#endif
#if PORT_MEM_THREAD_SAFE_ALLOCATIONS && !PORT_IS_MODULE_SUPPORTED(atomic)
#error "ATOMIC module must be present for memory tracking"
#endif
#if PORT_MEM_THREAD_SAFE_ALLOCATIONS
#define PORT_MEM_ATOMIC_ADD_SIZE portAtomicAddSize
#define PORT_MEM_ATOMIC_SUB_SIZE portAtomicSubSize
#define PORT_MEM_ATOMIC_DEC_U32 portAtomicDecrementU32
#define PORT_MEM_ATOMIC_INC_U32 portAtomicIncrementU32
#define PORT_MEM_ATOMIC_SET_U32 portAtomicSetU32
#define PORT_MEM_ATOMIC_CAS_SIZE portAtomicCompareAndSwapSize
#define PORT_MEM_ATOMIC_CAS_U32 portAtomicCompareAndSwapU32
#else
//
// We can just stub out the atomic operations for non-atomic ones and not waste
// waste cycles on synchronization
//
#define PORT_MEM_ATOMIC_ADD_SIZE(pVal, val) (*((NvSPtr *)pVal) += val)
#define PORT_MEM_ATOMIC_SUB_SIZE(pVal, val) (*((NvSPtr *)pVal) -= val)
#define PORT_MEM_ATOMIC_DEC_U32(pVal) (--(*((NvU32 *)pVal)))
#define PORT_MEM_ATOMIC_INC_U32(pVal) (++(*((NvU32 *)pVal)))
#define PORT_MEM_ATOMIC_SET_U32(pVal, val) (*((NvU32 *)pVal) = val)
#define PORT_MEM_ATOMIC_CAS_SIZE(pVal, newVal, oldVal) \
((*pVal == oldVal) ? ((*((NvSPtr *)pVal) = newVal), NV_TRUE) : NV_FALSE)
#define PORT_MEM_ATOMIC_CAS_U32(pVal, newVal, oldVal) \
((*pVal == oldVal) ? ((*((NvU32 *)pVal) = newVal), NV_TRUE) : NV_FALSE)
#endif // !PORT_MEM_THREAD_SAFE_ALLOCATIONS
struct PORT_MEM_ALLOCATOR_IMPL
{
PORT_MEM_ALLOCATOR_TRACKING tracking;
@@ -108,11 +78,11 @@ static NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock)
}
static void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock)
{
while (!PORT_MEM_ATOMIC_CAS_U32(pSpinlock, 1, 0));
while (!portAtomicCompareAndSwapU32(pSpinlock, 1, 0));
}
static void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock)
{
PORT_MEM_ATOMIC_SET_U32(pSpinlock, 0);
portAtomicSetU32(pSpinlock, 0);
}
static void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock)
{
@@ -180,13 +150,13 @@ _portMemCounterInc
NvU32 activeAllocs;
NvLength activeSize = 0;
activeAllocs = PORT_MEM_ATOMIC_INC_U32(&pCounter->activeAllocs);
PORT_MEM_ATOMIC_INC_U32(&pCounter->totalAllocs);
activeAllocs = portAtomicIncrementU32(&pCounter->activeAllocs);
portAtomicIncrementU32(&pCounter->totalAllocs);
if (PORT_MEM_TRACK_USE_FENCEPOSTS)
{
activeSize = PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->activeSize, size);
activeSize = portAtomicAddSize(&pCounter->activeSize, size);
}
PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->totalSize, size);
portAtomicAddSize(&pCounter->totalSize, size);
// Atomically compare the peak value with the active, and update if greater.
while (1)
@@ -194,14 +164,14 @@ _portMemCounterInc
NvU32 peakAllocs = pCounter->peakAllocs;
if (activeAllocs <= peakAllocs)
break;
PORT_MEM_ATOMIC_CAS_U32(&pCounter->peakAllocs, activeAllocs, peakAllocs);
portAtomicCompareAndSwapU32(&pCounter->peakAllocs, activeAllocs, peakAllocs);
}
while (1)
{
NvLength peakSize = pCounter->peakSize;
if (activeSize <= peakSize)
break;
PORT_MEM_ATOMIC_CAS_SIZE(&pCounter->peakSize, activeSize, peakSize);
portAtomicCompareAndSwapSize(&pCounter->peakSize, activeSize, peakSize);
}
}
static NV_INLINE void
@@ -211,11 +181,11 @@ _portMemCounterDec
void *pMem
)
{
PORT_MEM_ATOMIC_DEC_U32(&pCounter->activeAllocs);
portAtomicDecrementU32(&pCounter->activeAllocs);
if (PORT_MEM_TRACK_USE_FENCEPOSTS)
{
PORT_MEM_ATOMIC_SUB_SIZE(&pCounter->activeSize,
((PORT_MEM_FENCE_HEAD *)pMem-1)->blockSize);
portAtomicSubSize(&pCounter->activeSize,
((PORT_MEM_FENCE_HEAD *)pMem-1)->blockSize);
}
}
@@ -303,7 +273,7 @@ _portMemListAdd
PORT_MEM_LIST *pList = &pHead->list;
pList->pNext = pList;
pList->pPrev = pList;
if (!PORT_MEM_ATOMIC_CAS_SIZE(&pTracking->pFirstAlloc, pList, NULL))
if (!portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList, NULL))
{
PORT_LOCKED_LIST_LINK(pTracking->pFirstAlloc, pList, pTracking->listLock);
}
@@ -318,11 +288,11 @@ _portMemListRemove
PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
PORT_MEM_LIST *pList = &pHead->list;
if (!PORT_MEM_ATOMIC_CAS_SIZE(&pList->pNext, NULL, pList))
if (!portAtomicCompareAndSwapSize(&pList->pNext, NULL, pList))
{
PORT_LOCKED_LIST_UNLINK(pTracking->pFirstAlloc, pList, pTracking->listLock);
}
PORT_MEM_ATOMIC_CAS_SIZE(&pTracking->pFirstAlloc, pList->pNext, pList);
portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList->pNext, pList);
}
static NV_INLINE PORT_MEM_HEADER *
@@ -417,7 +387,7 @@ typedef struct PORT_MEM_LOG_ENTRY
#define PORT_MEM_LOG_ENTRIES 4096
static void
_portMemLogInit()
_portMemLogInit(void)
{
NVLOG_BUFFER_HANDLE hBuffer;
nvlogAllocBuffer(PORT_MEM_LOG_ENTRIES * sizeof(PORT_MEM_LOG_ENTRY),
@@ -426,7 +396,7 @@ _portMemLogInit()
}
static void
_portMemLogDestroy()
_portMemLogDestroy(void)
{
NVLOG_BUFFER_HANDLE hBuffer;
nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
@@ -547,7 +517,7 @@ _portMemLimitInc(NvU32 pid, void *pMem, NvU64 size)
{
NvU32 pidIdx = pid - 1;
pMemHeader->blockSize = size;
PORT_MEM_ATOMIC_ADD_SIZE(&portMemGlobals.counterPid[pidIdx], size);
portAtomicAddSize(&portMemGlobals.counterPid[pidIdx], size);
}
}
}
@@ -571,7 +541,7 @@ _portMemLimitDec(void *pMem)
}
else
{
PORT_MEM_ATOMIC_SUB_SIZE(&portMemGlobals.counterPid[pidIdx], pMemHeader->blockSize);
portAtomicSubSize(&portMemGlobals.counterPid[pidIdx], pMemHeader->blockSize);
}
}
}
@@ -626,7 +596,7 @@ portMemInitialize(void)
#if PORT_MEM_TRACK_USE_CALLERINFO
PORT_MEM_CALLERINFO_TYPE_PARAM = PORT_MEM_CALLERINFO_MAKE;
#endif
if (PORT_MEM_ATOMIC_INC_U32(&portMemGlobals.initCount) != 1)
if (portAtomicIncrementU32(&portMemGlobals.initCount) != 1)
return;
portMemGlobals.mainTracking.pAllocator = NULL;
@@ -679,7 +649,7 @@ void
portMemShutdown(NvBool bForceSilent)
{
PORT_UNREFERENCED_VARIABLE(bForceSilent);
if (PORT_MEM_ATOMIC_DEC_U32(&portMemGlobals.initCount) != 0)
if (portAtomicDecrementU32(&portMemGlobals.initCount) != 0)
return;
#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT)
@@ -880,7 +850,7 @@ portMemInitializeAllocatorTracking
PORT_MEM_COUNTER_INIT(&pTracking->counter);
PORT_MEM_LIST_INIT(pTracking);
PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking);
PORT_MEM_ATOMIC_INC_U32(&portMemGlobals.totalAllocators);
portAtomicIncrementU32(&portMemGlobals.totalAllocators);
}
#if PORT_MEM_TRACK_USE_LIMIT
@@ -1231,7 +1201,7 @@ _portMemTrackingRelease
PORT_LOCKED_LIST_UNLINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock);
PORT_MEM_LIST_DESTROY(pTracking);
PORT_MEM_ATOMIC_DEC_U32(&portMemGlobals.totalAllocators);
portAtomicDecrementU32(&portMemGlobals.totalAllocators);
}
static void

View File

@@ -315,15 +315,17 @@ portSyncRwLockReleaseWrite
os_release_rwlock_write(pRwLock->rwlock);
}
NvBool portSyncExSafeToSleep()
NvBool portSyncExSafeToSleep(void)
{
return os_semaphore_may_sleep();
}
NvBool portSyncExSafeToWake()
NvBool portSyncExSafeToWake(void)
{
return NV_TRUE;
}
NvU64 portSyncExGetInterruptLevel()
NvU64 portSyncExGetInterruptLevel(void)
{
return !os_semaphore_may_sleep();
}

View File

@@ -46,14 +46,14 @@ const PORT_THREAD PORT_THREAD_INVALID = {0ULL};
// Invalid value for process.
const PORT_PROCESS PORT_PROCESS_INVALID = {0ULL};
NvU64 portThreadGetCurrentThreadId()
NvU64 portThreadGetCurrentThreadId(void)
{
NvU64 tid = 0;
os_get_current_thread(&tid);
return tid;
}
void portThreadYield()
void portThreadYield(void)
{
os_schedule();
}

View File

@@ -74,7 +74,7 @@ portUtilExGetStackTrace
}
#endif
NV_NOINLINE NvUPtr portUtilGetIPAddress()
NV_NOINLINE NvUPtr portUtilGetIPAddress(void)
{
return portUtilGetReturnAddress();
}

View File

@@ -130,18 +130,6 @@ NvU32 osGetMaximumCoreCount(void);
#endif
#endif
#if NVOS_IS_LIBOS
//
// On LibOS we have at most one passive thread (task_rm) and one ISR
// (task_interrupt) active at once (on same CPU core). Since these two will
// use different maps, we don't need to protect them with spinlocks.
//
#define TLS_SPINLOCK_ACQUIRE(x)
#define TLS_SPINLOCK_RELEASE(x)
#else
#define TLS_SPINLOCK_ACQUIRE(x) portSyncSpinlockAcquire(x)
#define TLS_SPINLOCK_RELEASE(x) portSyncSpinlockRelease(x)
#endif // NVOS_IS_LIBOS
#if !PORT_IS_FUNC_SUPPORTED(portSyncExSafeToSleep)
#define portSyncExSafeToSleep() NV_TRUE
@@ -158,7 +146,7 @@ NvU32 osGetMaximumCoreCount(void);
NV_STATUS tlsInitialize()
NV_STATUS tlsInitialize(void)
{
NV_STATUS status;
@@ -216,7 +204,7 @@ done:
return status;
}
void tlsShutdown()
void tlsShutdown(void)
{
if (portAtomicDecrementU32(&tlsDatabase.initCount) != 0)
{
@@ -313,7 +301,7 @@ PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void)
return _tlsIsrAllocatorGet();
}
NvU64 tlsEntryAlloc()
NvU64 tlsEntryAlloc(void)
{
NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL);
return portAtomicExIncrementU64(&tlsDatabase.lastEntryId);
@@ -427,7 +415,7 @@ NvU32 tlsEntryUnreference(NvU64 entryId)
static ThreadEntry *
_tlsThreadEntryGet()
_tlsThreadEntryGet(void)
{
ThreadEntry *pThreadEntry;
@@ -438,16 +426,16 @@ _tlsThreadEntryGet()
else
{
NvU64 threadId = portThreadGetCurrentThreadId();
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock);
pThreadEntry = mapFind(&tlsDatabase.threadEntries, threadId);
TLS_SPINLOCK_RELEASE(tlsDatabase.pLock);
portSyncSpinlockAcquire(tlsDatabase.pLock);
pThreadEntry = mapFind(&tlsDatabase.threadEntries, threadId);
portSyncSpinlockRelease(tlsDatabase.pLock);
}
return pThreadEntry;
}
static ThreadEntry *
_tlsThreadEntryGetOrAlloc()
_tlsThreadEntryGetOrAlloc(void)
{
ThreadEntry* pThreadEntry = NULL;
@@ -460,11 +448,11 @@ _tlsThreadEntryGetOrAlloc()
{
pThreadEntry->key.threadId = portThreadGetCurrentThreadId();
mapInitIntrusive(&pThreadEntry->map);
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock);
mapInsertExisting(&tlsDatabase.threadEntries,
pThreadEntry->key.threadId,
pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pLock);
portSyncSpinlockAcquire(tlsDatabase.pLock);
mapInsertExisting(&tlsDatabase.threadEntries,
pThreadEntry->key.threadId,
pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pLock);
}
}
@@ -522,9 +510,9 @@ _tlsEntryRelease
{
NV_ASSERT(portMemExSafeForNonPagedAlloc());
mapDestroy(&pThreadEntry->map);
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock);
mapRemove(&tlsDatabase.threadEntries, pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pLock);
portSyncSpinlockAcquire(tlsDatabase.pLock);
mapRemove(&tlsDatabase.threadEntries, pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pLock);
PORT_FREE(tlsDatabase.pAllocator, pThreadEntry);
}
}
@@ -549,7 +537,7 @@ static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void)
#if TLS_ISR_CAN_USE_LOCK
static NV_STATUS _tlsIsrEntriesInit()
static NV_STATUS _tlsIsrEntriesInit(void)
{
tlsDatabase.pIsrLock = portSyncSpinlockCreate(tlsDatabase.pAllocator);
if (tlsDatabase.pLock == NULL)
@@ -559,7 +547,7 @@ static NV_STATUS _tlsIsrEntriesInit()
mapInitIntrusive(&tlsDatabase.isrEntries);
return NV_OK;
}
static void _tlsIsrEntriesDestroy()
static void _tlsIsrEntriesDestroy(void)
{
if (tlsDatabase.pIsrLock)
portSyncSpinlockDestroy(tlsDatabase.pIsrLock);
@@ -567,40 +555,40 @@ static void _tlsIsrEntriesDestroy()
}
static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
{
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
mapInsertExisting(&tlsDatabase.isrEntries, pThreadEntry->key.sp, pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
mapInsertExisting(&tlsDatabase.isrEntries, pThreadEntry->key.sp, pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pIsrLock);
}
static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
{
ThreadEntry *pThreadEntry;
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
pThreadEntry = mapFind(&tlsDatabase.isrEntries, sp);
mapRemove(&tlsDatabase.isrEntries, pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
pThreadEntry = mapFind(&tlsDatabase.isrEntries, sp);
mapRemove(&tlsDatabase.isrEntries, pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pIsrLock);
return pThreadEntry;
}
static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
{
ThreadEntry *pThreadEntry;
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
#if STACK_GROWS_DOWNWARD
pThreadEntry = mapFindGEQ(&tlsDatabase.isrEntries, approxSp);
pThreadEntry = mapFindGEQ(&tlsDatabase.isrEntries, approxSp);
#else
pThreadEntry = mapFindLEQ(&tlsDatabase.isrEntries, approxSp);
pThreadEntry = mapFindLEQ(&tlsDatabase.isrEntries, approxSp);
#endif
TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
portSyncSpinlockRelease(tlsDatabase.pIsrLock);
return pThreadEntry;
}
#else // Lockless
static NV_STATUS _tlsIsrEntriesInit()
static NV_STATUS _tlsIsrEntriesInit(void)
{
portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
return NV_OK;
}
static void _tlsIsrEntriesDestroy()
static void _tlsIsrEntriesDestroy(void)
{
portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
}
@@ -656,7 +644,7 @@ static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
static NvBool _tlsIsIsr()
static NvBool _tlsIsIsr(void)
{
#if defined (TLS_ISR_UNIT_TEST)
// In unit tests we simulate ISR tests in different ways, so tests define this