550.40.07

This commit is contained in:
Bernhard Stoeckner
2024-01-24 17:51:53 +01:00
parent bb2dac1f20
commit 91676d6628
1411 changed files with 261367 additions and 145959 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,166 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "cci/cci_nvswitch.h"
#include "cci/cci_priv_nvswitch.h"
#include "cci/cci_cables_nvswitch.h"
#include "nvswitch/ls10/dev_nvlipt_lnk_ip.h"
#include "ls10/ls10.h"
/*
 * @brief Bring a copper module's links into a trainable state.
 *
 * De-asserts LPMODE on the module, then walks the module's link train
 * mask: links already active or transitioning (HS/SAFE/RECOVERY/SLEEP)
 * are left alone; any other link that is not already in RESET gets a TL
 * shutdown request followed by a TL reset request. Finally SOE is asked
 * to perform the system register setup for the module.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module whose links are initialized
 *
 * @return NVL_SUCCESS on success; otherwise the failing NvlStatus
 *         (the last failed TL reset, or the first fatal error).
 */
static NvlStatus
_cci_cable_initialize_links
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pOnboardState;
    NvlStatus retval;
    NvlStatus loopStatus;   // sticky failure recorded across the per-link loop
    NvU64 linkState;
    NvU64 linkMask;
    NvU32 linkId;
    nvlink_link *link;

    loopStatus = NVL_SUCCESS;
    pOnboardState = &device->pCci->moduleState[moduleId];

    // De-assert lpmode
    retval = cciSetLPMode(device, moduleId, NV_FALSE);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    linkMask = pOnboardState->linkTrainMask;
    FOR_EACH_INDEX_IN_MASK(64, linkId, linkMask)
    {
        NVSWITCH_ASSERT(linkId < NVSWITCH_LINK_COUNT(device));

        // Skip links that cannot be resolved or whose NVLIPT engine is invalid.
        // NOTE(review): the linkId upper-bound check comes after the link
        // lookup; the assert above is expected to catch out-of-range ids first.
        link = nvswitch_get_link(device, linkId);
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID(device, link->linkNumber, NVLIPT_LNK) ||
            (linkId >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            continue;
        }

        retval = link->link_handlers->get_dl_link_mode(link, &linkState);
        if (retval != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Failed to get link state for link: %d\n",
                __FUNCTION__, linkId);
            continue;
        }

        // This only makes sense to do in the driver
        // Leave links alone that are already up or transitioning.
        if (linkState == NVLINK_LINKSTATE_HS ||
            linkState == NVLINK_LINKSTATE_SAFE ||
            linkState == NVLINK_LINKSTATE_RECOVERY ||
            linkState == NVLINK_LINKSTATE_SLEEP)
        {
            continue;
        }

        // Shutdown and Reset link
        if (linkState != NVLINK_LINKSTATE_RESET)
        {
            // Shutdown failure is only logged; the reset below is still
            // attempted and its failure is what aborts the sequence.
            retval = nvswitch_request_tl_link_state_ls10(link,
                NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_SHUTDOWN,
                NV_TRUE);
            if (retval != NVL_SUCCESS)
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: TL link shutdown request failed for link: %d\n",
                    __FUNCTION__, linkId);
            }

            retval = nvswitch_request_tl_link_state_ls10(link,
                NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET,
                NV_TRUE);
            if (retval != NVL_SUCCESS)
            {
                NVSWITCH_PRINT(device, ERROR,
                    "%s: TL link reset request failed for link: %d\n",
                    __FUNCTION__, linkId);
                loopStatus = retval;
            }
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    if (loopStatus != NVL_SUCCESS)
    {
        return loopStatus;
    }

    // Have SOE perform system register setup
    retval = cciModulesOnboardSOE(device, moduleId);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: System register setup failed for module: %d\n",
            __FUNCTION__, moduleId);
        return retval;
    }

    return NVL_SUCCESS;
}
/*
 * @brief Kick off asynchronous initialization of a copper cable module.
 *
 * Schedules _cci_cable_initialize_links as the current onboard phase;
 * on success the state machine advances to ALI launch, on failure it
 * falls back to condition checking. LED state is initialized afterwards
 * (its status is not checked here).
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module will be initialized.
 */
void
cciCablesInitializeCopperAsync
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(onSuccess));
    nvswitch_os_memset(&onFail, 0, sizeof(onFail));
    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_LAUNCH_ALI;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId,
                                      _cci_cable_initialize_links,
                                      onSuccess,
                                      onFail);

    cciSetLedsInitialize(device, moduleId);
}

View File

@@ -0,0 +1,45 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "cci/cci_nvswitch.h"
#include "cci/cci_priv_nvswitch.h"
#include "cci/cci_cables_nvswitch.h"
/*
 * @brief Placeholder initialization path for direct-attach cables.
 *
 * Currently unimplemented: logs an informational message and
 * transitions the module straight to the monitor phase.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to transition
 */
void
cciCablesInitializeDirectAsync
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE monitorState;

    NVSWITCH_PRINT(device, INFO,
        "%s: Not implemented.\n",
        __FUNCTION__);

    nvswitch_os_memset(&monitorState, 0, sizeof(monitorState));
    monitorState.onboardPhase = CCI_ONBOARD_PHASE_MONITOR;
    cciModuleOnboardPerformPhaseTransitionAsync(device, moduleId, monitorState);
}

View File

@@ -0,0 +1,284 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "cci/cci_nvswitch.h"
#include "cci/cci_priv_nvswitch.h"
#include "cci/cci_cables_nvswitch.h"
#include "nvlink_export.h"
//
// Custom byte definition obtained from the following doc:
// "LINKX BOOT FAIL AND RECOVERY INDICATIONS"
//
#define MODULE_FLAGS_CUSTOM_BYTE_BOOT_FAILURE 0:0
#define MODULE_FLAGS_CUSTOM_BYTE_RECOVERY 1:1
/*
 * @brief Map an onboard phase (and, for the optical phase, its subphase)
 *        to a human-readable name for logging.
 *
 * @param[in] phase     Onboard phase
 * @param[in] subphase  Optical subphase; consulted only when phase is
 *                      CCI_ONBOARD_PHASE_INIT_OPTICAL
 *
 * @return Static descriptive string; "Unknown" for unrecognized values.
 */
static const char*
_cci_onboard_phase_to_text
(
    CCI_MODULE_ONBOARD_PHASE phase,
    CCI_MODULE_ONBOARD_SUBPHASE_OPTICAL subphase
)
{
    // The optical-init phase is the only one with named subphases.
    if (phase == CCI_ONBOARD_PHASE_INIT_OPTICAL)
    {
        switch (subphase)
        {
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_START:
                return "CCI Onboard Subphase Init Optical Start";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_CMIS_SELECT_APPLICATION:
                return "CCI Onboard Subphase Init Optical CMIS Select Application";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_CONFIGURE_LINKS:
                return "CCI Onboard Subphase Init Optical Configure Links";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_DISABLE_ALI:
                return "CCI Onboard Subphase Init Optical Disable ALI";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_SETUP:
                return "CCI Onboard Subphase Init Optical Pretrain Setup";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_SEND_CDB:
                return "CCI Onboard Subphase Init Optical Pretrain Send CDB";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_POLL:
                return "CCI Onboard Subphase Init Optical Pretrain Poll";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_GO_TRANSPARENT:
                return "CCI Onboard Subphase Init Optical Go Transparent";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_RESET_LINKS:
                return "CCI Onboard Subphase Init Optical Reset Links";
            case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_ENABLE_ALI:
                return "CCI Onboard Subphase Init Optical Enable ALI";
            default:
                return "Unknown";
        }
    }

    switch (phase)
    {
        case CCI_ONBOARD_PHASE_CHECK_CONDITION:
            return "CCI Onboard Phase Check Condition";
        case CCI_ONBOARD_PHASE_IDENTIFY:
            return "CCI Onboard Phase Identify";
        case CCI_ONBOARD_PHASE_INIT_COPPER:
            return "CCI Onboard Phase Init Copper";
        case CCI_ONBOARD_PHASE_INIT_DIRECT:
            return "CCI Onboard Phase Init Direct";
        case CCI_ONBOARD_PHASE_LAUNCH_ALI:
            return "CCI Onboard Phase Launch ALI";
        case CCI_ONBOARD_PHASE_SLEEP:
            return "CCI Onboard Phase Sleep";
        case CCI_ONBOARD_PHASE_MONITOR:
            return "CCI Onboard Phase Monitor";
        default:
            return "Unknown";
    }
}
/*
 * @brief Report module firmware boot/recovery failures via SXID.
 *
 * Reads the module's vendor-custom flags byte over CMIS. If either the
 * boot-failure or recovery bit is set, fetches firmware info (CDB) and
 * logs an SXID identifying which image (A or B) is running and whether
 * the event was a boot or recovery failure, including the image's
 * "extra string" diagnostic text. Errors in reading the flags are
 * logged and otherwise ignored (best-effort diagnostics).
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to check
 */
static void
_cci_check_module_boot_failure
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    NvlStatus retval;
    NvU8 fwInfo[CMIS_CDB_LPL_MAX_SIZE];
    NvU8 fwStatusFlags;
    NvU8 moduleFlagsCustomByte;

    // Vendor-defined flags byte; bit layout documented in
    // "LINKX BOOT FAIL AND RECOVERY INDICATIONS" (see defines above).
    retval = cciCmisRead(device, moduleId, 0, 0,
                         CMIS_MODULE_FLAGS_CUSTOM_BYTE,
                         1, &moduleFlagsCustomByte);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Failed to get module custom flags\n",
            __FUNCTION__);
        return;
    }

    if (REF_NUM(MODULE_FLAGS_CUSTOM_BYTE_BOOT_FAILURE, moduleFlagsCustomByte) ||
        REF_NUM(MODULE_FLAGS_CUSTOM_BYTE_RECOVERY, moduleFlagsCustomByte))
    {
        retval = cciGetXcvrFWInfo(device, moduleId, fwInfo);
        if (retval != NVL_SUCCESS)
        {
            // FW info unavailable: report a generic boot failure.
            NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_CCI_MODULE_BOOT,
                "Module %d boot failure\n", moduleId);
        }
        else
        {
            fwStatusFlags = fwInfo[CMIS_CDB_GET_FW_INFO_LPL_FW_STATUS_FLAGS];

            if (REF_VAL(CMIS_CDB_GET_FW_INFO_LPL_FW_STATUS_FLAGS_IMAGE_A_RUNNING,
                        fwStatusFlags))
            {
                // NULL terminate string
                fwInfo[CMIS_CDB_GET_FW_INFO_LPL_IMAGE_A_EXTRA_STRING +
                       CMIS_CDB_GET_FW_INFO_LPL_IMAGE_FACTORY_BOOT_EXTRA_STRING_SIZE - 1] = 0;

                // Boot-failure bit takes precedence over recovery in reporting.
                if (REF_NUM(MODULE_FLAGS_CUSTOM_BYTE_BOOT_FAILURE, moduleFlagsCustomByte))
                {
                    NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_CCI_MODULE_BOOT,
                        "Module %d Image A boot failure\n",
                        moduleId);
                }
                else
                {
                    NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_CCI_MODULE_RECOVERY,
                        "Module %d Image A recovery failure\n",
                        moduleId);
                }
                NVSWITCH_PRINT(device, ERROR, "%s: Module %d Image A boot/recovery failure. ExtraString: %s\n",
                    __FUNCTION__, moduleId,
                    &fwInfo[CMIS_CDB_GET_FW_INFO_LPL_IMAGE_A_EXTRA_STRING]);
            }

            // Not mutually exclusive with the A branch above: both image
            // flags are checked independently.
            if (REF_VAL(CMIS_CDB_GET_FW_INFO_LPL_FW_STATUS_FLAGS_IMAGE_B_RUNNING,
                        fwStatusFlags))
            {
                // NULL terminate string
                fwInfo[CMIS_CDB_GET_FW_INFO_LPL_IMAGE_B_EXTRA_STRING +
                       CMIS_CDB_GET_FW_INFO_LPL_IMAGE_FACTORY_BOOT_EXTRA_STRING_SIZE - 1] = 0;

                if (REF_NUM(MODULE_FLAGS_CUSTOM_BYTE_BOOT_FAILURE, moduleFlagsCustomByte))
                {
                    NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_CCI_MODULE_BOOT,
                        "Module %d Image B boot failure\n",
                        moduleId);
                }
                else
                {
                    NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_CCI_MODULE_RECOVERY,
                        "Module %d Image B recovery failure\n",
                        moduleId);
                }
                NVSWITCH_PRINT(device, ERROR, "%s: Module %d Image B boot/recovery failure. ExtraString: %s\n",
                    __FUNCTION__, moduleId,
                    &fwInfo[CMIS_CDB_GET_FW_INFO_LPL_IMAGE_B_EXTRA_STRING]);
            }
        }
    }
}
/*
 * @brief Log any recorded onboard failure for a module, then check for
 *        firmware boot/recovery failures.
 *
 * The onboard-failure SXID is only emitted when the CCI error log
 * regkey is enabled; the boot-failure check always runs.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to check
 */
void
cciModuleOnboardCheckErrors
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pState = &device->pCci->moduleState[moduleId];

    if (pState->onboardError.bOnboardFailure &&
        device->regkeys.cci_error_log_enable)
    {
        NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_CCI_MODULE, "Module %d failed %s\n",
            moduleId,
            _cci_onboard_phase_to_text(
                pState->onboardError.failedOnboardState.onboardPhase,
                pState->onboardError.failedOnboardState.onboardSubPhase.optical));
    }

    _cci_check_module_boot_failure(device, moduleId);
}
/*
 * @brief Determine whether link errors should be reported for a link.
 *
 * Errors are suppressed (NV_FALSE) only while the link is actively
 * being trained by CCI; in every other case — CCI unsupported, link not
 * CCI-managed, mode query failure, or any non-training mode — errors
 * should be reported.
 *
 * @param[in] device      nvswitch_device pointer
 * @param[in] linkNumber  Link to query
 *
 * @return NV_TRUE if link errors should be reported, NV_FALSE otherwise.
 */
NvBool
cciReportLinkErrors
(
    nvswitch_device *device,
    NvU32 linkNumber
)
{
    NvlStatus retval;
    NvU64 mode;

    if ((device->pCci == NULL) || (!device->pCci->bInitialized))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: CCI not supported\n",
            __FUNCTION__);
        //
        // Bug fix: this function returns NvBool, but previously returned
        // -NVL_ERR_NOT_SUPPORTED (a negative NvlStatus) here. Callers
        // treating the result as a boolean saw a nonzero ("true") value,
        // so NV_TRUE preserves observable behavior with the correct type.
        //
        return NV_TRUE;
    }

    // Links not managed by CCI always report errors.
    if (!cciIsLinkManaged(device, linkNumber))
    {
        return NV_TRUE;
    }

    retval = cciGetLinkMode(device, linkNumber, &mode);
    if (retval != NVL_SUCCESS)
    {
        // Cannot determine mode; err on the side of reporting.
        return NV_TRUE;
    }

    // Suppress error reporting only while CCI is training the link.
    return (mode == NVLINK_LINKSTATE_TRAINING_CCI) ? NV_FALSE : NV_TRUE;
}

View File

@@ -0,0 +1,930 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "cci/cci_nvswitch.h"
#include "cci/cci_priv_nvswitch.h"
#include "cci/cci_cables_nvswitch.h"
#include "nvswitch/ls10/dev_nvlipt_lnk_ip.h"
#include "nvswitch/ls10/dev_nvldl_ip.h"
#include "ls10/ls10.h"
/*
 * @brief Deactivate a set of CMIS data path lanes and wait for completion.
 *
 * Writes the deactivation mask to the Data Path control byte, reads the
 * module's advertised maximum deinit duration, then polls the Data Path
 * state bytes every 10 ms until every requested lane reports the
 * DEACTIVATED state or the advertised duration elapses.
 *
 * @param[in] device              nvswitch_device pointer
 * @param[in] moduleId            Module to operate on
 * @param[in] deactivateLaneMask  Bitmask of lanes (0-7) to deactivate
 *
 * @return NVL_SUCCESS once all requested lanes are deactivated;
 *         -NVL_ERR_GENERIC on timeout; otherwise the CMIS access error.
 */
static NvlStatus
_cci_cmis_deactivate_lanes
(
    nvswitch_device *device,
    NvU8 moduleId,
    NvU8 deactivateLaneMask
)
{
    NvlStatus retval;
    NVSWITCH_TIMEOUT timeout;
    NvU64 timeoutMs;
    NvU8 dataPathState[4];   // packed 2-lanes-per-byte state nibbles
    NvU8 byte;
    NvU8 laneMaskTemp;       // lanes observed deactivated this poll
    NvU8 laneNum;
    NvU8 laneState;

    laneMaskTemp = 0;

    // Deactivate lanes
    byte = deactivateLaneMask;
    retval = cciCmisWrite(device, moduleId, 0, 0x10,
                          CMIS_DATA_PATH_CONTROL_BYTE, 1, &byte);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    // Read max timeout value
    retval = cciCmisRead(device, moduleId, 0, 0x1,
                         CMIS_DATA_PATH_MAX_DURATION_BYTE, 1, &byte);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    // Module advertises its deinit duration as an encoded value; a zero
    // mapping would make the poll loop meaningless.
    timeoutMs = CMIS_MAX_DURATION_EN_TO_MS_MAP(
        REF_VAL(CMIS_DATA_PATH_MAX_DURATION_BYTE_DEINIT, byte));
    NVSWITCH_ASSERT(timeoutMs != 0);

    nvswitch_timeout_create(timeoutMs * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

    do
    {
        // Check if lanes are deactivated
        retval = cciCmisRead(device, moduleId, 0, 0x11,
                             CMIS_DATA_PATH_STATE, CMIS_DATA_PATH_STATE_SIZE,
                             dataPathState);
        if (retval != NVL_SUCCESS)
        {
            return retval;
        }

        laneMaskTemp = 0;
        FOR_EACH_INDEX_IN_MASK(8, laneNum, deactivateLaneMask)
        {
            // Each byte holds two lane states; odd lanes use the upper nibble.
            byte = dataPathState[CMIS_DATA_PATH_STATE_LANE_BYTE_MAP(laneNum)];
            laneState = (laneNum % 2) ? REF_VAL(CMIS_DATA_PATH_STATE_BYTE_LANE_1, byte) :
                                        REF_VAL(CMIS_DATA_PATH_STATE_BYTE_LANE_0, byte);
            if (laneState == CMIS_DATA_PATH_STATE_LANE_DEACTIVATED)
            {
                laneMaskTemp |= NVBIT(laneNum);
            }
        }
        FOR_EACH_INDEX_IN_MASK_END;

        // Operation successful when all lanes are in expected state
        if (laneMaskTemp == deactivateLaneMask)
        {
            break;
        }

        if (nvswitch_timeout_check(&timeout))
        {
            return -NVL_ERR_GENERIC;
        }

        nvswitch_os_sleep(10);
    } while (NV_TRUE);

    return NVL_SUCCESS;
}
/*
 * @brief Poll CMIS data path configuration error codes after applying a
 *        staged configuration.
 *
 * Reads the 4 config-error-code bytes (two 4-bit lane codes per byte)
 * every 10 ms until all 8 lanes report "ConfigAccepted" or the module's
 * advertised max duration elapses. On timeout, the per-lane error codes
 * are logged and -NVL_ERR_GENERIC is returned.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to check
 *
 * @return NVL_SUCCESS when all lanes accept the config; -NVL_ERR_GENERIC
 *         on timeout; otherwise the CMIS access error.
 */
static NvlStatus
_cci_cmis_check_config_errors
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    NvlStatus retval;
    NVSWITCH_TIMEOUT timeout;
    NvU64 timeoutMs;
    NvU32 i;
    NvU8 errorCodes[4];
    NvU8 byte;
    NvBool bLanesAccepted;

    bLanesAccepted = NV_FALSE;

    // Read max timeout value
    // NOTE(review): this reuses the DEINIT duration field to bound the
    // config-error poll — confirm this is the intended CMIS duration field.
    retval = cciCmisRead(device, moduleId, 0, 0x1,
                         CMIS_DATA_PATH_MAX_DURATION_BYTE, 1, &byte);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    timeoutMs = CMIS_MAX_DURATION_EN_TO_MS_MAP(
        REF_VAL(CMIS_DATA_PATH_MAX_DURATION_BYTE_DEINIT, byte));
    NVSWITCH_ASSERT(timeoutMs != 0);

    nvswitch_timeout_create(timeoutMs * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);

    do
    {
        // Check for errors in config(applied in deactivated state)
        retval = cciCmisRead(device, moduleId, 0, 0x11,
                             CMIS_DATA_PATH_CONFIG_ERROR_CODES, 4, errorCodes);
        if (retval != NVL_SUCCESS)
        {
            return retval;
        }

        //
        // NOTE(review): type-puns the NvU8[4] buffer as NvU32 — relies on
        // the buffer's alignment and the target's byte order matching the
        // ALL_LANES_ACCEPTED constant; consider memcpy to avoid the
        // strict-aliasing cast.
        //
        if ((*(NvU32*)errorCodes) == CMIS_DATA_PATH_CONFIG_ALL_LANES_ACCEPTED)
        {
            bLanesAccepted = NV_TRUE;
            break;
        }

        if (nvswitch_timeout_check(&timeout))
        {
            break;
        }

        nvswitch_os_sleep(10);
    } while (NV_TRUE);

    if (!bLanesAccepted)
    {
        // Dump per-lane error codes: low nibble = even lane, high = odd lane.
        for (i = 0; i < 4; i++)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Lane %d, error code: 0x%x. Lane %d, error code: 0x%x",
                __FUNCTION__, 2*i, (errorCodes[i] & 0xF), 2*i + 1, (errorCodes[i] >> 4));
        }
        return -NVL_ERR_GENERIC;
    }

    return NVL_SUCCESS;
}
/*
 * Operation is performed on all lanes in the given module
 */
/*
 * @brief Select and apply the NVL4 CMIS application on all module lanes.
 *
 * Sequence: deactivate all lanes, stage the NVL4 AppSel codes, apply
 * the staged set, wait for per-lane config acceptance, read back the
 * active control set to verify the selection, then de-assert LPMODE.
 * Skipped entirely if the module was already onboarded.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to configure
 *
 * @return NVL_SUCCESS on success; -NVL_ERR_GENERIC if readback does not
 *         match; otherwise the first failing step's NvlStatus.
 */
static NvlStatus
_cci_cmis_select_application
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pOnboardState;
    NvlStatus retval;
    NvU32 i;
    // AppSel byte per lane: pairs of lanes share an application slot.
    NvU8 nvl4AppSel[] = {0x20, 0x20, 0x24, 0x24, 0x28, 0x28, 0x2c, 0x2c};
    NvU8 nvl4AppSelTemp[8];
    NvU8 byte;

    pOnboardState = &device->pCci->moduleState[moduleId];

    // Nothing to do if module already setup
    if (pOnboardState->bModuleOnboarded)
    {
        return NVL_SUCCESS;
    }

    // 1. Deactivate all lanes
    retval = _cci_cmis_deactivate_lanes(device, moduleId, 0xFF);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Module %d: Lanes deactivate failed.\n",
            __FUNCTION__, moduleId);
        return retval;
    }

    // 2. Select application
    retval = cciCmisWrite(device, moduleId, 0, 0x10,
                          CMIS_STAGE_CONTROL_SET_0_APP_SEL, 8, nvl4AppSel);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Module %d: Select application failed.\n",
            __FUNCTION__, moduleId);
        return retval;
    }

    // 3. Apply application
    // Trigger the staged set on all 8 lanes at once.
    byte = 0xFF;
    retval = cciCmisWrite(device, moduleId, 0, 0x10,
                          CMIS_STAGE_CONTROL_SET_0, 1, &byte);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Module %d: Apply application failed.\n",
            __FUNCTION__, moduleId);
        return retval;
    }

    // 4. Check for errors
    retval = _cci_cmis_check_config_errors(device, moduleId);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Module %d: Datapath config failed.\n",
            __FUNCTION__, moduleId);
        return retval;
    }

    // 5. Verify that the application selection went through
    retval = cciCmisRead(device, moduleId, 0, 0x11,
                         CMIS_ACTIVE_CONTROL_SET, 8, nvl4AppSelTemp);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }
    for (i = 0; i < 8; i++)
    {
        if (nvl4AppSelTemp[i] != nvl4AppSel[i])
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Module %d: Application selection failed.\n",
                __FUNCTION__, moduleId);
            return -NVL_ERR_GENERIC;
        }
    }

    // 6. De-assert lpmode
    retval = cciSetLPMode(device, moduleId, NV_FALSE);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    return NVL_SUCCESS;
}
/*
 * @brief Disable ALI for a module; the work is offloaded entirely to SOE.
 *
 * @return Status forwarded from cciModulesOnboardSOE.
 */
static NvlStatus
_cci_disable_ALI
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    // Have SOE perform this phase
    return cciModulesOnboardSOE(device, moduleId);
}
/*
 * @brief Enable ALI for a module; the work is offloaded entirely to SOE.
 *
 * @return Status forwarded from cciModulesOnboardSOE.
 */
static NvlStatus
_cci_enable_ALI
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    // Have SOE perform this phase
    return cciModulesOnboardSOE(device, moduleId);
}
/*
 * @brief Configure a module's links; the work is offloaded entirely to SOE.
 *
 * @return Status forwarded from cciModulesOnboardSOE.
 */
static NvlStatus
_cci_configure_links
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    // Have SOE perform this phase
    return cciModulesOnboardSOE(device, moduleId);
}
/*
 * @brief Prepare module links for optical pre-training.
 *
 * Initializes the module LEDs, then for each link in the train mask:
 * deactivates the link's CMIS lanes, runs the CCI link initialization
 * sequence, and enables IOBIST. Finally writes 0 to the Data Path
 * control byte to (re)activate all lanes.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to set up
 *
 * @return NVL_SUCCESS on success, otherwise the first failing step's
 *         NvlStatus.
 */
static NvlStatus
_cci_pretrain_setup
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pOnboardState;
    NvlStatus retval;
    NvU64 linkTrainMask;
    NvU32 linkId;
    NvU8 laneMask;
    NvU8 byte;

    retval = cciSetLedsInitialize(device, moduleId);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    pOnboardState = &device->pCci->moduleState[moduleId];
    linkTrainMask = pOnboardState->linkTrainMask;

    // Train links to enable IOBIST
    FOR_EACH_INDEX_IN_MASK(64, linkId, linkTrainMask)
    {
        // Resolve which CMIS lanes back this link.
        retval = cciGetLaneMask(device, linkId, &laneMask);
        if (retval != NVL_SUCCESS)
        {
            return retval;
        }

        retval = _cci_cmis_deactivate_lanes(device, moduleId, laneMask);
        if (retval != NVL_SUCCESS)
        {
            return retval;
        }

        retval = nvswitch_cci_initialization_sequence_ls10(device, linkId);
        if (retval != NVL_SUCCESS)
        {
            return retval;
        }

        retval = nvswitch_cci_enable_iobist_ls10(device, linkId, NV_TRUE);
        if (retval != NVL_SUCCESS)
        {
            return retval;
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    // Activate lanes
    // Writing 0 clears all per-lane deactivate bits in one shot.
    byte = 0;
    retval = cciCmisWrite(device, moduleId, 0, 0x10,
                          CMIS_DATA_PATH_CONTROL_BYTE, 1, &byte);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    return NVL_SUCCESS;
}
/*
 * @brief Send the pre-train CDB command to a module.
 *
 * Requests NVLink mode on the module's train-mask links with maintenance
 * frozen and training restarted. A command failure is only logged; the
 * function always returns NVL_SUCCESS.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to command
 *
 * @return NVL_SUCCESS (unconditionally).
 */
static NvlStatus
_cci_pretrain_send_cdb
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pState = &device->pCci->moduleState[moduleId];
    NvlStatus status;

    status = cciConfigureNvlinkModeModule(device, NVSWITCH_I2C_ACQUIRER_CCI_TRAIN,
                                          moduleId, pState->linkTrainMask,
                                          NV_TRUE,   // freeze_maintenance
                                          NV_TRUE,   // restart_training
                                          NV_TRUE);  // nvlink_mode
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Pre-train CMD to module failed.\n",
            __FUNCTION__);
    }

    // Error deliberately not propagated: only logged above.
    return NVL_SUCCESS;
}
/*
 * @brief Put a module's links into transparent mode.
 *
 * Requests NVLink mode on the module's train-mask links with maintenance
 * frozen and training NOT restarted.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to command
 *
 * @return NVL_SUCCESS on success, otherwise the command's NvlStatus.
 */
static NvlStatus
_cci_go_transparant
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pState = &device->pCci->moduleState[moduleId];
    NvlStatus status;

    status = cciConfigureNvlinkModeModule(device, NVSWITCH_I2C_ACQUIRER_CCI_TRAIN,
                                          moduleId, pState->linkTrainMask,
                                          NV_TRUE,   // freeze_maintenance
                                          NV_FALSE,  // restart_training
                                          NV_TRUE);  // nvlink_mode
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Failed to go transparent on links on module: %d\n",
            __FUNCTION__, moduleId);
        return status;
    }

    return NVL_SUCCESS;
}
/*
 * @brief Shut down and reset every link in a module's train mask.
 *
 * For each link not already in reset: runs the CCI deinitialization
 * sequence, disables IOBIST, then issues TL shutdown and TL reset
 * requests. Unlike the copper path, any per-link failure (including an
 * unresolvable link) aborts immediately.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module whose links are reset
 *
 * @return NVL_SUCCESS on success, -NVL_ERR_GENERIC on any failure.
 */
static NvlStatus
_cci_reset_links
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pOnboardState;
    NvlStatus retval;
    nvlink_link *link;
    NvU64 linkTrainMask;
    NvU32 linkId;

    pOnboardState = &device->pCci->moduleState[moduleId];
    linkTrainMask = pOnboardState->linkTrainMask;

    FOR_EACH_INDEX_IN_MASK(64, linkId, linkTrainMask)
    {
        link = nvswitch_get_link(device, linkId);
        if ((link == NULL) ||
            !NVSWITCH_IS_LINK_ENG_VALID(device, link->linkNumber, NVLIPT_LNK) ||
            (linkId >= NVSWITCH_NVLINK_MAX_LINKS))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Failed to get link: %d\n",
                __FUNCTION__, linkId);
            return -NVL_ERR_GENERIC;
        }

        // Already in reset: nothing to do for this link.
        if (nvswitch_is_link_in_reset(device, link))
        {
            continue;
        }

        retval = nvswitch_cci_deinitialization_sequence_ls10(device, linkId);
        if (retval != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: nvswitch_cci_deinitialization_sequence_ls10 failed for link: %d\n",
                __FUNCTION__, linkId);
            return -NVL_ERR_GENERIC;
        }

        // Disable IOBIST
        retval = nvswitch_cci_enable_iobist_ls10(device, linkId, NV_FALSE);
        if (retval != NVL_SUCCESS)
        {
            return -NVL_ERR_GENERIC;
        }

        // Shutdown link
        retval = nvswitch_request_tl_link_state_ls10(link,
            NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_SHUTDOWN,
            NV_TRUE);
        if (retval != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: TL link shutdown request failed for link: %d\n",
                __FUNCTION__, linkId);
            return -NVL_ERR_GENERIC;
        }

        // Reset link
        retval = nvswitch_request_tl_link_state_ls10(link,
            NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET,
            NV_TRUE);
        if (retval != NVL_SUCCESS)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: TL link reset request failed for link: %d\n",
                __FUNCTION__, linkId);
            return -NVL_ERR_GENERIC;
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return NVL_SUCCESS;
}
/*
 * @brief Start optical init: schedule link reset as the current phase.
 *
 * Success advances to the CMIS application-select subphase; failure
 * falls back to condition checking.
 */
static void
_cci_init_optical_start_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(onSuccess));
    nvswitch_os_memset(&onFail, 0, sizeof(onFail));
    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    onSuccess.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_CMIS_SELECT_APPLICATION;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId,
                                      _cci_reset_links,
                                      onSuccess,
                                      onFail);
}
/*
 * @brief Schedule CMIS application selection as the current phase.
 *
 * Success advances to the configure-links subphase; failure falls back
 * to condition checking.
 */
static void
_cci_cmis_select_application_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(onSuccess));
    nvswitch_os_memset(&onFail, 0, sizeof(onFail));
    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    onSuccess.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_CONFIGURE_LINKS;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId,
                                      _cci_cmis_select_application,
                                      onSuccess,
                                      onFail);
}
/*
 * @brief Schedule link configuration (via SOE) as the current phase.
 *
 * Success advances to the disable-ALI subphase; failure falls back to
 * condition checking.
 */
static void
_cci_configure_links_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(onSuccess));
    nvswitch_os_memset(&onFail, 0, sizeof(onFail));
    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    onSuccess.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_DISABLE_ALI;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId,
                                      _cci_configure_links,
                                      onSuccess,
                                      onFail);
}
/*
 * @brief Schedule ALI disable (via SOE) as the current phase.
 *
 * Success advances to the pretrain-setup subphase; failure falls back
 * to condition checking.
 */
static void
_cci_disable_ALI_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(onSuccess));
    nvswitch_os_memset(&onFail, 0, sizeof(onFail));
    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    onSuccess.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_SETUP;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId,
                                      _cci_disable_ALI,
                                      onSuccess,
                                      onFail);
}
/*
 * @brief Schedule pre-training setup as the current phase.
 *
 * Success advances to the pretrain-send-CDB subphase; failure falls
 * back to condition checking.
 */
static void
_cci_pretrain_setup_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(onSuccess));
    nvswitch_os_memset(&onFail, 0, sizeof(onFail));
    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    onSuccess.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_SEND_CDB;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId,
                                      _cci_pretrain_setup,
                                      onSuccess,
                                      onFail);
}
/*
 * @brief Schedule the pre-train CDB command as the current phase.
 *
 * Success advances to the pretrain-poll subphase; failure falls back to
 * condition checking.
 */
static void
_cci_pretrain_send_cdb_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(onSuccess));
    nvswitch_os_memset(&onFail, 0, sizeof(onFail));
    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    onSuccess.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_POLL;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId,
                                      _cci_pretrain_send_cdb,
                                      onSuccess,
                                      onFail);
}
/*
 * @brief Poll pre-training completion for a module's links.
 *
 * Checks TX then RX pre-training status for every link in the train
 * mask. If all links are done, transitions to the go-transparent
 * subphase; otherwise stays in the poll subphase. After
 * CCI_ONBOARD_PHASE_POLL_MAX partially-successful polls, the train mask
 * is shrunk to only the completed links (deferring the rest) so the
 * state machine cannot stall indefinitely.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module being polled
 */
static void
_cci_pretrain_poll_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pOnboardState;
    CCI_MODULE_ONBOARD_STATE nextState;
    NvU64 linkTrainMaskDone;   // links with both TX and RX pre-trained
    NvU32 linkId;
    NvBool bTx;
    NvBool bPreTrainDone;

    pOnboardState = &device->pCci->moduleState[moduleId];
    nvswitch_os_memset(&nextState, 0, sizeof(CCI_MODULE_ONBOARD_STATE));
    linkTrainMaskDone = 0;

    FOR_EACH_INDEX_IN_MASK(64, linkId, pOnboardState->linkTrainMask)
    {
        // A link counts as done only when both directions are pre-trained.
        bTx = NV_TRUE;
        bPreTrainDone = cciCheckForPreTraining(device,
            NVSWITCH_I2C_ACQUIRER_CCI_TRAIN, linkId, bTx);
        if (!bPreTrainDone)
        {
            continue;
        }

        bTx = NV_FALSE;
        bPreTrainDone = cciCheckForPreTraining(device,
            NVSWITCH_I2C_ACQUIRER_CCI_TRAIN, linkId, bTx);
        if (!bPreTrainDone)
        {
            continue;
        }

        linkTrainMaskDone |= NVBIT64(linkId);
    }
    FOR_EACH_INDEX_IN_MASK_END;

    //
    // Allow link training to proceed
    // to prevent possible deadlock
    //
    if (linkTrainMaskDone)
    {
        pOnboardState->bPartialLinkTrainComplete = NV_TRUE;
        pOnboardState->preTrainCounter++;

        // Train remaining links on next attempts
        // After the poll budget is spent, drop the unfinished links from
        // the mask (marking them deferred) so pretraining can conclude.
        if (pOnboardState->preTrainCounter == CCI_ONBOARD_PHASE_POLL_MAX)
        {
            if (pOnboardState->linkTrainMask != linkTrainMaskDone)
            {
                pOnboardState->bLinkTrainDeferred = NV_TRUE;
            }
            pOnboardState->linkTrainMask = linkTrainMaskDone;
            pOnboardState->preTrainCounter = 0;
        }
    }

    nextState.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    if (linkTrainMaskDone == pOnboardState->linkTrainMask)
    {
        nextState.onboardSubPhase.optical =
            CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_GO_TRANSPARENT;
    }
    else
    {
        // Stay in poll phase until pre-training complete
        nextState.onboardSubPhase.optical =
            CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_POLL;
    }

    cciModuleOnboardPerformPhaseTransitionAsync(device, moduleId, nextState);
}
/*
 * @brief Run go-transparent synchronously, then schedule the next phase.
 *
 * On success, sleeps 10 s before moving to the reset-links subphase; on
 * failure, records the failing onboard state and transitions to
 * condition checking.
 */
static void
_cci_go_transparant_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pState = &device->pCci->moduleState[moduleId];
    CCI_MODULE_ONBOARD_STATE next;

    nvswitch_os_memset(&next, 0, sizeof(next));

    if (_cci_go_transparant(device, moduleId) != NVL_SUCCESS)
    {
        pState->onboardError.bOnboardFailure = NV_TRUE;
        pState->onboardError.failedOnboardState = pState->currOnboardState;
        next.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;
        cciModuleOnboardPerformPhaseTransitionAsync(device, moduleId, next);
        return;
    }

    // Move to next phase after sleep
    next.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    next.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_RESET_LINKS;
    cciModuleOnboardSleepAsync(device, moduleId, 10000, next);
}
/*
 * @brief Run link reset synchronously, then schedule the next phase.
 *
 * On success, sleeps 10 s before moving to the enable-ALI subphase; on
 * failure, records the failing onboard state and transitions to
 * condition checking.
 */
static void
_cci_reset_links_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pState = &device->pCci->moduleState[moduleId];
    CCI_MODULE_ONBOARD_STATE next;

    nvswitch_os_memset(&next, 0, sizeof(next));

    if (_cci_reset_links(device, moduleId) != NVL_SUCCESS)
    {
        pState->onboardError.bOnboardFailure = NV_TRUE;
        pState->onboardError.failedOnboardState = pState->currOnboardState;
        next.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;
        cciModuleOnboardPerformPhaseTransitionAsync(device, moduleId, next);
        return;
    }

    // Move to next phase after sleep
    next.onboardPhase = CCI_ONBOARD_PHASE_INIT_OPTICAL;
    next.onboardSubPhase.optical = CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_ENABLE_ALI;
    cciModuleOnboardSleepAsync(device, moduleId, 10000, next);
}
/*!
 * @brief Kick off the enable-ALI phase asynchronously.
 *
 * Delegates the work to _cci_enable_ALI via the generic async phase runner:
 * success advances to LAUNCH_ALI, failure falls back to CHECK_CONDITION.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module being onboarded
 */
static void
_cci_enable_ALI_async
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_ONBOARD_STATE onSuccess;
    CCI_MODULE_ONBOARD_STATE onFail;

    nvswitch_os_memset(&onSuccess, 0, sizeof(CCI_MODULE_ONBOARD_STATE));
    nvswitch_os_memset(&onFail, 0, sizeof(CCI_MODULE_ONBOARD_STATE));

    onSuccess.onboardPhase = CCI_ONBOARD_PHASE_LAUNCH_ALI;
    onFail.onboardPhase = CCI_ONBOARD_PHASE_CHECK_CONDITION;

    cciModuleOnboardPerformPhaseAsync(device, moduleId, _cci_enable_ALI,
                                      onSuccess, onFail);
}
/*
 * @brief Initialize optical cables asynchronously
 *
 * Dispatches to the handler for the module's current INIT_OPTICAL
 * sub-phase. Each handler performs one non-blocking step and schedules
 * the next sub-phase itself.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module to be initialized
 */
void
cciCablesInitializeOpticalAsync
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_MODULE_STATE *pState = &device->pCci->moduleState[moduleId];

    NVSWITCH_ASSERT(pState->currOnboardState.onboardPhase == CCI_ONBOARD_PHASE_INIT_OPTICAL);

    switch (pState->currOnboardState.onboardSubPhase.optical)
    {
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_START:
            _cci_init_optical_start_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_CMIS_SELECT_APPLICATION:
            _cci_cmis_select_application_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_CONFIGURE_LINKS:
            _cci_configure_links_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_DISABLE_ALI:
            _cci_disable_ALI_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_SETUP:
            _cci_pretrain_setup_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_SEND_CDB:
            _cci_pretrain_send_cdb_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_PRETRAIN_POLL:
            _cci_pretrain_poll_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_GO_TRANSPARENT:
            _cci_go_transparant_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_RESET_LINKS:
            _cci_reset_links_async(device, moduleId);
            break;
        case CCI_ONBOARD_SUBPHASE_INIT_OPTICAL_ENABLE_ALI:
            _cci_enable_ALI_async(device, moduleId);
            break;
        default:
            // Unknown sub-phase: nothing to do this tick
            break;
    }
}

View File

@@ -0,0 +1,475 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "cci/cci_nvswitch.h"
#include "cci/cci_priv_nvswitch.h"
/*!
 * @brief Checks for CDB command completion.
 *
 * Page 00h byte 37 contains status bits. BIT 7 is the busy bit.
 * (see CMIS rev4.0, Table 9-3, CDB Command 0000h: QUERY-Status)
 *
 * @param[in] device  nvswitch_device pointer
 * @param[in] client  CCI client id used for the register read
 * @param[in] osfp    Module (OSFP) index
 *
 * @return NVL_SUCCESS when the module reports not-busy,
 *         NVL_ERR_STATE_IN_USE while the busy bit is still set,
 *         or the error returned by cciRead on I/O failure.
 */
static NvlStatus
_cci_check_for_cdb_complete
(
    nvswitch_device *device,
    NvU32 client,
    NvU32 osfp
)
{
    NvlStatus retval;
    NvU8 status;

    //
    // Bug fix: the cciRead() result was previously ignored, so a failed
    // read left 'status' uninitialized before it was tested below.
    //
    retval = cciRead(device, client, osfp, CMIS_CDB_BLOCK_STATUS_BYTE(0), 1, &status);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    // Return when the STS_BUSY bit goes to 0
    if (FLD_TEST_REF(CMIS_CDB_BLOCK_STATUS_BYTE_BUSY, _FALSE, status))
    {
        return NVL_SUCCESS;
    }

    return NVL_ERR_STATE_IN_USE;
}
/*!
 * @brief Check whether a module can receive a CDB command.
 *
 * Valid only in the CHECK_READY phase. Verifies module presence and that
 * no CDB command is still busy; on success advances the per-module CDB
 * state machine to SEND_COMMAND.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module (OSFP) index
 *
 * @return NVL_SUCCESS when ready, NVL_ERR_STATE_IN_USE while busy,
 *         -NVL_ERR_GENERIC on phase mismatch, -NVL_NOT_FOUND if absent
 */
static NvlStatus
_cci_check_cdb_ready
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_CDB_STATE *pState = &device->pCci->cdbState[moduleId];
    NvlStatus status;

    if (pState->cdbPhase != CCI_CDB_PHASE_CHECK_READY)
    {
        return -NVL_ERR_GENERIC;
    }

    if (!cciModulePresent(device, moduleId))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: osfp %d is missing\n",
            __FUNCTION__, moduleId);
        return -NVL_NOT_FOUND;
    }

    status = _cci_check_for_cdb_complete(device, pState->client, moduleId);
    if (status != NVL_SUCCESS)
    {
        return status;
    }

    // Module idle: advance to the command-send phase
    pState->cdbPhase = CCI_CDB_PHASE_SEND_COMMAND;
    return NVL_SUCCESS;
}
/*!
 * @brief Send CDB command
 *
 * Valid only in the SEND_COMMAND phase. Sends NVSWITCH_CCI_CDB_CMD_ID for
 * all lanes currently queued in laneMasksPending, then advances the
 * per-module CDB state machine to GET_RESPONSE.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module (OSFP) index
 *
 * @return NVL_SUCCESS on success, -NVL_ERR_GENERIC on phase mismatch or
 *         send failure, -NVL_NOT_FOUND if the module is absent
 */
static NvlStatus
_cci_send_cdb_command
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    NvlStatus retval;
    CCI_CDB_STATE *pCdbState;
    NvU8 payload[CMIS_CDB_LPL_MAX_SIZE];
    NvU32 payLength;
    NvU32 command;
    NvBool padding;
    NvU8 laneMask;

    pCdbState = &device->pCci->cdbState[moduleId];

    // Only meaningful while the state machine is in SEND_COMMAND
    if (pCdbState->cdbPhase != CCI_CDB_PHASE_SEND_COMMAND)
    {
        return -NVL_ERR_GENERIC;
    }
    if (!cciModulePresent(device, moduleId))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: osfp %d is missing\n",
            __FUNCTION__, moduleId);
        return -NVL_NOT_FOUND;
    }

    // Roll up lanes that will be operated on
    laneMask = pCdbState->laneMasksPending[0] |
               pCdbState->laneMasksPending[1] |
               pCdbState->laneMasksPending[2] |
               pCdbState->laneMasksPending[3];

    //
    // Clear lanes whose commands will be triggered
    // NOTE(review): laneMask is the union of all pending slots, so these
    // four statements clear every queued slot at once — a single CDB command
    // covers all queued lanes. Confirm this is intended rather than
    // per-slot (per-link) command issue.
    //
    pCdbState->laneMasksPending[0] &= ~laneMask;
    pCdbState->laneMasksPending[1] &= ~laneMask;
    pCdbState->laneMasksPending[2] &= ~laneMask;
    pCdbState->laneMasksPending[3] &= ~laneMask;

    // Build the command LPL payload (byte layout per the CDB command spec)
    payload[0] = 0;
    payload[1] = (pCdbState->freeze_maintenance << 4) +
                 (pCdbState->restart_training << 1) +
                 pCdbState->nvlink_mode;
    payload[2] = 0;
    // Tx
    payload[3] = laneMask;
    payload[4] = 0;
    // Rx
    payload[5] = laneMask;
    payLength = 6;
    command = NVSWITCH_CCI_CDB_CMD_ID;
    padding = NV_FALSE;

    retval = cciSendCDBCommand(device, pCdbState->client, moduleId, command, payLength, payload, padding);
    if (retval != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: Failed to send CDB Command: 0x%x\n",
            __FUNCTION__, command);
        return -NVL_ERR_GENERIC;
    }

    // Command in flight: next step is to collect the response
    pCdbState->cdbPhase = CCI_CDB_PHASE_GET_RESPONSE;
    return NVL_SUCCESS;
}
/*!
 * @brief Get the CDB response for the previously sent command.
 *
 * Valid only in the GET_RESPONSE phase. Waits for the busy bit to clear,
 * checks the CDB status byte, drains the response payload (the payload
 * contents are not used here), and advances the state machine to
 * CHECK_DONE.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module (OSFP) index
 *
 * @return NVL_SUCCESS on success, NVL_ERR_STATE_IN_USE while busy,
 *         -NVL_ERR_GENERIC on phase mismatch or CDB failure,
 *         -NVL_NOT_FOUND if the module is absent
 */
static NvlStatus
_cci_get_cdb_response
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_CDB_STATE *pState = &device->pCci->cdbState[moduleId];
    NvU8 response[CMIS_CDB_LPL_MAX_SIZE];
    NvU32 resLength;
    NvU8 cdbStatus = 0;
    NvlStatus status;

    if (pState->cdbPhase != CCI_CDB_PHASE_GET_RESPONSE)
    {
        return -NVL_ERR_GENERIC;
    }

    if (!cciModulePresent(device, moduleId))
    {
        NVSWITCH_PRINT(device, INFO,
            "%s: osfp %d is missing\n",
            __FUNCTION__, moduleId);
        return -NVL_NOT_FOUND;
    }

    // Still busy? Caller will retry on the next poll.
    status = _cci_check_for_cdb_complete(device, pState->client, moduleId);
    if (status != NVL_SUCCESS)
    {
        return status;
    }

    status = cciGetCDBStatus(device, pState->client, moduleId, &cdbStatus);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: CDB command failed! result = 0x%x\n",
            __FUNCTION__, cdbStatus);
        return -NVL_ERR_GENERIC;
    }

    // Drain the response; contents are not inspected here
    status = cciGetCDBResponse(device, pState->client, moduleId, response, &resLength);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Failed to get CDB command response\n",
            __FUNCTION__);
        return -NVL_ERR_GENERIC;
    }

    pState->cdbPhase = CCI_CDB_PHASE_CHECK_DONE;
    return NVL_SUCCESS;
}
/*!
 * @brief Decide whether the module's CDB work is finished.
 *
 * Valid only in the CHECK_DONE phase. If any lane masks are still queued,
 * loop back to CHECK_READY to issue another command; otherwise return the
 * state machine to IDLE.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module (OSFP) index
 *
 * @return NVL_SUCCESS, or -NVL_ERR_GENERIC on phase mismatch
 */
static NvlStatus
_cci_check_cdb_done
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    CCI_CDB_STATE *pState = &device->pCci->cdbState[moduleId];
    NvBool bLanesPending;

    if (pState->cdbPhase != CCI_CDB_PHASE_CHECK_DONE)
    {
        return -NVL_ERR_GENERIC;
    }

    // Any queued lane mask means another command round is required
    bLanesPending = (pState->laneMasksPending[0] |
                     pState->laneMasksPending[1] |
                     pState->laneMasksPending[2] |
                     pState->laneMasksPending[3]) != 0;

    pState->cdbPhase = bLanesPending ? CCI_CDB_PHASE_CHECK_READY
                                     : CCI_CDB_PHASE_IDLE;
    return NVL_SUCCESS;
}
/*
 * @brief Performs CDB stages on a module without blocking
 *
 * Drives the per-module CDB state machine as far as it can go right now:
 * CHECK_READY -> SEND_COMMAND -> GET_RESPONSE -> CHECK_DONE (-> IDLE or
 * back to CHECK_READY). Stops early when the module reports busy
 * (NVL_ERR_STATE_IN_USE), on error, or after a 1 s safety timeout.
 *
 * @param[in] device    nvswitch_device pointer
 * @param[in] moduleId  Module (OSFP) index
 */
static void
_cci_cdb_perform_phases
(
    nvswitch_device *device,
    NvU8 moduleId
)
{
    NVSWITCH_TIMEOUT timeout;
    NvlStatus retval = NVL_SUCCESS;
    NvBool bContinue = NV_TRUE;
    CCI_CDB_STATE *pCdbState;

    pCdbState = &device->pCci->cdbState[moduleId];
    // Upper bound on one invocation of this poller
    nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
    do
    {
        // Run the handler for the current phase; handlers advance cdbPhase
        switch (pCdbState->cdbPhase)
        {
            case CCI_CDB_PHASE_CHECK_READY:
            {
                retval = _cci_check_cdb_ready(device, moduleId);
                break;
            }
            case CCI_CDB_PHASE_SEND_COMMAND:
            {
                retval = _cci_send_cdb_command(device, moduleId);
                break;
            }
            case CCI_CDB_PHASE_GET_RESPONSE:
            {
                retval = _cci_get_cdb_response(device, moduleId);
                break;
            }
            case CCI_CDB_PHASE_CHECK_DONE:
            {
                retval = _cci_check_cdb_done(device, moduleId);
                break;
            }
            default:
            {
                // Only IDLE is expected here; nothing to drive
                retval = NVL_SUCCESS;
                bContinue = NV_FALSE;
                NVSWITCH_ASSERT(pCdbState->cdbPhase == CCI_CDB_PHASE_IDLE);
                break;
            }
        }
        // Module is busy
        if (retval == NVL_ERR_STATE_IN_USE)
        {
            // Nothing more to do for now
            bContinue = NV_FALSE;
        }
        if (retval < 0)
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: CDB error module %d, phase %d!\n",
                __FUNCTION__, moduleId, pCdbState->cdbPhase);
            bContinue = NV_FALSE;
            // On error, fall into CHECK_DONE so queued lanes can still
            // reschedule (or the machine can return to IDLE) next call
            pCdbState->cdbPhase = CCI_CDB_PHASE_CHECK_DONE;
        }
        // Just in case
        if (nvswitch_timeout_check(&timeout))
        {
            NVSWITCH_PRINT(device, ERROR,
                "%s: Timeout!\n",
                __FUNCTION__);
            break;
        }
    } while(bContinue);
}
/*
 * @brief Periodic callback that drives CDB state machines for all modules.
 *
 * First services modules that still have lane masks queued (they need to
 * issue further commands), then sweeps all present modules, bounded by a
 * 20 ms budget so the callback stays short.
 *
 * @param[in] device  nvswitch_device pointer
 */
void
cciProcessCDBCallback
(
    nvswitch_device *device
)
{
    NVSWITCH_TIMEOUT timeout;
    NvU32 moduleMaskPriority;
    NvU32 moduleMask;
    NvU8 moduleId;

    moduleMaskPriority = 0;
    // Overall time budget for this callback invocation
    nvswitch_timeout_create(20 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
    // Attempt to complete CDB commands for present modules
    if (cciGetXcvrMask(device, &moduleMask, NULL) == NVL_SUCCESS)
    {
        FOR_EACH_INDEX_IN_MASK(32, moduleId, moduleMask)
        {
            //
            // Prioritize sending CDB commands.
            // This also prioritizes getting responses for CDBs in the event that
            // fewer than all lanes of a module were operated on.
            //
            if (device->pCci->cdbState[moduleId].laneMasksPending[0] ||
                device->pCci->cdbState[moduleId].laneMasksPending[1] ||
                device->pCci->cdbState[moduleId].laneMasksPending[2] ||
                device->pCci->cdbState[moduleId].laneMasksPending[3])
            {
                moduleMaskPriority |= NVBIT32(moduleId);
            }
        }
        FOR_EACH_INDEX_IN_MASK_END;

        // Pass 1: modules with queued work, not subject to the time budget
        FOR_EACH_INDEX_IN_MASK(32, moduleId, moduleMaskPriority)
        {
            _cci_cdb_perform_phases(device, moduleId);
        }
        FOR_EACH_INDEX_IN_MASK_END;

        // Pass 2: all modules (may re-run priority ones to advance them)
        FOR_EACH_INDEX_IN_MASK(32, moduleId, moduleMask)
        {
            _cci_cdb_perform_phases(device, moduleId);
            // Short circuit getting non time sensistive responses
            if (nvswitch_timeout_check(&timeout))
            {
                break;
            }
        }
        FOR_EACH_INDEX_IN_MASK_END;
    }
}
/*!
 * @brief Queue an asynchronous CDB nvlink-mode configuration for a link.
 *
 * Resolves the link to its module and lane mask, queues the lane mask in
 * the first free laneMasksPending slot, and (if the CDB state machine is
 * idle) arms it with the requested command parameters. The command itself
 * is issued later by cciProcessCDBCallback().
 *
 * @param[in] device              nvswitch_device pointer
 * @param[in] client              CCI client id to use for CDB traffic
 * @param[in] linkId              Link to configure
 * @param[in] freeze_maintenance  Command parameter (see CDB command spec)
 * @param[in] restart_training    Command parameter
 * @param[in] nvlink_mode         Command parameter
 *
 * @return NVL_SUCCESS, -NVL_ERR_NOT_SUPPORTED, or -NVL_NOT_FOUND
 */
NvlStatus
cciConfigureNvlinkModeAsync
(
    nvswitch_device *device,
    NvU32 client,
    NvU8 linkId,
    NvBool freeze_maintenance,
    NvBool restart_training,
    NvBool nvlink_mode
)
{
    NvlStatus retval;
    CCI_CDB_STATE *pCdbState;
    NvU32 moduleId;
    NvU8 laneMask;
    NvU8 laneMasksIndex;

    if ((device->pCci == NULL) || (!device->pCci->bInitialized))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: CCI not supported\n",
            __FUNCTION__);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    retval = cciGetModuleId(device, linkId, &moduleId);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    if (!cciModulePresent(device, moduleId))
    {
        return -NVL_NOT_FOUND;
    }

    if (cciGetLaneMask(device, linkId, &laneMask) != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Failed to get osfp lanemask associated with link %d\n",
            __FUNCTION__, linkId);
        return -NVL_ERR_NOT_SUPPORTED;
    }

    pCdbState = &device->pCci->cdbState[moduleId];

    // Add to first available slot
    for (laneMasksIndex = 0; laneMasksIndex < NVSWITCH_CCI_NUM_LINKS_PER_OSFP_LS10; laneMasksIndex++)
    {
        if (pCdbState->laneMasksPending[laneMasksIndex] == 0)
        {
            pCdbState->laneMasksPending[laneMasksIndex] = laneMask;
            break;
        }
    }
    if (laneMasksIndex == NVSWITCH_CCI_NUM_LINKS_PER_OSFP_LS10)
    {
        //
        // Bug fix: previously a request arriving with all slots occupied was
        // dropped silently. Make the drop visible so stalled links can be
        // diagnosed.
        //
        NVSWITCH_PRINT(device, ERROR,
            "%s: No free laneMasksPending slot on module %d; request for link %d dropped\n",
            __FUNCTION__, moduleId, linkId);
    }

    if (pCdbState->cdbPhase != CCI_CDB_PHASE_IDLE)
    {
        // Don't support queuing multiple CDB command types
        NVSWITCH_ASSERT(pCdbState->client == client);
        NVSWITCH_ASSERT(pCdbState->freeze_maintenance == freeze_maintenance);
        NVSWITCH_ASSERT(pCdbState->restart_training == restart_training);
        NVSWITCH_ASSERT(pCdbState->nvlink_mode == nvlink_mode);
        return NVL_SUCCESS;
    }

    // Setup cdbstate for callback
    pCdbState->client = client;
    pCdbState->freeze_maintenance = freeze_maintenance;
    pCdbState->restart_training = restart_training;
    pCdbState->nvlink_mode = nvlink_mode;
    pCdbState->cdbPhase = CCI_CDB_PHASE_CHECK_READY;
    return NVL_SUCCESS;
}

File diff suppressed because it is too large Load Diff

View File

@@ -405,6 +405,16 @@ nvswitch_translate_hw_error
{
return NVSWITCH_ERR_HW_SOE;
}
else if ((type >= NVSWITCH_ERR_HW_CCI) &&
(type < NVSWITCH_ERR_HW_CCI_LAST))
{
return NVSWITCH_ERR_HW_CCI;
}
else if ((type >= NVSWITCH_ERR_HW_OSFP_THERM) &&
(type < NVSWITCH_ERR_HW_OSFP_THERM_LAST))
{
return NVSWITCH_ERR_HW_OSFP_THERM;
}
else if ((type >= NVSWITCH_ERR_HW_NPORT_MULTICASTTSTATE) &&
(type < NVSWITCH_ERR_HW_NPORT_MULTICASTTSTATE_LAST))
{

View File

@@ -0,0 +1,460 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "haldef_nvswitch.h"
#include "fsprpc_nvswitch.h"
#include "fsp/nvdm_payload_cmd_response.h"
#include "fsp/fsp_nvdm_format.h"
/*!
 * @brief Check if FSP RM command queue is empty
 *
 * @param[in] device  nvswitch device pointer
 *
 * @return NV_TRUE if queue is empty, NV_FALSE otherwise
 */
static NvBool
_nvswitch_fsp_is_queue_empty
(
    nvswitch_device *device
)
{
    NvU32 head;
    NvU32 tail;

    nvswitch_fsp_get_cmdq_head_tail(device, &head, &tail);
    // FSP sets QUEUE_HEAD = TAIL after consuming each packet
    return head == tail;
}
/*!
 * @brief Check if FSP RM message queue is empty
 *
 * @param[in] device  nvswitch_device pointer
 *
 * @return NV_TRUE if queue is empty, NV_FALSE otherwise
 */
static NvBool
_nvswitch_fsp_is_msgq_empty
(
    nvswitch_device *device
)
{
    NvU32 head;
    NvU32 tail;

    nvswitch_fsp_get_msgq_head_tail(device, &head, &tail);
    return head == tail;
}
/*!
 * @brief Wait for FSP RM command queue to be empty
 *
 * Polls for up to 10 ms. If a message arrives while the command queue is
 * still non-empty, it is treated as an error response from FSP and the
 * wait is aborted.
 *
 * @param[in] device  nvswitch_device pointer
 *
 * @return NVL_SUCCESS, or -NVL_ERR_GENERIC on error/timeout
 */
static NvlStatus
_nvswitch_fsp_poll_for_queue_empty
(
    nvswitch_device *device
)
{
    NVSWITCH_TIMEOUT timeout;

    nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
    for (;;)
    {
        NvBool bTimedOut = nvswitch_timeout_check(&timeout);
        NvBool bMsgqEmpty = _nvswitch_fsp_is_msgq_empty(device);
        NvBool bCmdqEmpty = _nvswitch_fsp_is_queue_empty(device);

        //
        // For now we assume that any response from FSP before sending a command
        // indicates an error and we should abort.
        //
        if (!bCmdqEmpty && !bMsgqEmpty)
        {
            nvswitch_fsp_read_message(device, NULL, 0);
            NVSWITCH_PRINT(device, ERROR, "Received error message from FSP while waiting for CMDQ to be empty.\n");
            return -NVL_ERR_GENERIC;
        }
        if (bCmdqEmpty)
        {
            break;
        }
        if (bTimedOut)
        {
            NVSWITCH_PRINT(device, ERROR, "Timed out waiting for FSP command queue to be empty.\n");
            return -NVL_ERR_GENERIC;
        }
        nvswitch_os_sleep(1);
    }
    return NVL_SUCCESS;
}
/*!
 * @brief Poll for response from FSP via RM message queue
 *
 * Polls for up to 10 ms, sleeping 1 ms between checks.
 *
 * @param[in] device  nvswitch_device pointer
 *
 * @return NVL_SUCCESS, or -NVL_ERR_GENERIC on timeout
 */
static NvlStatus
_nvswitch_fsp_poll_for_response
(
    nvswitch_device *device
)
{
    NVSWITCH_TIMEOUT timeout;

    nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
    for (;;)
    {
        NvBool bTimedOut = nvswitch_timeout_check(&timeout);

        // A non-empty message queue means FSP has replied
        if (!_nvswitch_fsp_is_msgq_empty(device))
        {
            break;
        }
        if (bTimedOut)
        {
            NVSWITCH_PRINT(device, ERROR, "FSP command timed out.\n");
            return -NVL_ERR_GENERIC;
        }
        nvswitch_os_sleep(1);
    }
    return NVL_SUCCESS;
}
/*!
 * @brief Read and process message from FSP via RM message queue.
 *
 * Supports both single and multi-packet message. For multi-packet messages, this
 * loops until all packets are received, polling at each iteration for the next
 * packet to come in. If a buffer is provided, the message payload will be
 * returned there.
 *
 * @note: For multi-packet messages, a buffer in which the message payload will
 *        be reconstructed must be provided.
 *
 * @param[in]     device            nvswitch_device pointer
 * @param[in/out] pPayloadBuffer    Buffer in which to return message payload
 * @param[in]     payloadBufferSize Payload buffer size
 *
 * @return NVL_SUCCESS, NV_ERR_INVALID_DATA, NV_ERR_INSUFFICIENT_RESOURCES, or errors
 *         from functions called within
 */
NvlStatus
nvswitch_fsp_read_message
(
    nvswitch_device *device,
    NvU8 *pPayloadBuffer,
    NvU32 payloadBufferSize
)
{
    NvU8 *pPacketBuffer;
    NvlStatus status;
    NvU32 totalPayloadSize = 0;
    NvU8 *pMessagePayload;
    NvU8 packetState = MCTP_PACKET_STATE_START;

    if (_nvswitch_fsp_is_msgq_empty(device))
    {
        NVSWITCH_PRINT(device, WARN, "Tried to read FSP response but MSG queue is empty\n");
        return NVL_SUCCESS;
    }

    // Scratch buffer large enough for one full channel-sized packet
    pPacketBuffer = nvswitch_os_malloc(nvswitch_fsp_get_channel_size(device));
    if (pPacketBuffer == NULL)
    {
        // Bug fix: error text previously referred to "GLT" (copy-paste)
        NVSWITCH_PRINT(device, ERROR,
            "Failed to allocate memory for FSP packet buffer!!\n");
        return -NVL_NO_MEM;
    }

    // Loop until the end-of-message (or single-packet) packet is seen
    while ((packetState != MCTP_PACKET_STATE_END) && (packetState != MCTP_PACKET_STATE_SINGLE_PACKET))
    {
        NvU32 msgqHead, msgqTail;
        NvU32 packetSize;
        NvU32 curPayloadSize;
        NvU8 curHeaderSize;
        NvU8 tag;

        // Wait for next packet
        status = _nvswitch_fsp_poll_for_response(device);
        if (status != NVL_SUCCESS)
        {
            goto done;
        }

        nvswitch_fsp_get_msgq_head_tail(device, &msgqHead, &msgqTail);
        // Tail points to last DWORD in packet, not DWORD immediately following it
        packetSize = (msgqTail - msgqHead) + sizeof(NvU32);
        if ((packetSize < sizeof(NvU32)) ||
            (packetSize > nvswitch_fsp_get_channel_size(device)))
        {
            NVSWITCH_PRINT(device, ERROR, "FSP response packet is invalid size: size=0x%x bytes\n", packetSize);
            status = -NVL_ERR_INVALID_STATE;
            goto done;
        }

        nvswitch_fsp_read_from_emem(device, pPacketBuffer, packetSize);
        status = nvswitch_fsp_get_packet_info(device, pPacketBuffer, packetSize, &packetState, &tag);
        if (status != NVL_SUCCESS)
        {
            goto done;
        }

        if ((packetState == MCTP_PACKET_STATE_START) || (packetState == MCTP_PACKET_STATE_SINGLE_PACKET))
        {
            // Packet contains payload header
            curHeaderSize = sizeof(MCTP_HEADER);
        }
        else
        {
            // Continuation packets carry only the transport DWORD
            curHeaderSize = sizeof(NvU32);
        }

        curPayloadSize = packetSize - curHeaderSize;
        if ((pPayloadBuffer == NULL) && (packetState != MCTP_PACKET_STATE_SINGLE_PACKET))
        {
            NVSWITCH_PRINT(device, ERROR, "No buffer provided when receiving multi-packet message. Buffer needed to reconstruct message\n");
            status = -NVL_ERR_GENERIC;
            goto done;
        }
        if (pPayloadBuffer != NULL)
        {
            if (payloadBufferSize < (totalPayloadSize + curPayloadSize))
            {
                NVSWITCH_PRINT(device, ERROR, "Buffer provided for message payload too small. Payload size: 0x%x Buffer size: 0x%x\n",
                    totalPayloadSize + curPayloadSize, payloadBufferSize);
                status = -NVL_ERR_GENERIC;
                goto done;
            }
            // Append this packet's payload to the reconstructed message
            nvswitch_os_memcpy(pPayloadBuffer + totalPayloadSize,
                               pPacketBuffer + curHeaderSize, curPayloadSize);
        }
        totalPayloadSize += curPayloadSize;

        // Set TAIL = HEAD to indicate CPU received message
        nvswitch_fsp_update_msgq_head_tail(device, msgqHead, msgqHead);
    }

    // Single-packet with no caller buffer: payload still sits in scratch
    pMessagePayload = (pPayloadBuffer == NULL) ? (pPacketBuffer + sizeof(MCTP_HEADER)) : pPayloadBuffer;
    status = nvswitch_fsp_process_nvdm_msg(device, pMessagePayload, totalPayloadSize);

done:
    nvswitch_os_free(pPacketBuffer);
    return status;
}
/*!
 * @brief Send one MCTP packet to FSP via EMEM
 *
 * @param[in] device      nvswitch_device pointer
 * @param[in] pPacket     MCTP packet
 * @param[in] packetSize  MCTP packet size in bytes
 *
 * @return NVL_SUCCESS, or NV_ERR_INSUFFICIENT_RESOURCES
 */
NvlStatus
nvswitch_fsp_send_packet
(
    nvswitch_device *device,
    NvU8 *pPacket,
    NvU32 packetSize
)
{
    NvU32 paddedSize;
    NvU8 *pBuffer = NULL;
    NvlStatus status = NVL_SUCCESS;

    // Check that queue is ready to receive data
    status = _nvswitch_fsp_poll_for_queue_empty(device);
    if (status != NVL_SUCCESS)
    {
        return -NVL_ERR_GENERIC;
    }

    // Pad to align size to 4-bytes boundary since EMEMC increments by DWORDS
    paddedSize = NV_ALIGN_UP(packetSize, sizeof(NvU32));
    pBuffer = nvswitch_os_malloc(paddedSize);
    if (pBuffer == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "Failed to allocate memory!!\n");
        return -NVL_NO_MEM;
    }

    nvswitch_os_memset(pBuffer, 0, paddedSize);
    //
    // Bug fix: copy only packetSize bytes from the source. The previous
    // memcpy of paddedSize could read up to 3 bytes past the end of
    // pPacket; the memset above already provides the zero padding.
    //
    nvswitch_os_memcpy(pBuffer, pPacket, packetSize);

    nvswitch_fsp_write_to_emem(device, pBuffer, paddedSize);
    // Update HEAD and TAIL with new EMEM offset; RM always starts at offset 0.
    nvswitch_fsp_update_cmdq_head_tail(device, 0, paddedSize - sizeof(NvU32));

    nvswitch_os_free(pBuffer);
    return status;
}
/*!
 * @brief Send a MCTP message to FSP via EMEM, and read response
 *
 * Splits the payload across as many channel-sized MCTP packets as needed
 * (SOM on the first, EOM on the last, 2-bit rolling sequence number), then
 * waits for and reads FSP's reply.
 *
 * Response payload buffer is optional if response fits in a single packet.
 *
 * @param[in] device             nvswitch_device pointer
 * @param[in] pPayload           Pointer to message payload
 * @param[in] size               Message payload size
 * @param[in] nvdmType           NVDM type of message being sent
 * @param[in] pResponsePayload   Buffer in which to return response payload
 * @param[in] responseBufferSize Response payload buffer size
 *
 * @return NVL_SUCCESS, or NV_ERR_*
 */
NvlStatus
nvswitch_fsp_send_and_read_message
(
    nvswitch_device *device,
    NvU8 *pPayload,
    NvU32 size,
    NvU32 nvdmType,
    NvU8 *pResponsePayload,
    NvU32 responseBufferSize
)
{
    NvU32 dataSent, dataRemaining;
    NvU32 packetPayloadCapacity;
    NvU32 curPayloadSize;
    NvU32 headerSize;
    NvU32 fspEmemChannelSize;
    NvBool bSinglePacket;
    //
    // Bug fix: was declared NV_STATUS. This variable holds (negative)
    // NvlStatus codes and is returned from an NvlStatus function, so use
    // NvlStatus consistently with the rest of this file.
    //
    NvlStatus status;
    NvU8 *pBuffer = NULL;
    NvU8 seq = 0;
    NvU8 seid = 0;

    // Allocate buffer of same size as channel
    fspEmemChannelSize = nvswitch_fsp_get_channel_size(device);
    pBuffer = nvswitch_os_malloc(fspEmemChannelSize);
    if (pBuffer == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Failed to allocate memory!!\n",
            __FUNCTION__);
        return -NVL_NO_MEM;
    }
    nvswitch_os_memset(pBuffer, 0, fspEmemChannelSize);

    //
    // Check if message will fit in single packet
    // We lose 2 DWORDS to MCTP and NVDM headers
    //
    headerSize = 2 * sizeof(NvU32);
    packetPayloadCapacity = fspEmemChannelSize - headerSize;
    bSinglePacket = (size <= packetPayloadCapacity);

    // First packet
    seid = nvswitch_fsp_nvdm_to_seid(device, nvdmType);
    ((NvU32 *)pBuffer)[0] = nvswitch_fsp_create_mctp_header(device, 1, (NvU8)bSinglePacket, seid, seq); // SOM=1,EOM=?,SEID,SEQ=0
    ((NvU32 *)pBuffer)[1] = nvswitch_fsp_create_nvdm_header(device, nvdmType);
    curPayloadSize = NV_MIN(size, packetPayloadCapacity);
    nvswitch_os_memcpy(pBuffer + headerSize, pPayload, curPayloadSize);
    status = nvswitch_fsp_send_packet(device, pBuffer, curPayloadSize + headerSize);
    if (status != NVL_SUCCESS)
    {
        goto failed;
    }

    if (!bSinglePacket)
    {
        // Multi packet case
        dataSent = curPayloadSize;
        dataRemaining = size - dataSent;
        headerSize = sizeof(NvU32); // No longer need NVDM header
        packetPayloadCapacity = fspEmemChannelSize - headerSize;

        while (dataRemaining > 0)
        {
            NvBool bLastPacket = (dataRemaining <= packetPayloadCapacity);
            curPayloadSize = (bLastPacket) ? dataRemaining : packetPayloadCapacity;

            nvswitch_os_memset(pBuffer, 0, fspEmemChannelSize);
            // SOM=0, EOM only on the last packet, 2-bit rolling sequence
            ((NvU32 *)pBuffer)[0] = nvswitch_fsp_create_mctp_header(device, 0, (NvU8)bLastPacket, seid, (++seq) % 4);
            nvswitch_os_memcpy(pBuffer + headerSize, pPayload + dataSent, curPayloadSize);
            status = nvswitch_fsp_send_packet(device, pBuffer, curPayloadSize + headerSize);
            if (status != NVL_SUCCESS)
            {
                goto failed;
            }

            dataSent += curPayloadSize;
            dataRemaining -= curPayloadSize;
        }
    }

    status = _nvswitch_fsp_poll_for_response(device);
    if (status != NVL_SUCCESS)
    {
        goto failed;
    }

    status = nvswitch_fsp_read_message(device, pResponsePayload, responseBufferSize);

failed:
    nvswitch_os_free(pBuffer);
    return status;
}

View File

@@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _BOARDS_NVSWITCH_H_
#define _BOARDS_NVSWITCH_H_

#include "common_nvswitch.h"

//
// NVSwitch board IDs
//
// Numeric identifiers for the supported LS10 board variants, plus a
// sentinel for unrecognized boards.
//
#define NVSWITCH_BOARD_UNKNOWN 0x0
#define NVSWITCH_BOARD_LS10_4840_0000_PC0 0x0396
#define NVSWITCH_BOARD_LS10_4840_0072_STA 0x0398
#define NVSWITCH_BOARD_LS10_5612_0002_890 0x03B7
#define NVSWITCH_BOARD_LS10_5612_0012_895 0x03B8
#define NVSWITCH_BOARD_LS10_5612_0002_ES 0x03D6
#define NVSWITCH_BOARD_LS10_4697_0000_895 0x03B9
#define NVSWITCH_BOARD_LS10_4262_0000_895 0x04FE

// Human-readable names matching the board IDs above (for logging)
#define NVSWITCH_BOARD_UNKNOWN_NAME "UNKNOWN"
#define NVSWITCH_BOARD_LS10_4840_0000_PC0_NAME "LS10_4840_0000_PC0"
#define NVSWITCH_BOARD_LS10_4840_0072_STA_NAME "LS10_4840_0072_STA"
#define NVSWITCH_BOARD_LS10_5612_0002_890_NAME "LS10_5612_0002_890"
#define NVSWITCH_BOARD_LS10_5612_0012_895_NAME "LS10_5612_0012_895"
#define NVSWITCH_BOARD_LS10_5612_0002_ES_NAME "LS10_5612_0002_ES"
#define NVSWITCH_BOARD_LS10_4697_0000_895_NAME "LS10_4697_0000_895"
#define NVSWITCH_BOARD_LS10_4262_0000_895_NAME "LS10_4262_0000_895"

#endif // _BOARDS_NVSWITCH_H_

View File

@@ -0,0 +1,122 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CCI_CABLES_NVSWITCH_H_
#define _CCI_CABLES_NVSWITCH_H_

#include "cci_nvswitch.h"
#include "nvlink_errors.h"
#include "nvtypes.h"
#include "nvfixedtypes.h"
#include "nvstatus.h"
#include "ctrl_dev_nvswitch.h"
#include "export_nvswitch.h"
#include "soe/cci/cci_onboard_phases.h"

// Detected cable/transceiver type for a module
#define CABLE_TYPE_INVALID 0x0
#define CABLE_TYPE_COPPER 0x1
#define CABLE_TYPE_DIRECT 0x2
#define CABLE_TYPE_OPTICAL 0x3

// Max onboarding attempts; regkey-controlled (requires 'device' in scope)
#define CCI_ONBOARD_MAX_ATTEMPTS (device->regkeys.cci_max_onboard_attempts)
#define CCI_ONBOARD_PHASE_POLL_MAX (5)

// Union of different onboarding subphases
typedef union CCI_MODULE_ONBOARD_SUBPHASE
{
    CCI_MODULE_ONBOARD_SUBPHASE_OPTICAL optical;
} CCI_MODULE_ONBOARD_SUBPHASE;

// Defines the phase of onboarding that a module is in
typedef struct cci_module_onboard_state
{
    CCI_MODULE_ONBOARD_PHASE onboardPhase;
    CCI_MODULE_ONBOARD_SUBPHASE onboardSubPhase;
} CCI_MODULE_ONBOARD_STATE;

// Used to manage the SLEEP phase of a module
typedef struct cci_module_onboard_sleep_state
{
    NvU64 wakeUpTimestamp;               // when to wake (platform timestamp units)
    CCI_MODULE_ONBOARD_STATE returnState; // state to resume at after waking
} CCI_MODULE_ONBOARD_SLEEP_STATE;

// Stores discovered errors in regards to module onboarding
typedef struct cci_module_onboard_error
{
    NvBool bErrorsChecked;
    NvBool bOnboardFailure;
    CCI_MODULE_ONBOARD_STATE failedOnboardState; // state at which onboarding failed
} CCI_MODULE_ONBOARD_ERROR;

// Defines the current state of a module
typedef struct cci_module_state
{
    NvU8 onboardAttempts;
    NvBool bModuleIdentified;
    NvBool bModuleOnboarded;
    NvBool bLinkTrainDeferred;
    NvBool bPartialLinkTrainComplete;
    NvBool bLinkTrainComplete;
    NvU8 preTrainCounter;
    NvU64 linkTrainMask;         // links to be trained (bitmask)
    NvU64 linkMaskActiveSaved;   // previously-active links (bitmask)
    CCI_MODULE_ONBOARD_STATE currOnboardState;
    CCI_MODULE_ONBOARD_STATE prevOnboardState;
    CCI_MODULE_ONBOARD_SLEEP_STATE sleepState;
    CCI_MODULE_ONBOARD_ERROR onboardError;
} CCI_MODULE_STATE;

// Onboarding lifecycle entry points
void cciModulesOnboardInit(nvswitch_device *device);
void cciModulesOnboardCallback(nvswitch_device *device);
// Per-cable-type async initializers
void cciCablesInitializeCopperAsync(nvswitch_device *device, NvU8 moduleId);
void cciCablesInitializeDirectAsync(nvswitch_device *device, NvU8 moduleId);
void cciCablesInitializeOpticalAsync(nvswitch_device *device, NvU8 moduleId);
NvBool cciLinkTrainIdle(nvswitch_device *device, NvU8 linkId);
NvBool cciModuleOnboardFailed(nvswitch_device *device, NvU8 moduleId);
NvlStatus cciRequestALI(nvswitch_device *device, NvU64 linkMaskTrain);
void cciModuleOnboardShutdown(nvswitch_device *device);

// For phases that have work that needs to be performed in SOE
NvlStatus cciModulesOnboardSOE(nvswitch_device *device, NvU8 moduleId);

// Error management
void cciModuleOnboardCheckErrors(nvswitch_device *device, NvU8 moduleId);

// Helper functions
NvlStatus cciSetLedsInitialize(nvswitch_device *device, NvU8 moduleId);
void cciModuleOnboardPerformPhaseTransitionAsync(nvswitch_device *device, NvU8 moduleId,
    CCI_MODULE_ONBOARD_STATE nextState);
void cciModuleOnboardSleepAsync(nvswitch_device *device, NvU8 moduleId, NvU32 ms,
    CCI_MODULE_ONBOARD_STATE returnState);
void cciModuleOnboardPerformPhaseAsync(nvswitch_device *device, NvU8 moduleId,
    NvlStatus (*func)(nvswitch_device *, NvU8 moduleId),
    CCI_MODULE_ONBOARD_STATE nextStateSuccess,
    CCI_MODULE_ONBOARD_STATE nextStateFail);

#endif //_CCI_CABLES_NVSWITCH_H_

View File

@@ -0,0 +1,55 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CCI_CDB_ASYNC_NVSWITCH_H_
#define _CCI_CDB_ASYNC_NVSWITCH_H_
#include "ls10/cci_ls10.h"
#include "nvlink_errors.h"
#include "nvtypes.h"
// Phases of an asynchronous CDB (Command Data Block) transaction with an
// OSFP transceiver module. cciProcessCDBCallback() (declared below) is
// polled at a regular rate and steps each in-flight transaction through
// these phases one at a time.
typedef enum cci_cdb_phase
{
CCI_CDB_PHASE_IDLE = 0x0,      // no CDB transaction in flight
CCI_CDB_PHASE_CHECK_READY,     // waiting for the module to accept a command
CCI_CDB_PHASE_SEND_COMMAND,    // issuing the CDB command to the module
CCI_CDB_PHASE_GET_RESPONSE,    // reading the command response back
CCI_CDB_PHASE_CHECK_DONE       // confirming the transaction has completed
} CCI_CDB_PHASE;
// Per-module bookkeeping for one asynchronous CDB transaction.
// The freeze_maintenance / restart_training / nvlink_mode fields mirror the
// arguments of cciConfigureNvlinkModeAsync() (declared below) and hold the
// requested settings for the duration of the transaction.
typedef struct cci_cdb_state
{
CCI_CDB_PHASE cdbPhase;   // current phase of the CDB state machine
NvU8 client;              // client id that initiated the request
NvU8 laneMasksPending[NVSWITCH_CCI_NUM_LINKS_PER_OSFP_LS10]; // per-link lane masks still to be processed
NvBool freeze_maintenance; // requested freeze-maintenance setting
NvBool restart_training;   // requested restart-training setting
NvBool nvlink_mode;        // requested nvlink-mode setting
} CCI_CDB_STATE;
// Should be called at some regular polling rate
void cciProcessCDBCallback(nvswitch_device *device);
NvlStatus cciConfigureNvlinkModeAsync(nvswitch_device *device, NvU32 client, NvU8 linkId, NvBool freeze_maintenance, NvBool restart_training, NvBool nvlink_mode);
#endif //_CCI_CDB_ASYNC_NVSWITCH_H_

View File

@@ -0,0 +1,118 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CCI_NVSWITCH_H_
#define _CCI_NVSWITCH_H_
#include "common_nvswitch.h"
#include "nvlink_errors.h"
#include "nvtypes.h"
#include "nvfixedtypes.h"
#include "nvstatus.h"
#include "ctrl_dev_nvswitch.h"
#include "export_nvswitch.h"
typedef struct CCI CCI, *PCCI;
struct nvswitch_device;
// Polling Callback ids
#define NVSWITCH_CCI_CALLBACK_SMBPBI 0
#define NVSWITCH_CCI_CALLBACK_LINK_STATE_UPDATE 1
#define NVSWITCH_CCI_CALLBACK_CDB 2
#define NVSWITCH_CCI_CALLBACK_NUM_MAX 3
//
// Determines the range of frequencies that functions can
// run at.
// This rate must be divisible by client provided frequencies.
//
#define NVSWITCH_CCI_POLLING_RATE_HZ 50
// CCI LED states on LS10 Systems
#define CCI_LED_STATE_LOCATE 0U
#define CCI_LED_STATE_FAULT 1U
#define CCI_LED_STATE_OFF 2U
#define CCI_LED_STATE_INITIALIZE 3U
#define CCI_LED_STATE_UP_WARM 4U
#define CCI_LED_STATE_UP_ACTIVE 5U
#define CCI_NUM_LED_STATES 6U
//
// Mapping between an XCVR (transceiver) module, an nvlink-id, and the
// xcvr lane-mask that the link occupies on that module.
//
typedef struct nvswitch_cci_module_link_lane_map
{
NvU8 moduleId; // OSFP transceiver module index
NvU8 linkId;   // NVLink link id carried by this module
NvU8 laneMask; // mask of module lanes used by this link
} NVSWITCH_CCI_MODULE_LINK_LANE_MAP;
CCI* cciAllocNew(void);
NvBool cciSupported(nvswitch_device *device);
NvlStatus cciInit(nvswitch_device *device, PCCI pCci, NvU32 pci_device_id);
NvlStatus cciLoad(nvswitch_device *device);
void cciDestroy(nvswitch_device *device, PCCI pCci);
NvlStatus cciRead(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU32 addr, NvU32 length, NvU8 *pVal);
NvlStatus cciWrite(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU32 addr, NvU32 length, NvU8 *pVal);
NvlStatus cciSetBankAndPage(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 bank, NvU8 page);
NvlStatus cciSendCDBCommandAndGetResponse(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU32 command, NvU32 payLength, NvU8 *payload, NvU32 *resLength, NvU8 *response, NvBool padding);
NvlStatus cciSendCDBCommand(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU32 command, NvU32 length, NvU8 *pValArray, NvBool padding);
NvlStatus cciGetBankAndPage(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 *pBank, NvU8 *pPage);
NvlStatus cciGetCDBResponse(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 *response, NvU32 *resLength);
NvlStatus cciGetCDBStatus(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 *pStatus);
NvlStatus cciWaitForCDBComplete(nvswitch_device *device, NvU32 client, NvU32 osfp);
NvlStatus cciRegisterCallback(nvswitch_device *device, NvU32 callbackId, void (*functionPtr)(nvswitch_device *device), NvU32 rateHz);
NvlStatus cciModuleEject (nvswitch_device *device, NvU8 moduleId);
NvBool cciIsLinkManaged (nvswitch_device *device, NvU32 linkNumber);
NvlStatus cciGetLinkMode (nvswitch_device *device, NvU32 linkNumber, NvU64 *mode);
NvBool cciReportLinkErrors (nvswitch_device *device, NvU32 linkNumber);
NvlStatus cciGetFWRevisions (nvswitch_device *device, NvU32 client, NvU32 linkId, NVSWITCH_CCI_GET_FW_REVISIONS *pRevisions);
NvlStatus cciGetXcvrFWRevisions (nvswitch_device *device, NvU32 client, NvU32 osfp, NVSWITCH_CCI_GET_FW_REVISIONS *pRevisions);
void cciDetectXcvrsPresent (nvswitch_device *device);
NvlStatus cciGetXcvrMask (nvswitch_device *device, NvU32 *pMaskAll, NvU32 *pMaskPresent);
NvlStatus cciGetXcvrLedState (nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 *pLedState);
NvlStatus cciSetXcvrLedState (nvswitch_device *device, NvU32 client, NvU32 osfp, NvBool bSetLocate);
NvlStatus cciSetNextXcvrLedState (nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 nextLedState);
NvlStatus cciConfigureNvlinkMode (nvswitch_device *device, NvU32 client, NvU32 linkId, NvBool bTx, NvBool freeze_maintenance, NvBool restart_training, NvBool nvlink_mode);
NvlStatus cciConfigureNvlinkModeModule (nvswitch_device *device, NvU32 client, NvU8 moduleId, NvU64 linkMask, NvBool freeze_maintenance, NvBool restart_training, NvBool nvlink_mode);
NvBool cciCheckForPreTraining (nvswitch_device *device, NvU32 client, NvU32 linkId, NvBool bTx);
NvlStatus cciApplyControlSetValues (nvswitch_device *device, NvU32 client, NvU32 moduleMask);
NvlStatus cciGetGradingValues (nvswitch_device *device, NvU32 client, NvU32 linkId, NvU8 *laneMask, NVSWITCH_CCI_GRADING_VALUES *pGrading);
NvlStatus cciGetCageMapping (nvswitch_device *device, NvU8 cageIndex, NvU64 *pLinkMask, NvU64 *pEncodedValue);
NvBool cciCmisAccessTryLock (nvswitch_device *device, NvU8 cageIndex);
void cciCmisAccessReleaseLock (nvswitch_device *device, NvU8 cageIndex);
NvlStatus cciCmisRead (nvswitch_device *device, NvU8 cageIndex, NvU8 bank, NvU8 page, NvU8 address, NvU8 count, NvU8 *pData);
NvlStatus cciCmisWrite (nvswitch_device *device, NvU8 cageIndex, NvU8 bank, NvU8 page, NvU8 address, NvU8 count, NvU8 *pData);
NvlStatus cciCmisCageBezelMarking (nvswitch_device *device, NvU8 cageIndex, char *pBezelMarking);
// CCI Control calls
NvlStatus nvswitch_ctrl_get_cci_fw_revisions (nvswitch_device *device, NVSWITCH_CCI_GET_FW_REVISION_PARAMS *pParams);
NvlStatus nvswitch_ctrl_get_grading_values (nvswitch_device *device, NVSWITCH_CCI_GET_GRADING_VALUES_PARAMS *pParams);
NvlStatus nvswitch_ctrl_get_ports_cpld_info (nvswitch_device *device, NVSWITCH_CCI_GET_PORTS_CPLD_INFO_PARAMS *pParams);
NvlStatus nvswitch_ctrl_set_locate_led (nvswitch_device *device, NVSWITCH_CCI_SET_LOCATE_LED_PARAMS *pParams);
NvlStatus nvswitch_ctrl_cci_request_ali (nvswitch_device *device, NVSWITCH_REQUEST_ALI_PARAMS *pParams);
#endif //_CCI_NVSWITCH_H_

View File

@@ -0,0 +1,132 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CCI_PRIV_NVSWITCH_H_
#define _CCI_PRIV_NVSWITCH_H_
#include "common_nvswitch.h"
#include "nvtypes.h"
#include "cci/cci_nvswitch.h"
#include "cci/cci_cdb_async_nvswitch.h"
#include "cci/cci_cables_nvswitch.h"
#include "soe/cci/cpld_machx03.h"
#include "soe/cci/cci_cmis.h"
//
// CCI is the top-level management state for all cable controllers on a device.
// The management tasks related to cable controllers is encapsulated by a PCS
// or Platform Cable System, for which CCI is largely a container.
//
#define NVSWITCH_CCI_LINK_NUM_MAX 64
#define NVSWITCH_CCI_OSFP_NUM_MAX 32
// Vendor specified number(Used across all LR10/LS10 systems)
#define NVSWITCH_CCI_CDB_CMD_ID 0xcd19
// LS10 Saved LED state
#define CCI_LED_STATE_LED_A CPLD_MACHXO3_LED_STATE_REG_LED_A
#define CCI_LED_STATE_LED_B CPLD_MACHXO3_LED_STATE_REG_LED_B
// Timeout for CMIS access locks
#define NVSWITCH_CCI_CMIS_LOCK_TIMEOUT (10 * NVSWITCH_INTERVAL_1SEC_IN_NS)
// Cable Controller Interface
// Top-level CCI (Cable Controller Interface) state for one nvswitch device.
struct CCI
{
// Links that are supported by CCI. The value here is defined in the BIOS
// and is a static property of the system. See repeater bit in NVLink.
NvU64 linkMask;
// ================================================================
// === State below this line has been moved and can be deleted. ===
// ================================================================
// Other member variables specific to CCI go here
NvBool bDiscovered;   // CCI discovery has run
NvBool bSupported;    // CCI is supported on this device
NvBool bInitialized;  // CCI initialization completed
NvU32 osfpMaskAll; // All the possible module positions
NvU32 osfpMaskPresent; // Currently present modules
NvU32 cagesMask; // All the possible module cage positions
NvU32 modulesMask; // Currently present modules(currently mirrors osfpMaskPresent)
NvU32 numLinks;    // number of CCI-managed links
NVSWITCH_CCI_MODULE_LINK_LANE_MAP *osfp_map;            // module/link/lane mapping table
struct NVSWITCH_I2C_DEVICE_DESCRIPTOR *osfp_i2c_info;   // I2C descriptors for the OSFP modules
NvU32 osfp_map_size;  // number of entries in osfp_map
NvU32 osfp_num;       // number of OSFP modules
// Polling callbacks registered via cciRegisterCallback(), indexed by
// the NVSWITCH_CCI_CALLBACK_* ids.
struct {
void (*functionPtr)(struct nvswitch_device*);
NvU32 interval;       // invocation interval derived from the client's rateHz
} callbackList[NVSWITCH_CCI_CALLBACK_NUM_MAX];
NvU32 callbackCounter; // tick counter used to schedule the callbacks above
NvU8 xcvrCurrentLedState[NVSWITCH_CCI_OSFP_NUM_MAX]; // per-module current LED state (CCI_LED_STATE_*)
NvU8 xcvrNextLedState[NVSWITCH_CCI_OSFP_NUM_MAX];    // per-module pending LED state (CCI_LED_STATE_*)
// Per-link counter sums from the previous poll (presumably throughput;
// used to detect link traffic -- TODO confirm against
// cciCheckXcvrForLinkTraffic()).
NvU64 tpCounterPreviousSum[NVSWITCH_CCI_LINK_NUM_MAX];
// LS10 cable initialization
NvU8 cableType[NVSWITCH_CCI_OSFP_NUM_MAX];
NvBool rxDetEnable[NVSWITCH_CCI_OSFP_NUM_MAX];
// LS10 Async module onboarding
CCI_MODULE_STATE moduleState[NVSWITCH_CCI_OSFP_NUM_MAX];
// LS10 Async CDB management
CCI_CDB_STATE cdbState[NVSWITCH_CCI_OSFP_NUM_MAX];
// LS10 Module info
NvBool isFlatMemory[NVSWITCH_CCI_OSFP_NUM_MAX];
// Per-cage CMIS access lock, taken/released via cciCmisAccessTryLock() /
// cciCmisAccessReleaseLock(); timestamp is checked against
// NVSWITCH_CCI_CMIS_LOCK_TIMEOUT.
struct {
NvBool bLocked;
NvU32 pid;       // owning process id
NvU64 timestamp; // time the lock was taken
} cmisAccessLock[NVSWITCH_CCI_OSFP_NUM_MAX];
// LS10 link training mode
NvBool bModeContinuousALI[NVSWITCH_CCI_OSFP_NUM_MAX];
NvU64 linkMaskAliRequested; // links with an ALI request outstanding
// LS10 Module HW state
NvBool isFaulty[NVSWITCH_CCI_OSFP_NUM_MAX];
};
// Helper functions for CCI subcomponents
NvlStatus cciGetModuleId(nvswitch_device *device, NvU32 linkId, NvU32 *pModuleId);
NvBool cciModulePresent(nvswitch_device *device, NvU32 moduleId);
void cciGetModulePresenceChange(nvswitch_device *device, NvU32 *pModuleMask);
NvlStatus cciResetModule(nvswitch_device *device, NvU32 moduleId);
NvlStatus cciGetXcvrFWInfo(nvswitch_device *device, NvU32 moduleId, NvU8 *pInfo);
NvlStatus cciSetLPMode(nvswitch_device *device, NvU8 moduleId, NvBool bAssert);
NvBool cciCheckLPMode(nvswitch_device *device, NvU8 moduleId);
void cciPingModules(nvswitch_device *device, NvU32 *pMaskPresent);
void cciGetAllLinks(nvswitch_device *device, NvU64 *pLinkMaskAll);
NvlStatus cciGetModuleMask(nvswitch_device *device, NvU64 linkMask, NvU32 *pModuleMask);
NvBool cciCheckXcvrForLinkTraffic(nvswitch_device *device, NvU32 osfp, NvU64 linkMask);
NvlStatus cciGetLaneMask(nvswitch_device *device, NvU32 linkId, NvU8 *laneMask);
NvBool cciModuleHWGood(nvswitch_device *device, NvU32 moduleId);
void cciSetModulePower(nvswitch_device *device, NvU32 moduleId, NvBool bPowerOn);
#endif //_CCI_PRIV_NVSWITCH_H_

View File

@@ -39,6 +39,9 @@
#include "spi_nvswitch.h"
#include "smbpbi_nvswitch.h"
#include "nvCpuUuid.h"
#include "fsprpc_nvswitch.h"
#include "soe/cci/cpld_machx03.h"
#define NVSWITCH_GET_BIT(v, p) (((v) >> (p)) & 1)
#define NVSWITCH_SET_BIT(v, p) ((v) | NVBIT(p))
@@ -121,6 +124,9 @@ static NV_INLINE void nvswitch_clear_flags(NvU32 *val, NvU32 flags)
nvswitch_os_malloc_trace(_size, NULL, 0)
#endif
// LS10 Saved LED state
#define ACCESS_LINK_LED_STATE CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED
//
// This macro should be used to check assertion statements and print Error messages.
//
@@ -248,6 +254,11 @@ typedef struct
NvU32 soe_disable;
NvU32 soe_enable;
NvU32 soe_boot_core;
NvU32 cci_control;
NvU32 cci_link_train_disable_mask;
NvU32 cci_link_train_disable_mask2;
NvU32 cci_max_onboard_attempts;
NvU32 cci_error_log_enable;
NvU32 latency_counter;
NvU32 nvlink_speed_control;
NvU32 inforom_bbx_periodic_flush;
@@ -404,6 +415,12 @@ struct nvswitch_device
// SOE
FLCNABLE *pSoe;
// CCI
struct CCI *pCci;
NvU8 current_led_state;
NvU8 next_led_state;
NvU64 tp_counter_previous_sum[NVSWITCH_NVLINK_MAX_LINKS];
// DMA
NvU32 dma_addr_width;
@@ -418,6 +435,7 @@ struct nvswitch_device
struct smbpbi *pSmbpbi;
// NVSWITCH_LINK_TYPE
NvBool bModeContinuousALI;
NVSWITCH_LINK_TYPE link[NVSWITCH_MAX_LINK_COUNT];
// PLL
@@ -566,6 +584,14 @@ do \
} \
} while(0)
// Access link LED states on LS10 Systems
#define ACCESS_LINK_LED_STATE_FAULT 0U
#define ACCESS_LINK_LED_STATE_OFF 1U
#define ACCESS_LINK_LED_STATE_INITIALIZE 2U
#define ACCESS_LINK_LED_STATE_UP_WARM 3U
#define ACCESS_LINK_LED_STATE_UP_ACTIVE 4U
#define ACCESS_LINK_NUM_LED_STATES 5U
#define NVSWITCH_GET_CAP(tbl,cap,field) (((NvU8)tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field))
#define NVSWITCH_SET_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field))

View File

@@ -0,0 +1,92 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FSPRPC_NVSWITCH_H_
#define _FSPRPC_NVSWITCH_H_
#define FSP_OK (0x00U)
#define FSP_ERR_IFS_ERR_INVALID_STATE (0x9EU)
#define FSP_ERR_IFR_FILE_NOT_FOUND (0x9FU)
#define FSP_ERR_IFS_ERR_NOT_SUPPORTED (0xA0U)
#define FSP_ERR_IFS_ERR_INVALID_DATA (0xA1U)
// Byte-packed so the struct can be sent to the FSP as a raw byte stream.
// NOTE(review): this pack(1) is only restored by the #pragma pack() that
// follows NVDM_PAYLOAD_COT further below.
#pragma pack(1)
// MCTP (Management Component Transport Protocol) packet header.
typedef struct mctp_header
{
NvU32 constBlob; // pre-built MCTP transport header words (see nvswitch_fsp_create_mctp_header)
NvU8 msgType;    // MCTP message type
NvU16 vendorId;  // vendor id
} MCTP_HEADER;
// Needed to remove unnecessary padding
#pragma pack(1)
// COT (chain-of-trust) payload carried in an NVDM message: locations and
// sizes of the GSP-FMC image and FRTS regions in sysmem/vidmem, plus the
// authentication material covering them.
typedef struct nvdm_payload_cot
{
NvU16 version;
NvU16 size;
NvU64 gspFmcSysmemOffset;
NvU64 frtsSysmemOffset;
NvU32 frtsSysmemSize;
// Note this is an offset from the end of FB
NvU64 frtsVidmemOffset;
NvU32 frtsVidmemSize;
// Authentication related fields
NvU32 hash384[12];   // 384-bit digest (12 * 32 bits)
NvU32 publicKey[96];
NvU32 signature[96];
NvU64 gspBootArgsSysmemOffset;
} NVDM_PAYLOAD_COT;
#pragma pack()
// NVDM message: a type byte followed by the payload selected by that type.
typedef struct nvdm_packet
{
NvU8 nvdmType;
// We can make this a union when adding more NVDM payloads
NVDM_PAYLOAD_COT cotPayload;
} NVDM_PACKET;
// Complete MCTP packet: transport header followed by the NVDM message.
// The structure cannot have embedded pointers to send as byte stream
typedef struct mctp_packet
{
MCTP_HEADER header;
NVDM_PACKET nvdmPacket;
} MCTP_PACKET, *PMCTP_PACKET;
// Type of packet, can either be SOM, EOM, neither, or both (1-packet messages)
typedef enum mctp_packet_state
{
MCTP_PACKET_STATE_START,        // SOM set, EOM clear: first packet of a message
MCTP_PACKET_STATE_INTERMEDIATE, // neither SOM nor EOM: middle packet
MCTP_PACKET_STATE_END,          // EOM set, SOM clear: last packet of a message
MCTP_PACKET_STATE_SINGLE_PACKET // both SOM and EOM: whole message in one packet
} MCTP_PACKET_STATE, *PMCTP_PACKET_STATE;
NvlStatus nvswitch_fsp_read_message(nvswitch_device *device, NvU8 *pPayloadBuffer, NvU32 payloadBufferSize);
NvlStatus nvswitch_fsp_send_packet(nvswitch_device *device, NvU8 *pPacket, NvU32 packetSize);
NvlStatus nvswitch_fsp_send_and_read_message(nvswitch_device *device, NvU8 *pPayload, NvU32 size, NvU32 nvdmType, NvU8 *pResponsePayload, NvU32 responseBufferSize);
#endif //_FSPRPC_NVSWITCH_H_

View File

@@ -47,6 +47,36 @@
// _op(return type, function name, (parameter list), _arch)
//
#define NVSWITCH_HAL_FUNCTION_LIST_FEATURE_0(_op, _arch) \
_op(NvBool, nvswitch_is_cci_supported, (nvswitch_device *device), _arch) \
_op(void, nvswitch_cci_setup_gpio_pins, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_cci_get_cci_link_mode, (nvswitch_device *device, NvU32 linkNumber, NvU64 *mode), _arch) \
_op(NvlStatus, nvswitch_cci_discovery, (nvswitch_device *device), _arch) \
_op(void, nvswitch_cci_get_xcvrs_present, (nvswitch_device *device, NvU32 *pMaskPresent), _arch) \
_op(void, nvswitch_cci_get_xcvrs_present_change, (nvswitch_device *device, NvU32 *pMaskPresentChange), _arch) \
_op(NvlStatus, nvswitch_cci_reset, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_cci_reset_links, (nvswitch_device *device, NvU64 linkMask), _arch) \
_op(NvlStatus, nvswitch_cci_ports_cpld_read, (nvswitch_device *device, NvU8 reg, NvU8 *pData), _arch) \
_op(NvlStatus, nvswitch_cci_ports_cpld_write, (nvswitch_device *device, NvU8 reg, NvU8 Data), _arch) \
_op(void, nvswitch_cci_update_link_state_led, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_cci_set_xcvr_led_state, (nvswitch_device *device, NvU32 client, NvU32 osfp, NvBool bSetLocate), _arch) \
_op(NvlStatus, nvswitch_cci_get_xcvr_led_state, (nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 *pLedState), _arch) \
_op(NvlStatus, nvswitch_cci_setup_onboard, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_cci_setup_module_path, (nvswitch_device *device, NvU32 client, NvU32 osfp), _arch) \
_op(NvlStatus, nvswitch_cci_module_access_cmd, (nvswitch_device *device, NvU32 client, NvU32 osfp, NvU32 addr, NvU32 length, \
NvU8 *pValArray, NvBool bRead, NvBool bBlk), _arch) \
_op(NvlStatus, nvswitch_cci_apply_control_set_values, (nvswitch_device *device, NvU32 client, NvU32 moduleMask), _arch) \
_op(NvlStatus, nvswitch_cci_reset_and_drain_links, (nvswitch_device *device, NvU64 link_mask, NvBool bForced), _arch) \
_op(NvlStatus, nvswitch_cci_cmis_cage_bezel_marking, (nvswitch_device *device, NvU8 cageIndex, char *pBezelMarking), _arch) \
_op(NvlStatus, nvswitch_cci_get_grading_values, (nvswitch_device *device, NvU32 client, NvU32 linkId, NvU8 *laneMask, NVSWITCH_CCI_GRADING_VALUES *pGrading), _arch) \
_op(NvlStatus, nvswitch_cci_get_xcvr_mask, (nvswitch_device *device, NvU32 *pMaskAll, NvU32 *pMaskPresent), _arch) \
_op(void, nvswitch_cci_set_xcvr_present, (nvswitch_device *device, NvU32 maskPresent), _arch) \
_op(void, nvswitch_cci_destroy, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_soe_heartbeat, (nvswitch_device *device, NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p), _arch) \
_op(void, nvswitch_update_link_state_led, (nvswitch_device *device), _arch) \
_op(void, nvswitch_led_shutdown, (nvswitch_device *device), _arch) \
#define NVSWITCH_HAL_FUNCTION_LIST(_op, _arch) \
_op(NvlStatus, nvswitch_initialize_device_state, (nvswitch_device *device), _arch) \
_op(void, nvswitch_destroy_device_state, (nvswitch_device *device), _arch) \
@@ -68,7 +98,7 @@
_op(NvlStatus, nvswitch_lib_service_interrupts, (nvswitch_device *device), _arch) \
_op(NvU64, nvswitch_hw_counter_read_counter, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_link_in_use, (nvswitch_device *device, NvU32 link_id), _arch) \
_op(NvlStatus, nvswitch_reset_and_drain_links, (nvswitch_device *device, NvU64 link_mask), _arch) \
_op(NvlStatus, nvswitch_reset_and_drain_links, (nvswitch_device *device, NvU64 link_mask, NvBool bForced), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_info, (nvswitch_device *device, NVSWITCH_GET_INFO *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_nvlink_status, (nvswitch_device *device, NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_counters, (nvswitch_device *device, NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret), _arch) \
@@ -214,6 +244,7 @@
_op(void, nvswitch_reset_persistent_link_hw_state, (nvswitch_device *device, NvU32 linkNumber), _arch)\
_op(void, nvswitch_store_topology_information, (nvswitch_device *device, nvlink_link *link), _arch) \
_op(void, nvswitch_init_lpwr_regs, (nvlink_link *link), _arch) \
_op(void, nvswitch_program_l1_scratch_reg, (nvswitch_device *device, NvU32 linkNumber), _arch) \
_op(NvlStatus, nvswitch_set_training_mode, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_get_sublink_width, (nvswitch_device *device, NvU32 linkNumber), _arch) \
_op(NvBool, nvswitch_i2c_is_device_access_allowed, (nvswitch_device *device, NvU32 port, NvU8 addr, NvBool bIsRead), _arch) \
@@ -233,8 +264,31 @@
_op(NvlStatus, nvswitch_ctrl_therm_read_voltage, (nvswitch_device *device, NVSWITCH_CTRL_GET_VOLTAGE_PARAMS *info), _arch) \
_op(void, nvswitch_soe_init_l2_state, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_ctrl_therm_read_power, (nvswitch_device *device, NVSWITCH_GET_POWER_PARAMS *info), _arch) \
_op(NvlStatus, nvswitch_get_board_id, (nvswitch_device *device, NvU16 *boardId), _arch) \
_op(NvBool, nvswitch_does_link_need_termination_enabled, (nvswitch_device *device, nvlink_link *link), _arch) \
_op(NvlStatus, nvswitch_link_termination_setup, (nvswitch_device *device, nvlink_link *link), _arch) \
_op(NvlStatus, nvswitch_check_io_sanity, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_link_l1_capability, (nvswitch_device *device, NvU32 linkNum, NvBool *isL1Capable), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_link_l1_threshold, (nvswitch_device *device, NvU32 linkNum, NvU32 *lpThreshold), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_link_l1_threshold, (nvlink_link *link, NvU32 lpEntryThreshold), _arch) \
_op(void, nvswitch_fsp_update_cmdq_head_tail, (nvswitch_device *device, NvU32 queueHead, NvU32 queueTail), _arch) \
_op(void, nvswitch_fsp_get_cmdq_head_tail, (nvswitch_device *device, NvU32 *pQueueHead, NvU32 *pQueueTail), _arch) \
_op(void, nvswitch_fsp_update_msgq_head_tail, (nvswitch_device *device, NvU32 msgqHead, NvU32 msgqTail), _arch) \
_op(void, nvswitch_fsp_get_msgq_head_tail, (nvswitch_device *device, NvU32 *pMsgqHead, NvU32 *pMsgqTail), _arch) \
_op(NvU32, nvswitch_fsp_get_channel_size, (nvswitch_device *device), _arch) \
_op(NvU8, nvswitch_fsp_nvdm_to_seid, (nvswitch_device *device, NvU8 nvdmType), _arch) \
_op(NvU32, nvswitch_fsp_create_mctp_header, (nvswitch_device *device, NvU8 som, NvU8 eom, NvU8 seid, NvU8 seq), _arch) \
_op(NvU32, nvswitch_fsp_create_nvdm_header, (nvswitch_device *device, NvU32 nvdmType), _arch) \
_op(NvlStatus, nvswitch_fsp_validate_mctp_payload_header, (nvswitch_device *device, NvU8 *pBuffer, NvU32 size), _arch) \
_op(NvlStatus, nvswitch_fsp_process_nvdm_msg, (nvswitch_device *device, NvU8 *pBuffer, NvU32 size), _arch) \
_op(NvlStatus, nvswitch_fsp_process_cmd_response, (nvswitch_device *device, NvU8 *pBuffer, NvU32 size), _arch) \
_op(NvlStatus, nvswitch_fsp_config_ememc, (nvswitch_device *device, NvU32 offset, NvBool bAincw, NvBool bAincr), _arch) \
_op(NvlStatus, nvswitch_fsp_write_to_emem, (nvswitch_device *device, NvU8 *pBuffer, NvU32 size), _arch) \
_op(NvlStatus, nvswitch_fsp_read_from_emem, (nvswitch_device *device, NvU8 *pBuffer, NvU32 size), _arch) \
_op(NvlStatus, nvswitch_fsp_error_code_to_nvlstatus_map, (nvswitch_device *device, NvU32 errorCode), _arch) \
_op(NvlStatus, nvswitch_fsp_get_packet_info, (nvswitch_device *device, NvU8 *pBuffer, NvU32 size, NvU8 *pPacketState, NvU8 *pTag), _arch) \
_op(NvlStatus, nvswitch_fsprpc_get_caps, (nvswitch_device *device, NVSWITCH_FSPRPC_GET_CAPS_PARAMS *params), _arch) \
NVSWITCH_HAL_FUNCTION_LIST_FEATURE_0(_op, _arch) \
#define NVSWITCH_HAL_FUNCTION_LIST_LS10(_op, _arch) \
_op(NvlStatus, nvswitch_launch_ALI, (nvswitch_device *device), _arch) \

View File

@@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CCI_LR10_H_
#define _CCI_LR10_H_
// HALs
NvBool nvswitch_is_cci_supported_lr10(nvswitch_device *device);
void nvswitch_cci_setup_gpio_pins_lr10(nvswitch_device *device);
NvlStatus nvswitch_cci_get_cci_link_mode_lr10(nvswitch_device *device, NvU32 linkNumber, NvU64 *mode);
NvlStatus nvswitch_cci_discovery_lr10(nvswitch_device *device);
void nvswitch_cci_get_xcvrs_present_lr10(nvswitch_device *device, NvU32 *pMaskPresent);
void nvswitch_cci_get_xcvrs_present_change_lr10(nvswitch_device *device, NvU32 *pMaskPresentChange);
NvlStatus nvswitch_cci_reset_lr10(nvswitch_device *device);
NvlStatus nvswitch_cci_reset_links_lr10(nvswitch_device *device, NvU64 linkMask);
NvlStatus nvswitch_cci_ports_cpld_read_lr10(nvswitch_device *device, NvU8 reg, NvU8 *pData);
NvlStatus nvswitch_cci_ports_cpld_write_lr10(nvswitch_device *device, NvU8 reg, NvU8 data);
void nvswitch_cci_update_link_state_led_lr10(nvswitch_device *device);
NvlStatus nvswitch_cci_set_xcvr_led_state_lr10(nvswitch_device *device, NvU32 client, NvU32 osfp, NvBool bSetLocate);
NvlStatus nvswitch_cci_get_xcvr_led_state_lr10(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 *pLedState);
NvlStatus nvswitch_cci_setup_onboard_lr10(nvswitch_device *device);
NvlStatus nvswitch_cci_setup_module_path_lr10(nvswitch_device *device, NvU32 client, NvU32 osfp);
NvlStatus nvswitch_cci_module_access_cmd_lr10(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU32 addr, NvU32 length,
NvU8 *pValArray, NvBool bRead, NvBool bBlk);
NvlStatus nvswitch_cci_apply_control_set_values_lr10(nvswitch_device *device, NvU32 client, NvU32 moduleMask);
NvlStatus nvswitch_cci_cmis_cage_bezel_marking_lr10(nvswitch_device *device, NvU8 cageIndex, char *pBezelMarking);
NvlStatus nvswitch_cci_get_grading_values_lr10(nvswitch_device *device, NvU32 client, NvU32 linkId, NvU8 *laneMask, NVSWITCH_CCI_GRADING_VALUES *pGrading);
NvlStatus nvswitch_cci_get_xcvr_mask_lr10(nvswitch_device *device, NvU32 *pMaskAll, NvU32 *pMaskPresent);
void nvswitch_cci_set_xcvr_present_lr10(nvswitch_device *device, NvU32 maskPresent);
void nvswitch_cci_destroy_lr10(nvswitch_device *device);
#endif //_CCI_LR10_H_

View File

@@ -652,6 +652,7 @@ void nvswitch_setup_link_loopback_mode_lr10(nvswitch_device *device, NvU32
void nvswitch_reset_persistent_link_hw_state_lr10(nvswitch_device *device, NvU32 linkNumber);
void nvswitch_store_topology_information_lr10(nvswitch_device *device, nvlink_link *link);
void nvswitch_init_lpwr_regs_lr10(nvlink_link *link);
void nvswitch_program_l1_scratch_reg_lr10(nvswitch_device *device, NvU32 linkNumber);
NvlStatus nvswitch_set_training_mode_lr10(nvswitch_device *device);
NvBool nvswitch_i2c_is_device_access_allowed_lr10(nvswitch_device *device, NvU32 port, NvU8 addr, NvBool bIsRead);
NvU32 nvswitch_get_sublink_width_lr10(nvswitch_device *device,NvU32 linkNumber);
@@ -673,6 +674,14 @@ void nvswitch_setup_link_system_registers_lr10(nvswitch_device *device, nvl
void nvswitch_load_link_disable_settings_lr10(nvswitch_device *device, nvlink_link *link);
NvBool nvswitch_is_smbpbi_supported_lr10(nvswitch_device *device);
NvlStatus nvswitch_ctrl_get_board_part_number_lr10(nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p);
NvlStatus nvswitch_ctrl_get_link_l1_capability_lr10(nvswitch_device *device, NvU32 linkId, NvBool *isL1Capable);
NvlStatus nvswitch_ctrl_get_link_l1_threshold_lr10(nvswitch_device *device, NvU32 linkNum, NvU32 *lpThreshold);
NvlStatus nvswitch_ctrl_set_link_l1_threshold_lr10(nvlink_link *link, NvU32 lpEntryThreshold);
NvlStatus nvswitch_get_board_id_lr10(nvswitch_device *device, NvU16 *boardId);
NvlStatus nvswitch_ctrl_get_soe_heartbeat_lr10(nvswitch_device *device, NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p);
void nvswitch_update_link_state_led_lr10(nvswitch_device *device);
void nvswitch_led_shutdown_lr10(nvswitch_device *device);
NvlStatus nvswitch_ctrl_set_mc_rid_table_lr10(nvswitch_device *device, NVSWITCH_SET_MC_RID_TABLE_PARAMS *p);
NvlStatus nvswitch_ctrl_get_mc_rid_table_lr10(nvswitch_device *device, NVSWITCH_GET_MC_RID_TABLE_PARAMS *p);
@@ -682,5 +691,21 @@ NvlStatus nvswitch_reset_and_train_link_lr10(nvswitch_device *device, nvlink_lin
NvlStatus nvswitch_ctrl_get_bios_info_lr10(nvswitch_device *device, NVSWITCH_GET_BIOS_INFO_PARAMS *p);
NvBool nvswitch_does_link_need_termination_enabled_lr10(nvswitch_device *device, nvlink_link *link);
NvlStatus nvswitch_link_termination_setup_lr10(nvswitch_device *device, nvlink_link* link);
void nvswitch_fsp_update_cmdq_head_tail_lr10(nvswitch_device *device, NvU32 queueHead, NvU32 queueTail);
void nvswitch_fsp_get_cmdq_head_tail_lr10(nvswitch_device *device, NvU32 *pQueueHead, NvU32 *pQueueTail);
void nvswitch_fsp_update_msgq_head_tail_lr10(nvswitch_device *device, NvU32 msgqHead, NvU32 msgqTail);
void nvswitch_fsp_get_msgq_head_tail_lr10(nvswitch_device *device, NvU32 *pMsgqHead, NvU32 *pMsgqTail);
NvU32 nvswitch_fsp_get_channel_size_lr10(nvswitch_device *device);
NvU8 nvswitch_fsp_nvdm_to_seid_lr10(nvswitch_device *device, NvU8 nvdmType);
NvU32 nvswitch_fsp_create_mctp_header_lr10(nvswitch_device *device, NvU8 som, NvU8 eom, NvU8 seid, NvU8 seq);
NvU32 nvswitch_fsp_create_nvdm_header_lr10(nvswitch_device *device, NvU32 nvdmType);
NvlStatus nvswitch_fsp_get_packet_info_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size, NvU8 *pPacketState, NvU8 *pTag);
NvlStatus nvswitch_fsp_validate_mctp_payload_header_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_process_nvdm_msg_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_process_cmd_response_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_config_ememc_lr10(nvswitch_device *device, NvU32 offset, NvBool bAincw, NvBool bAincr);
NvlStatus nvswitch_fsp_write_to_emem_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_read_from_emem_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_error_code_to_nvlstatus_map_lr10(nvswitch_device *device, NvU32 errorCode);
#endif //_LR10_H_

View File

@@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CCI_LS10_H_
#define _CCI_LS10_H_
#define NVSWITCH_CCI_VBIOS_REV_LOCK_VERSION_LS10 (0x9610360000)
#define NVSWITCH_CCI_VBIOS_REV_LOCK_VERSION_STRING_LS10 ("96.10.36.00.00")
#define NVSWITCH_CCI_NUM_LINKS_PER_OSFP_LS10 4
// HALs
NvBool nvswitch_is_cci_supported_ls10(nvswitch_device *device);
void nvswitch_cci_setup_gpio_pins_ls10(nvswitch_device *device);
NvlStatus nvswitch_cci_get_cci_link_mode_ls10(nvswitch_device *device, NvU32 linkNumber, NvU64 *mode);
NvlStatus nvswitch_cci_discovery_ls10(nvswitch_device *device);
void nvswitch_cci_get_xcvrs_present_ls10(nvswitch_device *device, NvU32 *pMaskPresent);
void nvswitch_cci_get_xcvrs_present_change_ls10(nvswitch_device *device, NvU32 *pMaskPresentChange);
NvlStatus nvswitch_cci_reset_ls10(nvswitch_device *device);
NvlStatus nvswitch_cci_reset_links_ls10(nvswitch_device *device, NvU64 linkMask);
NvlStatus nvswitch_cci_ports_cpld_read_ls10(nvswitch_device *device, NvU8 reg, NvU8 *pData);
NvlStatus nvswitch_cci_ports_cpld_write_ls10(nvswitch_device *device, NvU8 reg, NvU8 data);
void nvswitch_cci_update_link_state_led_ls10(nvswitch_device *device);
NvlStatus nvswitch_cci_set_xcvr_led_state_ls10(nvswitch_device *device, NvU32 client, NvU32 osfp, NvBool bSetLocate);
NvlStatus nvswitch_cci_get_xcvr_led_state_ls10(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU8 *pLedState);
NvlStatus nvswitch_cci_setup_onboard_ls10(nvswitch_device *device);
NvlStatus nvswitch_cci_setup_module_path_ls10(nvswitch_device *device, NvU32 client, NvU32 osfp);
NvlStatus nvswitch_cci_module_access_cmd_ls10(nvswitch_device *device, NvU32 client, NvU32 osfp, NvU32 addr, NvU32 length,
NvU8 *pValArray, NvBool bRead, NvBool bBlk);
NvlStatus nvswitch_cci_apply_control_set_values_ls10(nvswitch_device *device, NvU32 client, NvU32 moduleMask);
NvlStatus nvswitch_cci_cmis_cage_bezel_marking_ls10(nvswitch_device *device, NvU8 cageIndex, char *pBezelMarking);
NvlStatus nvswitch_cci_get_grading_values_ls10(nvswitch_device *device, NvU32 client, NvU32 linkId, NvU8 *laneMask, NVSWITCH_CCI_GRADING_VALUES *pGrading);
NvlStatus nvswitch_cci_get_xcvr_mask_ls10(nvswitch_device *device, NvU32 *pMaskAll, NvU32 *pMaskPresent);
void nvswitch_cci_set_xcvr_present_ls10(nvswitch_device *device, NvU32 maskPresent);
void nvswitch_cci_destroy_ls10(nvswitch_device *device);
#endif //_CCI_LS10_H_

View File

@@ -188,6 +188,18 @@
#define SOE_VBIOS_VERSION_MASK 0xFF0000
#define SOE_VBIOS_REVLOCK_DISABLE_NPORT_FATAL_INTR 0x370000
#define SOE_VBIOS_REVLOCK_ISSUE_INGRESS_STOP 0x440000
// LS10 Saved LED state
#define ACCESS_LINK_LED_STATE CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED
// Access link LED states on LS10 Systems
#define ACCESS_LINK_LED_STATE_FAULT 0U
#define ACCESS_LINK_LED_STATE_OFF 1U
#define ACCESS_LINK_LED_STATE_INITIALIZE 2U
#define ACCESS_LINK_LED_STATE_UP_WARM 3U
#define ACCESS_LINK_LED_STATE_UP_ACTIVE 4U
#define ACCESS_LINK_NUM_LED_STATES 5U
//
// Helpful IO wrappers
@@ -503,7 +515,8 @@ typedef struct
NV_NPORT_PORTSTAT_LS10(_block, _reg, _idx, ), _data); \
}
#define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1SEC_IN_NS)
#define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS ((device->bModeContinuousALI ? 12 : 30) *\
NVSWITCH_INTERVAL_1SEC_IN_NS)
#define NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1MSEC_IN_NS)
// Struct used for passing around error masks in error handling functions
@@ -788,7 +801,6 @@ typedef const struct
#define nvswitch_is_link_valid_ls10 nvswitch_is_link_valid_lr10
#define nvswitch_is_link_in_use_ls10 nvswitch_is_link_in_use_lr10
#define nvswitch_initialize_device_state_ls10 nvswitch_initialize_device_state_lr10
#define nvswitch_deassert_link_reset_ls10 nvswitch_deassert_link_reset_lr10
#define nvswitch_determine_platform_ls10 nvswitch_determine_platform_lr10
#define nvswitch_get_swap_clk_default_ls10 nvswitch_get_swap_clk_default_lr10
@@ -938,6 +950,7 @@ void nvswitch_corelib_clear_link_state_lr10(nvlink_link *link);
NvlStatus nvswitch_corelib_set_dl_link_mode_ls10(nvlink_link *link, NvU64 mode, NvU32 flags);
NvlStatus nvswitch_corelib_set_tx_mode_ls10(nvlink_link *link, NvU64 mode, NvU32 flags);
void nvswitch_init_lpwr_regs_ls10(nvlink_link *link);
void nvswitch_program_l1_scratch_reg_ls10(nvswitch_device *device, NvU32 linkNumber);
NvlStatus nvswitch_minion_service_falcon_interrupts_ls10(nvswitch_device *device, NvU32 instance);
@@ -987,15 +1000,16 @@ void nvswitch_execute_unilateral_link_shutdown_ls10(nvlink_link *link);
void nvswitch_setup_link_system_registers_ls10(nvswitch_device *device, nvlink_link *link);
void nvswitch_load_link_disable_settings_ls10(nvswitch_device *device, nvlink_link *link);
void nvswitch_link_disable_interrupts_ls10(nvswitch_device *device, NvU32 link);
void nvswitch_init_dlpl_interrupts_ls10(nvlink_link *link);
void nvswitch_set_dlpl_interrupts_ls10(nvlink_link *link);
NvlStatus nvswitch_reset_and_drain_links_ls10(nvswitch_device *device, NvU64 link_mask);
void nvswitch_service_minion_all_links_ls10(nvswitch_device *device);
NvlStatus nvswitch_ctrl_get_board_part_number_ls10(nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p);
void nvswitch_create_deferred_link_state_check_task_ls10(nvswitch_device *device, NvU32 nvlipt_instance, NvU32 link);
NvlStatus nvswitch_request_tl_link_state_ls10(nvlink_link *link, NvU32 tlLinkState, NvBool bSync);
NvlStatus nvswitch_ctrl_get_link_l1_capability_ls10(nvswitch_device *device, NvU32 linkId, NvBool *isL1Capable);
NvlStatus nvswitch_ctrl_get_link_l1_threshold_ls10(nvswitch_device *device, NvU32 linkNum, NvU32 *lpThreshold);
NvlStatus nvswitch_ctrl_set_link_l1_threshold_ls10(nvlink_link *link, NvU32 lpEntryThreshold);
NvlStatus nvswitch_get_board_id_ls10(nvswitch_device *device, NvU16 *boardId);
//
// SU generated functions
@@ -1020,6 +1034,30 @@ NvBool nvswitch_are_link_clocks_on_ls10(nvswitch_device *device, nvlink_link
NvBool nvswitch_does_link_need_termination_enabled_ls10(nvswitch_device *device, nvlink_link *link);
NvlStatus nvswitch_link_termination_setup_ls10(nvswitch_device *device, nvlink_link* link);
void nvswitch_get_error_rate_threshold_ls10(nvlink_link *link);
void nvswitch_fsp_update_cmdq_head_tail_ls10(nvswitch_device *device, NvU32 queueHead, NvU32 queueTail);
void nvswitch_fsp_get_cmdq_head_tail_ls10(nvswitch_device *device, NvU32 *pQueueHead, NvU32 *pQueueTail);
void nvswitch_fsp_update_msgq_head_tail_ls10(nvswitch_device *device, NvU32 msgqHead, NvU32 msgqTail);
void nvswitch_fsp_get_msgq_head_tail_ls10(nvswitch_device *device, NvU32 *pMsgqHead, NvU32 *pMsgqTail);
NvU32 nvswitch_fsp_get_channel_size_ls10(nvswitch_device *device);
NvU8 nvswitch_fsp_nvdm_to_seid_ls10(nvswitch_device *device, NvU8 nvdmType);
NvU32 nvswitch_fsp_create_mctp_header_ls10(nvswitch_device *device, NvU8 som, NvU8 eom, NvU8 seid, NvU8 seq);
NvU32 nvswitch_fsp_create_nvdm_header_ls10(nvswitch_device *device, NvU32 nvdmType);
NvlStatus nvswitch_fsp_get_packet_info_ls10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size, NvU8 *pPacketState, NvU8 *pTag);
NvlStatus nvswitch_fsp_validate_mctp_payload_header_ls10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_process_nvdm_msg_ls10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_process_cmd_response_ls10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_config_ememc_ls10(nvswitch_device *device, NvU32 offset, NvBool bAincw, NvBool bAincr);
NvlStatus nvswitch_fsp_write_to_emem_ls10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_read_from_emem_ls10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
NvlStatus nvswitch_fsp_error_code_to_nvlstatus_map_ls10(nvswitch_device *device, NvU32 errorCode);
NvlStatus nvswitch_fsprpc_get_caps_ls10(nvswitch_device *device, NVSWITCH_FSPRPC_GET_CAPS_PARAMS *params);
NvlStatus nvswitch_ctrl_get_soe_heartbeat_ls10(nvswitch_device *device, NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p);
NvlStatus nvswitch_cci_enable_iobist_ls10(nvswitch_device *device, NvU32 linkNumber, NvBool bEnable);
NvlStatus nvswitch_cci_initialization_sequence_ls10(nvswitch_device *device, NvU32 linkNumber);
NvlStatus nvswitch_cci_deinitialization_sequence_ls10(nvswitch_device *device, NvU32 linkNumber);
void nvswitch_update_link_state_led_ls10(nvswitch_device *device);
void nvswitch_led_shutdown_ls10(nvswitch_device *device);
#endif //_LS10_H_

View File

@@ -42,10 +42,11 @@
//
// Debug and trace print toggles
// To enable tracing, define NVSWITCH_MC_TRACE
// To enable extensive debug messages uncomment the below define NVSWITCH_MC_DEBUG
//
#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
#define NVSWITCH_MC_DEBUG 1
#endif
// #if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
// #define NVSWITCH_MC_DEBUG 1
// #endif
typedef struct {
NvU32 column;

View File

@@ -298,6 +298,52 @@
#define NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_DISABLE 0x00
#define NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_ENABLE 0x01
/*
* CCI Control
*
* This regkey controls enablement of CCI on LS10 systems.
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_CCI_CONTROL "CCIControl"
#define NV_SWITCH_REGKEY_CCI_CONTROL_DEFAULT 0x1
#define NV_SWITCH_REGKEY_CCI_CONTROL_ENABLE 0:0
#define NV_SWITCH_REGKEY_CCI_CONTROL_ENABLE_FALSE 0x00
#define NV_SWITCH_REGKEY_CCI_CONTROL_ENABLE_TRUE 0x01
/*
* CCI Link Train Disable Mask
*
* These regkeys will disable link training for CCI managed links.
*
* Mask contains links 0-31
* Mask2 contains links 32-63
*
* Private: Will only be applied on debug/develop drivers and MODS
*/
#define NV_SWITCH_REGKEY_CCI_DISABLE_LINK_TRAIN_MASK "CCILinkTrainDisableMask"
#define NV_SWITCH_REGKEY_CCI_DISABLE_LINK_TRAIN_MASK2 "CCILinkTrainDisableMask2"
#define NV_SWITCH_REGKEY_CCI_DISABLE_LINK_TRAIN_MASK_DEFAULT 0x0
#define NV_SWITCH_REGKEY_CCI_DISABLE_LINK_TRAIN_MASK2_DEFAULT 0x0
/*
* CCI Max Onboard Attempts
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_CCI_MAX_ONBOARD_ATTEMPTS "CCIMaxOnboardAttempts"
#define NV_SWITCH_REGKEY_CCI_MAX_ONBOARD_ATTEMPTS_DEFAULT 0x5
/*
* CCI Error Log Enable
*
* This regkey will enable emission of CCI module onboarding SXids errors.
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_CCI_ERROR_LOG_ENABLE "CCIErrorLogEnable"
#define NV_SWITCH_REGKEY_CCI_ERROR_LOG_ENABLE_DEFAULT 0x1
/*
* Disables logging of latency counters
*

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -55,6 +55,11 @@ struct SOE
/*! The event descriptor for the Thermal event handler */
NvU32 thermEvtDesc;
/*! The event descriptor for the CCI event handler */
NvU32 cciEvtDesc;
/*! The event descriptor for the Heartbeat event handler */
NvU32 heartbeatEvtDesc;
};
#endif //_SOE_PRIV_NVSWITCH_H_

View File

@@ -0,0 +1,258 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
//
// LR10 silicon has no CCI (cable controller interface) support.  Every
// entry point below is a stub so the cross-chip HAL dispatch table still
// resolves on LR10: status-returning entry points report
// -NVL_ERR_NOT_SUPPORTED and void entry points are no-ops.
//
NvBool
nvswitch_is_cci_supported_lr10
(
    nvswitch_device *device
)
{
    return NV_FALSE;
}

void
nvswitch_cci_setup_gpio_pins_lr10
(
    nvswitch_device *device
)
{
    // NOP
}

NvlStatus
nvswitch_cci_get_cci_link_mode_lr10
(
    nvswitch_device *device,
    NvU32 linkNumber,
    NvU64 *mode
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_discovery_lr10
(
    nvswitch_device *device
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

void
nvswitch_cci_get_xcvrs_present_lr10
(
    nvswitch_device *device,
    NvU32 *pMaskPresent
)
{
    // NOP
}

void
nvswitch_cci_get_xcvrs_present_change_lr10
(
    nvswitch_device *device,
    NvU32 *pMaskPresentChange
)
{
    // NOP
}

NvlStatus
nvswitch_cci_reset_lr10
(
    nvswitch_device *device
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_reset_links_lr10
(
    nvswitch_device *device,
    NvU64 linkMask
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_ports_cpld_read_lr10
(
    nvswitch_device *device,
    NvU8 reg,
    NvU8 *pData
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_ports_cpld_write_lr10
(
    nvswitch_device *device,
    NvU8 reg,
    NvU8 data
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

void
nvswitch_cci_update_link_state_led_lr10
(
    nvswitch_device *device
)
{
    // Not supported
}

NvlStatus
nvswitch_cci_set_xcvr_led_state_lr10
(
    nvswitch_device *device,
    NvU32 client,
    NvU32 osfp,
    NvBool bSetLocate
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_get_xcvr_led_state_lr10
(
    nvswitch_device *device,
    NvU32 client,
    NvU32 osfp,
    NvU8 *pLedState
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_setup_onboard_lr10
(
    nvswitch_device *device
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_setup_module_path_lr10
(
    nvswitch_device *device,
    NvU32 client,
    NvU32 osfp
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_module_access_cmd_lr10
(
    nvswitch_device *device,
    NvU32 client,
    NvU32 osfp,
    NvU32 addr,
    NvU32 length,
    NvU8 *pValArray,
    NvBool bRead,
    NvBool bBlk
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_apply_control_set_values_lr10
(
    nvswitch_device *device,
    NvU32 client,
    NvU32 moduleMask
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_cmis_cage_bezel_marking_lr10
(
    nvswitch_device *device,
    NvU8 cageIndex,
    char *pBezelMarking
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_get_grading_values_lr10
(
    nvswitch_device *device,
    NvU32 client,
    NvU32 linkId,
    NvU8 *laneMask,
    NVSWITCH_CCI_GRADING_VALUES *pGrading
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_cci_get_xcvr_mask_lr10
(
    nvswitch_device *device,
    NvU32 *pMaskAll,
    NvU32 *pMaskPresent
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

void
nvswitch_cci_set_xcvr_present_lr10
(
    nvswitch_device *device,
    NvU32 maskPresent
)
{
    // NOP
}

void
nvswitch_cci_destroy_lr10
(
    nvswitch_device *device
)
{
    // NOP
}

View File

@@ -571,6 +571,11 @@ nvswitch_init_lpwr_regs_lr10
return;
}
if (nvswitch_is_link_in_reset(device, link))
{
return;
}
if (device->regkeys.enable_pm == NV_SWITCH_REGKEY_ENABLE_PM_NO)
{
return;
@@ -686,6 +691,15 @@ nvswitch_init_lpwr_regs_lr10
tempRegVal);
}
// HAL stub: L1 scratch register programming is not implemented on LR10;
// the LS10 implementation provides the real behavior.
void
nvswitch_program_l1_scratch_reg_lr10
(
    nvswitch_device *device,
    NvU32 linkNumber
)
{
    // Not Implemented for LR10
}
void
nvswitch_init_buffer_ready_lr10
@@ -2635,3 +2649,36 @@ nvswitch_link_termination_setup_lr10
return NVL_SUCCESS;
}
//
// NVLink L1 (low-power state) query/threshold controls are not supported
// on LR10; the stubs below report -NVL_ERR_NOT_SUPPORTED so the common
// control path can fall through cleanly.
//
NvlStatus
nvswitch_ctrl_get_link_l1_capability_lr10
(
    nvswitch_device *device,
    NvU32 linkNum,
    NvBool *isL1Capable
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_ctrl_get_link_l1_threshold_lr10
(
    nvswitch_device *device,
    NvU32 linkNum,
    NvU32 *lpThreshold
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_ctrl_set_link_l1_threshold_lr10
(
    nvlink_link *link,
    NvU32 lpEntryThreshold
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,6 +36,7 @@
#include "lr10/smbpbi_lr10.h"
#include "flcn/flcnable_nvswitch.h"
#include "soe/soe_nvswitch.h"
#include "lr10/cci_lr10.h"
#include "nvswitch/lr10/dev_nvs_top.h"
#include "nvswitch/lr10/dev_pri_ringmaster.h"
@@ -3656,6 +3657,15 @@ nvswitch_initialize_device_state_lr10
goto nvswitch_initialize_device_state_exit;
}
retval = nvswitch_check_io_sanity(device);
if (NVL_SUCCESS != retval)
{
NVSWITCH_PRINT(device, ERROR,
"%s: IO sanity test failed\n",
__FUNCTION__);
goto nvswitch_initialize_device_state_exit;
}
NVSWITCH_PRINT(device, SETUP,
"%s: MMIO discovery\n",
__FUNCTION__);
@@ -5974,7 +5984,8 @@ NvlStatus
nvswitch_reset_and_drain_links_lr10
(
nvswitch_device *device,
NvU64 link_mask
NvU64 link_mask,
NvBool bForced
)
{
NvlStatus status = -NVL_ERR_GENERIC;
@@ -7856,6 +7867,250 @@ nvswitch_ctrl_get_nvlink_error_threshold_lr10
return -NVL_ERR_NOT_SUPPORTED;
}
//
// LR10 stubs: board-id lookup, SOE heartbeat, CCI reset/drain, LED control,
// IO sanity, and the entire FSP mailbox interface do not exist on LR10.
// Status-returning stubs report -NVL_ERR_NOT_SUPPORTED (or a benign
// 0/no-op where the return type is unsigned/void).
//
NvlStatus
nvswitch_get_board_id_lr10
(
    nvswitch_device *device,
    NvU16 *pBoardId
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_ctrl_get_soe_heartbeat_lr10
(
    nvswitch_device *device,
    NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

static NvlStatus
nvswitch_cci_reset_and_drain_links_lr10
(
    nvswitch_device *device,
    NvU64 link_mask,
    NvBool bForced
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

void
nvswitch_update_link_state_led_lr10
(
    nvswitch_device *device
)
{
    return;
}

void
nvswitch_led_shutdown_lr10
(
    nvswitch_device *device
)
{
    return;
}

NvlStatus
nvswitch_check_io_sanity_lr10
(
    nvswitch_device *device
)
{
    // No pre-discovery IO check is needed on LR10; always succeeds.
    return NVL_SUCCESS;
}

void
nvswitch_fsp_update_cmdq_head_tail_lr10
(
    nvswitch_device *device,
    NvU32 queueHead,
    NvU32 queueTail
)
{
    return; // -NVL_ERR_NOT_SUPPORTED;
}

void
nvswitch_fsp_get_cmdq_head_tail_lr10
(
    nvswitch_device *device,
    NvU32 *pQueueHead,
    NvU32 *pQueueTail
)
{
    return; // -NVL_ERR_NOT_SUPPORTED;
}

void
nvswitch_fsp_update_msgq_head_tail_lr10
(
    nvswitch_device *device,
    NvU32 msgqHead,
    NvU32 msgqTail
)
{
    return; // -NVL_ERR_NOT_SUPPORTED;
}

void
nvswitch_fsp_get_msgq_head_tail_lr10
(
    nvswitch_device *device,
    NvU32 *pMsgqHead,
    NvU32 *pMsgqTail
)
{
    return; // -NVL_ERR_NOT_SUPPORTED;
}

NvU32
nvswitch_fsp_get_channel_size_lr10
(
    nvswitch_device *device
)
{
    return 0; // -NVL_ERR_NOT_SUPPORTED;
}

NvU8
nvswitch_fsp_nvdm_to_seid_lr10
(
    nvswitch_device *device,
    NvU8 nvdmType
)
{
    // NOTE(review): the negative status is truncated into the NvU8 return
    // value, so callers cannot distinguish it from a valid SEID -- confirm
    // this path is unreachable on LR10 or return 0 instead.
    return -NVL_ERR_NOT_SUPPORTED;
}

NvU32
nvswitch_fsp_create_mctp_header_lr10
(
    nvswitch_device *device,
    NvU8 som,
    NvU8 eom,
    NvU8 seid,
    NvU8 seq
)
{
    // NOTE(review): negative status converted to a large unsigned value by
    // the NvU32 return type -- confirm callers never consume this on LR10.
    return -NVL_ERR_NOT_SUPPORTED;
}

NvU32
nvswitch_fsp_create_nvdm_header_lr10
(
    nvswitch_device *device,
    NvU32 nvdmType
)
{
    return 0; // -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_get_packet_info_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size,
    NvU8 *pPacketState,
    NvU8 *pTag
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_validate_mctp_payload_header_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_process_nvdm_msg_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_process_cmd_response_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_config_ememc_lr10
(
    nvswitch_device *device,
    NvU32 offset,
    NvBool bAincw,
    NvBool bAincr
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_write_to_emem_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_read_from_emem_lr10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsp_error_code_to_nvlstatus_map_lr10
(
    nvswitch_device *device,
    NvU32 errorCode
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}

NvlStatus
nvswitch_fsprpc_get_caps_lr10
(
    nvswitch_device *device,
    NVSWITCH_FSPRPC_GET_CAPS_PARAMS *params
)
{
    return -NVL_ERR_NOT_SUPPORTED;
}
//
// This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL
// macro in haldef_nvswitch.h

File diff suppressed because it is too large Load Diff

View File

@@ -279,6 +279,7 @@ _flcnDbgInfoCaptureRiscvPcTrace_LS10
{
NvU32 ctl, ridx, widx, bufferSize;
NvBool bWasFull;
NvU64 bios_version;
// Only supported on riscv
if (!UPROC_ENG_ARCH_FALCON_RISCV(pFlcn))
@@ -354,8 +355,13 @@ _flcnDbgInfoCaptureRiscvPcTrace_LS10
break;
}
NVSWITCH_PRINT(device, ERROR, "%s: TRACE[%d] = 0x%16llx\n", __FUNCTION__, entry, pc);
NVSWITCH_PRINT_SXID_NO_BBX(device, NVSWITCH_ERR_HW_SOE_HALT, "SOE HALT data[%d] = 0x%16llx\n", entry, pc);
}
// Print VBIOS version at the end
nvswitch_lib_get_bios_version(device, &bios_version);
NVSWITCH_PRINT_SXID_NO_BBX(device, NVSWITCH_ERR_HW_SOE_HALT,
"SOE HALT data[%d] = 0x%16llx\n", entry, bios_version);
}
// reset trace buffer

View File

@@ -0,0 +1,612 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvlink_export.h"
#include "common_nvswitch.h"
#include "fsprpc_nvswitch.h"
#include "ls10/ls10.h"
#include "fsp/fsp_emem_channels.h"
#include "fsp/nvdm_payload_cmd_response.h"
#include "fsp/fsp_nvdm_format.h"
#include "fsp/fsp_mctp_format.h"
#include "fsp/fsp_tnvl_rpc.h"
#include "nvswitch/ls10/dev_fsp_pri.h"
/*!
* @brief Update command queue head and tail pointers
*
* @param[in] device nvswitch device pointer
* @param[in] queueHead Offset to write to command queue head
* @param[in] queueTail Offset to write to command queue tail
*/
void
nvswitch_fsp_update_cmdq_head_tail_ls10
(
    nvswitch_device *device,
    NvU32 queueHead,
    NvU32 queueTail
)
{
    // The write to HEAD needs to happen after TAIL because it will interrupt FSP
    // (presumably FSP samples TAIL when the HEAD doorbell fires -- do not
    // reorder these two writes).
    NVSWITCH_REG_WR32(device, _PFSP, _QUEUE_TAIL(FSP_EMEM_CHANNEL_RM), queueTail);
    NVSWITCH_REG_WR32(device, _PFSP, _QUEUE_HEAD(FSP_EMEM_CHANNEL_RM), queueHead);
}
/*!
* @brief Read command queue head and tail pointers
*
* @param[in] device nvswitch device pointer
* @param[out] pQueueHead Pointer where we write command queue head
* @param[out] pQueueTail Pointer where we write command queue tail
*/
void
nvswitch_fsp_get_cmdq_head_tail_ls10
(
    nvswitch_device *device,
    NvU32 *pQueueHead,
    NvU32 *pQueueTail
)
{
    // Snapshot both RM-channel command queue pointers from the FSP PRI space.
    *pQueueHead = NVSWITCH_REG_RD32(device, _PFSP, _QUEUE_HEAD(FSP_EMEM_CHANNEL_RM));
    *pQueueTail = NVSWITCH_REG_RD32(device, _PFSP, _QUEUE_TAIL(FSP_EMEM_CHANNEL_RM));
}
/*!
* @brief Update message queue head and tail pointers
*
* @param[in] device nvswitch device pointer
* @param[in] msgqHead Offset to write to message queue head
* @param[in] msgqTail Offset to write to message queue tail
*/
void
nvswitch_fsp_update_msgq_head_tail_ls10
(
    nvswitch_device *device,
    NvU32 msgqHead,
    NvU32 msgqTail
)
{
    // TAIL is written before HEAD, mirroring the command queue update.
    // NOTE(review): assumes the msgq HEAD write also notifies FSP -- confirm.
    NVSWITCH_REG_WR32(device, _PFSP, _MSGQ_TAIL(FSP_EMEM_CHANNEL_RM), msgqTail);
    NVSWITCH_REG_WR32(device, _PFSP, _MSGQ_HEAD(FSP_EMEM_CHANNEL_RM), msgqHead);
}
/*!
* @brief Read message queue head and tail pointers
*
* @param[in] device nvswitch device pointer
* @param[out] pMsgqHead Pointer where we write message queue head
* @param[out] pMsgqTail Pointer where we write message queue tail
*/
void
nvswitch_fsp_get_msgq_head_tail_ls10
(
    nvswitch_device *device,
    NvU32 *pMsgqHead,
    NvU32 *pMsgqTail
)
{
    // Snapshot both RM-channel message queue pointers from the FSP PRI space.
    *pMsgqHead = NVSWITCH_REG_RD32(device, _PFSP, _MSGQ_HEAD(FSP_EMEM_CHANNEL_RM));
    *pMsgqTail = NVSWITCH_REG_RD32(device, _PFSP, _MSGQ_TAIL(FSP_EMEM_CHANNEL_RM));
}
/*!
* @brief Get size of RM's channel in FSP EMEM
*
* @param[in] device nvswitch device pointer
*
* @return RM channel size in bytes
*/
NvU32
nvswitch_fsp_get_channel_size_ls10
(
    nvswitch_device *device
)
{
    //
    // Channel size is hardcoded to 1K for now. Later we will use EMEMR to
    // properly fetch the lower and higher bounds of the EMEM channel
    // (see FSP_EMEM_CHANNEL_RM_SIZE for the static carve-out).
    //
    return FSP_EMEM_CHANNEL_RM_SIZE;
}
/*!
 * @brief Retrieve SEID based on NVDM type
*
* For now, SEIDs are only needed for use-cases that send multi-packet RM->FSP
* messages. The SEID is used in these cases to route packets to the correct
* task as FSP receives them. Single-packet use-cases are given SEID 0.
*
* @param[in] device nvswitch device pointer
* @param[in] nvdmType NVDM message type
*
* @return SEID corresponding to passed-in NVDM type
*/
NvU8
nvswitch_fsp_nvdm_to_seid_ls10
(
    nvswitch_device *device,
    NvU8 nvdmType
)
{
    //
    // Only INFOROM traffic is multi-packet today and therefore needs a
    // dedicated SEID (1); every other NVDM type, including HULK, shares
    // the single-packet SEID 0.
    //
    return (nvdmType == NVDM_TYPE_INFOROM) ? 1 : 0;
}
/*!
* @brief Create MCTP header
*
* @param[in] device nvswitch_device pointer
* @param[in] som Start of Message flag
* @param[in] eom End of Message flag
 * @param[in] seid Source endpoint ID (SEID) used to route the packet
* @param[in] seq Packet sequence number
*
* @return Constructed MCTP header
*/
NvU32
nvswitch_fsp_create_mctp_header_ls10
(
    nvswitch_device *device,
    NvU8 som,
    NvU8 eom,
    NvU8 seid,
    NvU8 seq
)
{
    // Assemble the transport header one field at a time; the result is
    // bit-identical to OR-ing the four field encodings together.
    NvU32 mctpHeader = 0;

    mctpHeader |= REF_NUM(MCTP_HEADER_SOM, (som));
    mctpHeader |= REF_NUM(MCTP_HEADER_EOM, (eom));
    mctpHeader |= REF_NUM(MCTP_HEADER_SEID, (seid));
    mctpHeader |= REF_NUM(MCTP_HEADER_SEQ, (seq));

    return mctpHeader;
}
/*!
* @brief Create NVDM payload header
*
* @param[in] device nvswitch_device pointer
* @param[in] nvdmType NVDM type to include in header
*
* @return Constructed NVDM payload header
*/
NvU32
nvswitch_fsp_create_nvdm_header_ls10
(
    nvswitch_device *device,
    NvU32 nvdmType
)
{
    // Vendor-defined-PCI MCTP message header carrying NVIDIA's vendor ID
    // plus the caller's NVDM message type.
    return (REF_DEF(MCTP_MSG_HEADER_TYPE, _VENDOR_PCI) |
            REF_DEF(MCTP_MSG_HEADER_VENDOR_ID, _NV) |
            REF_NUM(MCTP_MSG_HEADER_NVDM_TYPE, (nvdmType)));
}
/*!
* @brief Retrieve and validate info in packet's MCTP headers
*
* @param[in] device nvswitch device pointer
* @param[in] pBuffer Buffer containing packet
* @param[in] size Size of buffer in bytes
* @param[out] pPacketState Pointer where we write packet state
* @param[out] pTag Pointer where we write packet's MCTP tag
*
* @return NVL_SUCCESS or NV_ERR_INVALID_DATA
*/
NvlStatus
nvswitch_fsp_get_packet_info_ls10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size,
    NvU8 *pPacketState,
    NvU8 *pTag
)
{
    NvlStatus status = NVL_SUCCESS;
    NvU32 transportHeader;
    NvU8 somFlag;
    NvU8 eomFlag;

    transportHeader = ((NvU32 *)pBuffer)[0];
    somFlag = REF_VAL(MCTP_HEADER_SOM, transportHeader);
    eomFlag = REF_VAL(MCTP_HEADER_EOM, transportHeader);

    //
    // Classify the packet by its SOM/EOM flag pair:
    //   SOM only  -> first packet of a multi-packet message
    //   EOM only  -> final packet
    //   both      -> self-contained single packet
    //   neither   -> intermediate packet
    //
    if (somFlag == 1)
    {
        *pPacketState = (eomFlag == 1) ? MCTP_PACKET_STATE_SINGLE_PACKET
                                       : MCTP_PACKET_STATE_START;
    }
    else
    {
        *pPacketState = (eomFlag == 1) ? MCTP_PACKET_STATE_END
                                       : MCTP_PACKET_STATE_INTERMEDIATE;
    }

    //
    // Only packets that begin a message carry the MCTP payload header,
    // so only those are validated here.
    //
    if ((*pPacketState == MCTP_PACKET_STATE_START) ||
        (*pPacketState == MCTP_PACKET_STATE_SINGLE_PACKET))
    {
        status = nvswitch_fsp_validate_mctp_payload_header(device, pBuffer, size);
    }

    *pTag = REF_VAL(MCTP_HEADER_TAG, transportHeader);

    return status;
}
/*!
* @brief Validate packet's MCTP payload header
*
* @param[in] device nvswitch device pointer
* @param[in] pBuffer Buffer containing packet
* @param[in] size Size of buffer in bytes
*
* @return NVL_SUCCESS or NV_ERR_INVALID_DATA
*/
NvlStatus
nvswitch_fsp_validate_mctp_payload_header_ls10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    NvU32 mctpPayloadHeader;
    NvU16 mctpVendorId;
    NvU8 mctpMessageType;

    //
    // Bounds-check the buffer BEFORE dereferencing it.  The previous code
    // read the payload header first and only checked the size last, so a
    // short buffer was read out of bounds before being rejected.
    // NOTE(review): reading ((NvU32 *)pBuffer)[1] touches bytes 4-7; this
    // check only guarantees sizeof(MCTP_HEADER) + 1 bytes -- confirm
    // MCTP_HEADER spans two DWORDs or tighten this bound.
    //
    if (size < (sizeof(MCTP_HEADER) + sizeof(NvU8)))
    {
        NVSWITCH_PRINT(device, ERROR, "Packet doesn't contain NVDM type in payload header\n");
        return -NVL_ERR_INVALID_STATE ;
    }

    // Second DWORD of the packet is the MCTP payload (message) header.
    mctpPayloadHeader = ((NvU32 *)pBuffer)[1];

    // Must be a vendor-defined PCI MCTP message...
    mctpMessageType = REF_VAL(MCTP_MSG_HEADER_TYPE, mctpPayloadHeader);
    if (mctpMessageType != MCTP_MSG_HEADER_TYPE_VENDOR_PCI)
    {
        NVSWITCH_PRINT(device, ERROR, "Invalid MCTP Message type 0x%0x, expecting 0x7e (Vendor Defined PCI)\n",
                       mctpMessageType);
        return -NVL_ERR_INVALID_STATE ;
    }

    // ...carrying NVIDIA's PCI vendor ID.
    mctpVendorId = REF_VAL(MCTP_MSG_HEADER_VENDOR_ID, mctpPayloadHeader);
    if (mctpVendorId != MCTP_MSG_HEADER_VENDOR_ID_NV)
    {
        NVSWITCH_PRINT(device, ERROR, "Invalid PCI Vendor Id 0x%0x, expecting 0x10de (Nvidia)\n",
                       mctpVendorId);
        return -NVL_ERR_INVALID_STATE ;
    }

    return NVL_SUCCESS;
}
/*!
* @brief Process NVDM payload
*
* @param[in] device nvswitch device pointer
* @param[in] pBuffer Buffer containing packet data
 * @param[in] size     Buffer size in bytes
*
* @return NVL_SUCCESS or NV_ERR_NOT_SUPPORTED
*/
NvlStatus
nvswitch_fsp_process_nvdm_msg_ls10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    // The first payload byte identifies the NVDM message type.
    NvU8 msgType = pBuffer[0];

    if (msgType == NVDM_TYPE_FSP_RESPONSE)
    {
        // Command responses are the only NVDM messages RM handles today.
        return nvswitch_fsp_process_cmd_response(device, pBuffer, size);
    }

    NVSWITCH_PRINT(device, ERROR, "Unknown or unsupported NVDM type received: 0x%0x\n",
        msgType);
    return -NVL_ERR_NOT_SUPPORTED;
}
/*!
* @brief Process FSP command response
*
* @param[in] device nvswitch device pointer
* @param[in] pBuffer Buffer containing packet data
* @param[in] Size Buffer size
*
* @return NVL_SUCCESS or -NVL_ERR_INVALID_STATE
*/
NvlStatus
nvswitch_fsp_process_cmd_response_ls10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    NVDM_PAYLOAD_COMMAND_RESPONSE *pCmdResponse;
    NvU32 headerSize = sizeof(NvU8); // NVDM type
    NvlStatus status = NVL_SUCCESS;
    // Reject packets too small to hold the NVDM-type byte plus the
    // response payload before casting into it.
    if (size < (headerSize + sizeof(NVDM_PAYLOAD_COMMAND_RESPONSE)))
    {
        NVSWITCH_PRINT(device, ERROR, "Expected FSP command response, but packet is not big enough for payload. Size: 0x%0x\n", size);
        return -NVL_ERR_INVALID_STATE;
    }
    // Response payload immediately follows the one-byte NVDM type.
    pCmdResponse = (NVDM_PAYLOAD_COMMAND_RESPONSE *)&(pBuffer[1]);
    NVSWITCH_PRINT(device, INFO, "Received FSP command response. Task ID: 0x%0x Command type: 0x%0x Error code: 0x%0x\n",
        pCmdResponse->taskId, pCmdResponse->commandNvdmType, pCmdResponse->errorCode);
    // Translate the FSP error code into an NvlStatus for the caller.
    status = nvswitch_fsp_error_code_to_nvlstatus_map(device, pCmdResponse->errorCode);
    if (status == NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, INFO, "Last command was processed by FSP successfully!\n");
    }
    else if (status != -NVL_NOT_FOUND)
    {
        // NOTE(review): -NVL_NOT_FOUND is returned to the caller but
        // deliberately skips the error print -- confirm callers treat it
        // as a non-fatal lookup miss.
        NVSWITCH_PRINT(device, ERROR, "FSP response reported error. Task ID: 0x%0x Command type: 0x%0x Error code: 0x%0x\n",
            pCmdResponse->taskId, pCmdResponse->commandNvdmType, pCmdResponse->errorCode);
    }
    return status;
}
/*!
* @brief Configure EMEMC for RM's queue in FSP EMEM
*
* @param[in] device nvswitch device pointer
* @param[in] offset Offset to write to EMEMC in DWORDS
* @param[in] bAincw Flag to set auto-increment on writes
* @param[in] bAincr Flag to set auto-increment on reads
*
* @return NVL_SUCCESS
*/
NvlStatus
nvswitch_fsp_config_ememc_ls10
(
    nvswitch_device *device,
    NvU32 offset,
    NvBool bAincw,
    NvBool bAincr
)
{
    NvU32 ememc = 0;

    //
    // EMEMC addresses are expressed as a block index (one block = 64
    // DWORDS) plus a DWORD index within that block, so split the flat
    // DWORD offset into the two components.
    //
    NvU32 blockIdx = offset / 64;
    NvU32 dwordIdx = offset % 64;

    ememc = FLD_SET_DRF_NUM(_PFSP, _EMEMC, _OFFS, dwordIdx, ememc);
    ememc = FLD_SET_DRF_NUM(_PFSP, _EMEMC, _BLK, blockIdx, ememc);

    // Optional address auto-increment on writes and/or reads.
    if (bAincw)
    {
        ememc = FLD_SET_DRF(_PFSP, _EMEMC, _AINCW, _TRUE, ememc);
    }
    if (bAincr)
    {
        ememc = FLD_SET_DRF(_PFSP, _EMEMC, _AINCR, _TRUE, ememc);
    }

    NVSWITCH_REG_WR32(device, _PFSP, _EMEMC(FSP_EMEM_CHANNEL_RM), ememc);
    return NVL_SUCCESS;
}
/*!
 * @brief Write data in buffer to RM channel in FSP's EMEM
 *
 * @param[in] device  nvswitch device pointer
 * @param[in] pBuffer Buffer with data to write to EMEM
 * @param[in] size    Size of buffer in bytes, assumed DWORD aligned
 *
 * @return NVL_SUCCESS
 */
NvlStatus
nvswitch_fsp_write_to_emem_ls10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    NvU32 numDwords = size / sizeof(NvU32);
    NvU32 *pData = (NvU32 *)(void *)pBuffer;
    NvU32 ememcVal;
    NvU32 endOffset;
    NvU32 idx;

    //
    // First configure EMEMC, RM always writes 0 to the offset, which is OK
    // because RM's channel starts at 0 on GH100 and we always start from the
    // beginning for each packet. It should be improved later to use EMEMR to
    // properly fetch the lower and higher bounds of the EMEM channel
    //
    nvswitch_fsp_config_ememc(device, 0, NV_TRUE, NV_FALSE);

    NVSWITCH_PRINT(device, INFO, "About to send data to FSP, ememcOff=0, size=0x%x\n", size);
    if (!NV_IS_ALIGNED(size, sizeof(NvU32)))
    {
        NVSWITCH_PRINT(device, WARN, "Size=0x%x is not DWORD-aligned, data will be truncated!\n", size);
    }

    // Push the payload one DWORD at a time; EMEMC auto-increments the offset
    for (idx = 0; idx < numDwords; idx++)
    {
        NVSWITCH_REG_WR32(device, _PFSP, _EMEMD(FSP_EMEM_CHANNEL_RM), pData[idx]);
    }

    // Sanity check offset. If this fails, the autoincrement did not work
    ememcVal = NVSWITCH_REG_RD32(device, _PFSP, _EMEMC(FSP_EMEM_CHANNEL_RM));
    endOffset = DRF_VAL(_PFSP, _EMEMC, _OFFS, ememcVal);
    endOffset += DRF_VAL(_PFSP, _EMEMC, _BLK, ememcVal) * 64; // blocks are 64 DWORDS
    NVSWITCH_PRINT(device, INFO, "After sending data, ememcOff = 0x%x\n", endOffset);
    NVSWITCH_ASSERT(endOffset == numDwords);

    return NVL_SUCCESS;
}
/*!
 * @brief Read data to buffer from RM channel in FSP's EMEM
 *
 * @param[in]     device  nvswitch_device pointer
 * @param[in,out] pBuffer Buffer where we copy data from EMEM
 * @param[in]     size    Size to read in bytes, assumed DWORD aligned
 *
 * @return NVL_SUCCESS
 */
NvlStatus
nvswitch_fsp_read_from_emem_ls10
(
    nvswitch_device *device,
    NvU8 *pBuffer,
    NvU32 size
)
{
    NvU32 numDwords = size / sizeof(NvU32);
    NvU32 *pData = (NvU32 *)(void *)pBuffer;
    NvU32 ememcVal;
    NvU32 endOffset;
    NvU32 idx;

    //
    // First configure EMEMC, RM always writes 0 to the offset, which is OK
    // because RM's channel starts at 0 on GH100 and we always start from the
    // beginning for each packet. It should be improved later to use EMEMR to
    // properly fetch the lower and higher bounds of the EMEM channel
    //
    nvswitch_fsp_config_ememc(device, 0, NV_FALSE, NV_TRUE);

    NVSWITCH_PRINT(device, INFO, "About to read data from FSP, ememcOff=0, size=0x%x\n", size);
    if (!NV_IS_ALIGNED(size, sizeof(NvU32)))
    {
        NVSWITCH_PRINT(device, WARN, "Size=0x%x is not DWORD-aligned, data will be truncated!\n", size);
    }

    // Pull the payload one DWORD at a time; EMEMC auto-increments the offset
    for (idx = 0; idx < numDwords; idx++)
    {
        pData[idx] = NVSWITCH_REG_RD32(device, _PFSP, _EMEMD(FSP_EMEM_CHANNEL_RM));
    }

    // Sanity check offset. If this fails, the autoincrement did not work
    ememcVal = NVSWITCH_REG_RD32(device, _PFSP, _EMEMC(FSP_EMEM_CHANNEL_RM));
    endOffset = DRF_VAL(_PFSP, _EMEMC, _OFFS, ememcVal);
    endOffset += DRF_VAL(_PFSP, _EMEMC, _BLK, ememcVal) * 64; // blocks are 64 DWORDS
    NVSWITCH_PRINT(device, INFO, "After reading data, ememcOff = 0x%x\n", endOffset);
    NVSWITCH_ASSERT(endOffset == numDwords);

    return NVL_SUCCESS;
}
/*!
 * @brief Translate an FSP error code into its closest NvlStatus equivalent.
 *
 * @param[in] device    nvswitch device pointer (unused)
 * @param[in] errorCode Raw error code reported by FSP
 *
 * @return Mapped NvlStatus; -NVL_ERR_GENERIC for unrecognized codes
 */
NvlStatus
nvswitch_fsp_error_code_to_nvlstatus_map_ls10
(
    nvswitch_device *device,
    NvU32 errorCode
)
{
    if (errorCode == FSP_OK)
    {
        return NVL_SUCCESS;
    }

    if (errorCode == FSP_ERR_IFR_FILE_NOT_FOUND)
    {
        return -NVL_NOT_FOUND;
    }

    if ((errorCode == FSP_ERR_IFS_ERR_INVALID_STATE) ||
        (errorCode == FSP_ERR_IFS_ERR_INVALID_DATA))
    {
        return -NVL_ERR_INVALID_STATE;
    }

    // Anything unrecognized maps to a generic failure
    return -NVL_ERR_GENERIC;
}
/*!
 * @brief Query FSP capabilities over the RPC channel.
 *
 * @param[in]  device nvswitch device pointer
 * @param[out] params Filled with the NVDM types, error code, and payload
 *                    from the FSP caps response
 *
 * @return NVL_SUCCESS, or the status of the failing RPC
 */
NvlStatus
nvswitch_fsprpc_get_caps_ls10
(
    nvswitch_device *device,
    NVSWITCH_FSPRPC_GET_CAPS_PARAMS *params
)
{
    TNVL_RPC_CAPS_PAYLOAD capsRequest;
    TNVL_RPC_CAPS_RSP_PAYLOAD capsResponse;
    NvlStatus status;

    // Build the caps query request and zero the response ahead of the RPC
    capsRequest.subMessageId = TNVL_CAPS_SUBMESSAGE_ID;
    nvswitch_os_memset(&capsResponse, 0, sizeof(TNVL_RPC_CAPS_RSP_PAYLOAD));

    status = nvswitch_fsp_send_and_read_message(device,
        (NvU8*) &capsRequest, sizeof(TNVL_RPC_CAPS_PAYLOAD), NVDM_TYPE_CAPS_QUERY,
        (NvU8*) &capsResponse, sizeof(TNVL_RPC_CAPS_RSP_PAYLOAD));
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR, "RPC failed for FSP caps query\n");
        return status;
    }

    // Unpack the response into the caller-visible params structure
    params->responseNvdmType = capsResponse.nvdmType;
    params->commandNvdmType = capsResponse.cmdResponse.commandNvdmType;
    params->errorCode = capsResponse.cmdResponse.errorCode;
    params->pRspPayload = capsResponse.rspPayload;

    return NVL_SUCCESS;
}

View File

@@ -0,0 +1,86 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "export_nvswitch.h"
#include "ls10/soe_ls10.h"
#include "soe/soeifheartbeat.h"
/*!
 * @brief Callback function to receive Heartbeat messages from SOE.
 *
 * Logs an SXID entry for each OSFP thermal heartbeat event reported by SOE.
 *
 * @param[in] device  nvswitch device pointer
 * @param[in] pGenMsg Generic FLCN message; actually an RM_FLCN_MSG_SOE
 * @param[in] pParams Callback parameter (not referenced here)
 * @param[in] seqDesc Sequence descriptor (not referenced here)
 * @param[in] status  Message status (not referenced here)
 */
void
nvswitch_heartbeat_soe_callback_ls10
(
    nvswitch_device *device,
    RM_FLCN_MSG *pGenMsg,
    void *pParams,
    NvU32 seqDesc,
    NV_STATUS status
)
{
    RM_FLCN_MSG_SOE *pMsg = (RM_FLCN_MSG_SOE *)pGenMsg;

    // Map each heartbeat message type to its corresponding SXID log entry
    switch (pMsg->msg.heartbeat.msgType)
    {
        case RM_SOE_HEARTBEAT_MSG_ID_OSFP_THERM_WARN_ACTIVATED:
        {
            NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_OSFP_THERM_WARN_ACTIVATED,
                                "OSFP Thermal Warn Activated\n");
            break;
        }
        case RM_SOE_HEARTBEAT_MSG_ID_OSFP_THERM_WARN_DEACTIVATED:
        {
            NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_OSFP_THERM_WARN_DEACTIVATED,
                                "OSFP Thermal Warn Deactivated\n");
            break;
        }
        case RM_SOE_HEARTBEAT_MSG_ID_OSFP_THERM_OVERT_ACTIVATED:
        {
            NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_OSFP_THERM_OVERT_ACTIVATED,
                                "OSFP Thermal Overt Activated\n");
            break;
        }
        case RM_SOE_HEARTBEAT_MSG_ID_OSFP_THERM_OVERT_DEACTIVATED:
        {
            NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_OSFP_THERM_OVERT_DEACTIVATED,
                                "OSFP Thermal Overt Deactivated\n");
            break;
        }
        case RM_SOE_HEARTBEAT_MSG_ID_OSFP_THERM_HEARTBEAT_SHUTDOWN:
        {
            NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_OSFP_THERM_HEARTBEAT_SHUTDOWN,
                                "OSFP Thermal SOE Heartbeat Shutdown\n");
            break;
        }
        default:
        {
            // An unrecognized message ID indicates an RM/SOE interface mismatch
            NVSWITCH_PRINT(device, ERROR, "%s Unknown message Id\n", __FUNCTION__);
            NVSWITCH_ASSERT(0);
        }
    }
}

View File

@@ -25,6 +25,7 @@
#include "intr_nvswitch.h"
#include "regkey_nvswitch.h"
#include "soe/soe_nvswitch.h"
#include "cci/cci_nvswitch.h"
#include "ls10/ls10.h"
#include "ls10/minion_ls10.h"
@@ -1264,12 +1265,12 @@ _nvswitch_service_route_fatal_ls10
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_ROUTE_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_ROUTE_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -1478,7 +1479,7 @@ _nvswitch_service_route_nonfatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -1743,12 +1744,12 @@ _nvswitch_service_ingress_fatal_ls10
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_INGRESS_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_INGRESS_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -2046,7 +2047,7 @@ _nvswitch_service_ingress_nonfatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -2189,7 +2190,7 @@ _nvswitch_service_ingress_nonfatal_ls10_err_status_1:
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_1,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -2329,7 +2330,7 @@ _nvswitch_service_tstate_nonfatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -2519,12 +2520,12 @@ _nvswitch_service_tstate_fatal_ls10
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_TSTATE_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_TSTATE_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -2653,7 +2654,7 @@ _nvswitch_service_egress_nonfatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -2898,7 +2899,7 @@ _nvswitch_service_egress_nonfatal_ls10_err_status_1:
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_1,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -3175,12 +3176,12 @@ _nvswitch_service_egress_fatal_ls10
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_EGRESS_0_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_EGRESS_0_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -3306,12 +3307,12 @@ _nvswitch_service_egress_fatal_ls10_err_status_1:
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_EGRESS_1_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_EGRESS_1_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_FATAL_REPORT_EN_1,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -3410,7 +3411,7 @@ _nvswitch_service_sourcetrack_nonfatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -3538,12 +3539,12 @@ _nvswitch_service_sourcetrack_fatal_ls10
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_SOURCETRACK_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_SOURCETRACK_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -3690,7 +3691,7 @@ _nvswitch_service_multicast_nonfatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -3830,12 +3831,12 @@ _nvswitch_service_multicast_fatal_ls10
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_MULTICAST_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_MULTICAST_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -3976,7 +3977,7 @@ _nvswitch_service_reduction_nonfatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -4131,12 +4132,12 @@ _nvswitch_service_reduction_fatal_ls10
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_disable_nport_fatal_interrupts_ls10(device, link,
report.raw_enable ^ pending, RM_SOE_CORE_NPORT_REDUCTION_INTERRUPT);
report.raw_enable & ~pending, RM_SOE_CORE_NPORT_REDUCTION_INTERRUPT);
}
else
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
}
@@ -4534,7 +4535,7 @@ _nvswitch_service_nvltlc_tx_sys_fatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_STATUS_0, pending);
@@ -4654,7 +4655,7 @@ _nvswitch_service_nvltlc_rx_sys_fatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_STATUS_0, pending);
@@ -4755,7 +4756,7 @@ _nvswitch_service_nvltlc_tx_lnk_fatal_0_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -4942,7 +4943,7 @@ _nvswitch_service_nvltlc_rx_lnk_fatal_0_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -5045,7 +5046,7 @@ _nvswitch_service_nvltlc_rx_lnk_fatal_1_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_1,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -5208,7 +5209,7 @@ _nvswitch_service_nvlipt_common_fatal_ls10
(device->link[link].fatal_error_occurred))
{
NVSWITCH_ENG_WR32(device, NVLIPT, , instance, _NVLIPT_COMMON, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
break;
}
}
@@ -5331,7 +5332,7 @@ _nvswitch_service_nxbar_tile_ls10
// Disable interrupts that have occurred after fatal error.
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
NVSWITCH_TILE_WR32(device, tile, _NXBAR_TILE, _ERR_FATAL_INTR_EN,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
NVSWITCH_TILE_WR32(device, link, _NXBAR_TILE, _ERR_STATUS, pending);
@@ -5435,7 +5436,7 @@ _nvswitch_service_nxbar_tileout_ls10
// Disable interrupts that have occurred after fatal error.
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
NVSWITCH_TILEOUT_WR32(device, tileout, _NXBAR_TILEOUT, _ERR_FATAL_INTR_EN,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
NVSWITCH_TILEOUT_WR32(device, tileout, _NXBAR_TILEOUT, _ERR_STATUS, pending);
@@ -5838,6 +5839,12 @@ _nvswitch_deferred_link_state_check_ls10
chip_device->deferredLinkErrors[link].state.bLinkStateCallBackEnabled = NV_FALSE;
bRedeferLinkStateCheck = NV_FALSE;
// Ask CCI if link state check should be further deferred
if (cciIsLinkManaged(device, link) && !cciReportLinkErrors(device, link))
{
bRedeferLinkStateCheck = NV_TRUE;
}
// Link came up after last retrain
if (lastLinkUpTime >= lastRetrainTime)
{
@@ -6310,7 +6317,7 @@ _nvswitch_service_nvltlc_tx_lnk_nonfatal_0_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -6532,7 +6539,7 @@ _nvswitch_service_nvltlc_tx_lnk_nonfatal_1_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_1,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (report.raw_first & report.mask)
@@ -7103,7 +7110,7 @@ _nvswitch_service_nvlipt_lnk_fatal_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
// clear interrupts
@@ -7768,16 +7775,29 @@ nvswitch_service_nvldl_fatal_link_ls10
NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_DOWN event\n",
__FUNCTION__);
}
dlDeferredIntrLinkMask |= bit;
dlDeferredIntrLinkMask |= bit;
//
// Disable LTSSM FAULT DOWN, NPG, and NVLW interrupts to avoid interrupt storm. The interrupts
// will be re-enabled in reset and drain
//
report.raw_enable = FLD_SET_DRF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_FAULT_DOWN, _DISABLE, report.raw_enable);
NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR_STALL_EN, report.raw_enable);
nvswitch_link_disable_interrupts_ls10(device, link);
if (device->bModeContinuousALI)
{
//
// Since reset and drain will reset the link, including clearing
// pending interrupts, skip the clear write below. There are cases
// where link clocks will not be on after reset and drain so there
// maybe PRI errors on writing to the register
//
{
bRequireResetAndDrain = NV_TRUE;
// CCI will perform reset and drain
if (!cciIsLinkManaged(device, link))
{
bRequireResetAndDrain = NV_TRUE;
}
}
nvswitch_clear_flags(&unhandled, bit);
}
@@ -7792,25 +7812,41 @@ nvswitch_service_nvldl_fatal_link_ls10
NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_DOWN event\n",
__FUNCTION__);
}
dlDeferredIntrLinkMask |= bit;
dlDeferredIntrLinkMask |= bit;
//
// Disable LTSSM FAULT UP, NPG, and NVLW link interrupts to avoid interrupt storm. The interrupts
// will be re-enabled in reset and drain
//
report.raw_enable = FLD_SET_DRF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_FAULT_UP, _DISABLE, report.raw_enable);
NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR_STALL_EN, report.raw_enable);
nvswitch_link_disable_interrupts_ls10(device, link);
if (device->bModeContinuousALI)
{
//
// Since reset and drain will reset the link, including clearing
// pending interrupts, skip the clear write below. There are cases
// where link clocks will not be on after reset and drain so there
// maybe PRI errors on writing to the register
//
{
bRequireResetAndDrain = NV_TRUE;
// CCI will perform reset and drain
if (!cciIsLinkManaged(device, link))
{
bRequireResetAndDrain = NV_TRUE;
}
}
nvswitch_clear_flags(&unhandled, bit);
}
if (bRequireResetAndDrain)
{
chip_device->deferredLinkErrors[link].data.fatalIntrMask.dl |= dlDeferredIntrLinkMask;
device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link));
chip_device->deferredLinkErrors[link].state.lastRetrainTime = nvswitch_os_get_platform_time();
device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link), NV_FALSE);
}
chip_device->deferredLinkErrors[link].data.fatalIntrMask.dl |= dlDeferredIntrLinkMask;
if (dlDeferredIntrLinkMask)
{
nvswitch_create_deferred_link_state_check_task_ls10(device, nvlipt_instance, link);
}
@@ -7820,7 +7856,7 @@ nvswitch_service_nvldl_fatal_link_ls10
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR_STALL_EN,
report.raw_enable ^ pending);
report.raw_enable & ~pending);
}
if (!bRequireResetAndDrain)

View File

@@ -28,6 +28,7 @@
#include "regkey_nvswitch.h"
#include "ls10/ls10.h"
#include "nvswitch/ls10/dev_nvldl_ip_addendum.h"
#include "cci/cci_nvswitch.h"
#include "nvswitch/ls10/dev_nvldl_ip.h"
#include "nvswitch/ls10/dev_nvlipt_lnk_ip.h"
@@ -98,54 +99,52 @@ _nvswitch_configure_reserved_throughput_counters
DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _ENABLE, _ENABLE));
}
/*!
 * @brief Capture the initial/VBIOS-programmed L1 enter threshold into the
 *        warm scratch register.
 *
 * The scratch register is written only while it still holds its init value,
 * so the original threshold is saved exactly once and is preserved across
 * later threshold changes.
 *
 * @param[in] device     nvswitch device pointer
 * @param[in] linkNumber link whose threshold is captured
 */
void
nvswitch_program_l1_scratch_reg_ls10
(
    nvswitch_device *device,
    NvU32 linkNumber
)
{
    NvU32 scrRegVal;
    NvU32 tempRegVal;

    // Read L1 register and store initial/VBIOS L1 Threshold Value in Scratch register
    tempRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLIPT_LNK, _NVLIPT_LNK, _PWRM_L1_ENTER_THRESHOLD);
    scrRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLIPT_LNK, _NVLIPT_LNK, _SCRATCH_WARM);

    // Update the scratch register value only if it has not been written to before
    if (scrRegVal == NV_NVLIPT_LNK_SCRATCH_WARM_DATA_INIT)
    {
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLIPT_LNK, _NVLIPT_LNK, _SCRATCH_WARM, tempRegVal);
    }
}
#define BUG_3797211_LS10_VBIOS_VERSION 0x9610410000
void
nvswitch_init_lpwr_regs_ls10
(
nvlink_link *link
)
{
nvswitch_device *device = link->dev->pDevInfo;
// NVSWITCH_BIOS_NVLINK_CONFIG *bios_config;
NvU32 linkNum = link->linkNumber;
NvU32 tempRegVal, lpEntryThreshold;
NvU8 softwareDesired;
NvBool bLpEnable;
NvlStatus status;
nvswitch_device *device;
if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
if (link == NULL)
{
return;
}
if (device->regkeys.enable_pm == NV_SWITCH_REGKEY_ENABLE_PM_NO)
device = link->dev->pDevInfo;
status = nvswitch_ctrl_set_link_l1_threshold_ls10(link, device->regkeys.lp_threshold);
if (status != NVL_SUCCESS)
{
return;
NVSWITCH_PRINT(device, ERROR, "%s: Failed to set L1 Threshold\n",
__FUNCTION__);
}
// bios_config = nvswitch_get_bios_nvlink_config(device);
// IC Enter Threshold
if (device->regkeys.lp_threshold == NV_SWITCH_REGKEY_SET_LP_THRESHOLD_DEFAULT)
{
//
// Do nothing since VBIOS sets the default L1 threshold.
// Refer Bug 3797211 for more info.
//
}
else
{
lpEntryThreshold = device->regkeys.lp_threshold;
tempRegVal = 0;
tempRegVal = FLD_SET_DRF_NUM(_NVLIPT, _LNK_PWRM_L1_ENTER_THRESHOLD, _THRESHOLD, lpEntryThreshold, tempRegVal);
NVSWITCH_LINK_WR32_LS10(device, linkNum, NVLIPT_LNK, _NVLIPT_LNK, _PWRM_L1_ENTER_THRESHOLD, tempRegVal);
}
//LP Entry Enable
bLpEnable = NV_TRUE;
softwareDesired = (bLpEnable) ? 0x1 : 0x0;
tempRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNum, NVLIPT_LNK, _NVLIPT_LNK, _PWRM_CTRL);
tempRegVal = FLD_SET_DRF_NUM(_NVLIPT, _LNK_PWRM_CTRL, _L1_SOFTWARE_DESIRED, softwareDesired, tempRegVal);
NVSWITCH_LINK_WR32_LS10(device, linkNum, NVLIPT_LNK, _NVLIPT_LNK, _PWRM_CTRL, tempRegVal);
}
void
@@ -573,6 +572,15 @@ nvswitch_corelib_get_tl_link_mode_ls10
link_state = DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE,
val);
if (cciIsLinkManaged(device, link->linkNumber))
{
if (link_state == NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_RESET)
{
*mode = NVLINK_LINKSTATE_RESET;
return NVL_SUCCESS;
}
}
switch(link_state)
{
case NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_ACTIVE:
@@ -1434,6 +1442,7 @@ nvswitch_load_link_disable_settings_ls10
if (FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, _DISABLE, regVal))
{
NVSWITCH_ASSERT(!cciIsLinkManaged(device, link->linkNumber));
// Set link to invalid and unregister from corelib
device->link[link->linkNumber].valid = NV_FALSE;
@@ -1544,6 +1553,12 @@ nvswitch_reset_and_train_link_ls10
nvswitch_execute_unilateral_link_shutdown_ls10(link);
nvswitch_corelib_clear_link_state_ls10(link);
// If link is CCI managed then return early
if (cciIsLinkManaged(device, link->linkNumber))
{
return NVL_SUCCESS;
}
//
// When a link faults there could be a race between the driver requesting
// reset and MINION processing Emergency Shutdown. Minion will notify if
@@ -1738,6 +1753,176 @@ nvswitch_request_tl_link_state_ls10
return status;
}
// Initialize optical links for pre-training
/*!
 * @brief Run the MINION-side bring-up steps needed before a CCI-managed
 *        (optical) link can be pre-trained.
 *
 * Sequence: INITPHASE1 -> RXDET -> DL PLL init (common mode) -> INITPHASE5A.
 * Returns the status of the first failing step.
 *
 * @param[in] device     nvswitch device pointer
 * @param[in] linkNumber link to initialize
 *
 * @return NVL_SUCCESS, or the failing command's status
 */
NvlStatus
nvswitch_cci_initialization_sequence_ls10
(
    nvswitch_device *device,
    NvU32 linkNumber
)
{
    NvlStatus status;
    nvlink_link link;
    nvlink_device dev;

    //
    // NOTE(review): link/dev are stack-allocated and only linkNumber and
    // dev->pDevInfo are populated; this assumes _nvswitch_init_dl_pll reads
    // no other fields of the link object — confirm against its implementation.
    //
    link.linkNumber = linkNumber;
    link.dev = &dev;
    link.dev->pDevInfo = device;

    // Perform INITPHASE1
    status = nvswitch_minion_send_command(device, linkNumber,
        NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE1, 0);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s : NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE1 failed on link %d.\n",
            __FUNCTION__, linkNumber);
        return status;
    }

    // SET RX detect
    status = nvswitch_minion_send_command(device, linkNumber,
        NV_MINION_NVLINK_DL_CMD_COMMAND_TURING_RXDET, 0);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Set RXDET failed for link %d.\n",
            __FUNCTION__, linkNumber);
        return status;
    }

    // Enable Common mode on Tx
    status = _nvswitch_init_dl_pll(&link);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Failed to enable common mode for link %d.\n",
            __FUNCTION__, linkNumber);
        return status;
    }

    status = nvswitch_minion_send_command(device, linkNumber,
        NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE5A, 0);
    if (status != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s : NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE5A failed on link %d.\n",
            __FUNCTION__, linkNumber);
        return status;
    }

    return NVL_SUCCESS;
}
/*!
 * @brief Undo CCI link pre-training initialization by aborting receiver
 *        detect on the given link.
 *
 * @param[in] device     nvswitch device pointer
 * @param[in] linkNumber link to de-initialize
 *
 * @return NVL_SUCCESS, or the status of the failing MINION command
 */
NvlStatus
nvswitch_cci_deinitialization_sequence_ls10
(
    nvswitch_device *device,
    NvU32 linkNumber
)
{
    nvlink_device dev;
    nvlink_link link;
    NvlStatus minionStatus;

    // Minimal stack-local link object (only linkNumber and dev are filled in)
    link.linkNumber = linkNumber;
    link.dev = &dev;
    link.dev->pDevInfo = device;

    // Perform ABORTRXDET
    minionStatus = nvswitch_minion_send_command(device, linkNumber,
                       NV_MINION_NVLINK_DL_CMD_COMMAND_ABORTRXDET, 0);
    if (minionStatus != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s : NV_MINION_NVLINK_DL_CMD_COMMAND_ABORTRXDET failed on link %d.\n",
            __FUNCTION__, linkNumber);
        return minionStatus;
    }

    return NVL_SUCCESS;
}
/*!
 * @brief Enable or disable the TX IOBIST pattern generator on a link.
 *
 * When enabling: configures the IOBIST clocking/PRBS mode, programs the
 * skip/COM symbol inserter registers, arms the BIST mode, pulses the PRBS
 * seed load (with a 5 ms settle), then starts the test. When disabling:
 * stops the test and clears the BIST enables.
 *
 * The register write order below follows the hardware programming sequence;
 * do not reorder.
 *
 * @param[in] device     nvswitch device pointer
 * @param[in] linkNumber link whose IOBIST is configured
 * @param[in] bEnable    NV_TRUE to start IOBIST, NV_FALSE to stop it
 *
 * @return NVL_SUCCESS
 */
NvlStatus
nvswitch_cci_enable_iobist_ls10
(
    nvswitch_device *device,
    NvU32 linkNumber,
    NvBool bEnable
)
{
    NvU32 val;

    if (bEnable)
    {
        // Clock gating + PAM4 PRBS pattern selection
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIG, _CFGCLKGATEEN, _ENABLE, val);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIG, _PRBSALT, _PAM4, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG, val);

        // Program the skip/COM symbol inserter generators
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4, _MASK_SKIP_OUT, _INIT, val);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4, _MASK_COM_OUT, _INIT, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4, val);
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_2);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_2, _SKIP_SYMBOL_0, _SYMBOL, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_2, val);
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_3);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_3, _SKIP_SYMBOL_1, _SYMBOL, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_3, val);
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_0);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_0, _COM_SYMBOL_0, _SYMBOL, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_0, val);
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_1);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_1, _COM_SYMBOL_1, _SYMBOL, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_1, val);
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4, _SEND_DATA_OUT, _INIT, val);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4, _RESET_WORD_CNT_OUT, _COUNT, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _SKIPCOMINSERTERGEN_4, val);

        // Arm BIST mode
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIGREG);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIGREG, _TX_BIST_EN_IN, _ENABLE, val);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIGREG, _DISABLE_WIRED_ENABLE_IN, _ENABLE, val);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIGREG, _IO_BIST_MODE_IN, _ENABLE, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIGREG, val);

        // Pulse the PRBS seed load; the 5 ms sleep lets the seed latch
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIG, _DPG_PRBSSEEDLD, _ENABLE, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG, val);
        nvswitch_os_sleep(5);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIG, _DPG_PRBSSEEDLD, _INIT, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG, val);

        // Start the test
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIG, _STARTTEST, _ENABLE, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG, val);
    }
    else
    {
        // Stop the test, then clear the BIST enables
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIG, _STARTTEST, _INIT, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIG, val);
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIGREG);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIGREG, _DISABLE_WIRED_ENABLE_IN, _INIT, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIGREG, val);
        val = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIGREG);
        val = FLD_SET_DRF(_NVLDL_TXIOBIST, _CONFIGREG, _TX_BIST_EN_IN, _INIT, val);
        NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TXIOBIST, _CONFIGREG, val);
    }
    return NVL_SUCCESS;
}
NvBool
nvswitch_does_link_need_termination_enabled_ls10
(
@@ -1759,3 +1944,116 @@ nvswitch_link_termination_setup_ls10
// Not supported for LS10
return -NVL_ERR_NOT_SUPPORTED;
}
/*
 * @brief Report whether a link is capable of entering the L1 low-power state.
 *
 * A link is L1-capable only when L1 is enabled at the system level AND the
 * local link capability register advertises L1 support.
 *
 * @param[in]  device       nvswitch device reference
 * @param[in]  linkNum      link number to query
 * @param[out] isL1Capable  set to NV_TRUE iff both conditions hold
 *
 * @returns NVL_SUCCESS always
 */
NvlStatus
nvswitch_ctrl_get_link_l1_capability_ls10
(
    nvswitch_device *device,
    NvU32 linkNum,
    NvBool *isL1Capable
)
{
    NvU32 an1CtrlVal;
    NvU32 localCapVal;

    // System-level L1 enable
    an1CtrlVal = NVSWITCH_LINK_RD32_LS10(device, linkNum, NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SYSTEM_LINK_AN1_CTRL);
    // Local link L1 support capability
    localCapVal = NVSWITCH_LINK_RD32_LS10(device, linkNum, NVLIPT_LNK, _NVLIPT_LNK, _CTRL_CAP_LOCAL_LINK_AN1);

    *isL1Capable =
        FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_AN1_CTRL, _PWRM_L1_ENABLE, _ENABLE, an1CtrlVal) &&
        FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CAP_LOCAL_LINK_AN1, _PWRM_L1_SUPPORT, _SUPPORTED, localCapVal);

    return NVL_SUCCESS;
}
/*
 * @brief Read back the currently-programmed L1 entry threshold for a link.
 *
 * @param[in]  device       nvswitch device reference
 * @param[in]  linkNum      link number to query
 * @param[out] lpThreshold  threshold field extracted from the PWRM register
 *
 * @returns NVL_SUCCESS always
 */
NvlStatus
nvswitch_ctrl_get_link_l1_threshold_ls10
(
    nvswitch_device *device,
    NvU32 linkNum,
    NvU32 *lpThreshold
)
{
    NvU32 thresholdReg;

    // The threshold lives in the per-link PWRM L1 enter-threshold register.
    thresholdReg = NVSWITCH_LINK_RD32_LS10(device, linkNum, NVLIPT_LNK,
                                           _NVLIPT_LNK, _PWRM_L1_ENTER_THRESHOLD);
    *lpThreshold = DRF_VAL(_NVLIPT, _LNK_PWRM_L1_ENTER_THRESHOLD, _THRESHOLD, thresholdReg);

    return NVL_SUCCESS;
}
/*
 * @brief Program the L1 (low power) entry threshold for a link and enable
 *        software-desired L1 entry.
 *
 * Threshold selection precedence:
 *   1. lp_threshold regkey override, if set
 *   2. caller-supplied lpEntryThreshold (when not the "default" sentinel)
 *   3. VBIOS-programmed default (scratch register) on new-enough VBIOS,
 *      otherwise a hard-coded fallback of 1
 *
 * @param[in] link              core lib link whose device/link number are used
 * @param[in] lpEntryThreshold  requested threshold or a default sentinel
 *
 * @returns NVL_SUCCESS on success
 *          -NVL_ERR_NOT_SUPPORTED on pre-silicon platforms or when PM is
 *          disabled via regkey
 */
NvlStatus
nvswitch_ctrl_set_link_l1_threshold_ls10
(
    nvlink_link *link,
    NvU32 lpEntryThreshold
)
{
    nvswitch_device *device = link->dev->pDevInfo;
    NvU32 linkNum = link->linkNumber;
    NvU32 tempRegVal, lpThreshold;
    NvU8 softwareDesired;
    NvU64 biosVersion;

    // L1 is not supported on simulation/emulation platforms
    if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
    {
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // Power management explicitly disabled via regkey
    if (device->regkeys.enable_pm == NV_SWITCH_REGKEY_ENABLE_PM_NO)
    {
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // Version is only needed to decide where the default threshold comes from;
    // treat a read failure as "old VBIOS" (version 0).
    if (nvswitch_lib_get_bios_version(device, &biosVersion) != NVL_SUCCESS)
    {
        NVSWITCH_PRINT(device, WARN, "%s Get VBIOS version failed.\n",
            __FUNCTION__);
        biosVersion = 0;
    }

    // Regkey override takes precedence over everything else
    if (device->regkeys.lp_threshold != NV_SWITCH_REGKEY_SET_LP_THRESHOLD_DEFAULT)
    {
        lpThreshold = device->regkeys.lp_threshold;
    }
    else if ((lpEntryThreshold == NVSWITCH_SET_NVLINK_L1_THRESHOLD_DEFAULT) ||
        (lpEntryThreshold == NV_SWITCH_REGKEY_SET_LP_THRESHOLD_DEFAULT))
    {
        if (biosVersion >= BUG_3797211_LS10_VBIOS_VERSION)
        {
            //
            // Read the default L1 Threshold programmed by the
            // VBIOS (version 96.10.41.00.00 and above).
            //
            lpThreshold = NVSWITCH_LINK_RD32_LS10(device, linkNum, NVLIPT_LNK,
                            _NVLIPT_LNK, _SCRATCH_WARM);
        }
        else
        {
            // Older VBIOS: fall back to the legacy default
            lpThreshold = 1;
        }
    }
    else
    {
        // Explicit caller-supplied threshold
        lpThreshold = lpEntryThreshold;
    }

    // Program the selected threshold
    tempRegVal = 0;
    tempRegVal = FLD_SET_DRF_NUM(_NVLIPT, _LNK_PWRM_L1_ENTER_THRESHOLD,
        _THRESHOLD, lpThreshold, tempRegVal);
    NVSWITCH_LINK_WR32_LS10(device, linkNum, NVLIPT_LNK, _NVLIPT_LNK,
        _PWRM_L1_ENTER_THRESHOLD, tempRegVal);

    //LP Entry Enable
    softwareDesired = NV_NVLIPT_LNK_PWRM_CTRL_L1_SOFTWARE_DESIRED_L1;
    tempRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNum, NVLIPT_LNK,
                    _NVLIPT_LNK, _PWRM_CTRL);
    tempRegVal = FLD_SET_DRF_NUM(_NVLIPT, _LNK_PWRM_CTRL, _L1_SOFTWARE_DESIRED,
                    softwareDesired, tempRegVal);
    NVSWITCH_LINK_WR32_LS10(device, linkNum, NVLIPT_LNK, _NVLIPT_LNK,
        _PWRM_CTRL, tempRegVal);

    return NVL_SUCCESS;
}

View File

@@ -37,11 +37,14 @@
#include "ls10/pmgr_ls10.h"
#include "ls10/therm_ls10.h"
#include "ls10/smbpbi_ls10.h"
#include "ls10/cci_ls10.h"
#include "cci/cci_nvswitch.h"
#include "ls10/multicast_ls10.h"
#include "ls10/soe_ls10.h"
#include "ls10/gfw_ls10.h"
#include "nvswitch/ls10/dev_nvs_top.h"
#include "nvswitch/ls10/ptop_discovery_ip.h"
#include "nvswitch/ls10/dev_pri_masterstation_ip.h"
#include "nvswitch/ls10/dev_pri_hub_sys_ip.h"
#include "nvswitch/ls10/dev_nvlw_ip.h"
@@ -1405,10 +1408,12 @@ static NvlStatus
_nvswitch_reset_and_drain_links_ls10
(
nvswitch_device *device,
NvU64 link_mask
NvU64 link_mask,
NvBool bForced
)
{
NvlStatus status = NVL_SUCCESS;
ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
nvlink_link *link_info = NULL;
NvU32 link;
NvU32 data32;
@@ -1465,6 +1470,11 @@ _nvswitch_reset_and_drain_links_ls10
continue;
}
if (nvswitch_is_link_in_reset(device, link_info))
{
continue;
}
// Unregister links to make them unusable while reset is in progress.
nvlink_lib_unregister_link(link_info);
@@ -1503,46 +1513,54 @@ _nvswitch_reset_and_drain_links_ls10
// Step 3.0 :
// Prior to starting port reset, ensure the links is in emergency shutdown
//
bIsLinkInEmergencyShutdown = NV_FALSE;
nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
do
// Forcibly shutdown links if requested
//
if (bForced)
{
bKeepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
status = nvswitch_minion_get_dl_status(device, link_info->linkNumber,
NV_NVLSTAT_UC01, 0, &stat_data);
if (status != NVL_SUCCESS)
nvswitch_execute_unilateral_link_shutdown_ls10(link_info);
}
else
{
bIsLinkInEmergencyShutdown = NV_FALSE;
nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
do
{
bKeepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
status = nvswitch_minion_get_dl_status(device, link_info->linkNumber,
NV_NVLSTAT_UC01, 0, &stat_data);
if (status != NVL_SUCCESS)
{
continue;
}
link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data);
bIsLinkInEmergencyShutdown = (link_state == LINKSTATUS_EMERGENCY_SHUTDOWN) ?
NV_TRUE:NV_FALSE;
if (bIsLinkInEmergencyShutdown == NV_TRUE)
{
break;
}
}
while(bKeepPolling);
if (bIsLinkInEmergencyShutdown == NV_FALSE)
{
NVSWITCH_PRINT(device, ERROR,
"%s: link %d failed to enter emergency shutdown\n",
__FUNCTION__, link);
// Re-register links.
status = nvlink_lib_register_link(device->nvlink_device, link_info);
if (status != NVL_SUCCESS)
{
nvswitch_destroy_link(link_info);
}
continue;
}
link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data);
bIsLinkInEmergencyShutdown = (link_state == LINKSTATUS_EMERGENCY_SHUTDOWN) ?
NV_TRUE:NV_FALSE;
if (bIsLinkInEmergencyShutdown == NV_TRUE)
{
break;
}
}
while(bKeepPolling);
if (bIsLinkInEmergencyShutdown == NV_FALSE)
{
NVSWITCH_PRINT(device, ERROR,
"%s: link %d failed to enter emergency shutdown\n",
__FUNCTION__, link);
// Re-register links.
status = nvlink_lib_register_link(device->nvlink_device, link_info);
if (status != NVL_SUCCESS)
{
nvswitch_destroy_link(link_info);
}
continue;
}
nvswitch_corelib_clear_link_state_ls10(link_info);
@@ -1585,30 +1603,25 @@ _nvswitch_reset_and_drain_links_ls10
NV_NVLSTAT_MN00, 0, &stat_data) == NVL_SUCCESS)
{
link_intr_subcode = DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_SUBCODE, stat_data);
}
else
{
continue;
}
if ((link_state == NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_FAIL) &&
(link_intr_subcode == MINION_ALARM_BUSY))
{
if ((link_state == NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_FAIL) &&
(link_intr_subcode == MINION_ALARM_BUSY))
{
status = nvswitch_request_tl_link_state_ls10(link_info,
NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE);
//
// We retry the shutdown sequence 3 times when we see a MINION_REQUEST_FAIL
// or MINION_ALARM_BUSY
//
retry_count--;
}
else
{
break;
//
// We retry the reset sequence when we see a MINION_REQUEST_FAIL
// or MINION_ALARM_BUSY
//
}
else
{
break;
}
}
retry_count--;
}
} while(retry_count);
if (status != NVL_SUCCESS)
@@ -1651,6 +1664,9 @@ _nvswitch_reset_and_drain_links_ls10
continue;
}
// Initialize select scratch registers to 0x0
device->hal.nvswitch_init_scratch(device);
//
// Step 9.0: Launch ALI training to re-initialize and train the links
// nvswitch_launch_ALI_link_training(device, link_info);
@@ -1658,6 +1674,8 @@ _nvswitch_reset_and_drain_links_ls10
// Request active, but don't block. FM will come back and check
// active link status by blocking on this TLREQ's completion
//
// CCI will re-train links
if (!cciIsLinkManaged(device, link))
{
status = nvswitch_request_tl_link_state_ls10(link_info,
NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_ACTIVE,
@@ -1698,6 +1716,8 @@ _nvswitch_reset_and_drain_links_ls10
}
FOR_EACH_INDEX_IN_MASK_END;
chip_device->deferredLinkErrors[link].state.lastRetrainTime = nvswitch_os_get_platform_time();
return NVL_SUCCESS;
}
@@ -1708,12 +1728,25 @@ NvlStatus
nvswitch_reset_and_drain_links_ls10
(
nvswitch_device *device,
NvU64 link_mask
NvU64 link_mask,
NvBool bForced
)
{
NvlStatus status = NVL_SUCCESS;
status = _nvswitch_reset_and_drain_links_ls10(device, link_mask);
NvU32 link;
// CCI will call reset and drain separately
FOR_EACH_INDEX_IN_MASK(64, link, link_mask)
{
if (cciIsLinkManaged(device, link))
{
link_mask = link_mask & ~NVBIT64(link);
}
}
FOR_EACH_INDEX_IN_MASK_END;
status = _nvswitch_reset_and_drain_links_ls10(device, link_mask, bForced);
if (status != NVL_SUCCESS)
{
return status;
@@ -5163,6 +5196,10 @@ nvswitch_launch_ALI_ls10
continue;
}
if (cciIsLinkManaged(device, i))
{
continue;
}
nvswitch_launch_ALI_link_training(device, link, NV_FALSE);
}
FOR_EACH_INDEX_IN_MASK_END;
@@ -5512,7 +5549,7 @@ nvswitch_ctrl_inband_send_data_ls10
NvlStatus status;
nvswitch_inband_send_data inBandData;
// ct_assert(NVLINK_INBAND_MAX_MSG_SIZE >= NVSWITCH_INBAND_DATA_SIZE);
ct_assert(NVLINK_INBAND_MAX_MSG_SIZE == NVSWITCH_INBAND_DATA_SIZE);
if (p->dataSize == 0 || p->dataSize > NVSWITCH_INBAND_DATA_SIZE)
{
@@ -5858,6 +5895,514 @@ nvswitch_ctrl_get_nvlink_error_threshold_ls10
return NVL_SUCCESS;
}
/*
 * @brief Retrieve the board ID encoded in the OEM BIOS version scratch.
 *
 * @param[in]  device    nvswitch device reference
 * @param[out] pBoardId  receives the board ID field
 *
 * @returns NVL_SUCCESS on success
 *          -NVL_BAD_ARGS when pBoardId is NULL
 *          propagated error when the VBIOS is not valid/readable
 */
NvlStatus
nvswitch_get_board_id_ls10
(
    nvswitch_device *device,
    NvU16 *pBoardId
)
{
    NvlStatus status;
    NvU32 oemVersionReg;

    if (pBoardId == NULL)
    {
        return -NVL_BAD_ARGS;
    }

    // A readable VBIOS version is a precondition for trusting the scratch data
    status = nvswitch_lib_get_bios_version(device, NULL);
    if (status != NVL_SUCCESS)
    {
        return status;
    }

    oemVersionReg = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW_SW,
                                           _OEM_BIOS_VERSION);
    *pBoardId = DRF_VAL(_NVLSAW_SW, _OEM_BIOS_VERSION, _BOARD_ID, oemVersionReg);

    return NVL_SUCCESS;
}
/*
 * @brief Sanity-check BAR0 IO and firmware boot state before MMIO discovery.
 *
 * Detects three fatal conditions: firmware recovery mode (BAR0 offset 0 reads
 * zero), general IO failure (all-ones or 0xBADFxxxx pattern), and FSP boot
 * failure (boot-partition progress never reaches _SUCCESS within ~10s).
 * On error, dumps scratch registers for triage and logs an SXID.
 *
 * @param[in] device  nvswitch device reference
 *
 * @returns NVL_SUCCESS when IO looks sane
 *          -NVL_INITIALIZATION_TOTAL_FAILURE on any detected failure
 */
NvlStatus
nvswitch_check_io_sanity_ls10
(
    nvswitch_device *device
)
{
    NvBool keepPolling;
    NVSWITCH_TIMEOUT timeout;
    NvU32 val;
    NvBool error = NV_FALSE;
    NvU32 sxid;
    const char *sxid_desc = NULL;

    //
    // NOTE: MMIO discovery has not been performed so only constant BAR0 offset
    // addressing can be performed.
    //

    // BAR0 offset 0 should always contain valid data -- unless it doesn't
    val = NVSWITCH_OFF_RD32(device, 0);
    if (val == 0)
    {
        // Reading zero indicates the device booted into recovery mode
        error = NV_TRUE;
        sxid = NVSWITCH_ERR_HW_HOST_FIRMWARE_RECOVERY_MODE;
        sxid_desc = "Firmware recovery mode";
    }
    else if ((val == 0xFFFFFFFF) || ((val & 0xFFFF0000) == 0xBADF0000))
    {
        // All-ones or the PRI error pattern means BAR0 reads are broken
        error = NV_TRUE;
        sxid = NVSWITCH_ERR_HW_HOST_IO_FAILURE;
        sxid_desc = "IO failure";
    }
    else if (!IS_FMODEL(device))
    {
        // check if FSP successfully started
        nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
        do
        {
            keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;

            val = NVSWITCH_REG_RD32(device, _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS);
            if (FLD_TEST_DRF(_GFW_GLOBAL, _BOOT_PARTITION_PROGRESS, _VALUE, _SUCCESS, val))
            {
                break;
            }

            nvswitch_os_sleep(1);
        }
        while (keepPolling);

        // Re-test the last value read: timeout without _SUCCESS is fatal
        if (!FLD_TEST_DRF(_GFW_GLOBAL, _BOOT_PARTITION_PROGRESS, _VALUE, _SUCCESS, val))
        {
            error = NV_TRUE;
            sxid = NVSWITCH_ERR_HW_HOST_FIRMWARE_INITIALIZATION_FAILURE;
            sxid_desc = "Firmware initialization failure";
        }
    }

    if (error)
    {
        NVSWITCH_RAW_ERROR_LOG_TYPE report = { 0, { 0 } };
        NVSWITCH_RAW_ERROR_LOG_TYPE report_saw = {0, { 0 }};
        NvU32 report_idx = 0;
        NvU32 i;

        // Capture boot-partition progress plus SAW and FSP scratch for triage
        val = NVSWITCH_REG_RD32(device, _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS);
        report.data[report_idx++] = val;
        NVSWITCH_PRINT(device, ERROR, "%s: -- _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS (0x%x) != _SUCCESS --\n",
            __FUNCTION__, val);

        for (i = 0; i <= 15; i++)
        {
            val = NVSWITCH_OFF_RD32(device,
                NV_PTOP_UNICAST_SW_DEVICE_BASE_SAW_0 + NV_NVLSAW_SW_SCRATCH(i));
            report_saw.data[i] = val;
            NVSWITCH_PRINT(device, ERROR, "%s: -- NV_NVLSAW_SW_SCRATCH(%d) = 0x%08x\n",
                __FUNCTION__, i, val);
        }

        for (i = 0; i < NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2__SIZE_1; i++)
        {
            val = NVSWITCH_REG_RD32(device, _PFSP, _FALCON_COMMON_SCRATCH_GROUP_2(i));
            report.data[report_idx++] = val;
            NVSWITCH_PRINT(device, ERROR, "%s: -- NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(%d) = 0x%08x\n",
                __FUNCTION__, i, val);
        }

        // Include useful scratch information for triage
        NVSWITCH_PRINT_SXID_NO_BBX(device, sxid,
            "Fatal, %s (0x%x/0x%x, 0x%x, 0x%x, 0x%x/0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", sxid_desc,
            report.data[0], report.data[1], report.data[2], report.data[3], report.data[4],
            report_saw.data[0], report_saw.data[1], report_saw.data[12], report_saw.data[14], report_saw.data[15]);

        return -NVL_INITIALIZATION_TOTAL_FAILURE;
    }

    return NVL_SUCCESS;
}
/*
 * @brief: This function returns the current value of the SOE heartbeat gpio
 *         together with a PTIMER timestamp of when it was sampled.
 *
 * @params[in]  device reference to current nvswitch device
 * @params[out] p      NVSWITCH_GET_SOE_HEARTBEAT_PARAMS; gpioVal and
 *                     timestampNs are filled in
 *
 * @returns NVL_SUCCESS on success
 *          -NVL_ERR_NOT_SUPPORTED when CCI is not supported on this device
 */
NvlStatus
nvswitch_ctrl_get_soe_heartbeat_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p
)
{
    NvU32 gpioVal = 0;
    NvU64 hi = 0;
    NvU64 lo = 0;
    NvU64 test = 0;

    if (!nvswitch_is_cci_supported(device))
    {
        return -NVL_ERR_NOT_SUPPORTED;
    }

    // Read status of heartbeat gpio
    gpioVal = NVSWITCH_REG_RD32(device, _GPIO, _OUTPUT_CNTL(3));

    // Record timestamp of gpio read from PTIMER
    // hi/lo/hi read pattern guards against TIME_0 wrapping into TIME_1
    // between the two reads; retry until the high word is stable.
    do
    {
        hi = NVSWITCH_ENG_RD32(device, PTIMER, , 0, _PTIMER, _TIME_1);
        lo = NVSWITCH_ENG_RD32(device, PTIMER, , 0, _PTIMER, _TIME_0);
        test = NVSWITCH_ENG_RD32(device, PTIMER, , 0, _PTIMER, _TIME_1);
    }
    while (hi != test);
    p->timestampNs = (hi << 32) | lo;

    // Translate the IO_OUTPUT field into a 0/1 value for the caller
    if (FLD_TEST_DRF(_GPIO, _OUTPUT_CNTL, _IO_OUTPUT, _1, gpioVal))
    {
        p->gpioVal = 1;
    }
    else if (FLD_TEST_DRF(_GPIO, _OUTPUT_CNTL, _IO_OUTPUT, _0, gpioVal))
    {
        p->gpioVal = 0;
    }

    return NVL_SUCCESS;
}
/*
 * @brief Reset and drain only the CCI-managed links in the given mask.
 *
 * Non-CCI links are filtered out; the remaining set is handed to the common
 * reset-and-drain implementation.
 *
 * @param[in] device     nvswitch device reference
 * @param[in] link_mask  candidate link mask
 * @param[in] bForced    forcibly shut down links before reset
 *
 * @returns status from _nvswitch_reset_and_drain_links_ls10
 */
static NvlStatus
nvswitch_cci_reset_and_drain_links_ls10
(
    nvswitch_device *device,
    NvU64 link_mask,
    NvBool bForced
)
{
    NvU64 cciManagedMask = 0;
    NvU32 linkId;

    // Build the subset of links that CCI actually owns
    FOR_EACH_INDEX_IN_MASK(64, linkId, link_mask)
    {
        if (cciIsLinkManaged(device, linkId))
        {
            cciManagedMask |= NVBIT64(linkId);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return _nvswitch_reset_and_drain_links_ls10(device, cciManagedMask, bForced);
}
/*
 * @brief Set the next LED state
 * The HW will reflect this state on the next iteration of link
 * state update.
 *
 * @param[in] device        nvswitch device reference
 * @param[in] nextLedState  packed LED state to latch for the next HW update
 */
static void
_nvswitch_set_next_led_state_ls10
(
    nvswitch_device *device,
    NvU8 nextLedState
)
{
    // Only records the desired state; the CPLD write happens later in
    // _nvswitch_set_led_state_ls10().
    device->next_led_state = nextLedState;
}
/*
 * @brief Map a logical access-link LED state to its CPLD register value.
 *
 * Mapping documented on confluence page ID: 1011518154.
 *
 * @param[in] device    nvswitch device reference (unused in the mapping)
 * @param[in] ledState  ACCESS_LINK_LED_STATE_* value
 *
 * @returns the CPLD_MACHXO3 LED register encoding; OFF (with an assert)
 *          for unrecognized states
 */
static NvU8
_nvswitch_get_led_state_regval_ls10
(
    nvswitch_device *device,
    NvU8 ledState
)
{
    switch (ledState)
    {
        case ACCESS_LINK_LED_STATE_OFF:
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_OFF;
        case ACCESS_LINK_LED_STATE_UP_WARM:
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_GREEN;
        case ACCESS_LINK_LED_STATE_INITIALIZE:
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_3HZ_AMBER;
        case ACCESS_LINK_LED_STATE_UP_ACTIVE:
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_3HZ_GREEN;
        case ACCESS_LINK_LED_STATE_FAULT:
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_6HZ_AMBER;
        default:
            // Unknown logical state: fail loudly in debug, dark LED in release
            NVSWITCH_ASSERT(0);
            return CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED_REG_STATE_OFF;
    }
}
/*
 * @brief Set HW LED state using CPLD write
 *
 * Pushes device->next_led_state to the CPLD and, on success, records it as
 * the current HW state.
 *
 * @param[in] device  nvswitch device reference
 *
 * @returns NVL_SUCCESS on success, else the CPLD write error
 */
static NvlStatus
_nvswitch_set_led_state_ls10
(
    nvswitch_device *device
)
{
    NvlStatus retval;
    NvU8 ledState;
    NvU8 nextLedState;
    NvU8 regVal = 0;

    nextLedState = device->next_led_state;

    // Extract the logical state and translate it to the CPLD encoding
    ledState = REF_VAL(ACCESS_LINK_LED_STATE, nextLedState);
    regVal = FLD_SET_REF_NUM(CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED,
                             _nvswitch_get_led_state_regval_ls10(device, ledState),
                             regVal);

    // Set state for LED
    retval = nvswitch_cci_ports_cpld_write(device, CPLD_MACHXO3_ACCESS_LINK_LED_CTL, regVal);
    if (retval != NVL_SUCCESS)
    {
        return retval;
    }

    // save HW state
    device->current_led_state = REF_NUM(ACCESS_LINK_LED_STATE, ledState);

    return NVL_SUCCESS;
}
/*
 * @brief Check whether any link in the mask carried traffic since last call.
 *
 * Compares the current TX+RX throughput counter sum per link against the
 * value cached on the previous invocation, updating the cache as it goes.
 *
 * @param[in] device    nvswitch device reference
 * @param[in] linkMask  links to examine
 *
 * @returns NV_TRUE if any link's counter sum advanced; NV_FALSE otherwise
 *          (including on allocation or counter-query failure)
 */
static NvBool
_nvswitch_check_for_link_traffic
(
    nvswitch_device *device,
    NvU64 linkMask
)
{
    NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *pCounterParams = NULL;
    NvU64 *pCounterValues;
    NvU64 tpCounterPreviousSum;
    NvU64 tpCounterCurrentSum;
    NvBool bTraffic = NV_FALSE;
    NvU8 linkNum;

    // Heap-allocated: the params struct is too large for the stack
    pCounterParams = nvswitch_os_malloc(sizeof(*pCounterParams));
    if (pCounterParams == NULL)
        goto out;

    pCounterParams->counterMask = NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX |
                                  NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX;
    pCounterParams->linkMask = linkMask;
    if (nvswitch_ctrl_get_throughput_counters(device,
            pCounterParams) != NVL_SUCCESS)
    {
        goto out;
    }

    // Sum TX/RX traffic for each link
    FOR_EACH_INDEX_IN_MASK(64, linkNum, linkMask)
    {
        pCounterValues = pCounterParams->counters[linkNum].values;

        tpCounterPreviousSum = device->tp_counter_previous_sum[linkNum];

        // Sum taken to save space as it is unlikely to overflow before system is reset
        tpCounterCurrentSum = pCounterValues[NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX] +
                              pCounterValues[NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX];

        // Cache the new sum before comparing so the next call has a baseline
        device->tp_counter_previous_sum[linkNum] = tpCounterCurrentSum;

        if (tpCounterCurrentSum > tpCounterPreviousSum)
        {
            bTraffic = NV_TRUE;
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

out:
    // nvswitch_os_free tolerates NULL on the early-exit path
    nvswitch_os_free(pCounterParams);
    return bTraffic;
}
/*
 * @brief Resolve two partner-link LED states into one displayed state.
 *
 * The enum ordering encodes severity (FAULT < OFF < INITIALIZE < UP_WARM),
 * so picking the numerically smaller state always surfaces the worse one.
 *
 * @param[in] device     nvswitch device reference (unused)
 * @param[in] ledState0  first link's LED state
 * @param[in] ledState1  second link's LED state
 *
 * @returns the more severe of the two states
 */
static NvU8
_nvswitch_resolve_led_state_ls10
(
    nvswitch_device *device,
    NvU8 ledState0,
    NvU8 ledState1
)
{
    // Compile-time proof that the severity ordering assumption holds
    ct_assert(ACCESS_LINK_LED_STATE_FAULT < ACCESS_LINK_LED_STATE_OFF);
    ct_assert(ACCESS_LINK_LED_STATE_OFF < ACCESS_LINK_LED_STATE_INITIALIZE);
    ct_assert(ACCESS_LINK_LED_STATE_INITIALIZE < ACCESS_LINK_LED_STATE_UP_WARM);

    if (ledState0 < ledState1)
    {
        return ledState0;
    }

    return ledState1;
}
/*
 * @brief Compute the next LED state for a single link from its DL link mode.
 *
 * @param[in] device           nvswitch device reference
 * @param[in] currentLedState  LED state currently displayed
 * @param[in] linkNum          link to evaluate
 *
 * @returns ACCESS_LINK_LED_STATE_* for this link
 */
static NvU8
_nvswitch_get_next_led_state_link_ls10
(
    nvswitch_device *device,
    NvU8 currentLedState,
    NvU8 linkNum
)
{
    nvlink_link *pLink;
    NvU64 dlMode;

    pLink = nvswitch_get_link(device, linkNum);

    // Treat an unknown link or an unreadable DL mode as a dark LED
    if ((pLink == NULL) ||
        (device->hal.nvswitch_corelib_get_dl_link_mode(pLink, &dlMode) != NVL_SUCCESS))
    {
        return ACCESS_LINK_LED_STATE_OFF;
    }

    if (dlMode == NVLINK_LINKSTATE_OFF)
    {
        return ACCESS_LINK_LED_STATE_OFF;
    }

    if ((dlMode == NVLINK_LINKSTATE_HS) ||
        (dlMode == NVLINK_LINKSTATE_RECOVERY) ||
        (dlMode == NVLINK_LINKSTATE_SLEEP))
    {
        return ACCESS_LINK_LED_STATE_UP_WARM;
    }

    if (dlMode == NVLINK_LINKSTATE_FAULT)
    {
        return ACCESS_LINK_LED_STATE_FAULT;
    }

    // Any other (transitional) state: keep showing INITIALIZE if we were
    // already initializing, otherwise fall back to OFF.
    if (currentLedState == ACCESS_LINK_LED_STATE_INITIALIZE)
    {
        return ACCESS_LINK_LED_STATE_INITIALIZE;
    }

    return ACCESS_LINK_LED_STATE_OFF;
}
/*
 * @brief Compute the aggregate next LED state over a group of links.
 *
 * Resolves the per-link states to the most severe one, then upgrades
 * UP_WARM to UP_ACTIVE when any link in the group shows traffic.
 *
 * @param[in] device           nvswitch device reference
 * @param[in] currentLedState  LED state currently displayed
 * @param[in] linkMask         non-empty mask of links feeding this LED
 *
 * @returns aggregate ACCESS_LINK_LED_STATE_* for the group
 */
static NvU8
_nvswitch_get_next_led_state_links_ls10
(
    nvswitch_device *device,
    NvU8 currentLedState,
    NvU64 linkMask
)
{
    NvU8 linkNum;
    NvU8 ledState;
    NvU8 nextLedState;

    // Seed with the sentinel above all real states so the first resolve wins
    nextLedState = ACCESS_LINK_NUM_LED_STATES;

    NVSWITCH_ASSERT(linkMask != 0);

    FOR_EACH_INDEX_IN_MASK(64, linkNum, linkMask)
    {
        ledState = _nvswitch_get_next_led_state_link_ls10(device, currentLedState, linkNum);
        nextLedState = _nvswitch_resolve_led_state_ls10(device, nextLedState, ledState);
    }
    FOR_EACH_INDEX_IN_MASK_END;

    if (nextLedState == ACCESS_LINK_LED_STATE_UP_WARM)
    {
        // Only tells us that one of the links has activity
        if (_nvswitch_check_for_link_traffic(device, linkMask))
        {
            nextLedState = ACCESS_LINK_LED_STATE_UP_ACTIVE;
        }
    }

    return nextLedState;
}
/*
 * @brief Compute the device-wide next LED state.
 *
 * Evaluates all enabled links except those managed by CCI (CCI drives its
 * own LED policy) and packs the aggregate state into the LED state field.
 *
 * @param[in] device  nvswitch device reference
 *
 * @returns packed LED state suitable for _nvswitch_set_next_led_state_ls10
 */
static NvU8
_nvswitch_get_next_led_state_ls10
(
    nvswitch_device *device
)
{
    NvU8 linkNum;
    NvU8 ledNextState = 0;
    NvU8 currentLedState;
    NvU64 enabledLinkMask;

    enabledLinkMask = nvswitch_get_enabled_link_mask(device);

    // CCI-managed links are excluded; their LEDs are handled elsewhere
    FOR_EACH_INDEX_IN_MASK(64, linkNum, enabledLinkMask)
    {
        if (cciIsLinkManaged(device, linkNum))
        {
            enabledLinkMask = enabledLinkMask & ~NVBIT64(linkNum);
        }
    }
    FOR_EACH_INDEX_IN_MASK_END;

    currentLedState = device->current_led_state;
    currentLedState = REF_VAL(ACCESS_LINK_LED_STATE, currentLedState);

    ledNextState = FLD_SET_REF_NUM(ACCESS_LINK_LED_STATE,
                                   _nvswitch_get_next_led_state_links_ls10(device,
                                                                           currentLedState,
                                                                           enabledLinkMask),
                                   ledNextState);
    return ledNextState;
}
/*
 * @brief Periodic LED refresh: recompute and (if changed) push the LED state.
 *
 * @param[in] device  nvswitch device reference
 */
void
nvswitch_update_link_state_led_ls10
(
    nvswitch_device *device
)
{
    NvU8 displayedState;
    NvU8 computedState;

    displayedState = REF_VAL(ACCESS_LINK_LED_STATE, device->current_led_state);
    computedState = _nvswitch_get_next_led_state_ls10(device);

    // Latch the freshly computed state for the next update cycle
    _nvswitch_set_next_led_state_ls10(device, computedState);

    // Skip the CPLD transaction when the LED would not visibly change
    if (displayedState != computedState)
    {
        _nvswitch_set_led_state_ls10(device);
    }
}
/*
 * @brief Turn the access-link LED off as part of device shutdown.
 *
 * @param[in] device  nvswitch device reference
 */
void
nvswitch_led_shutdown_ls10
(
    nvswitch_device *device
)
{
    NvU8 offState = 0;

    offState = FLD_SET_REF_NUM(ACCESS_LINK_LED_STATE,
                               ACCESS_LINK_LED_STATE_OFF, offState);

    // Latch OFF as the pending state, then push it to the CPLD immediately
    _nvswitch_set_next_led_state_ls10(device, offState);
    _nvswitch_set_led_state_ls10(device);
}
NvlStatus
nvswitch_read_vbios_link_entries_ls10
(
@@ -6009,6 +6554,27 @@ nvswitch_ctrl_get_inforom_version_ls10
return NVL_SUCCESS;
}
/*
 * @Brief : Initializes an NvSwitch hardware state
 *
 * @Description : Enables continuous ALI mode for LS10 and defers the rest
 *                of the initialization to the LR10 implementation.
 *
 * @param[in] device        a reference to the device to initialize
 *
 * @returns                 NVL_SUCCESS if the action succeeded
 *                          -NVL_BAD_ARGS if bad arguments provided
 *                          -NVL_PCI_ERROR if bar info unable to be retrieved
 */
NvlStatus
nvswitch_initialize_device_state_ls10
(
    nvswitch_device *device
)
{
    // LS10 trains links with continuous ALI by default
    device->bModeContinuousALI = NV_TRUE;
    return nvswitch_initialize_device_state_lr10(device);
}
//
// This function auto creates the ls10 HAL connectivity from the NVSWITCH_INIT_HAL
// macro in haldef_nvswitch.h

View File

@@ -512,10 +512,9 @@ nvswitch_soe_disable_nport_fatal_interrupts_ls10
NVSWITCH_TIMEOUT timeout;
RM_SOE_CORE_CMD_NPORT_FATAL_INTR *pNportIntrDisable;
NVSWITCH_GET_BIOS_INFO_PARAMS p = { 0 };
NvlStatus stat;
stat = device->hal.nvswitch_ctrl_get_bios_info(device, &p);
if ((stat != NVL_SUCCESS) || ((p.version & SOE_VBIOS_VERSION_MASK) <
status = device->hal.nvswitch_ctrl_get_bios_info(device, &p);
if ((status != NVL_SUCCESS) || ((p.version & SOE_VBIOS_VERSION_MASK) <
SOE_VBIOS_REVLOCK_DISABLE_NPORT_FATAL_INTR))
{
NVSWITCH_PRINT(device, ERROR,
@@ -696,6 +695,40 @@ nvswitch_soe_register_event_callbacks_ls10
return -NVL_ERR_INVALID_STATE;
}
// Register CCI callback funcion
status = flcnQueueEventRegister(
device, pFlcn,
RM_SOE_UNIT_CCI,
NULL,
nvswitch_cci_soe_callback_ls10,
NULL,
&pSoe->cciEvtDesc);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to register CCI event handler.\n",
__FUNCTION__);
return -NVL_ERR_INVALID_STATE;
}
// Register Heartbeat callback funcion
status = flcnQueueEventRegister(
device, pFlcn,
RM_SOE_UNIT_HEARTBEAT,
NULL,
nvswitch_heartbeat_soe_callback_ls10,
NULL,
&pSoe->heartbeatEvtDesc);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to register Heartbeat event handler.\n",
__FUNCTION__);
return -NVL_ERR_INVALID_STATE;
}
return NVL_SUCCESS;
}
@@ -718,6 +751,25 @@ nvswitch_soe_unregister_events_ls10
"%s: Failed to un-register thermal event handler.\n",
__FUNCTION__);
}
// un-register thermal callback funcion
status = flcnQueueEventUnregister(device, pFlcn,
pSoe->cciEvtDesc);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to un-register cci event handler.\n",
__FUNCTION__);
}
// un-register heartbeat callback funcion
status = flcnQueueEventUnregister(device, pFlcn,
pSoe->heartbeatEvtDesc);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to un-register heartbeat event handler.\n",
__FUNCTION__);
}
}
/*!

File diff suppressed because it is too large Load Diff

View File

@@ -85,7 +85,6 @@ nvswitch_smbpbi_post_init
{
return -NVL_ERR_NOT_SUPPORTED;
}
status = device->hal.nvswitch_smbpbi_post_init_hal(device);
if (status == NVL_SUCCESS)