570.86.15

This commit is contained in:
Bernhard Stoeckner
2025-01-27 19:36:56 +01:00
parent 9d0b0414a5
commit 54d69484da
1166 changed files with 318863 additions and 182687 deletions

View File

@@ -175,11 +175,4 @@ enum
RM_SOE_DMAIDX_GUEST_PHYS_SYS_NCOH_BOUND = 7
};
/*!
* SOE Debug buffer size
* Creating a copy of SOE_DMESG_BUFFER_SIZE in memmap.h
* soe/memmap.h is conflicting with sec2/memmap.h and cannot be used in the driver
*/
#define SOE_DEBUG_BUFFER_SIZE 0x1000
#endif // _GSOEIFCMN_H_

View File

@@ -125,6 +125,11 @@ enum
* Issue Ingress stop
*/
RM_SOE_CORE_CMD_ISSUE_INGRESS_STOP = 0x10,
/*
* Enable ERROR reporting
*/
RM_SOE_CORE_CMD_UPDATE_INTR_REPORT_EN = 0x12,
};
// Timeout for SOE reset callback function
@@ -273,6 +278,44 @@ enum
RM_SOE_CORE_NPORT_REDUCTION_INTERRUPT
};
/*!
 * Engine classes whose interrupt-report-enable registers can be updated
 * through the RM_SOE_CORE_CMD_UPDATE_INTR_REPORT_EN command (see
 * RM_SOE_CORE_CMD_ERROR_REPORT_EN).
 * NOTE(review): enumerator values are presumably part of the driver<->SOE
 * message ABI -- do not reorder; confirm against the SOE firmware.
 */
typedef enum
{
RM_SOE_CORE_ENGINE_ID_NPORT,
RM_SOE_CORE_ENGINE_ID_NVLIPT,
RM_SOE_CORE_ENGINE_ID_NVLIPT_LNK,
RM_SOE_CORE_ENGINE_ID_NVLTLC
}RM_SOE_CORE_ENGINE_ID;
/*!
 * Selector for the NPORT *_ERR_(NON_)FATAL_REPORT_EN_* register that an
 * RM_SOE_CORE_CMD_UPDATE_INTR_REPORT_EN command should update. Each
 * enumerator names one report-enable register of an NPORT sub-unit
 * (ROUTE, INGRESS, TSTATE, EGRESS, SOURCETRACK, MULTICASTTSTATE,
 * REDUCTIONTSTATE).
 * NOTE(review): enumerator values are presumably part of the driver<->SOE
 * message ABI -- do not reorder; confirm against the SOE firmware.
 */
typedef enum
{
RM_SOE_CORE_NPORT_ROUTE_ERR_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_ROUTE_ERR_NON_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_INGRESS_ERR_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_INGRESS_ERR_NON_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_INGRESS_ERR_NON_FATAL_REPORT_EN_1,
RM_SOE_CORE_NPORT_TSTATE_ERR_NON_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_TSTATE_ERR_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_EGRESS_ERR_NON_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_EGRESS_ERR_NON_FATAL_REPORT_EN_1,
RM_SOE_CORE_NPORT_EGRESS_ERR_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_EGRESS_ERR_FATAL_REPORT_EN_1,
RM_SOE_CORE_NPORT_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_SOURCETRACK_ERR_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_MULTICASTTSTATE_ERR_NON_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_MULTICASTTSTATE_ERR_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_REDUCTIONTSTATE_ERR_NON_FATAL_REPORT_EN_0,
RM_SOE_CORE_NPORT_REDUCTIONTSTATE_ERR_FATAL_REPORT_EN_0
}RM_SOE_CORE_NPORT_REPORT_EN_REGISTER;
/*!
 * Payload for RM_SOE_CORE_CMD_UPDATE_INTR_REPORT_EN: requests that SOE
 * update the selected error-report-enable register of the given engine
 * instance with the supplied value (see
 * nvswitch_soe_update_intr_report_en_ls10).
 * NOTE(review): field layout is presumably the driver<->SOE wire format --
 * do not reorder or resize fields; confirm against the SOE firmware.
 */
typedef struct
{
NvU8 cmdType; // command identifier: RM_SOE_CORE_CMD_UPDATE_INTR_REPORT_EN
RM_SOE_CORE_ENGINE_ID engId; // engine class to target
NvU32 engInstance; // engine instance (e.g. NPORT/link index)
RM_SOE_CORE_NPORT_REPORT_EN_REGISTER reg; // which report-enable register to write
NvU32 data; // value to program into the register
} RM_SOE_CORE_CMD_ERROR_REPORT_EN;
typedef union
{
NvU8 cmdType;
@@ -292,6 +335,7 @@ typedef union
RM_SOE_CORE_CMD_PERFORM_ONBOARD_PHASE performOnboardPhase;
RM_SOE_CORE_CMD_NPORT_FATAL_INTR nportDisableIntr;
RM_SOE_CORE_CMD_INGRESS_STOP ingressStop;
RM_SOE_CORE_CMD_ERROR_REPORT_EN enableErrorReport;
} RM_SOE_CORE_CMD;
typedef struct

View File

@@ -34,6 +34,78 @@
* Command Messages between driver and TNVL unit of SOE
*/
//
// X-macro list of engines that exist only on LS10-class devices.
// Each entry expands _op(<engine>) once; combined with
// RM_SOE_ENGINE_ID_LIST this generates the RM_SOE_ENGINE_ID_* enumerators.
//
#define RM_SOE_LIST_LS10_ONLY_ENGINES(_op) \
_op(GIN) \
_op(XAL) \
_op(XAL_FUNC) \
_op(XPL) \
_op(XTL) \
_op(XTL_CONFIG) \
_op(UXL) \
_op(GPU_PTOP) \
_op(PMC) \
_op(PBUS) \
_op(ROM2) \
_op(GPIO) \
_op(FSP) \
_op(SYSCTRL) \
_op(CLKS_SYS) \
_op(CLKS_SYSB) \
_op(CLKS_P0) \
_op(SAW_PM) \
_op(PCIE_PM) \
_op(PRT_PRI_HUB) \
_op(PRT_PRI_RS_CTRL) \
_op(SYS_PRI_HUB) \
_op(SYS_PRI_RS_CTRL) \
_op(SYSB_PRI_HUB) \
_op(SYSB_PRI_RS_CTRL) \
_op(PRI_MASTER_RS) \
_op(PTIMER) \
_op(CPR) \
_op(TILEOUT) \
//
// X-macro list of engines common to all supported architectures.
// Each entry expands _op(<engine>) once; combined with
// RM_SOE_ENGINE_ID_LIST this generates the RM_SOE_ENGINE_ID_* enumerators.
//
#define RM_SOE_LIST_ALL_ENGINES(_op) \
_op(XVE) \
_op(SAW) \
_op(SOE) \
_op(SMR) \
\
_op(NPG) \
_op(NPORT) \
\
_op(NVLW) \
_op(MINION) \
_op(NVLIPT) \
_op(NVLIPT_LNK) \
_op(NVLTLC) \
_op(NVLDL) \
\
_op(NXBAR) \
_op(TILE) \
\
_op(NPG_PERFMON) \
_op(NPORT_PERFMON) \
\
_op(NVLW_PERFMON) \
//
// Expansion helper: maps an engine name to its RM_SOE_ENGINE_ID_* enumerator
// (one enumerator per _eng, trailing comma included).
//
#define RM_SOE_ENGINE_ID_LIST(_eng) \
RM_SOE_ENGINE_ID_##_eng,
//
// ENGINE_IDs are the complete list of all engines that are supported on
// LS10 architecture(s) that may support them. Any one architecture may or
// may not understand how to operate on any one specific engine.
// Architectures that share a common ENGINE_ID are not guaranteed to have
// compatible manuals.
//
// NOTE(review): enumerator order follows the two X-macro lists above and is
// presumably part of the driver<->SOE ABI -- do not reorder; confirm
// against the SOE firmware.
//
typedef enum rm_soe_engine_id
{
RM_SOE_LIST_ALL_ENGINES(RM_SOE_ENGINE_ID_LIST)
RM_SOE_LIST_LS10_ONLY_ENGINES(RM_SOE_ENGINE_ID_LIST)
RM_SOE_ENGINE_ID_SIZE, // count sentinel: one past the last valid engine id
} RM_SOE_ENGINE_ID;
/*!
* Commands offered by the SOE Tnvl Interface.
*/
@@ -47,6 +119,10 @@ enum
* Issue pre-lock sequence
*/
RM_SOE_TNVL_CMD_ISSUE_PRE_LOCK_SEQUENCE = 0x1,
/*
* Issue engine write command
*/
RM_SOE_TNVL_CMD_ISSUE_ENGINE_WRITE = 0x2,
};
/*!
@@ -60,6 +136,17 @@ typedef struct
NvU32 data;
} RM_SOE_TNVL_CMD_REGISTER_WRITE;
/*!
 * Payload for RM_SOE_TNVL_CMD_ISSUE_ENGINE_WRITE: an engine register
 * write carried out via SOE. The fields mirror the parameters of
 * nvswitch_eng_wr (engine id, broadcast selector, engine instance,
 * unit base address, register offset, and the value to write).
 * NOTE(review): field layout is presumably the driver<->SOE wire format --
 * do not reorder or resize fields; confirm against the SOE firmware.
 */
typedef struct
{
NvU8 cmdType; // command identifier: RM_SOE_TNVL_CMD_ISSUE_ENGINE_WRITE
RM_SOE_ENGINE_ID eng_id; // target engine class
NvU32 eng_bcast; // broadcast/unicast selector
NvU32 eng_instance; // instance of the engine
NvU32 base; // engine unit base address
NvU32 offset; // register offset within the unit
NvU32 data; // value to write
} RM_SOE_TNVL_CMD_ENGINE_WRITE;
typedef struct
{
NvU8 cmdType;
@@ -69,8 +156,9 @@ typedef union
{
NvU8 cmdType;
RM_SOE_TNVL_CMD_REGISTER_WRITE registerWrite;
RM_SOE_TNVL_CMD_ENGINE_WRITE engineWrite;
RM_SOE_TNVL_CMD_PRE_LOCK_SEQUENCE preLockSequence;
} RM_SOE_TNVL_CMD;
#endif // _SOEIFTNVL_H_
#endif // _SOETNVL_H_

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -831,6 +831,7 @@ typedef enum nvswitch_err_type
NVSWITCH_ERR_HW_HOST_IO_FAILURE = 10007,
NVSWITCH_ERR_HW_HOST_FIRMWARE_INITIALIZATION_FAILURE = 10008,
NVSWITCH_ERR_HW_HOST_FIRMWARE_RECOVERY_MODE = 10009,
NVSWITCH_ERR_HW_HOST_TNVL_ERROR = 10010,
NVSWITCH_ERR_HW_HOST_LAST,

View File

@@ -213,6 +213,7 @@
_op(NvU32, nvswitch_get_eng_count, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast), _arch) \
_op(NvU32, nvswitch_eng_rd, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 offset), _arch) \
_op(void, nvswitch_eng_wr, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 offset, NvU32 data), _arch) \
_op(void, nvswitch_reg_write_32, (nvswitch_device *device, NvU32 offset, NvU32 data), _arch) \
_op(NvU32, nvswitch_get_link_eng_inst, (nvswitch_device *device, NvU32 link_id, NVSWITCH_ENGINE_ID eng_id), _arch) \
_op(void *, nvswitch_alloc_chipdevice, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_init_thermal, (nvswitch_device *device), _arch) \

View File

@@ -189,8 +189,9 @@
#define SOE_VBIOS_VERSION_MASK 0xFF0000
#define SOE_VBIOS_REVLOCK_DISABLE_NPORT_FATAL_INTR 0x370000
#define SOE_VBIOS_REVLOCK_ISSUE_INGRESS_STOP 0x4C0000
#define SOE_VBIOS_REVLOCK_ISSUE_REGISTER_WRITE 0x580000
#define SOE_VBIOS_REVLOCK_TNVL_PRELOCK_COMMAND 0x600000
#define SOE_VBIOS_REVLOCK_TNVL_PRELOCK_COMMAND 0x590000
#define SOE_VBIOS_REVLOCK_SOE_PRI_CHECKS 0x610000
#define SOE_VBIOS_REVLOCK_REPORT_EN 0x700000
// LS10 Saved LED state
#define ACCESS_LINK_LED_STATE CPLD_MACHXO3_ACCESS_LINK_LED_CTL_NVL_CABLE_LED
@@ -1055,10 +1056,10 @@ NvlStatus nvswitch_tnvl_get_attestation_certificate_chain_ls10(nvswitch_device *
NvlStatus nvswitch_tnvl_get_attestation_report_ls10(nvswitch_device *device, NVSWITCH_GET_ATTESTATION_REPORT_PARAMS *params);
NvlStatus nvswitch_tnvl_send_fsp_lock_config_ls10(nvswitch_device *device);
NvlStatus nvswitch_tnvl_get_status_ls10(nvswitch_device *device, NVSWITCH_GET_TNVL_STATUS_PARAMS *params);
void nvswitch_tnvl_reg_wr_32_ls10(nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 base_addr, NvU32 offset, NvU32 data);
void nvswitch_tnvl_eng_wr_32_ls10(nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 base_addr, NvU32 offset, NvU32 data);
NvlStatus nvswitch_send_tnvl_prelock_cmd_ls10(nvswitch_device *device);
void nvswitch_tnvl_disable_interrupts_ls10(nvswitch_device *device);
void nvswitch_tnvl_reg_wr_32_ls10(nvswitch_device *device, NvU32 offset, NvU32 data);
NvlStatus nvswitch_ctrl_get_soe_heartbeat_ls10(nvswitch_device *device, NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p);
NvlStatus nvswitch_cci_enable_iobist_ls10(nvswitch_device *device, NvU32 linkNumber, NvBool bEnable);
NvlStatus nvswitch_cci_initialization_sequence_ls10(nvswitch_device *device, NvU32 linkNumber);

View File

@@ -51,5 +51,7 @@ NvlStatus nvswitch_soe_set_nport_interrupts_ls10(nvswitch_device *device, NvU32
void nvswitch_soe_disable_nport_fatal_interrupts_ls10(nvswitch_device *device, NvU32 nport,
NvU32 nportIntrEnable, NvU8 nportIntrType);
NvlStatus nvswitch_soe_issue_ingress_stop_ls10(nvswitch_device *device, NvU32 nport, NvBool bStop);
NvlStatus nvswitch_soe_update_intr_report_en_ls10(nvswitch_device *device, RM_SOE_CORE_ENGINE_ID eng_id, NvU32 eng_instance, RM_SOE_CORE_NPORT_REPORT_EN_REGISTER reg, NvU32 data);
NvlStatus nvswitch_soe_reg_wr_32_ls10(nvswitch_device *device, NvU32 offset, NvU32 data);
NvlStatus nvswitch_soe_eng_wr_32_ls10(nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 base_addr, NvU32 offset, NvU32 data);
#endif //_SOE_LS10_H_

View File

@@ -1329,6 +1329,13 @@ nvswitch_corelib_set_tl_link_mode_lr10
nvswitch_device *device = link->dev->pDevInfo;
NvlStatus status = NVL_SUCCESS;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
{
NVSWITCH_PRINT(device, ERROR,
@@ -1728,6 +1735,13 @@ nvswitch_corelib_set_rx_mode_lr10
NvlStatus status = NVL_SUCCESS;
NvU32 delay_ns;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber))
{
NVSWITCH_PRINT(device, ERROR,
@@ -1955,6 +1969,13 @@ nvswitch_corelib_set_rx_detect_lr10
NvlStatus status;
nvswitch_device *device = link->dev->pDevInfo;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
if (nvswitch_does_link_need_termination_enabled(device, link))
{
NVSWITCH_PRINT(device, INFO,
@@ -2094,6 +2115,13 @@ nvswitch_request_tl_link_state_lr10
NvlStatus status = NVL_SUCCESS;
NvU32 linkStatus;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, link->linkNumber))
{
NVSWITCH_PRINT(device, ERROR,

View File

@@ -8319,6 +8319,26 @@ nvswitch_tnvl_disable_interrupts_lr10
return;
}
/*!
 * Write a 32-bit value to a BAR0 register at the given byte offset (LR10).
 *
 * Logs an error and returns without writing if BAR0 is not mapped.
 */
void
nvswitch_reg_write_32_lr10
(
    nvswitch_device *device,
    NvU32 offset,
    NvU32 data
)
{
    NvU8 *bar0 = (NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar;

    // BAR0 must be mapped before any MMIO access can be issued.
    if (bar0 == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: register write failed at offset 0x%x\n",
            __FUNCTION__, offset);
        return;
    }

    nvswitch_os_mem_write32(bar0 + offset, data);
}
//
// This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL
// macro in haldef_nvswitch.h

View File

@@ -1320,6 +1320,7 @@ _nvswitch_service_route_nonfatal_ls10
NvU32 pending, bit, unhandled;
NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
NvlStatus status;
report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_STATUS_0);
report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0);
@@ -1375,6 +1376,8 @@ _nvswitch_service_route_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, 0);
_nvswitch_collect_error_info_ls10(device, link,
NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
&data);
@@ -1406,6 +1409,7 @@ _nvswitch_service_route_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_GLT_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_GLT_ECC_ERROR_COUNTER, 0);
_nvswitch_collect_error_info_ls10(device, link,
NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
&data);
@@ -1437,6 +1441,7 @@ _nvswitch_service_route_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_MCRID_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_MCRID_ECC_ERROR_COUNTER, 0);
_nvswitch_collect_error_info_ls10(device, link,
NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
&data);
@@ -1468,6 +1473,7 @@ _nvswitch_service_route_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_EXTMCRID_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_EXTMCRID_ECC_ERROR_COUNTER, 0);
_nvswitch_collect_error_info_ls10(device, link,
NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
&data);
@@ -1499,6 +1505,7 @@ _nvswitch_service_route_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _ROUTE, _ERR_RAM_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_RAM_ECC_ERROR_COUNTER, 0);
_nvswitch_collect_error_info_ls10(device, link,
NVSWITCH_RAW_ERROR_LOG_DATA_FLAG_ROUTE_TIME,
&data);
@@ -1532,8 +1539,20 @@ _nvswitch_service_route_nonfatal_ls10
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_ROUTE_ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -1901,6 +1920,7 @@ _nvswitch_service_ingress_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, "ingress header ECC");
NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, data);
@@ -1954,6 +1974,7 @@ _nvswitch_service_ingress_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_REMAPTAB_ECC_LIMIT_ERR, "ingress remap ECC");
NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_REMAPTAB_ECC_LIMIT_ERR, data);
@@ -1982,6 +2003,7 @@ _nvswitch_service_ingress_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RIDTAB_ECC_LIMIT_ERR, "ingress RID ECC");
NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RIDTAB_ECC_LIMIT_ERR, data);
@@ -2010,6 +2032,7 @@ _nvswitch_service_ingress_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RLANTAB_ECC_LIMIT_ERR, "ingress RLAN ECC");
NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RLANTAB_ECC_LIMIT_ERR, data);
@@ -2135,8 +2158,19 @@ _nvswitch_service_ingress_nonfatal_ls10
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_INGRESS_ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -2184,6 +2218,7 @@ _nvswitch_service_ingress_nonfatal_ls10_err_status_1:
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTAREMAPTAB_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_EXTAREMAPTAB_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_LIMIT_ERR, "ingress ExtA remap ECC");
NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTAREMAPTAB_ECC_LIMIT_ERR, data);
@@ -2212,6 +2247,7 @@ _nvswitch_service_ingress_nonfatal_ls10_err_status_1:
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_EXTBREMAPTAB_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_EXTBREMAPTAB_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_LIMIT_ERR, "ingress ExtB remap ECC");
NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_EXTBREMAPTAB_ECC_LIMIT_ERR, data);
@@ -2240,6 +2276,7 @@ _nvswitch_service_ingress_nonfatal_ls10_err_status_1:
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _INGRESS, _ERR_MCREMAPTAB_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_MCREMAPTAB_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_MCREMAPTAB_ECC_LIMIT_ERR, "ingress MC remap ECC");
NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_MCREMAPTAB_ECC_LIMIT_ERR, data);
@@ -2299,8 +2336,19 @@ _nvswitch_service_ingress_nonfatal_ls10_err_status_1:
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_1,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_INGRESS_ERR_NON_FATAL_REPORT_EN_1,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -2336,6 +2384,7 @@ _nvswitch_service_tstate_nonfatal_ls10
NvU32 pending, bit, unhandled;
NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
NvlStatus status;
report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_STATUS_0);
report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0);
@@ -2453,8 +2502,19 @@ _nvswitch_service_tstate_nonfatal_ls10
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_TSTATE_ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -2736,6 +2796,7 @@ _nvswitch_service_egress_nonfatal_ls10
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, "egress input ECC error limit");
NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, data);
@@ -2770,6 +2831,7 @@ _nvswitch_service_egress_nonfatal_ls10
}
report.data[0] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER);
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, 0);
report.data[1] = NVSWITCH_ENG_RD32(device, NPORT, , link, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_ADDRESS);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, "egress output ECC error limit");
NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, data);
@@ -2805,8 +2867,19 @@ _nvswitch_service_egress_nonfatal_ls10
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_EGRESS_ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -3119,8 +3192,19 @@ _nvswitch_service_egress_nonfatal_ls10_err_status_1:
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_1,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_EGRESS_ERR_NON_FATAL_REPORT_EN_1,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -3609,6 +3693,7 @@ _nvswitch_service_sourcetrack_nonfatal_ls10
NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
NvU32 pending, bit, unhandled;
INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
NvlStatus status;
report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link,
_SOURCETRACK, _ERR_STATUS_0);
@@ -3652,6 +3737,8 @@ _nvswitch_service_sourcetrack_nonfatal_ls10
_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS);
report.data[2] = NVSWITCH_ENG_RD32(device, NPORT, , link, _SOURCETRACK,
_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID);
NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK,
_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER, 0);
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR,
"sourcetrack TCEN0 crumbstore ECC limit err");
@@ -3680,8 +3767,19 @@ _nvswitch_service_sourcetrack_nonfatal_ls10
//
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -3858,6 +3956,7 @@ _nvswitch_service_multicast_nonfatal_ls10
NvU32 pending, bit, unhandled;
NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
NvlStatus status;
report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_STATUS_0);
report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0);
@@ -3983,8 +4082,19 @@ _nvswitch_service_multicast_nonfatal_ls10
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _MULTICASTTSTATE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_MULTICASTTSTATE_ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -4179,6 +4289,7 @@ _nvswitch_service_reduction_nonfatal_ls10
NvU32 pending, bit, unhandled;
NVSWITCH_RAW_ERROR_LOG_TYPE data = {0, { 0 }};
INFOROM_NVS_ECC_ERROR_EVENT err_event = {0};
NvlStatus status;
report.raw_pending = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_STATUS_0);
report.raw_enable = NVSWITCH_ENG_RD32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0);
@@ -4299,8 +4410,19 @@ _nvswitch_service_reduction_nonfatal_ls10
// This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.
if (device->link[link].fatal_error_occurred)
{
NVSWITCH_ENG_WR32(device, NPORT, , link, _REDUCTIONTSTATE, _ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending);
status = nvswitch_soe_update_intr_report_en_ls10(device,
RM_SOE_CORE_ENGINE_ID_NPORT,
link,
RM_SOE_CORE_NPORT_REDUCTIONTSTATE_ERR_NON_FATAL_REPORT_EN_0,
report.raw_enable & ~pending
);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: Disabling NPG[%d] non-fatal interrupts\n", __FUNCTION__, NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _CPU_INTR_LEAF_EN_CLEAR(NV_CTRL_CPU_INTR_NPG_NON_FATAL_IDX),
NVBIT(link / NVSWITCH_LINKS_PER_NPG_LS10));
}
}
if (report.raw_first & report.mask)
@@ -6227,8 +6349,7 @@ _nvswitch_deferred_link_state_check_ls10
lastLinkUpTime = chip_device->deferredLinkErrors[link].state.lastLinkUpTime;
lastRetrainTime = chip_device->deferredLinkErrors[link].state.lastRetrainTime;
// Sanity Check
if (!nvswitch_is_link_valid(device, link))
return;
NVSWITCH_ASSERT(nvswitch_is_link_valid(device, link));
chip_device->deferredLinkErrors[link].state.bLinkStateCallBackEnabled = NV_FALSE;
bRedeferLinkStateCheck = NV_FALSE;
@@ -6676,9 +6797,9 @@ _nvswitch_service_nvltlc_tx_lnk_nonfatal_0_ls10
// Driver WAR to disable ECC error and prevent an interrupt storm.
// TODO: Clear ECC_ERROR_COUNTER by sending command to SOE and remove the WAR.
//
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0,
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable &
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_RAM_ECC_LIMIT_ERR, 1));
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0, _CREQ_RAM_ECC_LIMIT_ERR, 1));
}
bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP_RAM_DAT_ECC_DBE_ERR, 1);
@@ -6698,9 +6819,9 @@ _nvswitch_service_nvltlc_tx_lnk_nonfatal_0_ls10
// Driver WAR to disable ECC error and prevent an interrupt storm.
// TODO: Clear ECC_ERROR_COUNTER by sending command to SOE and remove the WAR.
//
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0,
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable &
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0, _RSP_RAM_ECC_LIMIT_ERR, 1));
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0, _RSP_RAM_ECC_LIMIT_ERR, 1));
}
bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _COM_RAM_DAT_ECC_DBE_ERR, 1);
@@ -6725,9 +6846,9 @@ _nvswitch_service_nvltlc_tx_lnk_nonfatal_0_ls10
// Driver WAR to disable ECC error and prevent an interrupt storm.
// TODO: Clear ECC_ERROR_COUNTER by sending command to SOE and remove the WAR.
//
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0,
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable &
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0, _COM_RAM_ECC_LIMIT_ERR, 1));
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0, _COM_RAM_ECC_LIMIT_ERR, 1));
}
bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP1_RAM_ECC_LIMIT_ERR, 1);
@@ -6740,9 +6861,9 @@ _nvswitch_service_nvltlc_tx_lnk_nonfatal_0_ls10
// Driver WAR to disable ECC error and prevent an interrupt storm.
// TODO: Clear ECC_ERROR_COUNTER by sending command to SOE and remove the WAR.
//
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0,
NVSWITCH_LINK_WR32_LS10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0,
report.raw_enable &
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0, _RSP1_RAM_ECC_LIMIT_ERR, 1));
~DRF_NUM(_NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0, _RSP1_RAM_ECC_LIMIT_ERR, 1));
}
NVSWITCH_UNHANDLED_CHECK(device, unhandled);

View File

@@ -160,6 +160,13 @@ nvswitch_corelib_training_complete_ls10
{
nvswitch_device *device = link->dev->pDevInfo;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return; // NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
nvswitch_init_dlpl_interrupts(link);
_nvswitch_configure_reserved_throughput_counters(link);
@@ -265,6 +272,13 @@ nvswitch_corelib_set_tx_mode_ls10
NvU32 val;
NvlStatus status = NVL_SUCCESS;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLDL, link->linkNumber))
{
NVSWITCH_PRINT(device, ERROR,
@@ -352,6 +366,13 @@ nvswitch_corelib_set_dl_link_mode_ls10
NvBool keepPolling;
NVSWITCH_TIMEOUT timeout;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLDL, link->linkNumber))
{
NVSWITCH_PRINT(device, ERROR,
@@ -530,6 +551,13 @@ nvswitch_corelib_get_rx_detect_ls10
NvlStatus status;
nvswitch_device *device = link->dev->pDevInfo;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
status = nvswitch_minion_get_rxdet_status_ls10(device, link->linkNumber);
if (status != NVL_SUCCESS)
@@ -626,13 +654,22 @@ nvswitch_corelib_get_tl_link_mode_ls10
{
case NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_ACTIVE:
// If using ALI, ensure that the request to active completed
if (link->dev->enableALI)
if (nvswitch_is_tnvl_mode_locked(device))
{
status = nvswitch_wait_for_tl_request_ready_ls10(link);
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
*mode = NVLINK_LINKSTATE_HS;
}
else
{
// If using ALI, ensure that the request to active completed
if (link->dev->enableALI)
{
status = nvswitch_wait_for_tl_request_ready_ls10(link);
}
*mode = (status == NVL_SUCCESS) ? NVLINK_LINKSTATE_HS:NVLINK_LINKSTATE_OFF;
*mode = (status == NVL_SUCCESS) ? NVLINK_LINKSTATE_HS:NVLINK_LINKSTATE_OFF;
}
break;
case NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_L2:
@@ -1039,6 +1076,13 @@ nvswitch_launch_ALI_link_training_ls10
{
NvlStatus status = NVL_SUCCESS;
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT(device, ERROR,
"%s(%d): Security locked\n", __FUNCTION__, __LINE__);
return NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
if ((link == NULL) ||
!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLIPT_LNK, link->linkNumber) ||
(link->linkNumber >= NVSWITCH_NVLINK_MAX_LINKS))

View File

@@ -4625,11 +4625,11 @@ nvswitch_eng_wr_ls10
if (nvswitch_is_tnvl_mode_enabled(device))
{
nvswitch_tnvl_reg_wr_32_ls10(device, eng_id, eng_bcast, eng_instance, base_addr, offset, data);
nvswitch_tnvl_eng_wr_32_ls10(device, eng_id, eng_bcast, eng_instance, base_addr, offset, data);
}
else
{
nvswitch_reg_write_32(device, base_addr + offset, data);
nvswitch_reg_write_32(device, base_addr + offset, data);
}
#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
@@ -4647,6 +4647,33 @@ nvswitch_eng_wr_ls10
#endif //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
}
void
nvswitch_reg_write_32_ls10
(
nvswitch_device *device,
NvU32 offset,
NvU32 data
)
{
if (device->nvlink_device->pciInfo.bars[0].pBar == NULL)
{
NVSWITCH_PRINT(device, ERROR,
"%s: register write failed at offset 0x%x\n",
__FUNCTION__, offset);
return;
}
if (nvswitch_is_tnvl_mode_enabled(device))
{
nvswitch_tnvl_reg_wr_32_ls10(device, offset, data);
}
else
{
// Write the register
nvswitch_os_mem_write32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + offset, data);
}
}
NvU32
nvswitch_get_link_eng_inst_ls10
(
@@ -5756,7 +5783,8 @@ nvswitch_send_inband_nack_ls10
msghdr->status = NV_ERR_FABRIC_MANAGER_NOT_PRESENT;
switch (msghdr->type)
{
case NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ:
case NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ_V2:
case NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ:
msghdr->type = NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_RSP;
break;
default:

View File

@@ -209,38 +209,43 @@ _nvswitch_mc_print_directives
}
#endif // defined(NVSWITCH_MC_TRACE)
//
// Build column-port bitmap. Each 32-bit portmap in the array represents a column.
// Each bit set in the portmap represents the column-relative port offset.
//
// build column-port array
static NvlStatus
_nvswitch_mc_build_cpb
_nvswitch_mc_build_ports_array
(
nvswitch_device *device,
NvU32 num_ports,
NvU32 *spray_group,
NvU32 num_columns,
NvU32 *cpb,
NVSWITCH_COLUMN_PORT_OFFSET_LS10 *ports,
NvU8 *vchop_array_sg,
NvU8 vchop_map[NVSWITCH_MC_NUM_COLUMNS_LS10][NVSWITCH_MC_PORTS_PER_COLUMN_LS10]
)
{
NvU64 ports_used = 0;
NvU32 i, ret;
NVSWITCH_COLUMN_PORT_OFFSET_LS10 cpo;
if ((spray_group == NULL) || (cpb == NULL) || (num_ports == 0) ||
(num_ports > NVSWITCH_NUM_LINKS_LS10))
if ((spray_group == NULL) || (ports == NULL) || (num_ports == 0) ||
(num_ports > NVSWITCH_MC_TCP_LIST_SIZE_LS10))
{
NVSWITCH_PRINT(device, ERROR, "%s: invalid arguments\n", __FUNCTION__);
return -NVL_BAD_ARGS;
}
nvswitch_os_memset(cpb, 0, sizeof(*cpb) * num_columns);
nvswitch_os_memset(vchop_map, 0, sizeof(NvU8) *
NVSWITCH_MC_NUM_COLUMNS_LS10 * NVSWITCH_MC_PORTS_PER_COLUMN_LS10);
for (i = 0; i < num_ports; i++)
{
// prevent duplicate ports
if (ports_used & NVBIT64(spray_group[i]))
{
NVSWITCH_PRINT(device, ERROR, "%s: duplicate port specified: %d\n", __FUNCTION__,
spray_group[i]);
return -NVL_BAD_ARGS;
}
ports_used |= NVBIT64(spray_group[i]);
ret = _nvswitch_get_column_port_offset_ls10(spray_group[i], &cpo);
if (ret != NVL_SUCCESS)
{
@@ -248,14 +253,7 @@ _nvswitch_mc_build_cpb
return ret;
}
if (nvswitch_test_flags(cpb[cpo.column], NVBIT(cpo.port_offset)))
{
NVSWITCH_PRINT(device, ERROR, "%s: duplicate port specified: %d\n", __FUNCTION__,
spray_group[i]);
return -NVL_BAD_ARGS;
}
nvswitch_set_flags(&cpb[cpo.column], NVBIT(cpo.port_offset));
ports[i] = cpo;
if (vchop_array_sg[i] > NVSWITCH_MC_VCHOP_FORCE1)
{
@@ -263,8 +261,6 @@ _nvswitch_mc_build_cpb
vchop_array_sg[i]);
return -NVL_BAD_ARGS;
}
vchop_map[cpo.column][cpo.port_offset] = vchop_array_sg[i];
}
@@ -297,80 +293,6 @@ _is_primary_replica
return NV_FALSE;
}
//
// This function compacts the directive list and updates port_list_size
//
static NvlStatus
_nvswitch_mc_compact_portlist
(
nvswitch_device *device,
NVSWITCH_TCP_DIRECTIVE_LS10 *port_list,
NvU32 *port_list_size
)
{
NvU32 cur_portlist_pos, new_portlist_pos;
NVSWITCH_TCP_DIRECTIVE_LS10 *cur_dir, *old_list;
if (port_list_size == NULL)
{
NVSWITCH_PRINT(device, ERROR, "%s: port list size ptr is null\n", __FUNCTION__);
return -NVL_BAD_ARGS;
}
if ((port_list == NULL) || (*port_list_size == 0))
return NVL_SUCCESS;
if ((*port_list_size) > NVSWITCH_MC_TCP_LIST_SIZE_LS10)
{
NVSWITCH_PRINT(device, ERROR, "%s: port list size out of range\n", __FUNCTION__);
return -NVL_BAD_ARGS;
}
#ifdef NVSWITCH_MC_DEBUG
NVSWITCH_PRINT(device, INFO, "%s: old size: %d\n", __FUNCTION__, *port_list_size);
#endif
// create temporary directive list
old_list = nvswitch_os_malloc(sizeof(NVSWITCH_TCP_DIRECTIVE_LS10) * (*port_list_size));
if (!old_list)
{
NVSWITCH_PRINT(device, ERROR, "%s: error allocating temporary portlist\n", __FUNCTION__);
return -NVL_NO_MEM;
}
nvswitch_os_memcpy(old_list, port_list, sizeof(NVSWITCH_TCP_DIRECTIVE_LS10) * (*port_list_size));
// rebuild list using only valid entries
new_portlist_pos = 0;
for (cur_portlist_pos = 0; cur_portlist_pos < (*port_list_size); cur_portlist_pos++)
{
cur_dir = &old_list[cur_portlist_pos];
if (cur_dir->tcp != NVSWITCH_MC_INVALID)
{
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: valid directive:\n", __FUNCTION__);
_nvswitch_mc_print_directive(device, &old_list[cur_portlist_pos]);
#endif
nvswitch_os_memcpy(&port_list[new_portlist_pos], &old_list[cur_portlist_pos],
sizeof(NVSWITCH_TCP_DIRECTIVE_LS10));
new_portlist_pos++;
}
}
nvswitch_os_free(old_list);
#ifdef NVSWITCH_MC_DEBUG
NVSWITCH_PRINT(device, INFO, "%s: new size: %d\n", __FUNCTION__, new_portlist_pos);
#endif
*port_list_size = new_portlist_pos;
return NVL_SUCCESS;
}
//
// Set the round flags to indicate the size of each multicast round.
// See IAS section "6.12. Consistent MC Semantics" for more info.
@@ -484,118 +406,13 @@ _nvswitch_mc_set_port_flags
}
}
//
// This function "pops" the next port offset from the portlist bitmap.
//
static NV_INLINE NvU8
_nvswitch_mc_get_next_port
(
NvU32 *portmap
)
{
NvU32 port;
if (!portmap)
{
NVSWITCH_ASSERT(0);
return NVSWITCH_MC_NULL_PORT_LS10;
}
//
// We have to do some gymnastics here because LOWESTBITIDX_32 is
// destructive on the input variable, and the result is not assignable.
//
port = *portmap;
LOWESTBITIDX_32(port);
nvswitch_clear_flags(portmap, NVBIT(port));
if (port >= NVSWITCH_MC_PORTS_PER_COLUMN_LS10)
{
NVSWITCH_ASSERT(0);
return NVSWITCH_MC_NULL_PORT_LS10;
}
return (NvU8)port;
}
//
// This helper function generates a map of directive list offsets indexed by tile/column pair
// port offsets. This is used during construction of the directive list to point to where each
// newly constructed directive will be placed in the list. This process has to account for the
// fact that the middle two columns contain 10 ports each, while the rest have 11, all mapping
// into a 32-entry directive list.
//
static NV_INLINE void
_nvswitch_mc_build_mcplist_position_map
(
NvU32 port_offsets_by_tcp[NVSWITCH_MC_NUM_COLUMN_PAIRS_LS10][NVSWITCH_MC_PORTS_PER_COLUMN_LS10]
)
{
NvU32 i, j, tcp;
if (!port_offsets_by_tcp)
{
NVSWITCH_ASSERT(0);
return;
}
for (tcp = 0; tcp < NVSWITCH_MC_NUM_COLUMN_PAIRS_LS10; tcp++)
{
if (tcp == 0)
{
j = 0;
for (i = 0; i < NVSWITCH_MC_PORTS_PER_COLUMN_LS10; i++)
{
port_offsets_by_tcp[tcp][i] = j;
j += NVSWITCH_MC_NUM_COLUMN_PAIRS_LS10;
}
}
if (tcp == 1)
{
j = 1;
for (i = 0; i < NVSWITCH_MC_PORTS_PER_COLUMN_LS10 - 1; i++)
{
port_offsets_by_tcp[tcp][i] = j;
j += NVSWITCH_MC_NUM_COLUMN_PAIRS_LS10;
}
}
if (tcp == 2)
{
j = 2;
for (i = 0; i < NVSWITCH_MC_PORTS_PER_COLUMN_LS10; i++)
{
port_offsets_by_tcp[tcp][i] = (j == NVSWITCH_MC_TCP_LIST_SIZE_LS10) ?
(NVSWITCH_MC_TCP_LIST_SIZE_LS10 - 1) : j;
j += NVSWITCH_MC_NUM_COLUMN_PAIRS_LS10;
}
}
}
}
//
// Wrapper for the NUMSETBITS_32 macro, which is destructive on input.
//
static NV_INLINE NvU32
_nvswitch_mc_get_pop_count
(
NvU32 i
)
{
NvU32 tmp = i;
NUMSETBITS_32(tmp);
return tmp;
}
//
// Build a list of TCP directives. This is the main conversion function which is used to build a
// TCP directive list for each spray group from a given column/port bitmap.
//
// @param device [in] pointer to the nvswitch device struct
// @param cpb [in] pointer to the column/port bitmap used to build directive list
// @param num_ports [in] number of ports
// @param ports [in] ports array (pair of column and port offset)
// @param primary_replica [in] the primary replica port for this spray group, if specified
// @param vchop_map [in] array containing per-port vchop values in column/port format
// @param port_list [out] array where the newly built directive list is written
@@ -605,330 +422,66 @@ static NvlStatus
_nvswitch_mc_build_portlist
(
nvswitch_device *device,
NvU32 *cpb,
NvU32 num_ports,
NVSWITCH_COLUMN_PORT_OFFSET_LS10 *ports,
NvU32 primary_replica,
NvU8 vchop_map[NVSWITCH_MC_NUM_COLUMNS_LS10][NVSWITCH_MC_PORTS_PER_COLUMN_LS10],
NVSWITCH_TCP_DIRECTIVE_LS10 *port_list,
NvU32 *entries_used
)
{
NvU32 ecol_idx, ocol_idx, ecol_portcount, ocol_portcount, ecol_portmap, ocol_portmap;
NvU32 cur_portlist_pos, j, cur_portlist_slot, last_portlist_pos;
NvU8 cur_eport, cur_oport, i;
NvS32 extra_ports;
NvU32 port_offsets_by_tcp[NVSWITCH_MC_NUM_COLUMN_PAIRS_LS10][NVSWITCH_MC_PORTS_PER_COLUMN_LS10];
NvU8 last_portlist_pos;
NvU8 i;
NVSWITCH_TCP_DIRECTIVE_LS10 *cur_dir;
if ((cpb == NULL) || (port_list == NULL))
if ((ports == NULL) || (port_list == NULL))
{
NVSWITCH_PRINT(device, ERROR, "%s: Invalid arguments\n", __FUNCTION__);
return -NVL_BAD_ARGS;
}
_nvswitch_mc_build_mcplist_position_map(port_offsets_by_tcp);
//
// process columns pairwise. if one column is larger than the other by 2 or more entries,
// set the port as alt path
//
cur_portlist_pos = 0;
last_portlist_pos = 0;
cur_portlist_slot = 0;
for ( i = 0; i < NVSWITCH_MC_NUM_COLUMN_PAIRS_LS10; i++ )
// 6.15.2. Route Interface Description
for (i = 0 ; i < num_ports ; ++i)
{
ecol_idx = 2 * i;
ocol_idx = 2 * i + 1;
ecol_portmap = cpb[ecol_idx];
ocol_portmap = cpb[ocol_idx];
NvU8 cur_port = ports[i].port_offset;
NvU32 cur_col = ports[i].column;
ecol_portcount = _nvswitch_mc_get_pop_count(ecol_portmap);
ocol_portcount = _nvswitch_mc_get_pop_count(ocol_portmap);
extra_ports = ecol_portcount - ocol_portcount;
// Start current portlist position on column offset of the current column
cur_portlist_slot = 0;
cur_portlist_pos = port_offsets_by_tcp[i][cur_portlist_slot];
if ( extra_ports >= 0 )
{
//
// even column has more ports or both columns have an equal number
// iterate on odd column port count to go through both columns
//
for (j = 0; j < ocol_portcount; j++, cur_portlist_slot++)
// Populating either EVEN or ODD column but not both
cur_dir = &port_list[i];
cur_dir->primaryReplica = PRIMARY_REPLICA_NONE;
cur_dir->tcp = cur_col/2; // tile-column pair numbering
if (cur_col % 2 == 1)
{ // odd column port
cur_dir->tcpOPort = cur_port;
cur_dir->tcpEPort = NVSWITCH_MC_NULL_PORT_LS10;
cur_dir->tcpOVCHop = vchop_map[cur_col][cur_port];
if (_is_primary_replica(cur_col, cur_port, primary_replica))
{
cur_eport = _nvswitch_mc_get_next_port(&ecol_portmap);
cur_oport = _nvswitch_mc_get_next_port(&ocol_portmap);
if ((cur_eport == NVSWITCH_MC_NULL_PORT_LS10) ||
(cur_oport == NVSWITCH_MC_NULL_PORT_LS10))
{
return -NVL_ERR_GENERIC;
}
cur_dir->primaryReplica = PRIMARY_REPLICA_ODD;
// assign the ports to the current directive
cur_portlist_pos = port_offsets_by_tcp[i][cur_portlist_slot];
cur_dir = &port_list[cur_portlist_pos];
cur_dir->tcpEPort = cur_eport;
cur_dir->tcpOPort = cur_oport;
cur_dir->tcpEVCHop = vchop_map[ecol_idx][cur_eport];
cur_dir->tcpOVCHop = vchop_map[ocol_idx][cur_oport];
cur_dir->tcp = i;
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_eport: %d, cur_oport %d\n",
__FUNCTION__, i, extra_ports, cur_eport, cur_oport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
#ifdef NVSWITCH_MC_DEBUG
NVSWITCH_PRINT(device, DEBUG, "%s: Odd column primary replica programmed: %d %d\n",
__FUNCTION__, primary_replica, i);
#endif
// set primary replica
if (_is_primary_replica(ocol_idx, cur_oport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_ODD;
if (_is_primary_replica(ecol_idx, cur_eport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_EVEN;
}
// if both columns had the same number of ports, move on to the next column pair
if (!extra_ports)
{
last_portlist_pos = NV_MAX(last_portlist_pos, cur_portlist_pos);
continue;
}
//
// otherwise, handle remaining ports in even column
// for the first extra port, assign it directly
// cur_portlist_slot is incremented by the last iteration, or 0
//
cur_eport = _nvswitch_mc_get_next_port(&ecol_portmap);
if (cur_eport == NVSWITCH_MC_NULL_PORT_LS10)
{
return -NVL_ERR_GENERIC;
}
cur_portlist_pos = port_offsets_by_tcp[i][cur_portlist_slot];
cur_dir = &port_list[cur_portlist_pos];
cur_dir->tcpEPort = cur_eport;
cur_dir->tcpEVCHop = vchop_map[ecol_idx][cur_eport];
cur_dir->tcp = i;
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_eport: %d\n",
__FUNCTION__, i, extra_ports, cur_eport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
#endif
// if this is the primary replica port, mark it
if (_is_primary_replica(ecol_idx, cur_eport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_EVEN;
extra_ports--;
// if there are more, assign to altpath
while (extra_ports)
{
// get next port from even column
cur_eport = _nvswitch_mc_get_next_port(&ecol_portmap);
if (cur_eport == NVSWITCH_MC_NULL_PORT_LS10)
{
return -NVL_ERR_GENERIC;
}
// assign it to odd port in current directive (altpath)
cur_dir->tcpOPort = cur_eport;
cur_dir->tcpOAltPath = NV_TRUE;
cur_dir->tcpOVCHop = vchop_map[ecol_idx][cur_eport];
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_eport: %d (alt)\n",
__FUNCTION__, i, extra_ports, cur_eport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
#endif
// if this is the primary replica port, mark _ODD due to altpath
if (_is_primary_replica(ecol_idx, cur_eport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_ODD;
extra_ports--;
// if there are more ports remaining, start the next entry
if (extra_ports)
{
// advance the portlist entry
cur_portlist_slot++;
cur_portlist_pos = port_offsets_by_tcp[i][cur_portlist_slot];
cur_dir = &port_list[cur_portlist_pos];
cur_eport = _nvswitch_mc_get_next_port(&ecol_portmap);
if (cur_eport == NVSWITCH_MC_NULL_PORT_LS10)
{
return -NVL_ERR_GENERIC;
}
cur_dir->tcpEPort = cur_eport;
cur_dir->tcpEVCHop = vchop_map[ecol_idx][cur_eport];
cur_dir->tcp = i;
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_eport: %d\n",
__FUNCTION__, i, extra_ports, cur_eport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
#endif
// if this is the primary replica port, mark it
if (_is_primary_replica(ecol_idx, cur_eport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_EVEN;
extra_ports--;
}
}
}
else
{
// odd column has more ports
extra_ports = -extra_ports;
// iterate over even column to go through port pairs
for (j = 0; j < ecol_portcount; j++, cur_portlist_slot++)
{ // even column port
cur_dir->tcpEPort = cur_port;
cur_dir->tcpOPort = NVSWITCH_MC_NULL_PORT_LS10;
cur_dir->tcpEVCHop = vchop_map[cur_col][cur_port];
if (_is_primary_replica(cur_col, cur_port, primary_replica))
{
cur_eport = _nvswitch_mc_get_next_port(&ecol_portmap);
cur_oport = _nvswitch_mc_get_next_port(&ocol_portmap);
if ((cur_eport == NVSWITCH_MC_NULL_PORT_LS10) ||
(cur_oport == NVSWITCH_MC_NULL_PORT_LS10))
{
return -NVL_ERR_GENERIC;
}
// assign the ports to the current directive
cur_portlist_pos = port_offsets_by_tcp[i][cur_portlist_slot];
cur_dir = &port_list[cur_portlist_pos];
cur_dir->tcpEPort = cur_eport;
cur_dir->tcpOPort = cur_oport;
cur_dir->tcpEVCHop = vchop_map[ecol_idx][cur_eport];
cur_dir->tcpOVCHop = vchop_map[ocol_idx][cur_oport];
cur_dir->tcp = i;
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_eport: %d, cur_oport %d\n",
__FUNCTION__, i, extra_ports, cur_eport, cur_oport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
cur_dir->primaryReplica = PRIMARY_REPLICA_EVEN;
#ifdef NVSWITCH_MC_DEBUG
NVSWITCH_PRINT(device, DEBUG, "%s: Even column primary replica programmed: %d %d\n",
__FUNCTION__, primary_replica, i);
#endif
if (_is_primary_replica(ocol_idx, cur_oport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_ODD;
if (_is_primary_replica(ecol_idx, cur_eport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_EVEN;
}
// handle the leftover ports in odd column
cur_oport = _nvswitch_mc_get_next_port(&ocol_portmap);
if (cur_oport == NVSWITCH_MC_NULL_PORT_LS10)
{
return -NVL_ERR_GENERIC;
}
// cur_portlist_slot is incremented by the last iteration, or 0
cur_portlist_pos = port_offsets_by_tcp[i][cur_portlist_slot];
cur_dir = &port_list[cur_portlist_pos];
cur_dir->tcpOPort = cur_oport;
cur_dir->tcpOVCHop = vchop_map[ocol_idx][cur_oport];
cur_dir->tcp = i;
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_oport %d\n",
__FUNCTION__, i, extra_ports, cur_oport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
#endif
if (_is_primary_replica(ocol_idx, cur_oport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_ODD;
extra_ports--;
// process any remaining ports in odd column
while (extra_ports)
{
// get next odd port
cur_oport = _nvswitch_mc_get_next_port(&ocol_portmap);
if (cur_oport == NVSWITCH_MC_NULL_PORT_LS10)
{
return -NVL_ERR_GENERIC;
}
// set it as even altpath port in current directive
cur_dir->tcpEPort = cur_oport;
cur_dir->tcpEAltPath = NV_TRUE;
cur_dir->tcpEVCHop = vchop_map[ocol_idx][cur_oport];
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_oport %d (alt)\n",
__FUNCTION__, i, extra_ports, cur_oport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
#endif
// if this is the primary replica port, mark _EVEN due to altpath
if (_is_primary_replica(ocol_idx, cur_oport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_EVEN;
extra_ports--;
// if there is another port, it goes in the next directive
if (extra_ports)
{
cur_portlist_slot++;
cur_portlist_pos = port_offsets_by_tcp[i][cur_portlist_slot];
cur_dir = &port_list[cur_portlist_pos];
cur_oport = _nvswitch_mc_get_next_port(&ocol_portmap);
if (cur_oport == NVSWITCH_MC_NULL_PORT_LS10)
{
return -NVL_ERR_GENERIC;
}
cur_dir->tcpOPort = cur_oport;
cur_dir->tcpOVCHop = vchop_map[ocol_idx][cur_oport];
cur_dir->tcp = i;
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: tcp: %d, extra: %d, cur_oport %d\n",
__FUNCTION__, i, extra_ports, cur_oport);
NVSWITCH_PRINT(device, INFO, "%s: cur_portlist_pos: %d\n", __FUNCTION__,
cur_portlist_pos);
#endif
if (_is_primary_replica(ocol_idx, cur_oport, primary_replica))
cur_dir->primaryReplica = PRIMARY_REPLICA_ODD;
extra_ports--;
}
}
}
last_portlist_pos = NV_MAX(last_portlist_pos, cur_portlist_pos);
last_portlist_pos = i;
}
// set the lastRound flag for the last entry in the spray string
@@ -939,8 +492,8 @@ _nvswitch_mc_build_portlist
#ifdef NVSWITCH_MC_DEBUG
NVSWITCH_PRINT(device, INFO,
"%s: entries_used: %d, cur_portlist_pos: %d last_portlist_pos: %d\n",
__FUNCTION__, *entries_used, cur_portlist_pos, last_portlist_pos);
"%s: entries_used: %d, last_portlist_pos: %d\n",
__FUNCTION__, *entries_used, last_portlist_pos);
#endif
return NVL_SUCCESS;
@@ -1068,7 +621,7 @@ nvswitch_mc_build_mcp_list_ls10
NvU32 dir_entries_used_sg = 0;
NvU32 dir_entries_used = 0;
NvU32 mcplist_offset = 0;
NvU32 cpb[NVSWITCH_MC_NUM_COLUMNS_LS10] = { 0 };
NVSWITCH_COLUMN_PORT_OFFSET_LS10 ports[NVSWITCH_NUM_LINKS_LS10];
NvU8 vchop_map[NVSWITCH_MC_NUM_COLUMNS_LS10][NVSWITCH_MC_PORTS_PER_COLUMN_LS10];
NVSWITCH_TCP_DIRECTIVE_LS10 tmp_mcp_list[NVSWITCH_MC_TCP_LIST_SIZE_LS10];
NVSWITCH_TCP_DIRECTIVE_LS10 *mcp_list;
@@ -1108,7 +661,7 @@ nvswitch_mc_build_mcp_list_ls10
return -NVL_BAD_ARGS;
}
if (ports_per_spray_group[i] > NVSWITCH_NUM_LINKS_LS10)
if (ports_per_spray_group[i] > NVSWITCH_MC_TCP_LIST_SIZE_LS10)
{
NVSWITCH_PRINT(device, ERROR, "%s: Too many ports in spray group %d\n",
__FUNCTION__, i);
@@ -1118,7 +671,7 @@ nvswitch_mc_build_mcp_list_ls10
j += ports_per_spray_group[i];
}
if (j > NVSWITCH_NUM_LINKS_LS10)
if (j > NVSWITCH_MC_TCP_LIST_SIZE_LS10)
{
NVSWITCH_PRINT(device, ERROR, "%s: Too many ports specified in total spray groups: %d\n",
__FUNCTION__, j);
@@ -1143,14 +696,13 @@ nvswitch_mc_build_mcp_list_ls10
if (ret != NVL_SUCCESS)
return ret;
ret = _nvswitch_mc_build_cpb(device, spray_group_size, &port_list[spray_group_offset],
NVSWITCH_MC_NUM_COLUMNS_LS10, cpb,
&vchop_array[spray_group_offset], vchop_map);
ret = _nvswitch_mc_build_ports_array(device, spray_group_size, &port_list[spray_group_offset],
ports, &vchop_array[spray_group_offset], vchop_map);
if (ret != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"%s: error building column-port bitmap for spray group %d: %d\n",
"%s: error building port-column array for spray group %d: %d\n",
__FUNCTION__, spray_group_idx, ret);
return ret;
}
@@ -1158,25 +710,6 @@ nvswitch_mc_build_mcp_list_ls10
// Set the offset to this spray group in the mcp list.
spray_group_ptrs[spray_group_idx] = (NvU8)dir_entries_used;
#ifdef NVSWITCH_MC_TRACE
NVSWITCH_PRINT(device, INFO, "%s: spray group offset for group %d is %d\n",
__FUNCTION__, spray_group_idx, dir_entries_used);
for (i = 0; i < NVSWITCH_MC_NUM_COLUMNS_LS10; i++)
{
NVSWITCH_PRINT(device, INFO, "%d Relative ports in column %d\n",
_nvswitch_mc_get_pop_count(cpb[i]), i);
for ( j = 0; j < 32; j++ )
{
if (nvswitch_test_flags(cpb[i], NVBIT(j)))
{
NVSWITCH_PRINT(device, INFO, "%4d", j);
}
}
NVSWITCH_PRINT(device, INFO, "\n");
}
#endif
// if primary replica is specified for this spray group, find the port number
if (replica_valid_array[spray_group_idx])
{
@@ -1220,8 +753,7 @@ nvswitch_mc_build_mcp_list_ls10
NVSWITCH_PRINT(device, INFO, "%s: building tmp mc portlist at mcp offset %d, size %d\n",
__FUNCTION__, mcplist_offset, spray_group_size);
#endif
ret = _nvswitch_mc_build_portlist(device, cpb, primary_replica_port, vchop_map,
ret = _nvswitch_mc_build_portlist(device, spray_group_size, ports, primary_replica_port, vchop_map,
tmp_mcp_list, &dir_entries_used_sg);
if (ret != NVL_SUCCESS)
@@ -1234,14 +766,6 @@ nvswitch_mc_build_mcp_list_ls10
NVSWITCH_PRINT(device, INFO, "%s: entries used after building portlist: %d\n",
__FUNCTION__, dir_entries_used_sg);
#endif
ret = _nvswitch_mc_compact_portlist(device, tmp_mcp_list, &dir_entries_used_sg);
if (ret != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: error compacting MC portlist\n", __FUNCTION__);
return ret;
}
_nvswitch_mc_set_round_flags(tmp_mcp_list, dir_entries_used_sg);
_nvswitch_mc_set_port_flags(tmp_mcp_list, dir_entries_used_sg);
@@ -1306,7 +830,7 @@ nvswitch_mc_unwind_directives_ls10
{
NvU32 ret = NVL_SUCCESS;
NvU32 i, port_idx, cur_sg, ports_in_cur_sg, port, primary_replica;
NVSWITCH_TCP_DIRECTIVE_LS10 cur_dir, prev_dir;
NVSWITCH_TCP_DIRECTIVE_LS10 cur_dir;
cur_sg = 0;
port_idx = 0;
@@ -1344,33 +868,30 @@ nvswitch_mc_unwind_directives_ls10
}
//
// If the previous TCP directive's portFlag = 0, and if it was not
// used to select the even or odd port of its predecessor, and this
// directive's portFlag == 1, this TCP directive contains the
// If directive's portFlag == 1, this TCP directive contains the
// primary replica, and the next TCP directive's portFlag = 0/1
// selects the even/odd port of this TCP directive.
//
// If we don't have the first or last directive and portFlag == 1
else if ((i < (NVSWITCH_MC_TCP_LIST_SIZE_LS10 - 1)) && (i > 0) && (cur_dir.portFlag == 1))
else if ((i < (NVSWITCH_MC_TCP_LIST_SIZE_LS10 - 1)) && (cur_dir.portFlag == 1))
{
prev_dir = directives[i - 1];
// Is the previous directive in the same sg and is the portFlag == 0?
if ((prev_dir.lastRound == 0) && (prev_dir.portFlag == 0))
{
// Check if there is no predecessor, or if the predecessor's portFlag == 0
if ((i < 2) || (directives[i - 2].portFlag == 0))
{
// The next directive's portFlags specify even or odd
if (directives[i + 1].portFlag)
primary_replica = PRIMARY_REPLICA_ODD;
else
primary_replica = PRIMARY_REPLICA_EVEN;
}
}
// The next directive's portFlags specify even or odd
if (directives[i + 1].portFlag)
primary_replica = PRIMARY_REPLICA_ODD;
else
primary_replica = PRIMARY_REPLICA_EVEN;
}
#ifdef NVSWITCH_MC_TRACE
if (primary_replica == PRIMARY_REPLICA_ODD) {
NVSWITCH_PRINT(device, INFO, "%s: Odd primary replica detected: %d\n",
__FUNCTION__, i);
}
if (primary_replica == PRIMARY_REPLICA_EVEN) {
NVSWITCH_PRINT(device, INFO, "%s: Even primary replica detected: %d\n",
__FUNCTION__, i);
}
#endif
if (cur_dir.tcpEPort != NVSWITCH_MC_NULL_PORT_LS10)
{
ports_in_cur_sg++;
@@ -1862,4 +1383,3 @@ nvswitch_mc_read_mc_rid_entry_ls10
return NVL_SUCCESS;
}

View File

@@ -666,6 +666,87 @@ nvswitch_soe_issue_ingress_stop_ls10
return NVL_SUCCESS;
}
/*
* @Brief : Perform register writes in SOE during TNVL
*
* @param[in] device
* @param[in] eng_id
* @param[in] eng_instance
* @param[in] reg
* @param[in] data
*/
NvlStatus
nvswitch_soe_update_intr_report_en_ls10
(
nvswitch_device *device,
RM_SOE_CORE_ENGINE_ID eng_id,
NvU32 eng_instance,
RM_SOE_CORE_NPORT_REPORT_EN_REGISTER reg,
NvU32 data
)
{
FLCN *pFlcn;
NvU32 cmdSeqDesc = 0;
NV_STATUS status;
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
RM_SOE_CORE_CMD_ERROR_REPORT_EN *pErrorReportEnable;
NVSWITCH_GET_BIOS_INFO_PARAMS params = { 0 };
if (!nvswitch_is_soe_supported(device))
{
NVSWITCH_PRINT(device, INFO,
"%s: SOE is not supported\n",
__FUNCTION__);
return -NVL_ERR_NOT_SUPPORTED;
}
status = device->hal.nvswitch_ctrl_get_bios_info(device, &params);
if ((status != NVL_SUCCESS) || ((params.version & SOE_VBIOS_VERSION_MASK) <
SOE_VBIOS_REVLOCK_REPORT_EN))
{
NVSWITCH_PRINT(device, INFO,
"%s: Unable to update REPORT_EN register and disabiling NVLW interrupt. Update firmware "
"from .%02x to .%02x\n",
__FUNCTION__, (NvU32)((params.version & SOE_VBIOS_VERSION_MASK) >> 16),
SOE_VBIOS_REVLOCK_REPORT_EN);
return -NVL_ERR_NOT_SUPPORTED;
}
pFlcn = device->pSoe->pFlcn;
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_CORE;
cmd.hdr.size = RM_SOE_CMD_SIZE(CORE, ERROR_REPORT_EN);
pErrorReportEnable = &cmd.cmd.core.enableErrorReport;
pErrorReportEnable->cmdType = RM_SOE_CORE_CMD_UPDATE_INTR_REPORT_EN;
pErrorReportEnable->engId = RM_SOE_CORE_ENGINE_ID_NPORT;
pErrorReportEnable->engInstance = eng_instance;
pErrorReportEnable->reg = reg;
pErrorReportEnable->data = data;
nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg
NULL, // pPayload
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to update [0x%x] REPORT_EN register through SOE, status 0x%x\n",
__FUNCTION__, reg, status);
return -NVL_ERR_GENERIC;
}
return NVL_SUCCESS;
}
/*
* @Brief : Perform register writes in SOE during TNVL
*
@@ -697,11 +778,19 @@ nvswitch_soe_reg_wr_32_ls10
return NVL_SUCCESS; // -NVL_ERR_NOT_SUPPORTED
}
if (device->nvlink_device->pciInfo.bars[0].pBar == NULL)
{
NVSWITCH_PRINT(device, ERROR,
"%s: register write failed at offset 0x%x\n",
__FUNCTION__, offset);
return -NVL_IO_ERROR;
}
status = device->hal.nvswitch_ctrl_get_bios_info(device, &params);
if ((status != NVL_SUCCESS) || ((params.version & SOE_VBIOS_VERSION_MASK) <
SOE_VBIOS_REVLOCK_ISSUE_REGISTER_WRITE))
SOE_VBIOS_REVLOCK_SOE_PRI_CHECKS))
{
nvswitch_reg_write_32(device, offset, data);
nvswitch_os_mem_write32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + offset, data);
return NVL_SUCCESS;
}
@@ -736,6 +825,96 @@ nvswitch_soe_reg_wr_32_ls10
return NVL_SUCCESS;
}
/*
* @Brief : Perform engine writes in SOE during TNVL
*
* @param[in] device
* @param[in] eng_id NVSWITCH_ENGINE_ID*
* @param[in] eng_bcast NVSWITCH_GET_ENG_DESC_TYPE*
* @param[in] eng_instance
* @param[in] base_addr
* @param[in] offset
* @param[in] data
*/
NvlStatus
nvswitch_soe_eng_wr_32_ls10
(
nvswitch_device *device,
NVSWITCH_ENGINE_ID eng_id,
NvU32 eng_bcast,
NvU32 eng_instance,
NvU32 base_addr,
NvU32 offset,
NvU32 data
)
{
FLCN *pFlcn;
NvU32 cmdSeqDesc = 0;
NV_STATUS status;
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
RM_SOE_TNVL_CMD_ENGINE_WRITE *pEngineWrite;
NVSWITCH_GET_BIOS_INFO_PARAMS params = { 0 };
if (!nvswitch_is_soe_supported(device))
{
NVSWITCH_PRINT(device, INFO,
"%s: SOE is not supported\n",
__FUNCTION__);
return NVL_SUCCESS; // -NVL_ERR_NOT_SUPPORTED
}
if (device->nvlink_device->pciInfo.bars[0].pBar == NULL)
{
NVSWITCH_PRINT(device, ERROR,
"%s: register write failed at offset 0x%x\n",
__FUNCTION__, offset);
return -NVL_IO_ERROR;
}
status = device->hal.nvswitch_ctrl_get_bios_info(device, &params);
if ((status != NVL_SUCCESS) || ((params.version & SOE_VBIOS_VERSION_MASK) <
SOE_VBIOS_REVLOCK_SOE_PRI_CHECKS))
{
nvswitch_os_mem_write32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + offset, data);
return NVL_SUCCESS;
}
pFlcn = device->pSoe->pFlcn;
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_TNVL;
cmd.hdr.size = RM_SOE_CMD_SIZE(TNVL, ENGINE_WRITE);
pEngineWrite = &cmd.cmd.tnvl.engineWrite;
pEngineWrite->cmdType = RM_SOE_TNVL_CMD_ISSUE_ENGINE_WRITE;
pEngineWrite->eng_id = eng_id;
pEngineWrite->eng_bcast = eng_bcast;
pEngineWrite->eng_instance = eng_instance;
pEngineWrite->base = base_addr;
pEngineWrite->offset = offset;
pEngineWrite->data = data;
nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg
NULL, // pPayload
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to send ENGINE_WRITE command to SOE, offset = 0x%x, data = 0x%x\n",
__FUNCTION__, offset, data);
return -NVL_ERR_GENERIC;
}
return NVL_SUCCESS;
}
/*
* @Brief : Init sequence for SOE FSP RISCV image
*
@@ -1009,7 +1188,6 @@ _soeService_LS10
)
{
NvBool bRecheckMsgQ = NV_FALSE;
NvBool bRecheckPrintQ = NV_FALSE;
NvU32 clearBits = 0;
NvU32 intrStatus;
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
@@ -1075,8 +1253,6 @@ _soeService_LS10
NVSWITCH_PRINT(device, INFO,
"%s: Received a SWGEN1 interrupt\n",
__FUNCTION__);
flcnDebugBufferDisplay_HAL(device, pFlcn);
bRecheckPrintQ = NV_TRUE;
}
// Clear any sources that were serviced and get the new status.
@@ -1112,22 +1288,6 @@ _soeService_LS10
}
}
//
// If we just processed a SWGEN1 interrupt (Debug Buffer interrupt), peek
// into the Debug Buffer and see if any text was missed the last time
// the buffer was displayed (above). If it is not empty, re-generate SWGEN1
// (since it is now cleared) and exit. As long as an interrupt is pending,
// this function will be re-entered and the message(s) will be processed.
//
if (bRecheckPrintQ)
{
if (!flcnDebugBufferIsEmpty_HAL(device, pFlcn))
{
flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQSSET,
DRF_DEF(_PFALCON, _FALCON_IRQSSET, _SWGEN1, _SET));
}
}
flcnIntrRetrigger_HAL(device, pFlcn);
return intrStatus;

View File

@@ -936,6 +936,7 @@ nvswitch_nvs_top_prod_ls10
NVSWITCH_ENG_WR32(device, SYS_PRI_RS_CTRL, , 0, _PPRIV_RS_CTRL_SYS, _CG1,
DRF_DEF(_PPRIV_RS_CTRL_SYS, _CG1, _SLCG, __PROD));
#if 0
NVSWITCH_ENG_WR32(device, XAL, , 0, _XAL_EP, _CG,
DRF_DEF(_XAL_EP, _CG, _IDLE_CG_DLY_CNT, __PROD) |
DRF_DEF(_XAL_EP, _CG, _IDLE_CG_EN, __PROD) |
@@ -961,7 +962,8 @@ nvswitch_nvs_top_prod_ls10
DRF_DEF(_XAL_EP, _CG1, _SLCG_TXMAP, __PROD) |
DRF_DEF(_XAL_EP, _CG1, _SLCG_UNROLL_MEM, __PROD) |
DRF_DEF(_XAL_EP, _CG1, _SLCG_UPARB, __PROD));
#endif //0
NVSWITCH_ENG_WR32(device, XPL, , 0, _XPL, _PL_PAD_CTL_PRI_XPL_RXCLK_CG,
DRF_DEF(_XPL, _PL_PAD_CTL_PRI_XPL_RXCLK_CG, _IDLE_CG_DLY_CNT, __PROD) |
DRF_DEF(_XPL, _PL_PAD_CTL_PRI_XPL_RXCLK_CG, _IDLE_CG_EN, __PROD) |

View File

@@ -32,6 +32,12 @@
#include "nvswitch/ls10/dev_nvlsaw_ip_addendum.h"
#include "nvswitch/ls10/dev_ctrl_ip.h"
#include "nvswitch/ls10/dev_ctrl_ip_addendum.h"
#include "nvswitch/ls10/dev_cpr_ip.h"
#include "nvswitch/ls10/dev_npg_ip.h"
#include "nvswitch/ls10/dev_fsp_pri.h"
#include "nvswitch/ls10/dev_soe_ip.h"
#include "nvswitch/ls10/ptop_discovery_ip.h"
#include "nvswitch/ls10/dev_minion_ip.h"
#include <stddef.h>
@@ -1056,7 +1062,7 @@ nvswitch_tnvl_get_status_ls10
}
static NvBool
_nvswitch_reg_cpu_write_allow_list_ls10
_nvswitch_tnvl_eng_wr_cpu_allow_list_ls10
(
nvswitch_device *device,
NVSWITCH_ENGINE_ID eng_id,
@@ -1070,15 +1076,43 @@ _nvswitch_reg_cpu_write_allow_list_ls10
case NVSWITCH_ENGINE_ID_FSP:
return NV_TRUE;
case NVSWITCH_ENGINE_ID_SAW:
{
if (offset == NV_NVLSAW_DRIVER_ATTACH_DETACH)
return NV_TRUE;
break;
}
case NVSWITCH_ENGINE_ID_NPG:
{
if ((offset == NV_NPG_INTR_RETRIGGER(0)) ||
(offset == NV_NPG_INTR_RETRIGGER(1)))
return NV_TRUE;
break;
}
case NVSWITCH_ENGINE_ID_CPR:
{
if ((offset == NV_CPR_SYS_INTR_RETRIGGER(0)) ||
(offset == NV_CPR_SYS_INTR_RETRIGGER(1)))
return NV_TRUE;
break;
}
case NVSWITCH_ENGINE_ID_MINION:
{
if ((offset == NV_MINION_NVLINK_DL_STAT(0)) ||
(offset == NV_MINION_NVLINK_DL_STAT(1)) ||
(offset == NV_MINION_NVLINK_DL_STAT(2)) ||
(offset == NV_MINION_NVLINK_DL_STAT(3)))
return NV_TRUE;
break;
}
default :
return NV_FALSE;
}
return NV_FALSE;
}
void
nvswitch_tnvl_reg_wr_32_ls10
nvswitch_tnvl_eng_wr_32_ls10
(
nvswitch_device *device,
NVSWITCH_ENGINE_ID eng_id,
@@ -1089,45 +1123,124 @@ nvswitch_tnvl_reg_wr_32_ls10
NvU32 data
)
{
if (!nvswitch_is_tnvl_mode_enabled(device))
if (device->nvlink_device->pciInfo.bars[0].pBar == NULL)
{
NVSWITCH_PRINT(device, ERROR,
"%s: TNVL mode is not enabled\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
"%s: register write failed at offset 0x%x\n",
__FUNCTION__, offset);
return;
}
if (!nvswitch_is_tnvl_mode_enabled(device))
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
"ENG reg-write failed. TNVL mode is not enabled\n");
return;
}
if (_nvswitch_tnvl_eng_wr_cpu_allow_list_ls10(device, eng_id, offset))
{
nvswitch_os_mem_write32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + base_addr + offset, data);
return;
}
if (nvswitch_is_tnvl_mode_locked(device))
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
"TNVL ENG_WR failure - 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
eng_id, eng_instance, eng_bcast, base_addr, offset, data);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
"TNVL mode is locked\n");
return;
}
if (nvswitch_soe_eng_wr_32_ls10(device, eng_id, eng_bcast, eng_instance, base_addr, offset, data) != NVL_SUCCESS)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
"TNVL ENG_WR failure - 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
eng_id, eng_instance, eng_bcast, base_addr, offset, data);
NVSWITCH_PRINT(device, ERROR,
"%s: TNVL mode is locked\n",
__FUNCTION__);
"%s: SOE ENG_WR failed for 0x%x[%d] %s @0x%08x+0x%06x = 0x%08x\n",
__FUNCTION__,
eng_id, eng_instance,
(
(eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" :
(eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" :
(eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" :
"??"
),
base_addr, offset, data);
}
}
/*!
 * @brief Check whether a raw BAR0 offset is on the CPU write allow-list
 *        while TNVL mode is enabled.
 *
 * Only the FSP register range and the SOE unicast register window may be
 * written directly by the CPU.
 *
 * @param[in] device        nvswitch device
 * @param[in] offset        BAR0 register offset
 *
 * @return NV_TRUE if the CPU may write this offset directly, NV_FALSE otherwise.
 */
static NvBool
_nvswitch_tnvl_reg_wr_cpu_allow_list_ls10
(
    nvswitch_device *device,
    NvU32 offset
)
{
    NvBool bAllowed = NV_FALSE;

    if ((offset >= DRF_BASE(NV_PFSP)) &&
        (offset <= DRF_EXTENT(NV_PFSP)))
    {
        // FSP register range.
        bAllowed = NV_TRUE;
    }
    else if ((offset >= NV_PTOP_UNICAST_SW_DEVICE_BASE_SOE_0 + DRF_BASE(NV_SOE)) &&
             (offset <= NV_PTOP_UNICAST_SW_DEVICE_BASE_SOE_0 + DRF_EXTENT(NV_SOE)))
    {
        // SOE unicast register window.
        bAllowed = NV_TRUE;
    }

    return bAllowed;
}
/*!
 * @brief BAR0 register write entry point for LS10 when TNVL mode may be active.
 *
 * Routing rules:
 *   - If BAR0 is not mapped, the write is dropped with an error and assert.
 *   - If TNVL mode is not enabled, the write is rejected with an SXID error.
 *   - Offsets on the CPU allow-list (FSP/SOE ranges) are written directly
 *     through the BAR0 mapping.
 *   - Once TNVL mode is locked, all other writes are rejected with SXID errors.
 *   - Otherwise (TNVL enabled, not locked) the write is proxied through SOE.
 *
 * @param[in] device        nvswitch device
 * @param[in] offset        BAR0 register offset
 * @param[in] data          value to write
 */
void
nvswitch_tnvl_reg_wr_32_ls10
(
    nvswitch_device *device,
    NvU32 offset,
    NvU32 data
)
{
    // Cannot touch hardware without a mapped BAR0.
    if (device->nvlink_device->pciInfo.bars[0].pBar == NULL)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: register write failed at offset 0x%x\n",
            __FUNCTION__, offset);
        NVSWITCH_ASSERT(0);
        return;
    }

    if (!nvswitch_is_tnvl_mode_enabled(device))
    {
        NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
            "Reg-write failed. TNVL mode is not enabled\n");
        return;
    }

    // Allow-listed offsets may be written directly by the CPU even under TNVL.
    if (_nvswitch_tnvl_reg_wr_cpu_allow_list_ls10(device, offset))
    {
        nvswitch_os_mem_write32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + offset, data);
        return;
    }

    if (nvswitch_is_tnvl_mode_locked(device))
    {
        NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
            "TNVL REG_WR failure - 0x%08x, 0x%08x\n", offset, data);
        NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
            "TNVL mode is locked\n");
        return;
    }

    // TNVL enabled but not yet locked: proxy the write through SOE.
    if (nvswitch_soe_reg_wr_32_ls10(device, offset, data) != NVL_SUCCESS)
    {
        NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_TNVL_ERROR,
            "TNVL REG_WR failure - 0x%08x, 0x%08x\n", offset, data);
    }
}

View File

@@ -1557,6 +1557,7 @@ nvswitch_send_nack_or_drop
{
switch(msghdr->type)
{
case NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ_V2:
case NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ:
device->hal.nvswitch_send_inband_nack(device, (NvU32 *)msghdr, linkId);
NVSWITCH_PRINT(device, ERROR,
@@ -4983,10 +4984,7 @@ nvswitch_reg_write_32
device->nvlink_device->pciInfo.bars[0].baseAddr, offset, data);
#endif
// Write the register
nvswitch_os_mem_write32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + offset, data);
return;
device->hal.nvswitch_reg_write_32(device, offset, data);
}
NvU64
@@ -6093,6 +6091,141 @@ _nvswitch_ctrl_set_device_tnvl_lock
return status;
}
/*
 * Service ioctls supported when TNVL mode is locked.
 *
 * Once TNVL mode is locked, only this reduced (read-mostly/telemetry)
 * set of control commands is dispatched; any other command is rejected
 * with -NVL_ERR_INSUFFICIENT_PERMISSIONS.
 *
 * @param[in]  device     nvswitch device
 * @param[in]  cmd        CTRL_NVSWITCH_* command id
 * @param[in]  params     command parameter buffer (must be non-NULL)
 * @param[in]  size       size of the parameter buffer
 * @param[in]  osPrivate  OS-private handle used for privilege checks
 *
 * @return NVL_SUCCESS or an NVL_* error code from the dispatched handler.
 */
NvlStatus
nvswitch_lib_ctrl_tnvl_lock_only
(
nvswitch_device *device,
NvU32 cmd,
void *params,
NvU64 size,
void *osPrivate
)
{
// NOTE: retval is assigned by the NVSWITCH_DEV_CMD_DISPATCH* macro
// expansions below before the fall-through to "return retval".
NvlStatus retval;
NvU64 flags = 0;
if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device) || params == NULL)
{
return -NVL_BAD_ARGS;
}
// Privileged commands require admin or fabric-manager privilege.
flags = NVSWITCH_DEV_CMD_CHECK_ADMIN | NVSWITCH_DEV_CMD_CHECK_FM;
// Each DISPATCH entry validates the param size and invokes the handler.
switch (cmd)
{
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_INFOROM_VERSION,
_nvswitch_ctrl_get_inforom_version,
NVSWITCH_GET_INFOROM_VERSION_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_NVLINK_MAX_ERROR_RATES,
_nvswitch_ctrl_get_inforom_nvlink_max_correctable_error_rate,
NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_NVLINK_ERROR_COUNTS,
_nvswitch_ctrl_get_inforom_nvlink_errors,
NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_ECC_ERROR_COUNTS,
_nvswitch_ctrl_get_inforom_ecc_errors,
NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_SXIDS,
_nvswitch_ctrl_get_inforom_bbx_sxid,
NVSWITCH_GET_SXIDS_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_SYS_INFO,
_nvswitch_ctrl_get_inforom_bbx_sys_info,
NVSWITCH_GET_SYS_INFO_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_TIME_INFO,
_nvswitch_ctrl_get_inforom_bbx_time_info,
NVSWITCH_GET_TIME_INFO_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_TEMP_DATA,
_nvswitch_ctrl_get_inforom_bbx_temp_data,
NVSWITCH_GET_TEMP_DATA_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_TEMP_SAMPLES,
_nvswitch_ctrl_get_inforom_bbx_temp_samples,
NVSWITCH_GET_TEMP_SAMPLES_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH(
CTRL_NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN,
_nvswitch_ctrl_get_attestation_certificate_chain,
NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(
CTRL_NVSWITCH_GET_ATTESTATION_REPORT,
_nvswitch_ctrl_get_attestation_report,
NVSWITCH_GET_ATTESTATION_REPORT_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(
CTRL_NVSWITCH_GET_TNVL_STATUS,
_nvswitch_ctrl_get_tnvl_status,
NVSWITCH_GET_TNVL_STATUS_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_SET_FM_DRIVER_STATE,
nvswitch_ctrl_set_fm_driver_state,
NVSWITCH_SET_FM_DRIVER_STATE_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_ERRORS,
nvswitch_ctrl_get_errors,
NVSWITCH_GET_ERRORS_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_BIOS_INFO,
_nvswitch_ctrl_get_bios_info,
NVSWITCH_GET_BIOS_INFO_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_TEMPERATURE,
_nvswitch_ctrl_therm_read_temperature,
NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(
CTRL_NVSWITCH_GET_TEMPERATURE_LIMIT,
_nvswitch_ctrl_therm_get_temperature_limit,
NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_FATAL_ERROR_SCOPE,
_nvswitch_ctrl_get_fatal_error_scope,
NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_INFO,
_nvswitch_ctrl_get_info,
NVSWITCH_GET_INFO);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_VOLTAGE,
_nvswitch_ctrl_therm_read_voltage,
NVSWITCH_CTRL_GET_VOLTAGE_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_POWER,
_nvswitch_ctrl_therm_read_power,
NVSWITCH_GET_POWER_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_NVLINK_STATUS,
_nvswitch_ctrl_get_nvlink_status,
NVSWITCH_GET_NVLINK_STATUS_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_NVLINK_ECC_ERRORS,
_nvswitch_ctrl_get_nvlink_ecc_errors,
NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_INTERNAL_LATENCY,
_nvswitch_ctrl_get_internal_latency,
NVSWITCH_GET_INTERNAL_LATENCY);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_NVLINK_ERROR_THRESHOLD,
_nvswitch_ctrl_set_nvlink_error_threshold,
NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_NVLINK_ERROR_THRESHOLD,
_nvswitch_ctrl_get_nvlink_error_threshold,
NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS);
default:
// Anything not listed above is not permitted while TNVL is locked.
nvswitch_os_print(NVSWITCH_DBG_LEVEL_INFO, "ioctl %x is not permitted when TNVL is locked\n", cmd);
return -NVL_ERR_INSUFFICIENT_PERMISSIONS;
}
return retval;
}
NvlStatus
nvswitch_lib_ctrl
(
@@ -6106,6 +6239,11 @@ nvswitch_lib_ctrl
NvlStatus retval;
NvU64 flags = 0;
if (nvswitch_is_tnvl_mode_locked(device))
{
return nvswitch_lib_ctrl_tnvl_lock_only(device, cmd, params, size, osPrivate);
}
if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device) || params == NULL)
{
return -NVL_BAD_ARGS;