515.43.04

commit 1739a20efc
Author: Andy Ritger
Date:   2022-05-09 13:18:59 -07:00

2519 changed files with 1060036 additions and 0 deletions


@@ -0,0 +1,243 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "bios_nvswitch.h"
#include "error_nvswitch.h"
#include "rmsoecmdif.h"
#include "nvswitch/lr10/dev_ext_devices.h"
#include "flcn/flcn_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
static NvlStatus
_nvswitch_core_bios_read
(
nvswitch_device *device,
NvU8 readType,
NvU32 reqSize,
NvU8 *pData
)
{
#define MAX_READ_SIZE 0x2000
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
NvU32 cmdSeqDesc = 0;
NV_STATUS status;
FLCN *pFlcn = NULL;
RM_SOE_CORE_CMD_BIOS *pParams = &cmd.cmd.core.bios;
NvU64 dmaHandle = 0;
NvU8 *pReadBuffer = NULL;
NvU32 spiReadCnt = 0;
NvU32 offset = 0;
NvU32 bufferSize = (reqSize < SOE_DMA_MAX_SIZE) ? SOE_DMA_MAX_SIZE : MAX_READ_SIZE;
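    //
    // Bounce buffer sizing: requests smaller than SOE_DMA_MAX_SIZE still get a
    // minimum DMA-able buffer; anything larger is read in MAX_READ_SIZE chunks
    // by the loop below.
    //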
// Create DMA mapping for SOE CORE transactions
status = nvswitch_os_alloc_contig_memory(device->os_handle,
(void**)&pReadBuffer, bufferSize, (device->dma_addr_width == 32));
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to allocate contig memory\n");
return status;
}
status = nvswitch_os_map_dma_region(device->os_handle,
pReadBuffer,
&dmaHandle,
bufferSize,
NVSWITCH_DMA_DIR_TO_SYSMEM);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to map dma region to sysmem\n");
nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize);
return status;
}
pFlcn = device->pSoe->pFlcn;
while (offset < reqSize)
{
nvswitch_os_memset(pReadBuffer, 0, bufferSize);
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_CORE;
cmd.hdr.size = sizeof(cmd);
cmd.cmd.core.bios.cmdType = readType;
RM_FLCN_U64_PACK(&pParams->dmaHandle, &dmaHandle);
pParams->offset = offset;
pParams->sizeInBytes = NV_MIN((reqSize - offset), MAX_READ_SIZE);
cmdSeqDesc = 0;
status = nvswitch_os_sync_dma_region_for_device(device->os_handle, dmaHandle,
bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM);
if (status != NV_OK)
{
nvswitch_os_unmap_dma_region(device->os_handle, pReadBuffer, dmaHandle,
bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM);
nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize);
NVSWITCH_PRINT(device, ERROR, "Failed to yield to DMA controller\n");
return status;
}
nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 4, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg - not used for now
NULL, // pPayload - not used for now
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR, "%s: CORE read failed. rc:%d\n",
__FUNCTION__, status);
break;
}
status = nvswitch_os_sync_dma_region_for_cpu(device->os_handle, dmaHandle,
bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR, "DMA controller failed to yield back\n");
break;
}
if (readType == RM_SOE_CORE_CMD_READ_BIOS)
{
nvswitch_os_memcpy(((NvU8*)&pData[offset]), pReadBuffer, pParams->sizeInBytes);
}
else if (readType == RM_SOE_CORE_CMD_READ_BIOS_SIZE)
{
nvswitch_os_memcpy(((NvU8*)pData), pReadBuffer, reqSize);
break;
}
offset += pParams->sizeInBytes;
spiReadCnt++;
}
nvswitch_os_unmap_dma_region(device->os_handle, pReadBuffer, dmaHandle,
bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM);
nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize);
return status;
}
NvlStatus
nvswitch_bios_read
(
nvswitch_device *device,
NvU32 size,
void *pData
)
{
return _nvswitch_core_bios_read(device, RM_SOE_CORE_CMD_READ_BIOS, size, (NvU8*)pData);
}
NvlStatus
nvswitch_bios_read_size
(
nvswitch_device *device,
NvU32 *pSize
)
{
if (pSize == NULL)
{
return -NVL_BAD_ARGS;
}
return _nvswitch_core_bios_read(device, RM_SOE_CORE_CMD_READ_BIOS_SIZE, sizeof(NvU32), (NvU8*)pSize);
}
/*!
 * @brief Retrieves the BIOS image via SOE's CORE task.
 * This function needs SOE to be initialized for the CORE task to respond.
 * Upon success the BIOS image will be placed in device->biosImage.
* @param[in/out] device - pointer to the nvswitch device.
*/
NvlStatus
nvswitch_bios_get_image
(
nvswitch_device *device
)
{
NvU8 *pBiosRawBuffer = NULL;
NvlStatus status = NVL_SUCCESS;
NvU32 biosSize = 0;
if (device->biosImage.pImage != NULL)
{
NVSWITCH_PRINT(device, ERROR,
"NVRM: %s: bios already available, skip reading"
"\n", __FUNCTION__);
return NVL_SUCCESS;
}
if (!device->pSoe)
{
NVSWITCH_PRINT(device, ERROR,
"%s: SOE not initialized yet. \n",
__FUNCTION__);
return NVL_SUCCESS;
}
status = nvswitch_bios_read_size(device, &biosSize);
if (status != NVL_SUCCESS || biosSize == 0)
{
NVSWITCH_PRINT(device, ERROR,
"NVRM: %s: bios read size failed"
"\n", __FUNCTION__);
return status;
}
NVSWITCH_PRINT(device, SETUP,
"NVRM: %s: BIOS Size = 0x%x"
"\n", __FUNCTION__, biosSize);
pBiosRawBuffer = (NvU8*) nvswitch_os_malloc(biosSize);
if (pBiosRawBuffer == NULL)
{
NVSWITCH_PRINT(device, ERROR,
"%s : failed memory allocation"
"\n", __FUNCTION__);
return -NVL_NO_MEM;
}
nvswitch_os_memset(pBiosRawBuffer, 0, biosSize);
status = nvswitch_bios_read(device, biosSize, pBiosRawBuffer);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, " Failed to retrieve BIOS image, Code 0x%x\n", status);
nvswitch_os_free(pBiosRawBuffer);
return status;
}
device->biosImage.pImage = pBiosRawBuffer;
device->biosImage.size = biosSize;
return NVL_SUCCESS;
}
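
/*
 * Illustrative sketch (not part of this commit): how a caller might use the
 * helpers above once SOE is up. The call site and surrounding init path are
 * assumptions; only nvswitch_bios_get_image() and device->biosImage come from
 * this file.
 */
static NvlStatus
_example_cache_bios_image
(
    nvswitch_device *device
)
{
    NvlStatus status;

    // Queries the ROM size, then pulls the full image into device->biosImage.
    status = nvswitch_bios_get_image(device);
    if (status != NVL_SUCCESS)
    {
        return status;
    }

    NVSWITCH_PRINT(device, SETUP, "Cached VBIOS image of 0x%x bytes\n",
        device->biosImage.size);
    return NVL_SUCCESS;
}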


@@ -0,0 +1,544 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#define NVSWITCH_DATE_LEN 64
//
// Error logging
//
static void
_nvswitch_dump_error_entry
(
nvswitch_device *device,
NvU32 error_count,
NVSWITCH_ERROR_TYPE *error_entry
)
{
if ((error_entry != NULL) &&
(error_entry->error_src == NVSWITCH_ERROR_SRC_HW))
{
NVSWITCH_PRINT_SXID(device, error_entry->error_type,
"Severity %d Engine instance %02d Sub-engine instance %02d\n",
error_entry->severity, error_entry->instance, error_entry->subinstance);
NVSWITCH_PRINT_SXID(device, error_entry->error_type,
"Data {0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x}\n",
error_entry->data.raw.data[0], error_entry->data.raw.data[1],
error_entry->data.raw.data[2], error_entry->data.raw.data[3],
error_entry->data.raw.data[4], error_entry->data.raw.data[5],
error_entry->data.raw.data[6], error_entry->data.raw.data[7]);
if ((error_entry->data.raw.data[ 8] != 0) ||
(error_entry->data.raw.data[ 9] != 0) ||
(error_entry->data.raw.data[10] != 0) ||
(error_entry->data.raw.data[11] != 0) ||
(error_entry->data.raw.data[12] != 0) ||
(error_entry->data.raw.data[13] != 0) ||
(error_entry->data.raw.data[14] != 0) ||
(error_entry->data.raw.data[15] != 0))
{
NVSWITCH_PRINT_SXID(device, error_entry->error_type,
"Data {0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x}\n",
error_entry->data.raw.data[ 8], error_entry->data.raw.data[ 9],
error_entry->data.raw.data[10], error_entry->data.raw.data[11],
error_entry->data.raw.data[12], error_entry->data.raw.data[13],
error_entry->data.raw.data[14], error_entry->data.raw.data[15]);
}
}
}
//
// Construct an error log
//
// If error_log_size > 0 a circular buffer is created to record errors
//
NvlStatus
nvswitch_construct_error_log
(
NVSWITCH_ERROR_LOG_TYPE *errors,
NvU32 error_log_size,
NvBool overwritable
)
{
NvlStatus retval = NVL_SUCCESS;
NVSWITCH_ASSERT(errors != NULL);
errors->error_start = 0;
errors->error_count = 0;
errors->error_total = 0;
errors->error_log_size = 0;
errors->error_log = NULL;
errors->overwritable = overwritable;
if (error_log_size > 0)
{
errors->error_log = nvswitch_os_malloc(error_log_size * sizeof(NVSWITCH_ERROR_TYPE));
}
if (errors->error_log != NULL)
{
errors->error_log_size = error_log_size;
nvswitch_os_memset(errors->error_log, 0, errors->error_log_size * sizeof(NVSWITCH_ERROR_TYPE));
}
if (error_log_size != errors->error_log_size)
{
retval = -NVL_NO_MEM;
}
return retval;
}
//
// Destroy an error log
//
void
nvswitch_destroy_error_log
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors
)
{
if (errors == NULL)
return;
errors->error_start = 0;
errors->error_count = 0;
//errors->error_total = 0; // Don't reset total count of errors logged
errors->error_log_size = 0;
if (errors->error_log != NULL)
{
nvswitch_os_free(errors->error_log);
errors->error_log = NULL;
}
}
void
nvswitch_record_error
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors,
NvU32 error_type, // NVSWITCH_ERR_*
NvU32 instance,
NvU32 subinstance,
NVSWITCH_ERROR_SRC_TYPE error_src, // NVSWITCH_ERROR_SRC_*
NVSWITCH_ERROR_SEVERITY_TYPE severity, // NVSWITCH_ERROR_SEVERITY_*
NvBool error_resolved,
void *data,
NvU32 data_size,
NvU32 line
)
{
NvU32 idx_error;
NVSWITCH_ASSERT(errors != NULL);
NVSWITCH_ASSERT(data_size <= sizeof(errors->error_log[idx_error].data));
// If no error log has been created, don't log it.
if ((errors->error_log_size != 0) && (errors->error_log != NULL))
{
idx_error = (errors->error_start + errors->error_count) % errors->error_log_size;
if (errors->error_count == errors->error_log_size)
{
// Error ring buffer already full.
if (errors->overwritable)
{
errors->error_start = (errors->error_start + 1) % errors->error_log_size;
}
else
{
// Return: ring buffer full
return;
}
}
else
{
errors->error_count++;
}
// Log error info
errors->error_log[idx_error].error_type = error_type;
errors->error_log[idx_error].instance = instance;
errors->error_log[idx_error].subinstance = subinstance;
errors->error_log[idx_error].error_src = error_src;
errors->error_log[idx_error].severity = severity;
errors->error_log[idx_error].error_resolved = error_resolved;
errors->error_log[idx_error].line = line;
// Log tracking info
errors->error_log[idx_error].timer_count = nvswitch_hw_counter_read_counter(device);
errors->error_log[idx_error].time = nvswitch_os_get_platform_time();
errors->error_log[idx_error].local_error_num = errors->error_total;
errors->error_log[idx_error].global_error_num = device->error_total;
// Copy ancillary data blob
nvswitch_os_memset(&errors->error_log[idx_error].data, 0, sizeof(errors->error_log[idx_error].data));
if ((data != NULL) && (data_size > 0))
{
nvswitch_os_memcpy(&errors->error_log[idx_error].data, data, data_size);
}
_nvswitch_dump_error_entry(device, idx_error, &errors->error_log[idx_error]);
}
errors->error_total++;
device->error_total++;
}
//
// Discard N errors from the specified log
//
void
nvswitch_discard_errors
(
NVSWITCH_ERROR_LOG_TYPE *errors,
NvU32 error_discard_count
)
{
error_discard_count = NV_MIN(error_discard_count, errors->error_count);
errors->error_start = (errors->error_start+error_discard_count) % errors->error_log_size;
errors->error_count -= error_discard_count;
}
//
// Retrieve an error entry by index.
// 0 = oldest error
// An out-of-range index does not fail; it returns an entry of type "NO_ERROR".
// error_count returns how many errors are currently in the error log.
//
void
nvswitch_get_error
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors,
NVSWITCH_ERROR_TYPE *error_entry,
NvU32 error_idx,
NvU32 *error_count
)
{
NVSWITCH_ASSERT(errors != NULL);
if (error_entry != NULL)
{
if (error_idx >= errors->error_count)
{
// Index out-of-range
nvswitch_os_memset(error_entry, 0, sizeof(*error_entry));
error_entry->error_type = 0;
error_entry->instance = 0;
error_entry->subinstance = 0;
error_entry->local_error_num = errors->error_total;
error_entry->global_error_num = ((device == NULL) ? 0 : device->error_total);
error_entry->error_src = NVSWITCH_ERROR_SRC_NONE;
error_entry->severity = NVSWITCH_ERROR_SEVERITY_NONFATAL;
error_entry->error_resolved = NV_TRUE;
error_entry->line = 0;
error_entry->timer_count =
((device == NULL) ? 0 : nvswitch_hw_counter_read_counter(device));
error_entry->time = nvswitch_os_get_platform_time();
}
else
{
*error_entry = errors->error_log[(errors->error_start + error_idx) % errors->error_log_size];
}
}
if (error_count)
{
*error_count = errors->error_count;
}
}
//
// Retrieve the oldest logged error entry.
// Optionally remove the error entry after reading.
// error_count returns how many errors remain in the error log.
//
void
nvswitch_get_next_error
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors,
NVSWITCH_ERROR_TYPE *error_entry,
NvU32 *error_count,
NvBool remove_from_list
)
{
nvswitch_get_error(device, errors, error_entry, 0, error_count);
// Optionally remove the error from the log
if (remove_from_list)
{
nvswitch_discard_errors(errors, 1);
}
}
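
/*
 * Illustrative sketch (not part of this commit): typical lifecycle of the
 * error ring buffer implemented above -- construct a small log, record one
 * entry, drain it with nvswitch_get_next_error(), then tear it down. The log
 * size and the NVSWITCH_ERR_* value are placeholders picked for the example.
 */
static void
_example_error_log_usage
(
    nvswitch_device *device
)
{
    NVSWITCH_ERROR_LOG_TYPE log;
    NVSWITCH_ERROR_TYPE entry;
    NvU32 remaining;

    if (nvswitch_construct_error_log(&log, 32, NV_TRUE) != NVL_SUCCESS)
    {
        return;
    }

    // Record a resolved, non-fatal HW error with no ancillary data blob.
    nvswitch_record_error(device, &log, NVSWITCH_ERR_HW_HOST, 0, 0,
        NVSWITCH_ERROR_SRC_HW, NVSWITCH_ERROR_SEVERITY_NONFATAL,
        NV_TRUE, NULL, 0, __LINE__);

    // Read the oldest entry and pop it from the ring.
    nvswitch_get_next_error(device, &log, &entry, &remaining, NV_TRUE);

    nvswitch_destroy_error_log(device, &log);
}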
NVSWITCH_NVLINK_HW_ERROR
nvswitch_translate_hw_error
(
NVSWITCH_ERR_TYPE type
)
{
if ((type >= NVSWITCH_ERR_HW_NPORT_INGRESS) &&
(type < NVSWITCH_ERR_HW_NPORT_INGRESS_LAST))
{
return NVSWITCH_NVLINK_HW_INGRESS;
}
else if ((type >= NVSWITCH_ERR_HW_NPORT_EGRESS) &&
(type < NVSWITCH_ERR_HW_NPORT_EGRESS_LAST))
{
return NVSWITCH_NVLINK_HW_EGRESS;
}
else if ((type >= NVSWITCH_ERR_HW_NPORT_FSTATE) &&
(type < NVSWITCH_ERR_HW_NPORT_FSTATE_LAST))
{
return NVSWITCH_NVLINK_HW_FSTATE;
}
else if ((type >= NVSWITCH_ERR_HW_NPORT_TSTATE) &&
(type < NVSWITCH_ERR_HW_NPORT_TSTATE_LAST))
{
return NVSWITCH_NVLINK_HW_TSTATE;
}
else if ((type >= NVSWITCH_ERR_HW_NPORT_ROUTE) &&
(type < NVSWITCH_ERR_HW_NPORT_ROUTE_LAST))
{
return NVSWITCH_NVLINK_HW_ROUTE;
}
else if ((type >= NVSWITCH_ERR_HW_NPORT) &&
(type < NVSWITCH_ERR_HW_NPORT_LAST))
{
return NVSWITCH_NVLINK_HW_NPORT;
}
else if ((type >= NVSWITCH_ERR_HW_NVLCTRL) &&
(type < NVSWITCH_ERR_HW_NVLCTRL_LAST))
{
return NVSWITCH_NVLINK_HW_NVLCTRL;
}
else if ((type >= NVSWITCH_ERR_HW_NVLIPT) &&
(type < NVSWITCH_ERR_HW_NVLIPT_LAST))
{
return NVSWITCH_NVLINK_HW_NVLIPT;
}
else if ((type >= NVSWITCH_ERR_HW_NVLTLC) &&
(type < NVSWITCH_ERR_HW_NVLTLC_LAST))
{
return NVSWITCH_NVLINK_HW_NVLTLC;
}
else if ((type >= NVSWITCH_ERR_HW_DLPL) &&
(type < NVSWITCH_ERR_HW_DLPL_LAST))
{
return NVSWITCH_NVLINK_HW_DLPL;
}
else if ((type >= NVSWITCH_ERR_HW_AFS) &&
(type < NVSWITCH_ERR_HW_AFS_LAST))
{
return NVSWITCH_NVLINK_HW_AFS;
}
else if ((type >= NVSWITCH_ERR_HW_HOST) &&
(type < NVSWITCH_ERR_HW_HOST_LAST))
{
return NVSWITCH_NVLINK_HW_HOST;
}
else if ((type >= NVSWITCH_ERR_HW_MINION) &&
(type < NVSWITCH_ERR_HW_MINION_LAST))
{
return NVSWITCH_NVLINK_HW_MINION;
}
else if ((type >= NVSWITCH_ERR_HW_NXBAR) &&
(type < NVSWITCH_ERR_HW_NXBAR_LAST))
{
return NVSWITCH_NVLINK_HW_NXBAR;
}
else if ((type >= NVSWITCH_ERR_HW_NPORT_SOURCETRACK) &&
(type < NVSWITCH_ERR_HW_NPORT_SOURCETRACK_LAST))
{
return NVSWITCH_NVLINK_HW_SOURCETRACK;
}
else if ((type >= NVSWITCH_ERR_HW_NVLIPT_LNK) &&
(type < NVSWITCH_ERR_HW_NVLIPT_LNK_LAST))
{
return NVSWITCH_ERR_HW_NVLIPT_LNK;
}
else if ((type >= NVSWITCH_ERR_HW_SOE) &&
(type < NVSWITCH_ERR_HW_SOE_LAST))
{
return NVSWITCH_ERR_HW_SOE;
}
else
{
// Update this assert after adding a new translation entry above
ct_assert(NVSWITCH_ERR_HW_SOE_LAST == (NVSWITCH_ERR_LAST - 1));
NVSWITCH_PRINT(NULL, ERROR,
"%s: Undefined error type\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
return NVSWITCH_NVLINK_HW_GENERIC;
}
}
static NVSWITCH_NVLINK_ARCH_ERROR
_nvswitch_translate_arch_error
(
NVSWITCH_ERROR_TYPE *error_entry
)
{
if (error_entry->severity == NVSWITCH_ERROR_SEVERITY_FATAL)
{
return NVSWITCH_NVLINK_ARCH_ERROR_HW_FATAL;
}
else if (error_entry->severity == NVSWITCH_ERROR_SEVERITY_NONFATAL)
{
if (error_entry->error_resolved)
{
return NVSWITCH_NVLINK_ARCH_ERROR_HW_CORRECTABLE;
}
else
{
return NVSWITCH_NVLINK_ARCH_ERROR_HW_UNCORRECTABLE;
}
}
return NVSWITCH_NVLINK_ARCH_ERROR_GENERIC;
}
void
nvswitch_translate_error
(
NVSWITCH_ERROR_TYPE *error_entry,
NVSWITCH_NVLINK_ARCH_ERROR *arch_error,
NVSWITCH_NVLINK_HW_ERROR *hw_error
)
{
NVSWITCH_ASSERT(error_entry != NULL);
if (arch_error)
{
*arch_error = NVSWITCH_NVLINK_ARCH_ERROR_NONE;
}
if (hw_error)
{
*hw_error = NVSWITCH_NVLINK_HW_ERROR_NONE;
}
if (error_entry->error_src == NVSWITCH_ERROR_SRC_HW)
{
if (arch_error)
{
*arch_error = _nvswitch_translate_arch_error(error_entry);
}
if (hw_error)
{
*hw_error = nvswitch_translate_hw_error(error_entry->error_type);
}
}
else
{
NVSWITCH_PRINT(NULL, ERROR,
"%s: Undefined error source\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
}
}
NvlStatus
nvswitch_ctrl_get_errors
(
nvswitch_device *device,
NVSWITCH_GET_ERRORS_PARAMS *p
)
{
NvU32 index = 0;
NvU32 count = 0;
NVSWITCH_ERROR_LOG_TYPE *error_log;
NVSWITCH_ERROR_TYPE error;
switch (p->errorType)
{
case NVSWITCH_ERROR_SEVERITY_FATAL:
error_log = &device->log_FATAL_ERRORS;
break;
case NVSWITCH_ERROR_SEVERITY_NONFATAL:
error_log = &device->log_NONFATAL_ERRORS;
break;
default:
return -NVL_BAD_ARGS;
}
nvswitch_os_memset(p->error, 0, sizeof(NVSWITCH_ERROR) *
NVSWITCH_ERROR_COUNT_SIZE);
p->nextErrorIndex = NVSWITCH_ERROR_NEXT_LOCAL_NUMBER(error_log);
p->errorCount = 0;
// If there is nothing to do, return.
nvswitch_get_error(device, error_log, &error, index, &count);
if (count == 0)
{
return NVL_SUCCESS;
}
//
// If the error's local_error_num is smaller than the errorIndex
// passed in by the client, fast-forward index by the difference.
// This will skip over errors that were previously read by the client.
//
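    // For example: if the oldest logged entry has local_error_num 10 and the
    // client passes errorIndex 14, reading resumes at index 4, i.e. the entry
    // numbered 14.
    //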
if (error.local_error_num < p->errorIndex)
{
index = (NvU32) (p->errorIndex - error.local_error_num);
}
// If there is nothing to do after fast-forwarding, return.
if (index >= count)
{
return NVL_SUCCESS;
}
while ((p->errorCount < NVSWITCH_ERROR_COUNT_SIZE) && (index < count))
{
// Get the next error to consider from the log
nvswitch_get_error(device, error_log, &error, index, NULL);
p->error[p->errorCount].error_value = error.error_type;
p->error[p->errorCount].error_src = error.error_src;
p->error[p->errorCount].instance = error.instance;
p->error[p->errorCount].subinstance = error.subinstance;
p->error[p->errorCount].time = error.time;
p->error[p->errorCount].error_resolved = error.error_resolved;
p->errorCount++;
index++;
}
p->errorIndex = error.local_error_num + 1;
return NVL_SUCCESS;
}


@@ -0,0 +1,714 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "flcn/haldefs_flcn_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "flcnifcmn.h"
typedef union RM_FLCN_CMD RM_FLCN_CMD, *PRM_FLCN_CMD;
typedef union RM_FLCN_MSG RM_FLCN_MSG, *PRM_FLCN_MSG;
// OBJECT Interfaces
NV_STATUS
flcnQueueReadData
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 queueId,
void *pData,
NvBool bMsg
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueReadData != (void *)0);
return pFlcn->pHal->queueReadData(device, pFlcn, queueId, pData, bMsg);
}
NV_STATUS
flcnQueueCmdWrite
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 queueId,
RM_FLCN_CMD *pCmd,
NVSWITCH_TIMEOUT *pTimeout
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueCmdWrite != (void *)0);
return pFlcn->pHal->queueCmdWrite(device, pFlcn, queueId, pCmd, pTimeout);
}
NV_STATUS
flcnQueueCmdCancel
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 seqDesc
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueCmdCancel != (void *)0);
return pFlcn->pHal->queueCmdCancel(device, pFlcn, seqDesc);
}
NV_STATUS
flcnQueueCmdPostNonBlocking
(
nvswitch_device *device,
PFLCN pFlcn,
PRM_FLCN_CMD pCmd,
PRM_FLCN_MSG pMsg,
void *pPayload,
NvU32 queueIdLogical,
FlcnQMgrClientCallback pCallback,
void *pCallbackParams,
NvU32 *pSeqDesc,
NVSWITCH_TIMEOUT *pTimeout
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueCmdPostNonBlocking != (void *)0);
return pFlcn->pHal->queueCmdPostNonBlocking(device, pFlcn, pCmd, pMsg, pPayload, queueIdLogical, pCallback, pCallbackParams, pSeqDesc, pTimeout);
}
NV_STATUS
flcnQueueCmdPostBlocking
(
nvswitch_device *device,
PFLCN pFlcn,
PRM_FLCN_CMD pCmd,
PRM_FLCN_MSG pMsg,
void *pPayload,
NvU32 queueIdLogical,
NvU32 *pSeqDesc,
NVSWITCH_TIMEOUT *pTimeout
)
{
NV_STATUS status;
status = flcnQueueCmdPostNonBlocking(device, pFlcn, pCmd, pMsg, pPayload,
queueIdLogical, NULL, NULL, pSeqDesc, pTimeout);
if (status != NV_OK)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_COMMAND_QUEUE,
"Failed to post command to SOE\n");
return status;
}
status = flcnQueueCmdWait(device, pFlcn, *pSeqDesc, pTimeout);
if (status == NV_ERR_TIMEOUT)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_TIMEOUT,
"Timed out while waiting for SOE command completion\n");
flcnQueueCmdCancel(device, pFlcn, *pSeqDesc);
}
return status;
}
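
/*
 * Illustrative sketch (not part of this commit): the usual client pattern for
 * the blocking path above -- build a unit command, create a timeout, and let
 * flcnQueueCmdPostBlocking() handle the wait and the cancel-on-timeout.
 * RM_FLCN_CMD_SOE, RM_SOE_UNIT_CORE and SOE_RM_CMDQ_LOG_ID come from the SOE
 * command interface headers, not from this file, and serve only as an example
 * payload here.
 */
static NV_STATUS
_example_post_blocking_soe_cmd
(
    nvswitch_device *device,
    PFLCN            pFlcn
)
{
    RM_FLCN_CMD_SOE cmd;
    NVSWITCH_TIMEOUT timeout;
    NvU32 seqDesc = 0;

    nvswitch_os_memset(&cmd, 0, sizeof(cmd));
    cmd.hdr.unitId = RM_SOE_UNIT_CORE;
    cmd.hdr.size   = sizeof(cmd);

    nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
    return flcnQueueCmdPostBlocking(device, pFlcn,
                                    (PRM_FLCN_CMD)&cmd,
                                    NULL,   // pMsg
                                    NULL,   // pPayload
                                    SOE_RM_CMDQ_LOG_ID,
                                    &seqDesc,
                                    &timeout);
}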
NV_STATUS
flcnQueueCmdWait
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 seqDesc,
NVSWITCH_TIMEOUT *pTimeout
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueCmdWait != (void *)0);
return pFlcn->pHal->queueCmdWait(device, pFlcn, seqDesc, pTimeout);
}
NvU8
flcnCoreRevisionGet
(
struct nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->coreRevisionGet != (void *)0);
return pFlcn->pHal->coreRevisionGet(device, pFlcn);
}
void
flcnMarkNotReady
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->markNotReady != (void *)0);
pFlcn->pHal->markNotReady(device, pFlcn);
}
NV_STATUS
flcnCmdQueueHeadGet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pHead
)
{
NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueHeadGet != (void *)0);
return pFlcn->pHal->cmdQueueHeadGet(device, pFlcn, pQueue, pHead);
}
NV_STATUS
flcnMsgQueueHeadGet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pHead
)
{
NVSWITCH_ASSERT(pFlcn->pHal->msgQueueHeadGet != (void *)0);
return pFlcn->pHal->msgQueueHeadGet(device, pFlcn, pQueue, pHead);
}
NV_STATUS
flcnCmdQueueTailGet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pTail
)
{
NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueTailGet != (void *)0);
return pFlcn->pHal->cmdQueueTailGet(device, pFlcn, pQueue, pTail);
}
NV_STATUS
flcnMsgQueueTailGet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pTail
)
{
NVSWITCH_ASSERT(pFlcn->pHal->msgQueueTailGet != (void *)0);
return pFlcn->pHal->msgQueueTailGet(device, pFlcn, pQueue, pTail);
}
NV_STATUS
flcnCmdQueueHeadSet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 head
)
{
NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueHeadSet != (void *)0);
return pFlcn->pHal->cmdQueueHeadSet(device, pFlcn, pQueue, head);
}
NV_STATUS
flcnMsgQueueHeadSet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 head
)
{
NVSWITCH_ASSERT(pFlcn->pHal->msgQueueHeadSet != (void *)0);
return pFlcn->pHal->msgQueueHeadSet(device, pFlcn, pQueue, head);
}
NV_STATUS
flcnCmdQueueTailSet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 tail
)
{
NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueTailSet != (void *)0);
return pFlcn->pHal->cmdQueueTailSet(device, pFlcn, pQueue, tail);
}
NV_STATUS
flcnMsgQueueTailSet
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 tail
)
{
NVSWITCH_ASSERT(pFlcn->pHal->msgQueueTailSet != (void *)0);
return pFlcn->pHal->msgQueueTailSet(device, pFlcn, pQueue, tail);
}
PFLCN_QMGR_SEQ_INFO
flcnQueueSeqInfoFind
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 seqDesc
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoFind != (void *)0);
return pFlcn->pHal->queueSeqInfoFind(device, pFlcn, seqDesc);
}
PFLCN_QMGR_SEQ_INFO
flcnQueueSeqInfoAcq
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoAcq != (void *)0);
return pFlcn->pHal->queueSeqInfoAcq(device, pFlcn);
}
void
flcnQueueSeqInfoRel
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoRel != (void *)0);
pFlcn->pHal->queueSeqInfoRel(device, pFlcn, pSeqInfo);
}
void
flcnQueueSeqInfoStateInit
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoStateInit != (void *)0);
pFlcn->pHal->queueSeqInfoStateInit(device, pFlcn);
}
void
flcnQueueSeqInfoCancelAll
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoCancelAll != (void *)0);
pFlcn->pHal->queueSeqInfoCancelAll(device, pFlcn);
}
NV_STATUS
flcnQueueSeqInfoFree
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoFree != (void *)0);
return pFlcn->pHal->queueSeqInfoFree(device, pFlcn, pSeqInfo);
}
NV_STATUS
flcnQueueEventRegister
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 unitId,
NvU8 *pMsg,
FlcnQMgrClientCallback pCallback,
void *pParams,
NvU32 *pEvtDesc
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueEventRegister != (void *)0);
return pFlcn->pHal->queueEventRegister(device, pFlcn, unitId, pMsg, pCallback, pParams, pEvtDesc);
}
NV_STATUS
flcnQueueEventUnregister
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 evtDesc
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueEventUnregister != (void *)0);
return pFlcn->pHal->queueEventUnregister(device, pFlcn, evtDesc);
}
NV_STATUS
flcnQueueEventHandle
(
nvswitch_device *device,
PFLCN pFlcn,
RM_FLCN_MSG *pMsg,
NV_STATUS evtStatus
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueEventHandle != (void *)0);
return pFlcn->pHal->queueEventHandle(device, pFlcn, pMsg, evtStatus);
}
NV_STATUS
flcnQueueResponseHandle
(
nvswitch_device *device,
PFLCN pFlcn,
RM_FLCN_MSG *pMsg
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueResponseHandle != (void *)0);
return pFlcn->pHal->queueResponseHandle(device, pFlcn, pMsg);
}
NvU32
flcnQueueCmdStatus
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 seqDesc
)
{
NVSWITCH_ASSERT(pFlcn->pHal->queueCmdStatus != (void *)0);
return pFlcn->pHal->queueCmdStatus(device, pFlcn, seqDesc);
}
NV_STATUS
flcnDmemCopyFrom
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 src,
NvU8 *pDst,
NvU32 sizeBytes,
NvU8 port
)
{
NVSWITCH_ASSERT(pFlcn->pHal->dmemCopyFrom != (void *)0);
return pFlcn->pHal->dmemCopyFrom(device, pFlcn, src, pDst, sizeBytes, port);
}
NV_STATUS
flcnDmemCopyTo
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 dst,
NvU8 *pSrc,
NvU32 sizeBytes,
NvU8 port
)
{
NVSWITCH_ASSERT(pFlcn->pHal->dmemCopyTo != (void *)0);
return pFlcn->pHal->dmemCopyTo(device, pFlcn, dst, pSrc, sizeBytes, port);
}
void
flcnPostDiscoveryInit
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->postDiscoveryInit != (void *)0);
pFlcn->pHal->postDiscoveryInit(device, pFlcn);
}
void
flcnDbgInfoDmemOffsetSet
(
nvswitch_device *device,
PFLCN pFlcn,
NvU16 debugInfoDmemOffset
)
{
NVSWITCH_ASSERT(pFlcn->pHal->dbgInfoDmemOffsetSet != (void *)0);
pFlcn->pHal->dbgInfoDmemOffsetSet(device, pFlcn, debugInfoDmemOffset);
}
// HAL Interfaces
NV_STATUS
flcnConstruct_HAL
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->construct != (void *)0);
return pFlcn->pHal->construct(device, pFlcn);
}
void
flcnDestruct_HAL
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->destruct != (void *)0);
pFlcn->pHal->destruct(device, pFlcn);
}
NvU32
flcnRegRead_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset
)
{
NVSWITCH_ASSERT(pFlcn->pHal->regRead != (void *)0);
return pFlcn->pHal->regRead(device, pFlcn, offset);
}
void
flcnRegWrite_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset,
NvU32 data
)
{
NVSWITCH_ASSERT(pFlcn->pHal->regWrite != (void *)0);
pFlcn->pHal->regWrite(device, pFlcn, offset, data);
}
const char *
flcnGetName_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->getName != (void *)0);
return pFlcn->pHal->getName(device, pFlcn);
}
NvU8
flcnReadCoreRev_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->readCoreRev != (void *)0);
return pFlcn->pHal->readCoreRev(device, pFlcn);
}
void
flcnGetCoreInfo_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->getCoreInfo != (void *)0);
pFlcn->pHal->getCoreInfo(device, pFlcn);
}
NV_STATUS
flcnDmemTransfer_HAL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 src,
NvU8 *pDst,
NvU32 sizeBytes,
NvU8 port,
NvBool bCopyFrom
)
{
NVSWITCH_ASSERT(pFlcn->pHal->dmemTransfer != (void *)0);
return pFlcn->pHal->dmemTransfer(device, pFlcn, src, pDst, sizeBytes, port, bCopyFrom);
}
void
flcnIntrRetrigger_HAL
(
nvswitch_device *device,
FLCN *pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->intrRetrigger != (void *)0);
pFlcn->pHal->intrRetrigger(device, pFlcn);
}
NvBool
flcnAreEngDescsInitialized_HAL
(
nvswitch_device *device,
FLCN *pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->areEngDescsInitialized != (void *)0);
return pFlcn->pHal->areEngDescsInitialized(device, pFlcn);
}
NV_STATUS
flcnWaitForResetToFinish_HAL
(
nvswitch_device *device,
FLCN *pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->waitForResetToFinish != (void *)0);
return pFlcn->pHal->waitForResetToFinish(device, pFlcn);
}
void
flcnDbgInfoCapturePcTrace_HAL
(
nvswitch_device *device,
PFLCN pFlcn
)
{
if (pFlcn->pHal->dbgInfoCapturePcTrace == (void *)0)
{
NVSWITCH_ASSERT(0);
return;
}
pFlcn->pHal->dbgInfoCapturePcTrace(device, pFlcn);
}
void
flcnDbgInfoCaptureRiscvPcTrace_HAL
(
nvswitch_device *device,
PFLCN pFlcn
)
{
if (pFlcn->pHal->dbgInfoCaptureRiscvPcTrace == (void *)0)
{
NVSWITCH_ASSERT(0);
return;
}
pFlcn->pHal->dbgInfoCaptureRiscvPcTrace(device, pFlcn);
}
NvU32
flcnDmemSize_HAL
(
nvswitch_device *device,
PFLCN pFlcn
)
{
if (pFlcn->pHal->dmemSize == (void *)0)
{
NVSWITCH_ASSERT(0);
return 0;
}
return pFlcn->pHal->dmemSize(device, pFlcn);
}
NvU32
flcnSetImemAddr_HAL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 dst
)
{
if (pFlcn->pHal->setImemAddr == (void *)0)
{
NVSWITCH_ASSERT(0);
return 0;
}
return pFlcn->pHal->setImemAddr(device, pFlcn, dst);
}
void
flcnImemCopyTo_HAL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 dst,
NvU8 *pSrc,
NvU32 sizeBytes,
NvBool bSecure,
NvU32 tag,
NvU8 port
)
{
if (pFlcn->pHal->imemCopyTo == (void *)0)
{
NVSWITCH_ASSERT(0);
return;
}
pFlcn->pHal->imemCopyTo(device, pFlcn, dst, pSrc, sizeBytes, bSecure, tag, port);
}
NvU32
flcnSetDmemAddr_HAL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 dst
)
{
if (pFlcn->pHal->setDmemAddr == (void *)0)
{
NVSWITCH_ASSERT(0);
return 0;
}
return pFlcn->pHal->setDmemAddr(device, pFlcn, dst);
}
NvU32
flcnRiscvRegRead_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset
)
{
NVSWITCH_ASSERT(pFlcn->pHal->riscvRegRead != (void *)0);
return pFlcn->pHal->riscvRegRead(device, pFlcn, offset);
}
void
flcnRiscvRegWrite_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset,
NvU32 data
)
{
NVSWITCH_ASSERT(pFlcn->pHal->riscvRegWrite != (void *)0);
pFlcn->pHal->riscvRegWrite(device, pFlcn, offset, data);
}


@@ -0,0 +1,542 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "flcn/flcnable_nvswitch.h"
static void flcnSetupIpHal(nvswitch_device *device, PFLCN pFlcn);
/*!
* @brief Get the falcon core revision and subversion.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
*
* @return the falcon core revision in the format of NV_FLCN_CORE_REV_X_Y.
*/
static NvU8
_flcnCoreRevisionGet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn
)
{
if (pFlcn->coreRev == 0x00)
{
// Falcon core revision has not yet been set. Set it now.
flcnGetCoreInfo_HAL(device, pFlcn);
}
return pFlcn->coreRev;
}
/*!
* @brief Mark the falcon as not ready and inaccessible from RM.
* osHandleGpuSurpriseRemoval will use this routine to prevent access to the
 * Falcon, which could crash due to the absence of the GPU, during driver cleanup.
*
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn FLCN pointer
*
* @returns nothing
*/
static void
_flcnMarkNotReady_IMPL
(
nvswitch_device *device,
PFLCN pFlcn
)
{
pFlcn->bOSReady = NV_FALSE;
}
/*!
 * Retrieves the current head pointer for the given physical command queue index.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[out] pHead Pointer to write with the queue's head pointer
*
* @return 'NV_OK' if head value was successfully retrieved.
*/
static NV_STATUS
_flcnCmdQueueHeadGet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pHead
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQHeadSize);
NVSWITCH_ASSERT(pHead != NULL);
*pHead = flcnRegRead_HAL(device, pFlcn,
(pQueueInfo->cmdQHeadBaseAddress +
(pQueue->queuePhyId * pQueueInfo->cmdQHeadStride)));
return NV_OK;
}
/*!
* Sets the head pointer for the given physical command queue index.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[in] head The desired head value for the queue
*
* @return 'NV_OK' if the head value was successfully set.
*/
static NV_STATUS
_flcnCmdQueueHeadSet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 head
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQHeadSize);
flcnRegWrite_HAL(device, pFlcn,
(pQueueInfo->cmdQHeadBaseAddress +
(pQueue->queuePhyId * pQueueInfo->cmdQHeadStride)),
head);
return NV_OK;
}
/*!
 * Retrieves the current tail pointer for the given physical command queue index.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[out] pTail Pointer to write with the queue's tail value
*
* @return 'NV_OK' if the tail value was successfully retrieved.
*/
static NV_STATUS
_flcnCmdQueueTailGet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pTail
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQTailSize);
NVSWITCH_ASSERT(pTail != NULL);
*pTail = flcnRegRead_HAL(device, pFlcn,
(pQueueInfo->cmdQTailBaseAddress +
(pQueue->queuePhyId * pQueueInfo->cmdQTailStride)));
return NV_OK;
}
/*!
* Set the Command Queue tail pointer.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[in] tail The desired tail value
*
* @return 'NV_OK' if the tail value was successfully set.
*/
static NV_STATUS
_flcnCmdQueueTailSet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 tail
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQTailSize);
flcnRegWrite_HAL(device, pFlcn,
(pQueueInfo->cmdQTailBaseAddress +
(pQueue->queuePhyId * pQueueInfo->cmdQTailStride)),
tail);
return NV_OK;
}
/*!
* Retrieve the current Message Queue Head pointer.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[in] pHead Pointer to write with the queue's head value
*
* @return 'NV_OK' if the queue's head value was successfully retrieved.
*/
static NV_STATUS
_flcnMsgQueueHeadGet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pHead
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQHeadSize);
NVSWITCH_ASSERT(pHead != NULL);
*pHead = flcnRegRead_HAL(device, pFlcn,
(pQueueInfo->msgQHeadBaseAddress +
(pQueue->queuePhyId * pQueueInfo->msgQHeadStride)));
return NV_OK;
}
/*!
* Set the Message Queue Head pointer.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[in] head The desired head value
*
* @return 'NV_OK' if the head value was successfully set.
*/
static NV_STATUS
_flcnMsgQueueHeadSet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 head
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQHeadSize);
flcnRegWrite_HAL(device, pFlcn,
(pQueueInfo->msgQHeadBaseAddress +
(pQueue->queuePhyId * pQueueInfo->msgQHeadStride)),
head);
return NV_OK;
}
/*!
* Retrieve the current Message Queue Tail pointer.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[out] pTail Pointer to write with the message queue's tail value
*
* @return 'NV_OK' if the tail value was successfully retrieved.
*/
static NV_STATUS
_flcnMsgQueueTailGet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 *pTail
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQTailSize);
NVSWITCH_ASSERT(pTail != NULL);
*pTail = flcnRegRead_HAL(device, pFlcn,
(pQueueInfo->msgQTailBaseAddress +
(pQueue->queuePhyId * pQueueInfo->msgQTailStride)));
return NV_OK;
}
/*!
* Set the Message Queue Tail pointer.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[in] tail The desired tail value for the message queue
*
* @return 'NV_OK' if the tail value was successfully set.
*/
static NV_STATUS
_flcnMsgQueueTailSet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
NvU32 tail
)
{
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQTailSize);
flcnRegWrite_HAL(device, pFlcn,
(pQueueInfo->msgQTailBaseAddress +
(pQueue->queuePhyId * pQueueInfo->msgQTailStride)),
tail);
return NV_OK;
}
/*!
* Copies 'sizeBytes' from DMEM offset 'src' to 'pDst' using DMEM access
* port 'port'.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN pointer
* @param[in] src The DMEM offset for the source of the copy
* @param[out] pDst Pointer to write with copied data from DMEM
* @param[in] sizeBytes The number of bytes to copy from DMEM
* @param[in] port The DMEM port index to use when accessing the DMEM
*/
static NV_STATUS
_flcnDmemCopyFrom_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 src,
NvU8 *pDst,
NvU32 sizeBytes,
NvU8 port
)
{
return flcnDmemTransfer_HAL(device, pFlcn,
src, pDst, sizeBytes, port,
NV_TRUE); // bCopyFrom
}
/*!
* Copies 'sizeBytes' from 'pDst' to DMEM offset 'dst' using DMEM access port
* 'port'.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN pointer
* @param[in] dst The destination DMEM offset for the copy
* @param[in] pSrc The pointer to the buffer containing the data to copy
* @param[in] sizeBytes The number of bytes to copy into DMEM
* @param[in] port The DMEM port index to use when accessing the DMEM
*/
static NV_STATUS
_flcnDmemCopyTo_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 dst,
NvU8 *pSrc,
NvU32 sizeBytes,
NvU8 port
)
{
return flcnDmemTransfer_HAL(device, pFlcn,
dst, pSrc, sizeBytes, port,
NV_FALSE); // bCopyFrom
}
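
/*
 * Illustrative sketch (not part of this commit): reading a single 32-bit word
 * out of DMEM through the wrapper above. The DMEM offset and port index are
 * caller-supplied placeholders; real callers derive them from the falcon's
 * queue/EMEM layout.
 */
static NV_STATUS
_example_read_dmem_word
(
    nvswitch_device *device,
    PFLCN            pFlcn,
    NvU32            dmemOffset,
    NvU32           *pValue
)
{
    // Port 0 is used purely for illustration.
    return flcnDmemCopyFrom(device, pFlcn, dmemOffset,
                            (NvU8 *)pValue, sizeof(*pValue), 0);
}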
static void
_flcnPostDiscoveryInit_IMPL
(
nvswitch_device *device,
FLCN *pFlcn
)
{
flcnableFetchEngines_HAL(device, pFlcn->pFlcnable, &pFlcn->engDescUc, &pFlcn->engDescBc);
flcnSetupIpHal(device, pFlcn);
}
/* -------------------- Object construction/initialization ------------------- */
/**
 * @brief Set HAL object-interface function pointers to the FLCN implementations.
 *
 * This function has to be at the end of the file so that all the
 * other functions are already defined.
*
* @param[in] pFlcn The flcn for which to set hals
*/
static void
flcnSetupHal
(
PFLCN pFlcn,
NvU32 pci_device_id
)
{
flcn_hal *pHal = NULL;
if (nvswitch_is_lr10_device_id(pci_device_id))
{
flcnSetupHal_LR10(pFlcn);
goto _flcnSetupHal_success;
}
NVSWITCH_PRINT(NULL, ERROR,
"Flcn hal can't be setup due to unknown device id\n");
NVSWITCH_ASSERT(0);
_flcnSetupHal_success:
//init hal OBJ Interfaces
pHal = pFlcn->pHal;
pHal->coreRevisionGet = _flcnCoreRevisionGet_IMPL;
pHal->markNotReady = _flcnMarkNotReady_IMPL;
pHal->cmdQueueHeadGet = _flcnCmdQueueHeadGet_IMPL;
pHal->msgQueueHeadGet = _flcnMsgQueueHeadGet_IMPL;
pHal->cmdQueueTailGet = _flcnCmdQueueTailGet_IMPL;
pHal->msgQueueTailGet = _flcnMsgQueueTailGet_IMPL;
pHal->cmdQueueHeadSet = _flcnCmdQueueHeadSet_IMPL;
pHal->msgQueueHeadSet = _flcnMsgQueueHeadSet_IMPL;
pHal->cmdQueueTailSet = _flcnCmdQueueTailSet_IMPL;
pHal->msgQueueTailSet = _flcnMsgQueueTailSet_IMPL;
pHal->dmemCopyFrom = _flcnDmemCopyFrom_IMPL;
pHal->dmemCopyTo = _flcnDmemCopyTo_IMPL;
pHal->postDiscoveryInit = _flcnPostDiscoveryInit_IMPL;
flcnQueueSetupHal(pFlcn);
flcnRtosSetupHal(pFlcn);
flcnQueueRdSetupHal(pFlcn);
}
static void
flcnSetupIpHal
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NvU8 coreRev = flcnableReadCoreRev(device, pFlcn->pFlcnable);
switch (coreRev) {
case NV_FLCN_CORE_REV_3_0:
{
flcnSetupHal_v03_00(pFlcn);
break;
}
case NV_FLCN_CORE_REV_4_0:
case NV_FLCN_CORE_REV_4_1:
{
flcnSetupHal_v04_00(pFlcn);
break;
}
case NV_FLCN_CORE_REV_5_0:
case NV_FLCN_CORE_REV_5_1:
{
flcnSetupHal_v05_01(pFlcn);
break;
}
case NV_FLCN_CORE_REV_6_0:
{
flcnSetupHal_v06_00(pFlcn);
break;
}
default:
{
NVSWITCH_PRINT(device, ERROR,
"%s: Unsupported falcon core revision: %hhu!\n",
__FUNCTION__, coreRev);
NVSWITCH_ASSERT(0);
break;
}
}
}
FLCN *
flcnAllocNew(void)
{
FLCN *pFlcn = nvswitch_os_malloc(sizeof(*pFlcn));
if (pFlcn != NULL)
{
nvswitch_os_memset(pFlcn, 0, sizeof(*pFlcn));
}
return pFlcn;
}
NvlStatus
flcnInit
(
nvswitch_device *device,
FLCN *pFlcn,
NvU32 pci_device_id
)
{
NvlStatus retval = NVL_SUCCESS;
// allocate hal if a child class hasn't already
if (pFlcn->pHal == NULL)
{
flcn_hal *pHal = pFlcn->pHal = nvswitch_os_malloc(sizeof(*pHal));
if (pHal == NULL)
{
NVSWITCH_PRINT(device, ERROR, "Flcn allocation failed!\n");
retval = -NVL_NO_MEM;
goto flcn_init_fail;
}
nvswitch_os_memset(pHal, 0, sizeof(*pHal));
}
//don't have a parent class to init, go straight to setupHal
flcnSetupHal(pFlcn, pci_device_id);
return retval;
flcn_init_fail:
flcnDestroy(device, pFlcn);
return retval;
}
// reverse of flcnInit()
void
flcnDestroy
(
nvswitch_device *device,
FLCN *pFlcn
)
{
if (pFlcn->pHal != NULL)
{
nvswitch_os_free(pFlcn->pHal);
pFlcn->pHal = NULL;
}
}


@@ -0,0 +1,219 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "flcn/haldefs_flcnable_nvswitch.h"
#include "flcn/flcnable_nvswitch.h"
#include "flcnifcmn.h"
#include "export_nvswitch.h"
#include "common_nvswitch.h"
typedef struct FALCON_EXTERNAL_CONFIG FALCON_EXTERNAL_CONFIG, *PFALCON_EXTERNAL_CONFIG;
typedef struct FLCN_QMGR_SEQ_INFO FLCN_QMGR_SEQ_INFO, *PFLCN_QMGR_SEQ_INFO;
typedef union RM_FLCN_CMD RM_FLCN_CMD, *PRM_FLCN_CMD;
typedef union RM_FLCN_MSG RM_FLCN_MSG, *PRM_FLCN_MSG;
typedef struct ENGINE_DESCRIPTOR_TYPE ENGINE_DESCRIPTOR_TYPE, *PENGINE_DESCRIPTOR_TYPE;
// OBJECT Interfaces
NvU8
flcnableReadCoreRev
(
nvswitch_device *device,
PFLCNABLE pFlcnable
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->readCoreRev != (void *)0);
return pFlcnable->pHal->readCoreRev(device, pFlcnable);
}
void
flcnableGetExternalConfig
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PFALCON_EXTERNAL_CONFIG pConfig
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->getExternalConfig != (void *)0);
pFlcnable->pHal->getExternalConfig(device, pFlcnable, pConfig);
}
void
flcnableEmemCopyFrom
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
NvU32 src,
NvU8 *pDst,
NvU32 sizeBytes,
NvU8 port
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->ememCopyFrom != (void *)0);
pFlcnable->pHal->ememCopyFrom(device, pFlcnable, src, pDst, sizeBytes, port);
}
void
flcnableEmemCopyTo
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
NvU32 dst,
NvU8 *pSrc,
NvU32 sizeBytes,
NvU8 port
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->ememCopyTo != (void *)0);
pFlcnable->pHal->ememCopyTo(device, pFlcnable, dst, pSrc, sizeBytes, port);
}
NV_STATUS
flcnableHandleInitEvent
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
RM_FLCN_MSG *pGenMsg
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->handleInitEvent != (void *)0);
return pFlcnable->pHal->handleInitEvent(device, pFlcnable, pGenMsg);
}
PFLCN_QMGR_SEQ_INFO
flcnableQueueSeqInfoGet
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
NvU32 seqIndex
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->queueSeqInfoGet != (void *)0);
return pFlcnable->pHal->queueSeqInfoGet(device, pFlcnable, seqIndex);
}
void
flcnableQueueSeqInfoClear
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->queueSeqInfoClear != (void *)0);
pFlcnable->pHal->queueSeqInfoClear(device, pFlcnable, pSeqInfo);
}
void
flcnableQueueSeqInfoFree
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->queueSeqInfoFree != (void *)0);
pFlcnable->pHal->queueSeqInfoFree(device, pFlcnable, pSeqInfo);
}
NvBool
flcnableQueueCmdValidate
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
RM_FLCN_CMD *pCmd,
RM_FLCN_MSG *pMsg,
void *pPayload,
NvU32 queueIdLogical
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->queueCmdValidate != (void *)0);
return pFlcnable->pHal->queueCmdValidate(device, pFlcnable, pCmd, pMsg, pPayload, queueIdLogical);
}
NV_STATUS
flcnableQueueCmdPostExtension
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
RM_FLCN_CMD *pCmd,
RM_FLCN_MSG *pMsg,
void *pPayload,
NVSWITCH_TIMEOUT *pTimeout,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->queueCmdPostExtension != (void *)0);
return pFlcnable->pHal->queueCmdPostExtension(device, pFlcnable, pCmd, pMsg, pPayload, pTimeout, pSeqInfo);
}
void
flcnablePostDiscoveryInit
(
nvswitch_device *device,
FLCNABLE *pFlcnable
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->postDiscoveryInit != (void *)0);
pFlcnable->pHal->postDiscoveryInit(device, pFlcnable);
}
// HAL Interfaces
NV_STATUS
flcnableConstruct_HAL
(
nvswitch_device *device,
FLCNABLE *pFlcnable
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->construct != (void *)0);
return pFlcnable->pHal->construct(device, pFlcnable);
}
void
flcnableDestruct_HAL
(
nvswitch_device *device,
FLCNABLE *pFlcnable
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->destruct != (void *)0);
pFlcnable->pHal->destruct(device, pFlcnable);
}
void
flcnableFetchEngines_HAL
(
nvswitch_device *device,
FLCNABLE *pFlcnable,
ENGINE_DESCRIPTOR_TYPE *pEngDescUc,
ENGINE_DESCRIPTOR_TYPE *pEngDescBc
)
{
NVSWITCH_ASSERT(pFlcnable->pHal->fetchEngines != (void *)0);
pFlcnable->pHal->fetchEngines(device, pFlcnable, pEngDescUc, pEngDescBc);
}


@@ -0,0 +1,359 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "flcn/flcnable_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
#include "common_nvswitch.h"
#include "nvstatus.h"
/*!
* @brief Read the falcon core revision and subversion.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcnable FLCNABLE object pointer
*
* @return @ref NV_FLCN_CORE_REV_X_Y.
*/
static NvU8
_flcnableReadCoreRev_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable
)
{
return flcnReadCoreRev_HAL(device, pFlcnable->pFlcn);
}
/*!
* @brief Get external config
*/
static void
_flcnableGetExternalConfig_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PFALCON_EXTERNAL_CONFIG pConfig
)
{
pConfig->bResetInPmc = NV_FALSE;
pConfig->blkcgBase = 0xffffffff;
pConfig->fbifBase = 0xffffffff;
}
/*!
* @brief Retrieve content from falcon's EMEM.
*/
static void
_flcnableEmemCopyFrom_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
NvU32 src,
NvU8 *pDst,
NvU32 sizeBytes,
NvU8 port
)
{
NVSWITCH_PRINT(device, ERROR,
"%s: FLCNABLE interface not implemented on this falcon!\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
}
/*!
* @brief Write content to falcon's EMEM.
*/
static void
_flcnableEmemCopyTo_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
NvU32 dst,
NvU8 *pSrc,
NvU32 sizeBytes,
NvU8 port
)
{
NVSWITCH_PRINT(device, ERROR,
"%s: FLCNABLE interface not implemented on this falcon!\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
}
/*
* @brief Handle INIT Event
*/
static NV_STATUS
_flcnableHandleInitEvent_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
RM_FLCN_MSG *pGenMsg
)
{
return NV_OK;
}
/*!
* @brief Retrieves a pointer to the engine specific SEQ_INFO structure.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcnable FLCNABLE object pointer
* @param[in] seqIndex Index of the structure to retrieve
*
* @return Pointer to the SEQ_INFO structure or NULL on invalid index.
*/
static PFLCN_QMGR_SEQ_INFO
_flcnableQueueSeqInfoGet_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
NvU32 seqIndex
)
{
NVSWITCH_PRINT(device, ERROR,
"%s: FLCNABLE interface not implemented on this falcon!\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
return NULL;
}
/*!
* @brief Clear out the engine specific portion of the SEQ_INFO structure.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcnable FLCNABLE object pointer
* @param[in] pSeqInfo SEQ_INFO structure pointer
*/
static void
_flcnableQueueSeqInfoClear_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
}
/*!
* @brief Free up all the engine specific sequence allocations.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcnable FLCNABLE object pointer
* @param[in] pSeqInfo SEQ_INFO structure pointer
*/
static void
_flcnableQueueSeqInfoFree_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
}
/*!
* @brief Validate that the given CMD and related params are properly formed.
*
* @copydoc flcnQueueCmdPostNonBlocking_IMPL
*
* @return Boolean if command was properly formed.
*/
static NvBool
_flcnableQueueCmdValidate_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PRM_FLCN_CMD pCmd,
PRM_FLCN_MSG pMsg,
void *pPayload,
NvU32 queueIdLogical
)
{
NVSWITCH_PRINT(device, ERROR,
"%s: FLCNABLE interface not implemented on this falcon!\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
return NV_FALSE;
}
/*!
* @brief Engine specific command post actions.
*
* @copydoc flcnQueueCmdPostNonBlocking_IMPL
*
* @return NV_OK on success
* Failure specific error codes
*/
static NV_STATUS
_flcnableQueueCmdPostExtension_IMPL
(
nvswitch_device *device,
PFLCNABLE pFlcnable,
PRM_FLCN_CMD pCmd,
PRM_FLCN_MSG pMsg,
void *pPayload,
NVSWITCH_TIMEOUT *pTimeout,
PFLCN_QMGR_SEQ_INFO pSeqInfo
)
{
return NV_OK;
}
static void
_flcnablePostDiscoveryInit_IMPL
(
nvswitch_device *device,
FLCNABLE *pSoe
)
{
flcnPostDiscoveryInit(device, pSoe->pFlcn);
}
/**
* @brief sets pEngDescUc and pEngDescBc to the discovered
* engine that matches this flcnable instance
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE pointer
* @param[out] pEngDescUc pointer to the UniCast Engine
* Descriptor Pointer
* @param[out] pEngDescBc pointer to the BroadCast Engine
* Descriptor Pointer
*/
static void
_flcnableFetchEngines_IMPL
(
nvswitch_device *device,
FLCNABLE *pSoe,
ENGINE_DESCRIPTOR_TYPE *pEngDescUc,
ENGINE_DESCRIPTOR_TYPE *pEngDescBc
)
{
    // Every falcon REALLY needs to implement this. If they don't, flcnRegRead and flcnRegWrite won't work.
NVSWITCH_PRINT(device, ERROR,
"%s: FLCNABLE interface not implemented on this falcon!\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
}
/* -------------------- Object construction/initialization ------------------- */
static void
flcnableSetupHal
(
FLCNABLE *pFlcnable,
NvU32 pci_device_id
)
{
flcnable_hal *pHal = pFlcnable->pHal;
//init hal Interfaces
pHal->readCoreRev = _flcnableReadCoreRev_IMPL;
pHal->getExternalConfig = _flcnableGetExternalConfig_IMPL;
pHal->ememCopyFrom = _flcnableEmemCopyFrom_IMPL;
pHal->ememCopyTo = _flcnableEmemCopyTo_IMPL;
pHal->handleInitEvent = _flcnableHandleInitEvent_IMPL;
pHal->queueSeqInfoGet = _flcnableQueueSeqInfoGet_IMPL;
pHal->queueSeqInfoClear = _flcnableQueueSeqInfoClear_IMPL;
pHal->queueSeqInfoFree = _flcnableQueueSeqInfoFree_IMPL;
pHal->queueCmdValidate = _flcnableQueueCmdValidate_IMPL;
pHal->queueCmdPostExtension = _flcnableQueueCmdPostExtension_IMPL;
pHal->postDiscoveryInit = _flcnablePostDiscoveryInit_IMPL;
pHal->fetchEngines = _flcnableFetchEngines_IMPL;
}
NvlStatus
flcnableInit
(
nvswitch_device *device,
FLCNABLE *pFlcnable,
NvU32 pci_device_id
)
{
NvlStatus retval;
FLCN *pFlcn = NULL;
// allocate hal if a child class hasn't already
if (pFlcnable->pHal == NULL)
{
flcnable_hal *pHal = pFlcnable->pHal = nvswitch_os_malloc(sizeof(*pHal));
if (pHal == NULL)
{
            NVSWITCH_PRINT(device, ERROR, "Flcnable hal allocation failed!\n");
retval = -NVL_NO_MEM;
goto flcnable_init_fail;
}
nvswitch_os_memset(pHal, 0, sizeof(*pHal));
}
// init flcn - a little out of place here, since we're really only
// supposed to be initializing hals. However, we need pci_device_id
// to initialize flcn's hals and flcn is _very_ closely tied to
// flcnable so it kind of makes some sense to allocate it here
pFlcn = pFlcnable->pFlcn = flcnAllocNew();
if (pFlcn == NULL)
{
NVSWITCH_PRINT(device, ERROR, "Flcn allocation failed!\n");
retval = -NVL_NO_MEM;
goto flcnable_init_fail;
}
retval = flcnInit(device, pFlcn, pci_device_id);
if (retval != NVL_SUCCESS)
{
goto flcnable_init_fail;
}
//don't have a parent class to init, go straight to setupHal
flcnableSetupHal(pFlcnable, pci_device_id);
return retval;
flcnable_init_fail:
flcnableDestroy(device, pFlcnable);
return retval;
}
// reverse of flcnableInit()
void
flcnableDestroy
(
nvswitch_device *device,
FLCNABLE *pFlcnable
)
{
if (pFlcnable->pFlcn != NULL)
{
flcnDestroy(device, pFlcnable->pFlcn);
nvswitch_os_free(pFlcnable->pFlcn);
pFlcnable->pFlcn = NULL;
}
if (pFlcnable->pHal != NULL)
{
nvswitch_os_free(pFlcnable->pHal);
pFlcnable->pHal = NULL;
}
}
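/*
 * Illustrative lifecycle sketch (not part of the driver): a child class such
 * as SOE may allocate its own hal before calling flcnableInit() (otherwise
 * flcnableInit() allocates a default one), and tears everything down with
 * flcnableDestroy(). The function name soeConstructExample() is hypothetical.
 *
 * @code
 *     static NvlStatus
 *     soeConstructExample(nvswitch_device *device, FLCNABLE *pFlcnable, NvU32 pci_device_id)
 *     {
 *         NvlStatus status = flcnableInit(device, pFlcnable, pci_device_id);
 *         if (status != NVL_SUCCESS)
 *         {
 *             // flcnableInit() already calls flcnableDestroy() on failure
 *             return status;
 *         }
 *         return NVL_SUCCESS;
 *     }
 * @endcode
 */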

View File

@@ -0,0 +1,48 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file flcndmem_nvswitch.c
* @brief FLCN Data-Memory Manager
*
* This module is intended to serve as the primary interface between all upper-
* level Falcon-object layers and the HAL-layer. It provides APIs for accessing
* the Falcon DMEM (read and write) as well as managing all allocations in the
* RM-managed region of the Falcon DMEM.
*
* DMEM allocations are satisfied out of a carved-out portion of the Falcon
* DMEM. The location of this region is determined when the Falcon image is
* built and is communicated to the RM from the Falcon via the INIT message
* that the Falcon sends upon initialization. Therefore, allocations cannot be
* satisfied until this message arrives (occurs immediately after STATE_LOAD).
*/
/* ------------------------ Includes --------------------------------------- */
#include "flcn/flcn_nvswitch.h"
#include "common_nvswitch.h"
/* ------------------------ Static Function Prototypes --------------------- */
/* ------------------------ Globals ---------------------------------------- */
/* ------------------------ Public Functions ------------------------------ */
/* ------------------------ Private Static Functions ----------------------- */

View File

@@ -0,0 +1,730 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvmisc.h"
#include "common_nvswitch.h"
#include "flcn/flcnable_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "flcn/flcnqueue_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
/*!
* @file flcnqueue_dmem_nvswitch.c
* @brief Provides all the fundamental logic for reading/writing queues.
*
* Queues are the primary communication mechanism between the RM and various
* falcon-based engines such as the PMU and Display Falcon. The RM requests
* actions by inserting a data packet (command) into a command queue. This
* generates an interrupt to the falcon which allows it to wake-up and service
* the request. Upon completion of the command, the falcon can optionally
* write an acknowledgment packet (message) into a separate queue designated
* for RM-bound messages.
*
* There are currently two types of queues used:
* 1) DMEM queues. The original queue type. This file contains the routines
* specific to DMEM queues.
 *  2) FB Queues. For information specific to FB Queues, see the HDR of
* flcnqueue_fb.c.
* File flcnqueue.c has all routines common to both types of queues.
*
* Each queue has distinct "head" and "tail" pointers. The "head" pointer marks
* the position in the queue where the next write operation will take place;
* the "tail" marks the position of the next read. When the head and tail
* pointers are equal, the queue is empty. When non-equal, data exists in the
 * queue that needs to be processed. Queues are always allocated contiguously in the
* falcon's DMEM. It may never be assumed that the queue's head pointer will
* always be greater than the tail pointer. Such a condition is legal and
* occurs when the head pointer approaches the end of the queue and data must
* be written at the beginning of the queue to fit. This is known as a
* "rewind" condition. For simplicity, wrapping is not supported. That is, a
* single packet cannot wrap around the boundaries of the queue. The writer of
* the queue must ensure that this never occurs. When the writer determines
* that a packet won't fit in the end of the queue, it must write a "rewind"
* command telling the reader to ignore the rest of the queue and look at the
* beginning of the queue for the next packet. When the reader finds the
* rewind packet, it must look to the beginning of the queue to find the packet
* to read. The writer is responsible for ensuring that sufficient space will
* always exist at the end of the queue for the rewind packet. The writer is
* also responsible for ensuring that sufficient space exists at the beginning
* of the queue for the real packet before writing the rewind command.
* Finally, upon a rewind condition, the writer is also responsible for
* ensuring that the head pointer never intercepts the tail pointer. Such a
* condition indicates that the queue is full, but is completely
* indistinguishable from the empty condition (in both cases head and tail are
* equivalent).
*
* The general queue insertion algorithm is as follows:
* @code
* if head >= tail
* if packet_size <= (queue_size - head - rewind_cmd_size)
* write packet
* else
* if packet_size <= (tail - queue_start - 1)
* write rewind command
* write packet
* else
* abort
* else
* if packet_size <= (tail - head - 1)
* write packet
* else
* abort
* @endcode
*
* This module provides a basic queue library to support this mechanism. For
* simplicity, this module makes minimal distinction between command queues and
* message queues. It simply provides APIs for opening a queue and performing
* basic read/write operations. The only complexity handled here is the
* rewind operation that is required as the end of a queue is reached during a
* write operation. This module handles that case by requiring the write size
* as a parameter to the "open for write" function. For the specifics, see
* @ref flcnQueueOpenWrite.
*
* The following diagrams may be used for reference in several of the space
* calculations performed by this module. The two most interesting queue states
* exist when the head pointer is greater than the tail and vice versa. Head
* equal to tail is just a sub-case of head greater than tail.
*
* <pre>
* (head > tail) (tail > head)
* .-+-+-+-+-+-+-. <-- qSize .-+-+-+-+-+-+-. <-- qSize
* | | | |
* | free | | used |
* | | | |
* +-------------+ <-- head +-------------+ <-- tail
* | | | |
* | | | |
* | used | | free |
* | | | |
* | | | |
* +-------------+ <-- tail +-------------+ <-- head
* | | | |
* | free | | used |
* | | | |
* `-+-+-+-+-+-+-' <-- qOffset `-+-+-+-+-+-+-' <-- qOffset
*
 * To be read bottom-to-top (low-address to high-address)
 * </pre>
 */
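/*
 * A simplified, standalone C rendering of the insertion algorithm sketched
 * above (illustrative only; the driver's actual space check lives in
 * _flcnQueueHasRoom_dmem() below, which additionally accounts for the queue
 * offset and DMEM alignment). All identifiers here are hypothetical.
 *
 * @code
 *     static NvBool
 *     packetFits(NvU32 head, NvU32 tail, NvU32 queueStart, NvU32 queueSize,
 *                NvU32 packetSize, NvU32 rewindCmdSize, NvBool *pBRewind)
 *     {
 *         *pBRewind = NV_FALSE;
 *         if (head >= tail)
 *         {
 *             // room at the end of the queue, reserving space for a rewind command
 *             if (packetSize <= (queueSize - head - rewindCmdSize))
 *                 return NV_TRUE;
 *             // otherwise, room at the beginning after writing a rewind command
 *             *pBRewind = NV_TRUE;
 *             return (packetSize <= (tail - queueStart - 1));
 *         }
 *         // head has wrapped; never allow the head to catch up to the tail
 *         return (packetSize <= (tail - head - 1));
 *     }
 * @endcode
 */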
static NV_STATUS _flcnQueueOpenWrite_dmem (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue, NvU32 writeSize);
static NV_STATUS _flcnQueuePop_dmem (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue, void *pData, NvU32 size, NvU32 *pBytesRead);
static void _flcnQueueRewind_dmem (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue);
static void _flcnQueuePush_dmem (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue, void *pData, NvU32 size);
static NV_STATUS _flcnQueueTailGet_dmem (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 *pTail);
static NV_STATUS _flcnQueueTailSet_dmem (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 tail );
static void _flcnQueueRead_dmem (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 offset, NvU8 *pDst, NvU32 sizeBytes);
static void _flcnQueueWrite_dmem (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 offset, NvU8 *pSrc, NvU32 sizeBytes);
static NV_STATUS _flcnQueueHasRoom_dmem (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 writeSize, NvBool *pBRewind);
/*!
* @brief Construct a Falcon Queue object for a DMEM queue.
*
* This is a constructor/initialization function for Falcon Queue objects.
* Callers can choose to either provide a pre-allocated Falcon Queue object or
* allow this function to perform the allocation. The former case is more
* ideal where a collection of queues must be allocated or when static
* allocation is desired.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
 * @param[out]    ppQueue     Pointer to write with the constructed queue object
 * @param[in]     queueLogId  Logical ID of the queue
 * @param[in]     queuePhyId  Physical ID of the queue
 * @param[in]     offset      Offset of the queue in the falcon DMEM
 * @param[in]     queueSize   Size of the queue in bytes
 * @param[in]     cmdHdrSize  Size of the command header in bytes
 *
 * @return 'NV_OK' upon successful construction/initialization.
 * @return 'NV_ERR_INVALID_POINTER' when the constructed queue pointer is NULL.
*/
NV_STATUS
flcnQueueConstruct_dmem_nvswitch
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE *ppQueue,
NvU32 queueLogId,
NvU32 queuePhyId,
NvU32 offset,
NvU32 queueSize,
NvU32 cmdHdrSize
)
{
PFLCNQUEUE pQueue;
NV_STATUS status = NV_OK;
status = flcnQueueConstruct_common_nvswitch(device, pFlcn,
ppQueue, // ppQueue
queueLogId, // Logical ID of the queue
queuePhyId, // Physical ID of the queue
offset, // offset
queueSize, // size
cmdHdrSize); // cmdHdrSize
if (status != NV_OK)
{
NVSWITCH_ASSERT(status == NV_OK);
return status;
}
if (*ppQueue == NULL)
{
NVSWITCH_ASSERT(*ppQueue != NULL);
return NV_ERR_INVALID_POINTER;
}
pQueue = *ppQueue;
pQueue->openWrite = _flcnQueueOpenWrite_dmem;
pQueue->rewind = _flcnQueueRewind_dmem;
pQueue->pop = _flcnQueuePop_dmem;
pQueue->push = _flcnQueuePush_dmem;
pQueue->tailGet = _flcnQueueTailGet_dmem;
pQueue->tailSet = _flcnQueueTailSet_dmem;
pQueue->read = _flcnQueueRead_dmem;
pQueue->write = _flcnQueueWrite_dmem;
pQueue->hasRoom = _flcnQueueHasRoom_dmem;
//
// Command size cannot be larger than queue size / 2. Otherwise, it is
// impossible to send two commands back to back if we start from the
// beginning of the queue.
//
pQueue->maxCmdSize = queueSize / 2;
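    // (e.g., a 0x200-byte queue yields a maxCmdSize of 0x100, so two maximally
    //  sized commands can be queued back to back from the start of the queue)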
return status;
}
/*!
* @brief Opens a queue for writing.
*
* Opens the given command queue for write operations. Any number of write
* operations may be performed between a call to 'open' and the subsequent call
* to 'close'. However, the full write-size of the entire transaction must be
* specified when the queue is opened to ensure that the transaction may be
* written into a contiguous portion of the queue (the falcon ucode does not
* support wrapping within a single transaction). This function handles all
* wrapping/rewinding of the queue as it becomes necessary to find space.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue The queue to open
* @param[in] writeSize The size (in bytes) of the entire transaction
*
* @return 'NV_OK' if the queue is successfully opened.
* @return 'NV_ERR_INSUFFICIENT_RESOURCES' if there is insufficient queue space
* @return 'NV_ERR_GENERIC' otherwise.
* @see flcnQueuePush
* @see flcnQueueRewind
*/
static NV_STATUS
_flcnQueueOpenWrite_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
NvU32 writeSize
)
{
NvBool bRewind = NV_FALSE;
NV_STATUS status;
//
// Verify that the queue is not already opened. This is not expected to
// occur.
//
if (pQueue->bOpened)
{
NVSWITCH_PRINT(device, ERROR,
"%s: unable to open queue (already opened, queueLogId=0x%x).\n",
__FUNCTION__, pQueue->queueLogId);
NVSWITCH_ASSERT(0);
return NV_ERR_GENERIC;
}
//
// Look at the queue's head and tail pointers and determine if enough space
// exists in the queue for the write.
//
status = _flcnQueueHasRoom_dmem(device, pFlcn, pQueue, writeSize, &bRewind);
if (NV_OK != status)
{
if (NV_ERR_INSUFFICIENT_RESOURCES == status)
{
NVSWITCH_PRINT(device, INFO,
"%s: queue is too full to write data (write-size=0x%x).\n",
__FUNCTION__, writeSize);
}
return status;
}
//
// Reaching this point indicates that the queue is successfully opened
// and sufficient space exists to write the desired data. Simply set the
// queue's write position, set the oflag, and mark the queue as "opened".
//
(void)pQueue->headGet(device, pFlcn, pQueue, &pQueue->position);
pQueue->oflag = FLCNQUEUE_OFLAG_WRITE;
pQueue->bOpened = NV_TRUE;
// finally, rewind the queue if necessary
if (bRewind)
{
pQueue->rewind(device, pFlcn, pQueue);
}
return NV_OK;
}
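/*
 * Illustrative caller-side write sequence (sketch only, not the driver's
 * command-post path): open the queue for the full transaction size, push the
 * packet, then close. The variable names are hypothetical, and closing with
 * NV_TRUE is assumed here to publish the updated queue position.
 *
 * @code
 *     if (pQueue->openWrite(device, pFlcn, pQueue, cmdSize) == NV_OK)
 *     {
 *         pQueue->push(device, pFlcn, pQueue, pCmd, cmdSize);
 *         pQueue->close(device, pFlcn, pQueue, NV_TRUE);
 *     }
 * @endcode
 */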
/*!
* @brief Reads a buffer of data from the given queue.
*
* Read a buffer of data from the given queue. This function does not
* interpret the data read in any way. Consequently, it cannot feasibly
* detect each and every rewind condition that is possible. For this
 * reason, it is up to the caller to interpret the returned data and
* rewind the queue as necessary. This function keeps track of the current
* read position in queue (set when the queue is opened). To maintain the
* required DMEM alignment, the queue position is updated with aligned-read
* size (size rounded-up to the next DMEM alignment).
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue The queue to read from
* @param[in] pData The buffer to write the read data to
* @param[in] size The number of bytes to read
* @param[out] pBytesRead The number of bytes read from the queue
*
* @return 'NV_OK' if the read operation is successful. 'NV_ERR_GENERIC' upon
* error. Reading zero bytes from an empty queue is not considered an
* error condition.
*
* @see flcnQueueRewind
*/
static NV_STATUS
_flcnQueuePop_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
void *pData,
NvU32 size,
NvU32 *pBytesRead
)
{
NvU32 head;
NvU32 tail;
NvU32 used;
    // set the bytes read to zero in case an error occurs
*pBytesRead = 0;
// ensure the queue is currently opened for read
if (!QUEUE_OPENED_FOR_READ(pQueue))
{
NVSWITCH_PRINT(device, ERROR,
"%s: queue not opened for read (queueLogId=0x%x).\n",
__FUNCTION__, pQueue->queueLogId);
NVSWITCH_ASSERT(0);
return NV_ERR_GENERIC;
}
//
// The calculations performed in this function are best described using
// head and tail terminology. The current head pointer values are always
// used whereas the cached queue position is used for the tail value. This
// allows read-operations to be transacted without writing the tail pointer
// for each read.
//
(void)pQueue->headGet(device, pFlcn, pQueue, &head);
tail = pQueue->position;
// there is no data in the queue when the head and tail are equal
if (head == tail)
{
return NV_OK;
}
//
// Calculate the used space in the queue (this limits how much can be read).
// Two cases:
// 1. When the head is greater than the tail the amount of data in the
// queue is defined by the difference between the head and tail
// pointers.
//
// 2. When the head is less than the tail, a potential rewind condition
// exists. In that case, the amount of data that can be read
// (without wrapping) is defined as the difference between the
// queue's size and the current tail pointer. Note that 'tail' is
// absolute so we need to factor-in the starting-offset of the queue.
//
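    //
    // Worked example (hypothetical values): with queueOffset=0x800 and
    // queueSize=0x100, a head of 0x860 and tail of 0x820 gives used=0x40
    // (case 1); a head of 0x820 and tail of 0x8C0 gives
    // used = 0x900 - 0x8C0 = 0x40 (case 2, potential rewind).
    //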
if (head > tail)
{
used = head - tail;
}
else
{
used = pQueue->queueOffset + pQueue->queueSize - tail;
}
// ensure we only read what is available and no more
if (size > used)
{
NVSWITCH_PRINT(device, ERROR,
"%s: suspicious read op - read size > used size. "
"(queueLogId=0x%x, read size=0x%x, used size=0x%x).\n",
__FUNCTION__, pQueue->queueLogId, size, used);
NVSWITCH_ASSERT(0);
// best thing we can do is cap the read size
size = used;
}
//
// Copy the data into the output buffer, update the queue's current
// position, and return the number of bytes that have been read.
//
pQueue->read(device, pFlcn, pQueue, tail, pData, size);
pQueue->position += NV_ALIGN_UP(size, QUEUE_ALIGNMENT);
*pBytesRead = size;
return NV_OK;
}
/*!
* @brief Rewinds a queue back to its starting offset in DMEM.
*
* When the queue is opened for "write", this function writes the rewind
* command to current queue position and updates the queue position to the
* beginning of the queue. When opened for "read", only the queue position
* is updated.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue The queue to rewind.
*
 * @pre The queue must be opened prior to calling this function
* @see flcnQueueOpenRead
* @see flcnQueueOpenWrite
*/
static void
_flcnQueueRewind_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue
)
{
RM_FLCN_CMD rewindCmd;
NvU32 size = 0;
//
// Ensure that the queue is opened before continuing. Failure here
// is never expected.
//
if (!pQueue->bOpened)
{
NVSWITCH_PRINT(device, ERROR,
"%s: queue not opened (queueLogId=0x%x).\n",
__FUNCTION__, pQueue->queueLogId);
NVSWITCH_ASSERT(0);
return;
}
    // write the rewind command when the queue is opened for "write"
if (QUEUE_OPENED_FOR_WRITE(pQueue))
{
// populate the rewind command
size = pQueue->populateRewindCmd(device, pFlcn, pQueue, &rewindCmd);
// write out the rewind command
pQueue->push(device, pFlcn, pQueue, &rewindCmd, size);
}
// manually set the queue position back to the beginning of the queue
pQueue->position = pQueue->queueOffset;
return;
}
/*!
* @brief Writes a buffer of data to a queue.
*
* Writes a buffer of data to the given command queue. This function
* cannot fail since space checks are performed during the call to open to
* ensure that sufficient space exists in the queue for the data.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue The queue to write to
* @param[in] pData The buffer of data to write
* @param[in] size The number of bytes to write from the buffer
*/
static void
_flcnQueuePush_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
void *pData,
NvU32 size
)
{
// ensure the queue is currently opened for write
if (!QUEUE_OPENED_FOR_WRITE(pQueue))
{
NVSWITCH_PRINT(device, ERROR,
"%s: queue not opened for write (queueLogId=0x%x).\n",
__FUNCTION__, pQueue->queueLogId);
NVSWITCH_ASSERT(0);
return;
}
    // write the data out to the falcon's DMEM (or EMEM)
pQueue->write(device, pFlcn, pQueue, pQueue->position, pData, size);
pQueue->position += NV_ALIGN_UP(size, QUEUE_ALIGNMENT);
return;
}
/*!
 * Checks a queue to see if it has room for writing data of a specific size.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue The queue to check for space
* @param[in] writeSize The amount of space to check for
* @param[out] pBRewind
* Set to 'NV_TRUE' when space may be found if the queue is rewound. This
* parameter is optional (may be NULL) for callers not interested in
* rewind information.
*
* @return 'NV_OK' if the queue contains space (has room) for the write.
* 'NV_ERR_INSUFFICIENT_RESOURCES' if queue is full.
*/
static NV_STATUS
_flcnQueueHasRoom_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
NvU32 writeSize,
NvBool *pBRewind
)
{
NvU32 head;
NvU32 tail;
NvU32 free = 0;
NvBool bRewind = NV_FALSE;
//
    // Align the writeSize up to the size the buffer will actually take in
// the queue.
//
writeSize = NV_ALIGN_UP(writeSize, QUEUE_ALIGNMENT);
// retrieve the current queue's head and tail pointers.
(void)pQueue->headGet(device, pFlcn, pQueue, &head);
(void)pQueue->tailGet(device, pFlcn, pQueue, &tail);
//
// In the case where the head pointer is greater than the tail pointer,
// calculate the amount of space in the command queue that may be used
// before a REWIND command must be written. Be sure to account for the
// size of the REWIND command to ensure it can ALWAYS be written.
//
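    // (e.g., with queueOffset=0x800, queueSize=0x100, head=0x8E0 and a
    //  cmdHdrSize of 0x8, free = 0x900 - 0x8E0 - 0x8 = 0x18 bytes remain
    //  before a rewind is required; values are hypothetical)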
if (head >= tail)
{
free = pQueue->queueOffset + pQueue->queueSize - head;
free -= pQueue->cmdHdrSize;
//
// Set the rewind flag to check if space would exist if the queue
// were rewound.
//
if (writeSize > free)
{
bRewind = NV_TRUE;
head = pQueue->queueOffset;
}
}
//
// In the event that the head pointer has wrapped around the queue and
    // the tail has not yet caught up, calculate the amount of space in the
// command queue that may be used before the head pointer reaches the tail
// pointer (this can never be allowed to happen). This condition is also
// met if a rewind condition is detected above.
//
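    // (e.g., with head=0x810 and tail=0x840, free = 0x840 - 0x810 - 1 = 0x2F
    //  bytes; values are hypothetical)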
if (head < tail)
{
//
// Subtract off one byte from the free space to guarantee that the tail
// is never allowed to be equal to the head pointer unless the queue is
// truly empty.
//
free = tail - head - 1;
}
// return the rewind flag
if (pBRewind != NULL)
{
*pBRewind = bRewind;
}
return (writeSize <= free) ? NV_OK : NV_ERR_INSUFFICIENT_RESOURCES;
}
/*!
* Retrieve the current tail pointer for given FLCN queue.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[out] pTail Pointer to write with the queue's tail value
*
* @return 'NV_OK' if the tail value was successfully retrieved.
* @return 'NV_ERR_GENERIC' otherwise
*/
static NV_STATUS
_flcnQueueTailGet_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
NvU32 *pTail
)
{
NVSWITCH_ASSERT(pFlcn->pQueueInfo != NULL);
if (RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pFlcn->pQueueInfo, pQueue->queueLogId))
{
return flcnCmdQueueTailGet(device, pFlcn, pQueue, pTail);
}
else
{
return flcnMsgQueueTailGet(device, pFlcn, pQueue, pTail);
}
}
/*!
* Set the tail pointer for the given FLCN queue.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue Pointer to the queue
* @param[in] tail The desired tail value
*
* @return 'NV_OK' if the tail value was successfully set.
* @return 'NV_ERR_GENERIC' otherwise
*/
static NV_STATUS
_flcnQueueTailSet_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
NvU32 tail
)
{
NVSWITCH_ASSERT(pFlcn->pQueueInfo != NULL);
if (RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pFlcn->pQueueInfo, pQueue->queueLogId))
{
return flcnCmdQueueTailSet(device, pFlcn, pQueue, tail);
}
else
{
return flcnMsgQueueTailSet(device, pFlcn, pQueue, tail);
}
}
/*!
* Read a buffer of data from FLCN queue.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue The queue to read from
* @param[in] offset Offset (from the start of DMEM) to start the read
* @param[out] pDst Buffer to store the read-data
* @param[in] sizeBytes The number of bytes to read
*
* @return void
*/
static void
_flcnQueueRead_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
NvU32 offset,
NvU8 *pDst,
NvU32 sizeBytes
)
{
if (pFlcn->bEmemEnabled)
{
flcnableEmemCopyFrom(device, pFlcn->pFlcnable,
offset, pDst, sizeBytes, 0);
}
else
{
if (flcnDmemCopyFrom(device, pFlcn, offset, pDst, sizeBytes, 0)
!= NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to copy from flcn DMEM\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
}
}
}
/*!
* Write a buffer of data to a FLCN queue.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
* @param[in] pQueue The queue to write to
* @param[in] offset Offset (from the start of DMEM) to start the write
* @param[in] pSrc Buffer containing the write-data
* @param[in] sizeBytes The number of bytes to write
*/
static void
_flcnQueueWrite_dmem
(
nvswitch_device *device,
PFLCN pFlcn,
PFLCNQUEUE pQueue,
NvU32 offset,
NvU8 *pSrc,
NvU32 sizeBytes
)
{
if (pFlcn->bEmemEnabled)
{
flcnableEmemCopyTo(device, pFlcn->pFlcnable,
offset, pSrc, sizeBytes, 0);
}
else
{
if (flcnDmemCopyTo(device, pFlcn, offset, pSrc, sizeBytes, 0)
!= NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to copy to flcn DMEM\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
}
}
}

View File

@@ -0,0 +1,56 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "flcn/flcnable_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "flcn/flcnqueue_nvswitch.h"
/*!
* @file flcnqueue_fb.c
* @brief Provides all functions specific to FB Queue (non-DMEM queues).
*
* Queues are the primary communication mechanism between the RM and various
* falcon-based engines such as the PMU and Display Falcon. The RM requests
* actions by inserting a data packet (command) into a command queue. This
* generates an interrupt to the falcon which allows it to wake-up and service
* the request. Upon completion of the command, the falcon can optionally
* write an acknowledgment packet (message) into a separate queue designated
 * for RM-bound messages. Every CMD sent via an FB CMD queue must receive a
 * response, as the response is required to clear that CMD queue element's
 * "in use" bit and free the DMEM allocation associated with it.
*
* For more information on FB Queue see:
* PMU FB Queue (RID-70296)
* For general queue information, see the HDR of flcnqueue.c.
* For information specific to DMEM queues, see the HDR of flcnqueue_dmem.c
*
* Each queue has distinct "head" and "tail" pointers. The "head" pointer is the
* index of the queue Element where the next write operation will take place;
* the "tail" marks the index of the queue Element for the next read. When the
* head and tail pointers are equal, the queue is empty. When non-equal, data
* exists in the queue that needs to be processed. Queues are always allocated
* in the Super Surface in FB.
*/
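/*
 * Minimal sketch of index-based head/tail checks for an FB queue (illustrative
 * only; numElements is hypothetical and the full-queue test is an assumption,
 * not taken from this driver):
 *
 * @code
 *     NvBool bEmpty = (head == tail);
 *     NvBool bFull  = (((head + 1) % numElements) == tail);
 * @endcode
 */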

File diff suppressed because it is too large

View File

@@ -0,0 +1,441 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "flcn/flcnqueue_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
/*!
* @file flcnqueuerd_nvswitch.c
*/
static NV_STATUS _flcnQueueReaderGetNextHeader(nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, void *pData, NvBool bMsg);
static NV_STATUS _flcnQueueReaderReadHeader (nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, void *pData, NvBool bMsg);
static NV_STATUS _flcnQueueReaderReadBody (nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, void *pData, NvBool bMsg);
#define NVSWITCH_PRINT_QUEUE_READER_ERR_CLOSING(id, status) \
NVSWITCH_PRINT(device, ERROR, \
"%s: error while closing queue (id=0x%x, status=" \
"0x%x).\n", __FUNCTION__, (id), (status))
#define NVSWITCH_PRINT_QUEUE_READER_INVALID_UNITID(id, unitId) \
NVSWITCH_PRINT(device, ERROR, \
"%s: invalid unit-id read from queue (qid=0x%x, " \
"uid=0x%x).\n", __FUNCTION__, (id), (unitId))
#define NVSWITCH_PRINT_QUEUE_READER_ERR_OPENING(id, status) \
NVSWITCH_PRINT(device, WARN, \
"%s: error while opening queue (id=0x%x, status=" \
"0x%x).\n", __FUNCTION__, (id), (status))
#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING(id, status) \
NVSWITCH_PRINT(device, ERROR, \
"%s: error while reading from queue (id=0x%x, " \
"status=0x%x).\n", __FUNCTION__, (id), (status))
#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGBODY(id, status) \
NVSWITCH_PRINT(device, ERROR, \
"%s: error reading body from queue (id=0x%x, " \
"status=0x%x).\n", __FUNCTION__, (id), (status))
#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(id, status) \
NVSWITCH_PRINT(device, ERROR, \
"%s: error reading header from queue (id=" \
"0x%x, status=0x%x).\n", __FUNCTION__, (id), (status))
#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING_UNKNOWN_DATA(id, status) \
NVSWITCH_PRINT(device, ERROR, \
"%s: unrecognizable data read from queue (id=0x%x, " \
"status=0x%x).\n", __FUNCTION__, (id), (status))
#define NVSWITCH_PRINT_QUEUE_READER_PRINT_HDR_READ_INFO(offset) \
NVSWITCH_PRINT(device, INFO, \
"%s: Reading a header from DMEM @ 0x%x.\n", \
__FUNCTION__, (offset))
/*!
* Reads the queue and retrieves the next unread message/command.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn Falcon object pointer
* @param[in] queueLogId Logical ID of the queue
* @param[in,out] pData The buffer to fill with the queue data
* @param[in] bMsg Message/Command
*
* @return NV_OK when the read operation is successful.
* NV_ERR_NOT_READY Queue is Empty
* NV_ERR_GENERIC otherwise.
*/
static NV_STATUS
_flcnQueueReadData_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 queueLogId,
void *pData,
NvBool bMsg
)
{
NV_STATUS status = NV_OK;
NV_STATUS retStatus = NV_OK;
FLCNQUEUE *pQueue;
PFALCON_QUEUE_INFO pQueueInfo;
RM_FLCN_QUEUE_HDR bufferGenHdr;
NVSWITCH_ASSERT(pFlcn != NULL);
pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
NVSWITCH_ASSERT(pQueueInfo->pQueues != NULL);
pQueue = &pQueueInfo->pQueues[queueLogId];
//
// If the queue is empty, simply return NV_ERR_NOT_READY to indicate that a message is
// not available.
//
if (pQueue->isEmpty(device, pFlcn, pQueue))
{
return NV_ERR_NOT_READY;
}
status = pQueue->openRead(device, pFlcn, pQueue);
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_OPENING(pQueue->queueLogId, status);
return status;
}
status = _flcnQueueReaderGetNextHeader(device, pFlcn, pQueue, pData, bMsg);
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(pQueue->queueLogId, status);
retStatus = status;
}
else
{
bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr :
((RM_FLCN_CMD *)pData)->cmdGen.hdr;
//
// If the size of the message in the header is greater than the size of
// the structure which will hold the message, then log a breakpoint.
        // Copying more data than the structure can hold can lead to a buffer overrun
// on the stack and lead to fatal errors. Logging a breakpoint here will
// make sure that we can catch this condition in release drivers by looking
// at the RmJournal.
//
// Note: When this happens, we are essentially not purging the message queue
// so the TAIL pointer will still point to the start of this message.
// The next time RM gets a new message from Falcon, it will try to purge this
// message and will keep on looping trying to purge. It will eventually
// bugcheck, but at least the breakpoint in the logs will point to this bug
//
if ((bufferGenHdr.size > pQueueInfo->maxMsgSize) && (bMsg))
{
retStatus = NV_ERR_GENERIC;
NVSWITCH_ASSERT(0);
}
//
// Check the message header to see if the message has a body. If it does,
// read it. It is not considered an error for a message to contain only
// a header.
//
else if (bufferGenHdr.size > RM_FLCN_QUEUE_HDR_SIZE)
{
status = _flcnQueueReaderReadBody(device, pFlcn, pQueue, pData, bMsg);
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGBODY(pQueue->queueLogId, status);
retStatus = status;
}
}
}
//
// Queue needs to be closed even if there is error in
// reading header/message above
//
status = pQueue->close(device, pFlcn, pQueue, NV_TRUE);
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_CLOSING(pQueue->queueLogId, status);
//
// Update the retStatus only if there was no error reading
// header/message earlier.
//
if (NV_OK == retStatus)
{
retStatus = status;
}
}
return retStatus;
}
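/*
 * Illustrative caller-side sketch: retrieving the next RM-bound message
 * through flcnQueueReadData(), which dispatches to this implementation via
 * the HAL (see flcnQueueRdSetupHal below). The local variable names are
 * hypothetical.
 *
 * @code
 *     RM_FLCN_MSG msg;
 *     NV_STATUS   status = flcnQueueReadData(device, pFlcn, msgQueueLogId,
 *                                            &msg, NV_TRUE);
 *     if (status == NV_ERR_NOT_READY)
 *     {
 *         // the queue was empty; nothing to process
 *     }
 * @endcode
 */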
/*!
* @brief Retrieves the next valid header from the queue.
*
* This function attempts to read a message header from the message queue. Upon
 * a successful read, the header is validated and a check is made to see if
 * the header read is the rewind header. If found, the queue is rewound and
 * another attempt is made to read a valid header.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn Falcon object pointer
* @param[in] pQueue The queue to read from
* @param[in] pData The buffer to fill-in
* @param[in] bMsg Msg/Cmd
*
* @return 'NV_OK' If a VALID message is read from the message queue.
* @return 'NV_ERR_GENERIC' Otherwise.
*/
static NV_STATUS
_flcnQueueReaderGetNextHeader
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
void *pData,
NvBool bMsg
)
{
NV_STATUS status;
RM_FLCN_QUEUE_HDR bufferGenHdr;
// attempt to read a message header from the message queue
status = _flcnQueueReaderReadHeader(device, pFlcn, pQueue, pData, bMsg);
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(pQueue->queueLogId, status);
return NV_ERR_GENERIC;
}
bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr :
((RM_FLCN_CMD *)pData)->cmdGen.hdr;
//
// If the rewind header is received, rewind the message queue and re-
// attempt to read a message header.
//
if (bufferGenHdr.unitId == RM_FLCN_UNIT_ID_REWIND)
{
pQueue->rewind(device, pFlcn, pQueue);
status = _flcnQueueReaderReadHeader(device, pFlcn, pQueue, pData, bMsg);
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(pQueue->queueLogId, status);
return NV_ERR_GENERIC;
}
}
bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr :
((RM_FLCN_CMD *)pData)->cmdGen.hdr;
//
// Validate the header's unit identifier. This step is performed AFTER the
// rewind check as an optimization in the event that we did read a rewind
// message. In the event of receiving an invalid unit-id, the rewind check
// would also have failed.
//
if (!RM_FLCN_UNIT_ID_IS_VALID(pFlcn->pQueueInfo, bufferGenHdr.unitId))
{
NVSWITCH_PRINT_QUEUE_READER_INVALID_UNITID(pQueue->queueLogId, bufferGenHdr.unitId);
return NV_ERR_GENERIC;
}
return NV_OK;
}
/*!
* @brief Reads the body of a message/command into the buffer.
*
 * Simply performs a read operation on a previously opened queue in an attempt to
* read a message body. This function does not make any attempts to interpret
* the body's data.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn Falcon object pointer
* @param[in] pQueue The queue to read from
* @param[in] pData The buffer to fill-in
* @param[in] bMsg Msg/Cmd
*
* @return 'NV_OK' If a message is read from the message queue.
* @return 'NV_ERR_GENERIC' Otherwise.
*/
static NV_STATUS
_flcnQueueReaderReadBody
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
void *pData,
NvBool bMsg
)
{
NvU32 bytesRead;
NvU32 readSize;
NV_STATUS status;
RM_FLCN_QUEUE_HDR bufferGenHdr;
NVSWITCH_ASSERT(!pQueue->isEmpty(device, pFlcn, pQueue));
bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr :
((RM_FLCN_CMD *)pData)->cmdGen.hdr;
//
// The header contains the size to read for the message/command body. Note that
// size in the header accounts for the size of the header itself.
//
readSize = bufferGenHdr.size - RM_FLCN_QUEUE_HDR_SIZE;
if(bMsg)
{
status = pQueue->pop(device, pFlcn, pQueue, &((RM_FLCN_MSG *)pData)->msgGen.msg,
readSize, &bytesRead);
}
else
{
status = pQueue->pop(device, pFlcn, pQueue, &((RM_FLCN_CMD *)pData)->cmdGen.cmd,
readSize, &bytesRead);
}
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING(pQueue->queueLogId, status);
return status;
}
//
    // The number of bytes should always be greater than zero by virtue of the
    // fact that the queue is known to be non-empty at this point.
//
NVSWITCH_ASSERT(bytesRead != 0);
//
// Verify that enough data is read to constitute a full message body.
// Anything less is considered a logic error as it indicates that we are
    // out of sync with the data that's in the queue (i.e. we cannot recognize
// it). This is not expected to occur.
//
if (bytesRead != readSize)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING_UNKNOWN_DATA(pQueue->queueLogId, status);
NVSWITCH_ASSERT(0);
return NV_ERR_GENERIC;
}
return NV_OK;
}
/*!
* @brief Read a message/command header from the given queue.
*
 * Simply performs a read operation on a previously opened queue in an attempt
 * to read a message header. This function does not make any attempts to
 * interpret or validate the message header.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn Falcon object pointer
* @param[in] pQueue The queue to read from
* @param[in] pData The buffer to fill-in
* @param[in] bMsg Msg/Cmd
*
* @return 'NV_OK' If a message is read from the message queue.
* @return 'NV_ERR_INVALID_STATE' If queue is empty.
* @return 'NV_ERR_GENERIC' Otherwise.
*/
static NV_STATUS
_flcnQueueReaderReadHeader
(
nvswitch_device *device,
PFLCN pFlcn,
FLCNQUEUE *pQueue,
void *pData,
NvBool bMsg
)
{
NvU32 bytesRead;
NV_STATUS status;
if (pQueue->isEmpty(device, pFlcn, pQueue))
{
NVSWITCH_ASSERT(0);
return NV_ERR_INVALID_STATE;
}
NVSWITCH_PRINT_QUEUE_READER_PRINT_HDR_READ_INFO(pQueue->position);
if(bMsg)
{
// read a header's worth of data from the queue
status = pQueue->pop(
device, pFlcn, pQueue, &((RM_FLCN_MSG *)pData)->msgGen.hdr,
RM_FLCN_QUEUE_HDR_SIZE, &bytesRead);
}
else
{
status = pQueue->pop(
device, pFlcn, pQueue, &((RM_FLCN_CMD *)pData)->cmdGen.hdr,
RM_FLCN_QUEUE_HDR_SIZE, &bytesRead);
}
if (status != NV_OK)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING(pQueue->queueLogId, status);
return status;
}
//
    // The number of bytes should always be greater than zero by virtue of the
    // fact that the queue is known to be non-empty at this point.
//
NVSWITCH_ASSERT(bytesRead != 0);
//
// Verify that enough data is read to constitute a full header. Anything
// less is considered a logic error as it indicates that we are out of sync
    // with the data that's in the queue (i.e. we cannot recognize it). This is
// not expected to occur.
//
if (bytesRead != RM_FLCN_QUEUE_HDR_SIZE)
{
NVSWITCH_PRINT_QUEUE_READER_ERR_READING_UNKNOWN_DATA(pQueue->queueLogId, status);
NVSWITCH_ASSERT(0);
return NV_ERR_GENERIC;
}
return NV_OK;
}
void
flcnQueueRdSetupHal
(
FLCN *pFlcn
)
{
flcn_hal *pHal = pFlcn->pHal;
pHal->queueReadData = _flcnQueueReadData_IMPL;
}

View File

@@ -0,0 +1,129 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file flcnrtosdebug_nvswitch.c
* @brief Provides support for capturing RTOS's state in case of Falcon
* related failures.
*/
/* ------------------------- Includes --------------------------------------- */
#include "common_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "flcn/flcnable_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
#include "flcn/flcnrtosdebug_nvswitch.h"
/*!
 * Dump the complete command queue by iterating from the tail to the head pointer
*
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn FLCN pointer
* @param[in] queueLogId Logical ID of the queue
* @param[in] pFlcnCmd Pointer to the command buffer to read
*
*/
NV_STATUS
flcnRtosDumpCmdQueue_nvswitch
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 queueLogId,
RM_FLCN_CMD *pFlcnCmd
)
{
FLCNQUEUE *pQueue;
NvU32 head;
NvU32 tail;
NvU32 tailcache;
NV_STATUS status = NV_OK;
PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo;
pQueue = &pQueueInfo->pQueues[queueLogId];
(void)pQueue->tailGet(device, pFlcn, pQueue, &tail);
(void)pQueue->headGet(device, pFlcn, pQueue, &head);
// caching the current tail pointer
(void)pQueue->tailGet(device, pFlcn, pQueue, &tailcache);
if (head == tail)
{
return status;
}
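    //
    // Each flcnQueueReadData() call pops one command and commits the read,
    // advancing the queue tail; the loop therefore re-reads the tail after
    // every iteration and restores the tail cached above once the dump is
    // complete.
    //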
while (tail != head)
{
status = flcnQueueReadData(device,pFlcn,
queueLogId,
pFlcnCmd, NV_FALSE);
NVSWITCH_PRINT(device, ERROR, "%s:" \
"Cmd_Dump UnitId %d size %d sq %d ctl %d cmd %d\n",
__FUNCTION__,
pFlcnCmd->cmdGen.hdr.unitId,
pFlcnCmd->cmdGen.hdr.size,
pFlcnCmd->cmdGen.hdr.seqNumId,
pFlcnCmd->cmdGen.hdr.ctrlFlags,
pFlcnCmd->cmdGen.cmd);
(void)pQueue->tailGet(device, pFlcn, pQueue, &tail);
}
// restoring the cached tail pointer
(void)pQueue->tailSet(device, pFlcn, pQueue, tailcache);
return status;
}
/*!
 * @brief Populates the falcon debug-info DMEM offset in the FLCN object's
 *        internal debug info structure
 *
 * @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN pointer
* @param[in] debugInfoDmemOffset DMEM offset of the falcon debug info
*/
static void
_flcnDbgInfoDmemOffsetSet_IMPL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU16 debugInfoDmemOffset
)
{
pFlcn->debug.debugInfoDmemOffset = debugInfoDmemOffset;
}
void
flcnRtosSetupHal
(
FLCN *pFlcn
)
{
flcn_hal *pHal = pFlcn->pHal;
pHal->dbgInfoDmemOffsetSet = _flcnDbgInfoDmemOffsetSet_IMPL;
}

View File

@@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file flcn0300_nvswitch.c
* @brief Provides the implementation for all falcon 3.0 HAL interfaces.
*/
#include "nvmisc.h"
#include "common_nvswitch.h"
#include "flcn/flcnable_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "nvswitch/lr10/dev_falcon_v4.h"
/*!
* @brief Get information about the falcon core
*
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn FLCN pointer
*
* @returns nothing
*/
static void
_flcnGetCoreInfo_v03_00
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NvU32 hwcfg1 = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_HWCFG1);
if (FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG1, _SECURITY_MODEL, _HEAVY, hwcfg1))
{
NVSWITCH_PRINT(device, INFO,
"%s: Engine '%s' is using the heavy security model\n",
__FUNCTION__, flcnGetName_HAL(device, pFlcn));
}
// Save off the security model.
pFlcn->securityModel = DRF_VAL(_PFALCON, _FALCON_HWCFG1, _SECURITY_MODEL, hwcfg1);
// Combine Falcon core revision and subversion for easy version comparison.
pFlcn->coreRev = flcnableReadCoreRev(device, pFlcn->pFlcnable);
pFlcn->supportsDmemApertures = FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG1, _DMEM_APERTURES, _ENABLE, hwcfg1);
}
/**
* @brief set hal function pointers for functions defined in v03_00 (i.e. this file)
*
* this function has to be at the end of the file so that all the
* other functions are already defined.
*
* @param[in] pFlcn The flcn for which to set hals
*/
void
flcnSetupHal_v03_00
(
PFLCN pFlcn
)
{
flcn_hal *pHal = pFlcn->pHal;
pHal->getCoreInfo = _flcnGetCoreInfo_v03_00;
}

View File

@@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file flcn0400_nvswitch.c
* @brief Provides the implementation for all falcon 04.00 HAL interfaces.
*/
#include "flcn/flcn_nvswitch.h"
/**
* @brief set hal function pointers for functions defined in v04_00 (i.e. this file)
*
* this function has to be at the end of the file so that all the
* other functions are already defined.
*
* @param[in] pFlcn The flcn for which to set hals
*/
void
flcnSetupHal_v04_00
(
PFLCN pFlcn
)
{
// default to using definitions from v03_00
flcnSetupHal_v03_00(pFlcn);
}

View File

@@ -0,0 +1,48 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file flcn0501_nvswitch.c
* @brief Provides the implementation for all falcon 5.1 HAL interfaces.
*/
#include "flcn/flcn_nvswitch.h"
/**
* @brief set hal function pointers for functions defined in v05_01 (i.e. this file)
*
* this function has to be at the end of the file so that all the
* other functions are already defined.
*
* @param[in] pFlcn The flcn for which to set hals
*/
void
flcnSetupHal_v05_01
(
PFLCN pFlcn
)
{
// default to using definitions from v04_00
flcnSetupHal_v04_00(pFlcn);
}

View File

@@ -0,0 +1,48 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file flcn0600_nvswitch.c
* @brief Provides the implementation for all falcon 06.00 HAL interfaces.
*/
#include "flcn/flcn_nvswitch.h"
/**
* @brief set hal function pointers for functions defined in v06_00 (i.e. this file)
*
* this function has to be at the end of the file so that all the
* other functions are already defined.
*
* @param[in] pFlcn The flcn for which to set hals
*/
void
flcnSetupHal_v06_00
(
PFLCN pFlcn
)
{
// default to using definitions from v05_01
flcnSetupHal_v05_01(pFlcn);
}

View File

@@ -0,0 +1,33 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _BIOS_NVSWITCH_H_
#define _BIOS_NVSWITCH_H_
#include "common_nvswitch.h"
NvlStatus nvswitch_bios_read(nvswitch_device *, NvU32, void *);
NvlStatus nvswitch_bios_read_size(nvswitch_device *, NvU32 *);
NvlStatus nvswitch_bios_get_image(nvswitch_device *device);
#endif //_BIOS_NVSWITCH_H_

View File

@@ -0,0 +1,537 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _COMMON_NVSWITCH_H_
#define _COMMON_NVSWITCH_H_
#ifdef INCLUDE_NVLINK_LIB
#include "nvlink.h"
#endif
#include "export_nvswitch.h"
#include "error_nvswitch.h"
#include "io_nvswitch.h"
#include "rom_nvswitch.h"
#include "haldef_nvswitch.h"
#include "nvctassert.h"
#include "flcn/flcnable_nvswitch.h"
#include "inforom/inforom_nvswitch.h"
#include "spi_nvswitch.h"
#include "smbpbi_nvswitch.h"
#include "nvCpuUuid.h"
#define NVSWITCH_GET_BIT(v, p) (((v) >> (p)) & 1)
#define NVSWITCH_SET_BIT(v, p) ((v) | NVBIT(p))
#define NVSWITCH_CLEAR_BIT(v, p) ((v) & ~NVBIT(p))
#define NVSWITCH_MASK_BITS(n) (~(0xFFFFFFFF << (n)))
static NV_INLINE NvBool nvswitch_test_flags(NvU32 val, NvU32 flags)
{
return !!(val & flags);
}
static NV_INLINE void nvswitch_set_flags(NvU32 *val, NvU32 flags)
{
*val |= flags;
}
static NV_INLINE void nvswitch_clear_flags(NvU32 *val, NvU32 flags)
{
*val &= ~flags;
}
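/*!
 * Illustrative sketch (not part of the driver): how the bit and flag helpers
 * above compose. The flag values used here are hypothetical.
 *
 * @code
 *     NvU32 state = 0;
 *
 *     state = NVSWITCH_SET_BIT(state, 3);        // state == 0x8
 *     if (NVSWITCH_GET_BIT(state, 3))
 *     {
 *         state = NVSWITCH_CLEAR_BIT(state, 3);  // state == 0x0
 *     }
 *
 *     nvswitch_set_flags(&state, 0x5);           // hypothetical flag mask
 *     if (nvswitch_test_flags(state, 0x4))
 *     {
 *         nvswitch_clear_flags(&state, 0x4);     // state == 0x1
 *     }
 * @endcode
 */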
// Destructive operation to reverse bits in a mask
#define NVSWITCH_REVERSE_BITMASK_32(numBits, mask) \
{ \
NvU32 i, reverse = 0; \
FOR_EACH_INDEX_IN_MASK(32, i, mask) \
{ \
reverse |= NVBIT((numBits - 1) - i); \
} \
FOR_EACH_INDEX_IN_MASK_END; \
\
mask = reverse; \
}
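/*!
 * Illustrative sketch (not part of the driver): reversing a two-link mask
 * within a 4-bit field using the macro above. Note that the macro modifies
 * its 'mask' argument in place.
 *
 * @code
 *     NvU32 mask = 0x3;                       // bits 0 and 1 set
 *     NVSWITCH_REVERSE_BITMASK_32(4, mask);
 *     // mask is now 0xC (bits 2 and 3 set)
 * @endcode
 */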
#define NVSWITCH_CHECK_STATUS(_d, _status) \
if (_status != NVL_SUCCESS) \
{ \
NVSWITCH_PRINT(_d, MMIO, "%s(%d): status=%d\n", \
__FUNCTION__, __LINE__, \
_status); \
}
#define IS_RTLSIM(device) (device->is_rtlsim)
#define IS_FMODEL(device) (device->is_fmodel)
#define IS_EMULATION(device) (device->is_emulation)
#define NVSWITCH_DEVICE_NAME "nvswitch"
#define NVSWITCH_LINK_NAME "link"
// Max string length of sprintf("%d", valid_instance), bounded by the compile-time check below
#if NVSWITCH_DEVICE_INSTANCE_MAX < 100
#define NVSWITCH_INSTANCE_LEN 2
#endif
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
#define NVSWITCH_DBG_LEVEL NVSWITCH_DBG_LEVEL_INFO
#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
#define NVSWITCH_PRINT(_d, _lvl, _fmt, ...) \
((NVSWITCH_DBG_LEVEL <= NVSWITCH_DBG_LEVEL_ ## _lvl) ? \
nvswitch_os_print(NVSWITCH_DBG_LEVEL_ ## _lvl, \
"%s[%-5s]: " _fmt, \
((_d == NULL) ? \
"nvswitchx" : \
((nvswitch_device *)_d)->name), \
#_lvl, \
## __VA_ARGS__) : \
((void)(0)) \
)
#else
#define NVSWITCH_PRINT(_d, _lvl, _fmt, ...) ((void)0)
#endif
#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
#define nvswitch_os_malloc(_size) \
nvswitch_os_malloc_trace(_size, __FILE__, __LINE__)
#else
#define nvswitch_os_malloc(_size) \
nvswitch_os_malloc_trace(_size, NULL, 0)
#endif
//
// This macro should be used to check assertion statements and print Error messages.
//
#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
#define NVSWITCH_ASSERT(_cond) \
nvswitch_os_assert_log((_cond), "NVSwitch: Assertion failed in %s() at %s:%d\n", \
__FUNCTION__ , __FILE__, __LINE__)
#else
#define NVSWITCH_ASSERT(_cond) \
nvswitch_os_assert_log((_cond), "NVSwitch: Assertion failed \n")
#endif
#define NVSWITCH_ASSERT_ERROR_INFO(errorCategory, errorInfo) NVSWITCH_ASSERT(0x0)
#define NVSWITCH_ASSERT_INFO(errCode, errLinkMask, errSubcode) NVSWITCH_ASSERT(0x0)
//
// This macro should be used cautiously as it prints information in the release
// drivers.
//
#define NVSWITCH_PRINT_SXID(_d, _sxid, _fmt, ...) \
do \
{ \
NVSWITCH_ASSERT(nvswitch_translate_hw_error(_sxid) != NVSWITCH_NVLINK_HW_GENERIC); \
nvswitch_os_print(NVSWITCH_DBG_LEVEL_ERROR, \
"nvidia-%s: SXid (PCI:" NVLINK_PCI_DEV_FMT "): %05d, " _fmt, \
(_d)->name, NVLINK_PCI_DEV_FMT_ARGS(&(_d)->nvlink_device->pciInfo), _sxid, \
##__VA_ARGS__); \
nvswitch_lib_smbpbi_log_sxid(_d, _sxid, _fmt, ##__VA_ARGS__); \
nvswitch_inforom_bbx_add_sxid(_d, _sxid, 0, 0, 0); \
} while(0)
#define NVSWITCH_DEV_CMD_DISPATCH_WITH_PRIVATE_DATA(cmd, function, type, private)\
case cmd: \
{ \
if (sizeof(type) == size) \
{ \
retval = function(device, params, private); \
} \
else \
{ \
retval = -NVL_BAD_ARGS; \
} \
break; \
}
#define NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, supported, function, type) \
case cmd: \
{ \
if (!supported) \
{ \
retval = -NVL_ERR_NOT_SUPPORTED; \
} \
else if (sizeof(type) == size) \
{ \
retval = function(device, params); \
} \
else \
{ \
retval = -NVL_BAD_ARGS; \
} \
break; \
}
#define NVSWITCH_DEV_CMD_DISPATCH(cmd, function, type) \
NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, NV_TRUE, function, type)
#define NVSWITCH_MODS_CMDS_SUPPORTED NV_FALSE
#if defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS)
#define NVSWITCH_TEST_CMDS_SUPPORTED NV_TRUE
#else
#define NVSWITCH_TEST_CMDS_SUPPORTED NV_FALSE
#endif
#define NVSWITCH_DEV_CMD_DISPATCH_MODS(cmd, function, type) \
NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, NVSWITCH_MODS_CMDS_SUPPORTED, function, type)
#define NVSWITCH_DEV_CMD_DISPATCH_TEST(cmd, function, type) \
NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, NVSWITCH_TEST_CMDS_SUPPORTED, function, type)
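/*!
 * Illustrative sketch (not part of the driver): how the dispatch macros above
 * expand inside an ioctl-style switch statement. The command id, handler and
 * parameter type names are hypothetical; the macros assume local variables
 * 'device', 'params', 'size' and 'retval' are in scope at the call site.
 *
 * @code
 *     switch (cmd)
 *     {
 *         NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_EXAMPLE,
 *                                   nvswitch_ctrl_example,
 *                                   NVSWITCH_EXAMPLE_PARAMS);
 *         default:
 *             retval = -NVL_BAD_ARGS;
 *             break;
 *     }
 * @endcode
 */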
#define NVSWITCH_MAX_NUM_LINKS 100
#if NVSWITCH_MAX_NUM_LINKS <= 100
#define NVSWITCH_LINK_INSTANCE_LEN 2
#endif
extern const nvlink_link_handlers nvswitch_link_handlers;
//
// link_info is used to store private link information
//
typedef struct
{
char name[sizeof(NVSWITCH_LINK_NAME) + NVSWITCH_LINK_INSTANCE_LEN];
} LINK_INFO;
typedef struct
{
NvU32 external_fabric_mgmt;
NvU32 txtrain_control;
NvU32 crossbar_DBI;
NvU32 link_DBI;
NvU32 ac_coupled_mask;
NvU32 ac_coupled_mask2;
NvU32 swap_clk;
NvU32 link_enable_mask;
NvU32 link_enable_mask2;
NvU32 bandwidth_shaper;
NvU32 ssg_control;
NvU32 skip_buffer_ready;
NvU32 enable_pm;
NvU32 chiplib_forced_config_link_mask;
NvU32 chiplib_forced_config_link_mask2;
NvU32 soe_dma_self_test;
NvU32 soe_disable;
NvU32 soe_enable;
NvU32 soe_boot_core;
NvU32 latency_counter;
NvU32 nvlink_speed_control;
NvU32 inforom_bbx_periodic_flush;
NvU32 inforom_bbx_write_periodicity;
NvU32 inforom_bbx_write_min_duration;
NvU32 ato_control;
NvU32 sto_control;
NvU32 minion_disable;
NvU32 set_ucode_target;
NvU32 set_simmode;
NvU32 set_smf_settings;
NvU32 select_uphy_tables;
NvU32 link_training_mode;
NvU32 i2c_access_control;
NvU32 link_recal_settings;
NvU32 crc_bit_error_rate_short;
NvU32 crc_bit_error_rate_long;
} NVSWITCH_REGKEY_TYPE;
//
// Background tasks
//
typedef struct NVSWITCH_TASK
{
struct NVSWITCH_TASK *next;
void (*task_fn)(nvswitch_device *);
NvU64 period_nsec;
NvU64 last_run_nsec;
NvU32 flags;
} NVSWITCH_TASK_TYPE;
#define NVSWITCH_TASK_TYPE_FLAGS_ALWAYS_RUN 0x1 // Run even if the device is not initialized
//
// PLL
//
typedef struct
{
NvU32 src_freq_khz;
NvU32 M;
NvU32 N;
NvU32 PL;
NvU32 dist_mode;
NvU32 refclk_div;
NvU32 vco_freq_khz;
NvU32 freq_khz;
} NVSWITCH_PLL_INFO;
// Per-unit interrupt masks
typedef struct
{
NvU32 fatal;
NvU32 nonfatal;
NvU32 correctable;
} NVSWITCH_INTERRUPT_MASK;
// BIOS Image
typedef struct
{
// Size of the image.
NvU32 size;
// pointer to the BIOS image.
NvU8* pImage;
} NVSWITCH_BIOS_IMAGE;
struct NVSWITCH_CLIENT_EVENT
{
NVListRec entry;
NvU32 eventId;
void *private_driver_data;
};
//
// common device information
//
struct nvswitch_device
{
#ifdef INCLUDE_NVLINK_LIB
nvlink_device *nvlink_device;
#endif
char name[sizeof(NVSWITCH_DEVICE_NAME) + NVSWITCH_INSTANCE_LEN];
void *os_handle;
NvU32 os_instance;
NvBool is_emulation;
NvBool is_rtlsim;
NvBool is_fmodel;
NVSWITCH_REGKEY_TYPE regkeys;
// Tasks
NVSWITCH_TASK_TYPE *tasks;
// Errors
NvU64 error_total; // Total errors recorded across all error logs
NVSWITCH_ERROR_LOG_TYPE log_FATAL_ERRORS;
NVSWITCH_ERROR_LOG_TYPE log_NONFATAL_ERRORS;
NVSWITCH_FIRMWARE firmware;
// HAL connectivity
nvswitch_hal hal;
// SOE
FLCNABLE *pSoe;
// DMA
NvU32 dma_addr_width;
// InfoROM
struct inforom *pInforom;
// I2C
struct NVSWITCH_OBJI2C *pI2c;
// SMBPBI
struct smbpbi *pSmbpbi;
// NVSWITCH_LINK_TYPE
NVSWITCH_LINK_TYPE link[NVSWITCH_MAX_LINK_COUNT];
// PLL
NVSWITCH_PLL_INFO switch_pll;
// Device specific information
NvU32 chip_arch; // NVSWITCH_GET_INFO_INDEX_ARCH_*
NvU32 chip_impl; // NVSWITCH_GET_INFO_INDEX_IMPL_*
//
NvU32 chip_id; // NV_PSMC/PMC_BOOT_42_CHIP_ID_*
void * chip_device;
// UUID in big-endian format
NvUuid uuid;
// Fabric Manager timeout value for the heartbeat
NvU32 fm_timeout;
// Fabric State
NVSWITCH_DRIVER_FABRIC_STATE driver_fabric_state;
NVSWITCH_DEVICE_FABRIC_STATE device_fabric_state;
NVSWITCH_DEVICE_BLACKLIST_REASON device_blacklist_reason;
NvU64 fabric_state_timestamp;
NvU32 fabric_state_sequence_number;
// Full BIOS image
NVSWITCH_BIOS_IMAGE biosImage;
// List of client events
NVListRec client_events_list;
};
#define NVSWITCH_IS_DEVICE_VALID(device) \
((device != NULL) && \
(device->nvlink_device->type == NVLINK_DEVICE_TYPE_NVSWITCH))
#define NVSWITCH_IS_DEVICE_ACCESSIBLE(device) \
(NVSWITCH_IS_DEVICE_VALID(device) && \
(device->nvlink_device->pciInfo.bars[0].pBar != NULL))
#define NVSWITCH_IS_DEVICE_INITIALIZED(device) \
(NVSWITCH_IS_DEVICE_ACCESSIBLE(device) && \
(device->nvlink_device->initialized))
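/*!
 * Illustrative sketch (not part of the driver): a typical guard at the top of
 * a control entry point using the validity macros above. The exact status
 * returned on failure is representative only.
 *
 * @code
 *     if (!NVSWITCH_IS_DEVICE_INITIALIZED(device))
 *     {
 *         return -NVL_BAD_ARGS;
 *     }
 * @endcode
 */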
//
// Error Function defines
//
NvlStatus
nvswitch_construct_error_log
(
NVSWITCH_ERROR_LOG_TYPE *errors,
NvU32 error_log_size,
NvBool overwritable
);
void
nvswitch_destroy_error_log
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors
);
void
nvswitch_record_error
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors,
NvU32 error_type, // NVSWITCH_ERR_*
NvU32 instance,
NvU32 subinstance,
NVSWITCH_ERROR_SRC_TYPE error_src, // NVSWITCH_ERROR_SRC_*
NVSWITCH_ERROR_SEVERITY_TYPE severity, // NVSWITCH_ERROR_SEVERITY_*
NvBool error_resolved,
void *data,
NvU32 data_size,
NvU32 line
);
void
nvswitch_discard_errors
(
NVSWITCH_ERROR_LOG_TYPE *errors,
NvU32 error_discard_count
);
void
nvswitch_get_error
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors,
NVSWITCH_ERROR_TYPE *error_entry,
NvU32 error_idx,
NvU32 *error_count
);
void
nvswitch_get_next_error
(
nvswitch_device *device,
NVSWITCH_ERROR_LOG_TYPE *errors,
NVSWITCH_ERROR_TYPE *error_entry,
NvU32 *error_count,
NvBool remove_from_list
);
void
nvswitch_get_link_handlers
(
nvlink_link_handlers *nvswitch_link_handlers
);
//
// Timeout checking
//
typedef struct NVSWITCH_TIMEOUT
{
NvU64 timeout_ns;
} NVSWITCH_TIMEOUT;
#define NVSWITCH_INTERVAL_1USEC_IN_NS 1000LL
#define NVSWITCH_INTERVAL_50USEC_IN_NS 50000LL
#define NVSWITCH_INTERVAL_1MSEC_IN_NS 1000000LL
#define NVSWITCH_INTERVAL_5MSEC_IN_NS 5000000LL
#define NVSWITCH_INTERVAL_1SEC_IN_NS 1000000000LL
#define NVSWITCH_HEARTBEAT_INTERVAL_NS NVSWITCH_INTERVAL_1SEC_IN_NS
// This should only be used for short delays
#define NVSWITCH_NSEC_DELAY(nsec_delay) \
do \
{ \
if (!IS_FMODEL(device)) \
{ \
NVSWITCH_TIMEOUT timeout; \
nvswitch_timeout_create(nsec_delay, &timeout); \
do { } \
while (!nvswitch_timeout_check(&timeout)); \
} \
} while(0)
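/*!
 * Illustrative sketch (not part of the driver): a bounded register poll built
 * from the timeout helpers declared below and the interval defines above. The
 * register offset and ready bit are hypothetical.
 *
 * @code
 *     NVSWITCH_TIMEOUT timeout;
 *     NvBool bDone = NV_FALSE;
 *
 *     nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
 *     do
 *     {
 *         bDone = ((nvswitch_reg_read_32(device, exampleStatusOffset) & 0x1) != 0);
 *     } while (!bDone && !nvswitch_timeout_check(&timeout));
 *
 *     if (!bDone)
 *     {
 *         NVSWITCH_PRINT(device, ERROR, "Timed out waiting for ready bit\n");
 *     }
 * @endcode
 */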
#define NVSWITCH_GET_CAP(tbl,cap,field) (((NvU8)tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field))
#define NVSWITCH_SET_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field))
NvBool nvswitch_is_lr10_device_id(NvU32 device_id);
NvU32 nvswitch_reg_read_32(nvswitch_device *device, NvU32 offset);
void nvswitch_reg_write_32(nvswitch_device *device, NvU32 offset, NvU32 data);
NvU64 nvswitch_read_64bit_counter(nvswitch_device *device, NvU32 lo_offset, NvU32 hi_offset);
void nvswitch_timeout_create(NvU64 timeout_ns, NVSWITCH_TIMEOUT *time);
NvBool nvswitch_timeout_check(NVSWITCH_TIMEOUT *time);
void nvswitch_task_create(nvswitch_device *device,
void (*task_fn)(nvswitch_device *device), NvU64 period_nsec, NvU32 flags);
void nvswitch_tasks_destroy(nvswitch_device *device);
void nvswitch_free_chipdevice(nvswitch_device *device);
NvlStatus nvswitch_create_link(nvswitch_device *device, NvU32 link_number, nvlink_link **link);
nvlink_link* nvswitch_get_link(nvswitch_device *device, NvU8 link_id);
NvU64 nvswitch_get_enabled_link_mask(nvswitch_device *device);
void nvswitch_destroy_link(nvlink_link *link);
NvlStatus nvswitch_validate_pll_config(nvswitch_device *device,
NVSWITCH_PLL_INFO *switch_pll,
NVSWITCH_PLL_LIMITS default_pll_limits);
NvlStatus nvswitch_poll_sublink_state(nvswitch_device *device, nvlink_link *link);
void nvswitch_setup_link_loopback_mode(nvswitch_device *device, NvU32 linkNumber);
void nvswitch_reset_persistent_link_hw_state(nvswitch_device *device, NvU32 linkNumber);
void nvswitch_store_topology_information(nvswitch_device *device, nvlink_link *link);
NvlStatus nvswitch_set_training_mode(nvswitch_device *device);
NvBool nvswitch_is_link_in_reset(nvswitch_device *device, nvlink_link *link);
void nvswitch_apply_recal_settings(nvswitch_device *device, nvlink_link *link);
void nvswitch_init_buffer_ready(nvswitch_device *device, nvlink_link *link, NvBool bNportBufferReady);
#endif //_COMMON_NVSWITCH_H_

View File

@@ -0,0 +1,223 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _ERROR_NVSWITCH_H_
#define _ERROR_NVSWITCH_H_
#include "nvtypes.h"
#include "ctrl_dev_nvswitch.h"
//
// Error logging
//
typedef struct
{
NvU32 addr;
NvU32 data;
NvU32 info;
NvU32 code;
} NVSWITCH_PRI_ERROR_LOG_TYPE;
typedef struct
{
NvU32 addr;
NvU32 data;
NvU32 write;
NvU32 dest;
NvU32 subId;
NvU32 errCode;
NvU32 raw_data[4];
} NVSWITCH_PRI_TIMEOUT_ERROR_LOG_TYPE;
typedef struct
{
NvU32 raw_pending; // raw pending interrupt status
NvU32 mask; // localized mask for current handler
NvU32 raw_first; // raw first register
NvU32 raw_enable; // raw mask/enable register
NvU32 data[4]; // record of interrupt specific data
} NVSWITCH_INTERRUPT_LOG_TYPE;
typedef struct
{
NvU32 data[16];
} NVSWITCH_RAW_ERROR_LOG_TYPE;
#define NVSWITCH_ERROR_NEXT_LOCAL_NUMBER(log) (log->error_total)
typedef struct
{
NvU32 error_type; // NVSWITCH_ERR_*
NvU64 local_error_num; // Count of preceding errors (local error log)
NvU64 global_error_num; // Count of preceding errors (globally)
NVSWITCH_ERROR_SRC_TYPE error_src; // NVSWITCH_ERROR_SRC_*
NVSWITCH_ERROR_SEVERITY_TYPE severity; // NVSWITCH_ERROR_SEVERITY_*
NvU32 instance; // Used for link# or subengine instance
NvU32 subinstance; // Used for lane# or similar
NvBool error_resolved;
NvU64 timer_count; // NvSwitch timer count
NvU64 time; // Platform time, in ns
NvU32 line;
union
{
NvU64 address;
NVSWITCH_PRI_ERROR_LOG_TYPE pri_error;
NVSWITCH_PRI_TIMEOUT_ERROR_LOG_TYPE pri_timeout;
NVSWITCH_INTERRUPT_LOG_TYPE intr;
NVSWITCH_RAW_ERROR_LOG_TYPE raw;
} data;
} NVSWITCH_ERROR_TYPE;
typedef struct
{
NvU32 error_start; // Start index within CB
NvU32 error_count; // Count of current errors in CB
NvU64 error_total; // Count of total errors logged
NvU32 error_log_size; // CB size
NVSWITCH_ERROR_TYPE *error_log;
NvBool overwritable; // Old CB entries can be overwritten
} NVSWITCH_ERROR_LOG_TYPE;
//
// Helpful error logging wrappers
//
#define NVSWITCH_LOG_FATAL(_device, _errsrc, _errtype, _instance, _subinstance, _errresolved)\
nvswitch_record_error( \
_device, \
&(_device->log_FATAL_ERRORS), \
NVSWITCH_ERR ## _errtype, \
_instance, _subinstance, \
NVSWITCH_ERROR_SRC ## _errsrc, \
NVSWITCH_ERROR_SEVERITY_FATAL, \
_errresolved, \
NULL, 0, \
__LINE__)
#define NVSWITCH_LOG_FATAL_DATA(_device, _errsrc, _errtype, _instance, _subinstance, _errresolved, _errdata) \
nvswitch_record_error( \
_device, \
&(_device->log_FATAL_ERRORS), \
NVSWITCH_ERR ## _errtype, \
_instance, _subinstance, \
NVSWITCH_ERROR_SRC ## _errsrc, \
NVSWITCH_ERROR_SEVERITY_FATAL, \
_errresolved, \
_errdata, sizeof(*_errdata), \
__LINE__)
#define NVSWITCH_LOG_NONFATAL(_device, _errsrc, _errtype, _instance, _subinstance, _errresolved) \
nvswitch_record_error( \
_device, \
&(_device->log_NONFATAL_ERRORS), \
NVSWITCH_ERR ## _errtype, \
_instance, _subinstance, \
NVSWITCH_ERROR_SRC ## _errsrc, \
NVSWITCH_ERROR_SEVERITY_NONFATAL, \
_errresolved, \
NULL, 0, \
__LINE__)
#define NVSWITCH_LOG_NONFATAL_DATA(_device, _errsrc, _errtype, _instance, _subinstance, _errresolved, _errdata) \
nvswitch_record_error( \
_device, \
&(_device->log_NONFATAL_ERRORS), \
NVSWITCH_ERR ## _errtype, \
_instance, _subinstance, \
NVSWITCH_ERROR_SRC ## _errsrc, \
NVSWITCH_ERROR_SEVERITY_NONFATAL, \
_errresolved, \
_errdata, sizeof(*_errdata), \
__LINE__)
#define NVSWITCH_NVLINK_ARCH_ERROR_NONE 0
#define NVSWITCH_NVLINK_ARCH_ERROR_GENERIC 1
#define NVSWITCH_NVLINK_ARCH_ERROR_HW_FATAL 2
#define NVSWITCH_NVLINK_ARCH_ERROR_HW_CORRECTABLE 3
#define NVSWITCH_NVLINK_ARCH_ERROR_HW_UNCORRECTABLE 4
#define NVSWITCH_NVLINK_HW_ERROR_NONE 0x0
#define NVSWITCH_NVLINK_HW_GENERIC 0x1
#define NVSWITCH_NVLINK_HW_INGRESS 0x2
#define NVSWITCH_NVLINK_HW_EGRESS 0x3
#define NVSWITCH_NVLINK_HW_FSTATE 0x4
#define NVSWITCH_NVLINK_HW_TSTATE 0x5
#define NVSWITCH_NVLINK_HW_ROUTE 0x6
#define NVSWITCH_NVLINK_HW_NPORT 0x7
#define NVSWITCH_NVLINK_HW_NVLCTRL 0x8
#define NVSWITCH_NVLINK_HW_NVLIPT 0x9
#define NVSWITCH_NVLINK_HW_NVLTLC 0xA
#define NVSWITCH_NVLINK_HW_DLPL 0xB
#define NVSWITCH_NVLINK_HW_AFS 0xC
#define NVSWITCH_NVLINK_HW_MINION 0xD
#define NVSWITCH_NVLINK_HW_HOST 0xE
#define NVSWITCH_NVLINK_HW_NXBAR 0XF
#define NVSWITCH_NVLINK_HW_SOURCETRACK 0x10
typedef NvU32 NVSWITCH_NVLINK_ARCH_ERROR;
typedef NvU32 NVSWITCH_NVLINK_HW_ERROR;
NVSWITCH_NVLINK_HW_ERROR nvswitch_translate_hw_error(NVSWITCH_ERR_TYPE type);
void nvswitch_translate_error(NVSWITCH_ERROR_TYPE *error_entry,
NVSWITCH_NVLINK_ARCH_ERROR *arch_error,
NVSWITCH_NVLINK_HW_ERROR *hw_error);
NvlStatus nvswitch_ctrl_get_errors(nvswitch_device *device,
NVSWITCH_GET_ERRORS_PARAMS *p);
// Log correctable per-device error with data
#define NVSWITCH_REPORT_CORRECTABLE_DEVICE_DATA(_device, _logenum, _data, _fmt, ...) \
do \
{ \
NVSWITCH_PRINT_SXID(_device, NVSWITCH_ERR ## _logenum, \
"Correctable, " _fmt "\n", ## __VA_ARGS__ ); \
NVSWITCH_LOG_NONFATAL_DATA(_device, _HW, _logenum, \
0, 0, NV_TRUE, _data); \
} while(0)
// Log correctable per-link error with data
#define NVSWITCH_REPORT_CORRECTABLE_LINK_DATA(_device, _link, _logenum, _data, _fmt, ...) \
do \
{ \
NVSWITCH_PRINT_SXID(_device, NVSWITCH_ERR ## _logenum, \
"Correctable, Link %02d " _fmt "\n", _link, ## __VA_ARGS__ ); \
NVSWITCH_LOG_NONFATAL_DATA(_device, _HW, _logenum, \
_link, 0, NV_TRUE, _data); \
} while(0)
// Log nonfatal per-link error
#define NVSWITCH_REPORT_NONFATAL_LINK(_device, _link, _logenum, _fmt, ...) \
do \
{ \
NVSWITCH_PRINT_SXID(_device, NVSWITCH_ERR ## _logenum, \
"Non-fatal, Link %02d " _fmt "\n", _link, ## __VA_ARGS__ ); \
NVSWITCH_LOG_NONFATAL(_device, _HW, _logenum, \
_link, 0, NV_FALSE); \
} while(0)
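/*!
 * Illustrative sketch (not part of the driver): reporting a non-fatal link
 * error with the wrapper above. The _logenum fragment is hypothetical; real
 * call sites pass the suffix of an NVSWITCH_ERR_* enumerant so that the token
 * pasting inside the macro resolves to a defined value.
 *
 * @code
 *     NVSWITCH_REPORT_NONFATAL_LINK(device, linkNumber, _HW_EXAMPLE_FAULT,
 *                                   "replay count %d", replayCount);
 * @endcode
 */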
#endif //_ERROR_NVSWITCH_H_

View File

@@ -0,0 +1,436 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FLCN_NVSWITCH_H_
#define _FLCN_NVSWITCH_H_
#include "flcn/flcnrtosdebug_nvswitch.h" // <TODO - HEADER CLEANUP>
#include "flcnifcmn.h"
#include "flcn/flcnqueue_nvswitch.h"
#include "flcn/haldefs_flcn_nvswitch.h"
#include "common_nvswitch.h"
/**************** Resource Manager Defines and Structures ******************\
* *
* Module: FLCN_NVSWITCH.H *
* Defines and structures used for the Falcon Object. The Falcon *
* object is the base object for all Falcon-derived engines. *
\***************************************************************************/
/*!
 * Compares a unit id against the values in the unit_id enumeration and
 * verifies that the id is valid. The id is expected to be specified as an
 * unsigned integer.
*/
#define RM_FLCN_UNIT_ID_IS_VALID(pQeueInfo, id) \
((id) < (pQeueInfo)->maxUnitId)
/*!
* Verifies that the given queue identifier is a valid command queue id. It
* is expected that the id is specified as an unsigned integer.
*/
#define RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pQeueInfo, id) \
((id) <= (pQeueInfo)->maxCmdQueueIndex)
/*!
 * Defines a sequence descriptor that may be used during initialization and
 * represents an invalid sequence descriptor (one which will never be
 * assigned when a sequence/command is submitted).
*/
#define FLCN_INVALID_SEQ_DESC NV_U32_MAX
/*!
 * Defines an event descriptor that may be used during initialization and
 * represents an invalid event descriptor (one which will never be assigned
 * when an event is registered).
*/
#define FLCN_INVALID_EVT_DESC NV_U32_MAX
/*!
* Defines the alignment/granularity of falcon memory blocks
*/
#define FLCN_BLK_ALIGNMENT (256)
/*!
* Defines the required address/offset alignment for all DMEM accesses
*/
#define FLCN_DMEM_ACCESS_ALIGNMENT (4)
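/*!
 * Illustrative sketch (not part of the driver): a typical alignment check on a
 * DMEM offset before issuing a transfer, using the constant above.
 *
 * @code
 *     NVSWITCH_ASSERT((dmemOffset % FLCN_DMEM_ACCESS_ALIGNMENT) == 0);
 * @endcode
 */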
typedef struct FLCN_EVENT_INFO FLCN_EVENT_INFO, *PFLCN_EVENT_INFO;
/*!
* @brief Tracks all information for each client that has registered for a
* specific type of event-notification.
*/
struct FLCN_EVENT_INFO
{
/*!
* A unique identifier given to each event info instance to provide a
* fast way to identify and track an event registration.
*/
NvU32 evtDesc;
/*!
* An identifier that describes the type of event the client wants
* notification of.
*/
NvU32 unitId;
/*!
* The client's pre-allocated message buffer. This is the buffer that
* the message data will be written to when extracted from the Message
* Queue. This buffer must be sufficiently sized to hold the largest
* possible event for type 'unitId'.
*/
union RM_FLCN_MSG *pMsg;
/*! The client function to be called when the event triggers. */
FlcnQMgrClientCallback pCallback;
/*!
* Any client-specified private parameters that must be provided in the
* callback function.
*/
void *pCallbackParams;
/*!
     * Clients are tracked as a linked list. This is a pointer to the next
* client in the list. The ordering of this list implies no association
* between the clients.
*/
FLCN_EVENT_INFO *pNext;
};
/*!
* @brief Enumeration for each discrete command state.
*/
typedef enum FLCN_CMD_STATE
{
/*!
     * Indicates that the command does not have a state. Commands/sequences
     * that have never been submitted will possess this state.
*/
FLCN_CMD_STATE_NONE = 0,
/*! Indicates that the command is being processed by the FLCN. */
FLCN_CMD_STATE_RUNNING,
/*! Indicates that the command has finished execution on the FLCN. */
FLCN_CMD_STATE_DONE
} FLCN_CMD_STATE;
typedef struct FALCON_EXTERNAL_CONFIG
{
NvBool bResetInPmc; // If TRUE, Reset Falcon using PMC Enable
NvU32 riscvRegisterBase; // Falcon's RISCV base offset.
NvU32 fbifBase; // Falcon's FB Interface base.
NvU32 blkcgBase; // Falcon's BLKCG base.
} FALCON_EXTERNAL_CONFIG, *PFALCON_EXTERNAL_CONFIG;
typedef struct
{
NvU8 maxUnitId; //<! Maximum valid Unit Id
NvU8 initEventUnitId; //<! INIT event unit id
NvU8 cmdQHeadSize; //<! Command Queue Head Size
NvU8 cmdQTailSize; //<! Command Queue Tail size
NvU8 msgQHeadSize; //<! Message Queue Head Size
NvU8 msgQTailSize; //<! Message Queue Tail Size
NvU32 maxCmdQueueIndex; //<! Maximum command queue Index
NvU32 maxMsgSize; //<! Maximum valid MSG size
NvU32 cmdQHeadBaseAddress; //<! Base Register Address of Command Queue Head.
NvU32 cmdQHeadStride; //<! Stride used to access indexed Command Queue Head registers.
NvU32 cmdQTailBaseAddress; //<! Base Register Address of Command Queue Tail.
NvU32 cmdQTailStride; //<! Stride used to access indexed Command Queue Tail registers.
NvU32 msgQHeadBaseAddress; //<! Base Register Address of Message Queue Head.
NvU32 msgQHeadStride; //<! Stride used to access indexed Message Queue Head registers.
NvU32 msgQTailBaseAddress; //<! Base Register Address of Message Queue Tail.
NvU32 msgQTailStride; //<! Stride used to access indexed Message Queue Head registers.
FLCNQUEUE *pQueues; //<! Queues allocated
/*!
* A linked-list of event information structures tracking all clients that
* have registered for event notification.
*/
PFLCN_EVENT_INFO pEventInfo;
/*!
* Each time a client registers for an event notification, an internal
* data structure is created and attached to the event-info list. Each
* structure will have a unique identifier/descriptor assigned that will
* be used to track and manage the registration. This variable keeps track
* of the next descriptor that will be assigned at any given time.
*/
NvU32 nextEvtDesc;
/*!
     * Similar to 'nextEvtDesc'; keeps track of the sequence descriptor that
     * will be assigned to the next queued command.
*/
NvU32 nextSeqDesc;
/*!
     * Keeps track of the most recently used sequence number. The search for a
     * free sequence always starts just after the latest used sequence, since
     * that is the most likely free sequence when sequences are consumed serially.
*/
NvU32 latestUsedSeqNum;
} FALCON_QUEUE_INFO,
*PFALCON_QUEUE_INFO;
/*!
* Data specific Falcon debugging features.
*/
typedef struct
{
NvU32 dumpEngineTag; // NVDUMP_COMPONENT_ENG_xyz.
NvU32 pbFalconId; // Protobuf falcon ID. RTOS_FLCN_xyz.
NvU16 debugInfoDmemOffset; // DMEM address of the Falcon's
// DEBUG_INFO structure.
NvBool bCrashed; // Falcon has crashed at least once
// since RM was initialized.
NvBool bCallbackTriggered; // Flag indicating that callback
// was actually called.
} FLCN_DEBUG, *PFLCN_DEBUG;
struct FLCNABLE;
typedef struct ENGINE_DESCRIPTOR_TYPE
{
NvU32 base;
NvBool initialized;
} ENGINE_DESCRIPTOR_TYPE, *PENGINE_DESCRIPTOR_TYPE;
typedef enum ENGINE_TAG
{
ENG_TAG_INVALID,
ENG_TAG_SOE,
ENG_TAG_END_INVALID
} ENGINE_TAG, *PENGINE_TAG;
typedef struct FLCN
{
// pointer to our function table - should always be the first thing in any object
flcn_hal *pHal;
// we don't have a parent class, so we go straight to our members
const char *name;
ENGINE_DESCRIPTOR_TYPE engDescUc;
ENGINE_DESCRIPTOR_TYPE engDescBc;
FALCON_EXTERNAL_CONFIG extConfig;
//
// State variables
//
NvBool bConstructed;
/*! The FLCN is ready to accept work from the RM. */
NvBool bOSReady;
/*! This Falcon will have queue support */
NvBool bQueuesEnabled;
NvU8 numQueues; //<! Number of queues constructed
NvU32 numSequences; //<! Number of sequences constructed
FLCN_DEBUG debug; //<! Data specific to debugging
NvU8 coreRev; //<! Core revision. 0x51 is 5.1.
NvU8 securityModel; //<! Follows _FALCON_HWCFG1_SECURITY_MODEL_xyz
// Replacement for a PDB Property: PDB_PROP_FLCN_SUPPORTS_DMEM_APERTURES
NvBool supportsDmemApertures;
// We need to save a pointer to the FLCNABLE interface
struct FLCNABLE *pFlcnable;
ENGINE_TAG engineTag;
PFALCON_QUEUE_INFO pQueueInfo;
/*!
* Determines whether to use EMEM in place of DMEM for RM queues and
* the RM managed heap. EMEM is a memory region outside of the core engine
* of some falcons which allows for RM access even when the falcon is
* locked down in HS mode. This is required so that engines like SEC2
* can receive new commands from RM without blocking.
*/
NvBool bEmemEnabled;
/*! HW arch that is enabled and running on corresponding uproc engine. */
NvU32 engArch;
} FLCN, *PFLCN;
// hal functions
// OBJECT Interfaces
NV_STATUS flcnQueueReadData(struct nvswitch_device *, PFLCN, NvU32 queueId, void *pData, NvBool bMsg);
NV_STATUS flcnQueueCmdWrite(struct nvswitch_device *, PFLCN, NvU32 queueId, union RM_FLCN_CMD *pCmd, struct NVSWITCH_TIMEOUT *pTimeout);
NV_STATUS flcnQueueCmdCancel(struct nvswitch_device *, PFLCN, NvU32 seqDesc);
NV_STATUS flcnQueueCmdPostNonBlocking(struct nvswitch_device *, PFLCN, union RM_FLCN_CMD *pCmd, union RM_FLCN_MSG *pMsg, void *pPayload, NvU32 queueIdLogical, FlcnQMgrClientCallback pCallback, void *pCallbackParams, NvU32 *pSeqDesc, struct NVSWITCH_TIMEOUT *pTimeout);
NV_STATUS flcnQueueCmdPostBlocking(struct nvswitch_device *, PFLCN, union RM_FLCN_CMD *pCmd, union RM_FLCN_MSG *pMsg, void *pPayload, NvU32 queueIdLogical, NvU32 *pSeqDesc, struct NVSWITCH_TIMEOUT *pTimeout);
NV_STATUS flcnQueueCmdWait(struct nvswitch_device *, PFLCN, NvU32, struct NVSWITCH_TIMEOUT *pTimeout);
NvU8 flcnCoreRevisionGet(struct nvswitch_device *, PFLCN);
void flcnMarkNotReady(struct nvswitch_device *, PFLCN);
NV_STATUS flcnCmdQueueHeadGet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 *pHead);
NV_STATUS flcnMsgQueueHeadGet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 *pHead);
NV_STATUS flcnCmdQueueTailGet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 *pTail);
NV_STATUS flcnMsgQueueTailGet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 *pTail);
NV_STATUS flcnCmdQueueHeadSet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 head);
NV_STATUS flcnMsgQueueHeadSet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 head);
NV_STATUS flcnCmdQueueTailSet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 tail);
NV_STATUS flcnMsgQueueTailSet(struct nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, NvU32 tail);
PFLCN_QMGR_SEQ_INFO flcnQueueSeqInfoFind(struct nvswitch_device *, PFLCN, NvU32 seqDesc);
PFLCN_QMGR_SEQ_INFO flcnQueueSeqInfoAcq(struct nvswitch_device *, PFLCN);
void flcnQueueSeqInfoRel(struct nvswitch_device *, PFLCN, PFLCN_QMGR_SEQ_INFO pSeqInfo);
void flcnQueueSeqInfoStateInit(struct nvswitch_device *, PFLCN);
void flcnQueueSeqInfoCancelAll(struct nvswitch_device *, PFLCN);
NV_STATUS flcnQueueSeqInfoFree(struct nvswitch_device *, PFLCN, PFLCN_QMGR_SEQ_INFO);
NV_STATUS flcnQueueEventRegister(struct nvswitch_device *, PFLCN, NvU32 unitId, NvU8 *pMsg, FlcnQMgrClientCallback pCallback, void *pParams, NvU32 *pEvtDesc);
NV_STATUS flcnQueueEventUnregister(struct nvswitch_device *, PFLCN, NvU32 evtDesc);
NV_STATUS flcnQueueEventHandle(struct nvswitch_device *, PFLCN, union RM_FLCN_MSG *pMsg, NV_STATUS evtStatus);
NV_STATUS flcnQueueResponseHandle(struct nvswitch_device *, PFLCN, union RM_FLCN_MSG *pMsg);
NvU32 flcnQueueCmdStatus(struct nvswitch_device *, PFLCN, NvU32 seqDesc);
NV_STATUS flcnDmemCopyFrom(struct nvswitch_device *, PFLCN, NvU32 src, NvU8 *pDst, NvU32 sizeBytes, NvU8 port);
NV_STATUS flcnDmemCopyTo(struct nvswitch_device *, PFLCN, NvU32 dst, NvU8 *pSrc, NvU32 sizeBytes, NvU8 port);
void flcnPostDiscoveryInit(struct nvswitch_device *, PFLCN);
void flcnDbgInfoDmemOffsetSet(struct nvswitch_device *, PFLCN, NvU16 debugInfoDmemOffset);
// HAL Interfaces
NV_STATUS flcnConstruct_HAL (struct nvswitch_device *, PFLCN);
void flcnDestruct_HAL (struct nvswitch_device *, PFLCN);
NvU32 flcnRegRead_HAL (struct nvswitch_device *, PFLCN, NvU32 offset);
void flcnRegWrite_HAL (struct nvswitch_device *, PFLCN, NvU32 offset, NvU32 data);
const char *flcnGetName_HAL (struct nvswitch_device *, PFLCN);
NvU8 flcnReadCoreRev_HAL (struct nvswitch_device *, PFLCN);
void flcnGetCoreInfo_HAL (struct nvswitch_device *, PFLCN);
NV_STATUS flcnDmemTransfer_HAL (struct nvswitch_device *, PFLCN, NvU32 src, NvU8 *pDst, NvU32 sizeBytes, NvU8 port, NvBool bCopyFrom);
void flcnIntrRetrigger_HAL (struct nvswitch_device *, PFLCN);
NvBool flcnAreEngDescsInitialized_HAL (struct nvswitch_device *, PFLCN);
NV_STATUS flcnWaitForResetToFinish_HAL (struct nvswitch_device *, PFLCN);
void flcnDbgInfoCapturePcTrace_HAL (struct nvswitch_device *, PFLCN);
void flcnDbgInfoCaptureRiscvPcTrace_HAL (struct nvswitch_device *, PFLCN);
NvU32 flcnDmemSize_HAL (struct nvswitch_device *, struct FLCN *);
NvU32 flcnSetImemAddr_HAL (struct nvswitch_device *, struct FLCN *, NvU32 dst);
void flcnImemCopyTo_HAL (struct nvswitch_device *, struct FLCN *, NvU32 dst, NvU8 *pSrc, NvU32 sizeBytes, NvBool bSecure, NvU32 tag, NvU8 port);
NvU32 flcnSetDmemAddr_HAL (struct nvswitch_device *, struct FLCN *, NvU32 dst);
NvU32 flcnRiscvRegRead_HAL (struct nvswitch_device *, PFLCN, NvU32 offset);
void flcnRiscvRegWrite_HAL (struct nvswitch_device *, PFLCN, NvU32 offset, NvU32 data);
// Falcon core revision / subversion definitions.
#define NV_FLCN_CORE_REV_3_0 0x30 // 3.0 - Core revision 3 subversion 0.
#define NV_FLCN_CORE_REV_4_0 0x40 // 4.0 - Core revision 4 subversion 0.
#define NV_FLCN_CORE_REV_4_1 0x41 // 4.1 - Core revision 4 subversion 1.
#define NV_FLCN_CORE_REV_5_0 0x50 // 5.0 - Core revision 5 subversion 0.
#define NV_FLCN_CORE_REV_5_1 0x51 // 5.1 - Core revision 5 subversion 1.
#define NV_FLCN_CORE_REV_6_0 0x60 // 6.0 - Core revision 6 subversion 0.
//
// Convert Falcon core rev/subver to the IP version format that can be recognized
// by the chip-config dynamic HAL infra.
//
#define NV_FLCN_CORE_REV_TO_IP_VER(coreVer) \
(DRF_NUM(_PFALCON, _IP_VER, _MAJOR, ((coreVer >> 4) & 0xf)) | \
DRF_NUM(_PFALCON, _IP_VER, _MINOR, (coreVer & 0xf)))
#define NV_PFALCON_IP_VER_MINOR 23:16
#define NV_PFALCON_IP_VER_MAJOR 31:24
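/*!
 * Illustrative sketch (not part of the driver): converting core revision 5.1
 * (NV_FLCN_CORE_REV_5_1 == 0x51) with the macro above yields an IP version of
 * 0x05010000, given the _MAJOR/_MINOR field layout defined above.
 *
 * @code
 *     NvU32 ipVer = NV_FLCN_CORE_REV_TO_IP_VER(NV_FLCN_CORE_REV_5_1);
 *     // DRF_VAL(_PFALCON, _IP_VER, _MAJOR, ipVer) == 5
 *     // DRF_VAL(_PFALCON, _IP_VER, _MINOR, ipVer) == 1
 * @endcode
 */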
// Some mailbox defines (should be shared with MSDEC OS)
#define NV_FALCON_MAILBOX0_MSDECOS_STATUS 11:0
#define NV_FALCON_MAILBOX0_MSDECOS_INVALID_METHOD_MTHDCNT 19:12
#define NV_FALCON_MAILBOX0_MSDECOS_INVALID_METHOD_MTHDID 31:20
#define NV_FALCON_MAILBOX1_MSDECOS_INVALID_METHOD_MTHDDATA 31:0
PFLCN flcnAllocNew(void);
NvlStatus flcnInit(nvswitch_device *device, PFLCN pFlcn, NvU32 pci_device_id);
void flcnDestroy(nvswitch_device *device, FLCN *pFlcn);
/*!
* The HW arch (e.g. FALCON or FALCON + RISCV) that can be actively enabled and
* running on an uproc engine.
*/
#define NV_UPROC_ENGINE_ARCH_DEFAULT (0x0)
#define NV_UPROC_ENGINE_ARCH_FALCON (0x1)
#define NV_UPROC_ENGINE_ARCH_FALCON_RISCV (0x2)
/*!
 * Helper macros to check which HW arch is enabled and running on an uproc engine.
*/
#define UPROC_ENG_ARCH_FALCON(pFlcn) (pFlcn->engArch == NV_UPROC_ENGINE_ARCH_FALCON)
#define UPROC_ENG_ARCH_FALCON_RISCV(pFlcn) (pFlcn->engArch == NV_UPROC_ENGINE_ARCH_FALCON_RISCV)
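/*!
 * Illustrative sketch (not part of the driver): selecting a register access
 * path based on the active uproc architecture, using the HAL readers declared
 * earlier in this header. The offset is hypothetical.
 *
 * @code
 *     NvU32 val;
 *
 *     if (UPROC_ENG_ARCH_FALCON_RISCV(pFlcn))
 *     {
 *         val = flcnRiscvRegRead_HAL(device, pFlcn, exampleOffset);
 *     }
 *     else
 *     {
 *         val = flcnRegRead_HAL(device, pFlcn, exampleOffset);
 *     }
 * @endcode
 */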
// Falcon Register index
#define NV_FALCON_REG_R0 (0)
#define NV_FALCON_REG_R1 (1)
#define NV_FALCON_REG_R2 (2)
#define NV_FALCON_REG_R3 (3)
#define NV_FALCON_REG_R4 (4)
#define NV_FALCON_REG_R5 (5)
#define NV_FALCON_REG_R6 (6)
#define NV_FALCON_REG_R7 (7)
#define NV_FALCON_REG_R8 (8)
#define NV_FALCON_REG_R9 (9)
#define NV_FALCON_REG_R10 (10)
#define NV_FALCON_REG_R11 (11)
#define NV_FALCON_REG_R12 (12)
#define NV_FALCON_REG_R13 (13)
#define NV_FALCON_REG_R14 (14)
#define NV_FALCON_REG_R15 (15)
#define NV_FALCON_REG_IV0 (16)
#define NV_FALCON_REG_IV1 (17)
#define NV_FALCON_REG_UNDEFINED (18)
#define NV_FALCON_REG_EV (19)
#define NV_FALCON_REG_SP (20)
#define NV_FALCON_REG_PC (21)
#define NV_FALCON_REG_IMB (22)
#define NV_FALCON_REG_DMB (23)
#define NV_FALCON_REG_CSW (24)
#define NV_FALCON_REG_CCR (25)
#define NV_FALCON_REG_SEC (26)
#define NV_FALCON_REG_CTX (27)
#define NV_FALCON_REG_EXCI (28)
#define NV_FALCON_REG_RSVD0 (29)
#define NV_FALCON_REG_RSVD1 (30)
#define NV_FALCON_REG_RSVD2 (31)
#define NV_FALCON_REG_SIZE (32)
#define FALC_REG(x) NV_FALCON_REG_##x
#endif // _FLCN_NVSWITCH_H_
/*!
* Defines the Falcon IMEM block-size (as a power-of-2).
*/
#define FALCON_IMEM_BLKSIZE2 (8)
/*!
* Defines the Falcon DMEM block-size (as a power-of-2).
*/
#define FALCON_DMEM_BLKSIZE2 (8)

View File

@@ -0,0 +1,93 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FLCNABLE_NVSWITCH_H_
#define _FLCNABLE_NVSWITCH_H_
/*!
* @file flcnable_nvswitch.h
* @brief Provides definitions for all FLCNABLE data structures and interfaces.
*/
#include "flcn/haldefs_flcnable_nvswitch.h"
#include "flcnifcmn.h"
#include "nvlink_errors.h"
struct nvswitch_device;
struct FLCN;
struct FALCON_EXTERNAL_CONFIG;
struct FLCN_QMGR_SEQ_INFO;
union RM_FLCN_MSG;
union RM_FLCN_CMD;
struct ENGINE_DESCRIPTOR_TYPE;
/*!
* Defines the structure used to contain all generic information related to
* the FLCNABLE.
*/
typedef struct FLCNABLE
{
// pointer to our function table - should always be the first thing in any object
flcnable_hal *pHal;
// we don't have a parent class, so we go straight to our members
/* Pointer to FLCN object for the object represented by this FLCNABLE */
struct FLCN *pFlcn;
} FLCNABLE, *PFLCNABLE;
NvlStatus flcnableInit(struct nvswitch_device *device, PFLCNABLE pFlcnable, NvU32 pci_device_id);
void flcnableDestroy(struct nvswitch_device *device, PFLCNABLE pFlcnable);
/*!
* Utility to get the FLCN object for the engine
*/
#define ENG_GET_FLCN(pObj) (((PFLCNABLE)pObj)->pFlcn)
/*!
* Safe (from NULL parent) version of utility to get the FLCN object for the engine
*/
#define ENG_GET_FLCN_IFF(pObj) ((NULL!=(pObj))?ENG_GET_FLCN(pObj):NULL)
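/*!
 * Illustrative sketch (not part of the driver): using the NULL-safe accessor
 * to reach the FLCN object without checking the engine pointer first. The
 * status returned on failure is representative only.
 *
 * @code
 *     struct FLCN *pFlcn = ENG_GET_FLCN_IFF(device->pSoe);
 *
 *     if (pFlcn == NULL)
 *     {
 *         return -NVL_ERR_NOT_SUPPORTED;
 *     }
 * @endcode
 */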
// hal functions
NvU8 flcnableReadCoreRev (struct nvswitch_device *device, PFLCNABLE);
void flcnableGetExternalConfig (struct nvswitch_device *device, PFLCNABLE, struct FALCON_EXTERNAL_CONFIG *);
void flcnableEmemCopyFrom (struct nvswitch_device *device, PFLCNABLE, NvU32, NvU8 *, NvU32, NvU8);
void flcnableEmemCopyTo (struct nvswitch_device *device, PFLCNABLE, NvU32, NvU8 *, NvU32, NvU8);
NV_STATUS flcnableHandleInitEvent (struct nvswitch_device *device, PFLCNABLE, union RM_FLCN_MSG *);
struct FLCN_QMGR_SEQ_INFO * flcnableQueueSeqInfoGet (struct nvswitch_device *device, PFLCNABLE, NvU32);
void flcnableQueueSeqInfoClear (struct nvswitch_device *device, PFLCNABLE, struct FLCN_QMGR_SEQ_INFO *);
void flcnableQueueSeqInfoFree (struct nvswitch_device *device, PFLCNABLE, struct FLCN_QMGR_SEQ_INFO *);
NvBool flcnableQueueCmdValidate (struct nvswitch_device *device, PFLCNABLE, union RM_FLCN_CMD *, union RM_FLCN_MSG *, void *, NvU32);
NV_STATUS flcnableQueueCmdPostExtension (struct nvswitch_device *device, PFLCNABLE, union RM_FLCN_CMD *, union RM_FLCN_MSG *, void *, struct NVSWITCH_TIMEOUT *, struct FLCN_QMGR_SEQ_INFO *);
void flcnablePostDiscoveryInit (struct nvswitch_device *device, PFLCNABLE);
NV_STATUS flcnableConstruct_HAL (struct nvswitch_device *device, PFLCNABLE);
void flcnableDestruct_HAL (struct nvswitch_device *device, PFLCNABLE);
void flcnableFetchEngines_HAL (struct nvswitch_device *device, PFLCNABLE, struct ENGINE_DESCRIPTOR_TYPE *, struct ENGINE_DESCRIPTOR_TYPE *);
#endif // _FLCNABLE_NVSWITCH_H_

View File

@@ -0,0 +1,263 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FLCNQUEUE_NVSWITCH_H_
#define _FLCNQUEUE_NVSWITCH_H_
/*!
* @file flcnqueue_nvswitch.h
* @copydoc flcnqueue_nvswitch.c
*/
#include "nvstatus.h"
struct nvswitch_device;
struct NVSWITCH_TIMEOUT;
struct FLCN;
struct FLCNQUEUE;
union RM_FLCN_MSG;
union RM_FLCN_CMD;
/*!
* Define the signature of the callback function that FLCN clients must
* register when sending a FLCN command or registering for FLCN event
* notification. Upon completion of the command or upon intercepting an event
* of a specific type, the callback will be invoked passing the completed
* sequence or event descriptor to the client along with status to indicate if
* the message buffer was properly populated.
*
* @param[in] device nvswitch_device pointer
* @param[in] pMsg Pointer to the received message
* @param[in] pParams Pointer to the parameters
* @param[in] seqDesc Sequencer descriptor number
* @param[in] status Status for command execution result
*/
typedef void (*FlcnQMgrClientCallback)(struct nvswitch_device *, union RM_FLCN_MSG *pMsg, void *pParams, NvU32 seqDesc, NV_STATUS status);
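/*!
 * Illustrative sketch (not part of the driver): a minimal client callback
 * matching the signature above. The completion flag passed through pParams is
 * hypothetical.
 *
 * @code
 *     static void
 *     _exampleFlcnCallback
 *     (
 *         struct nvswitch_device *device,
 *         union RM_FLCN_MSG      *pMsg,
 *         void                   *pParams,
 *         NvU32                   seqDesc,
 *         NV_STATUS               status
 *     )
 *     {
 *         NvBool *pBDone = (NvBool *)pParams;
 *
 *         if (status == NV_OK)
 *         {
 *             *pBDone = NV_TRUE;
 *         }
 *     }
 * @endcode
 */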
typedef NV_STATUS (*FlcnQueueClose )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvBool);
typedef NvBool (*FlcnQueueIsEmpty )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue);
typedef NV_STATUS (*FlcnQueueOpenRead )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue);
typedef NV_STATUS (*FlcnQueueOpenWrite)(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32);
typedef NV_STATUS (*FlcnQueuePop )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, void*, NvU32, NvU32 *);
typedef void (*FlcnQueuePush )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, void*, NvU32);
typedef void (*FlcnQueueRewind )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue);
typedef NV_STATUS (*FlcnQueueHeadGet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 *pHead);
typedef NV_STATUS (*FlcnQueueHeadSet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 head );
typedef NV_STATUS (*FlcnQueueTailGet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 *pTail);
typedef NV_STATUS (*FlcnQueueTailSet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 tail );
typedef void (*FlcnQueueRead )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 offset, NvU8 *pDst, NvU32 sizeBytes);
typedef void (*FlcnQueueWrite )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 offset, NvU8 *pSrc, NvU32 sizeBytes);
typedef NV_STATUS (*FlcnQueueHasRoom )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 writeSize, NvBool *pBRewind);
typedef NV_STATUS (*FlcnQueueLock )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, struct NVSWITCH_TIMEOUT *pTimeout);
typedef NV_STATUS (*FlcnQueueUnlock )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue);
typedef NvU32 (*FlcnQueuePopulateRewindCmd )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, union RM_FLCN_CMD *pFlcnCmd);
typedef NV_STATUS (*FlcnQueueElementUseStateClr)(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 queuePos);
/*!
 * This enumeration defines the flags that may be passed to the queue
 * "open" API. Read operations are allowed on queues opened for 'READ';
 * write operations are allowed on queues opened for 'WRITE'. The flag
 * used when a queue is opened also defines the behavior of the "close" operation.
*/
typedef enum
{
FLCNQUEUE_OFLAG_READ = 0,
FLCNQUEUE_OFLAG_WRITE
} FLCNQUEUE_OFLAG;
/*!
* Contains all fields, attributes, and functions pertaining to Falcon Queues.
*/
typedef struct FLCNQUEUE
{
FlcnQueueClose close;
FlcnQueueIsEmpty isEmpty;
FlcnQueueOpenRead openRead;
FlcnQueueOpenWrite openWrite;
FlcnQueuePop pop;
FlcnQueuePush push;
FlcnQueueRewind rewind;
FlcnQueueHeadGet headGet;
FlcnQueueHeadSet headSet;
FlcnQueueTailGet tailGet;
FlcnQueueTailSet tailSet;
FlcnQueueRead read;
FlcnQueueWrite write;
FlcnQueueHasRoom hasRoom;
FlcnQueuePopulateRewindCmd populateRewindCmd;
FlcnQueueElementUseStateClr elementUseStateClr;
/*!
* When the queue is currently opened for writing, this value stores the
* current write position. This allows multiple writes to be streamed into
* the queue without updating the head pointer for each individual write.
*/
NvU32 position;
/*! The physical DMEM offset where this queue resides/begins. */
NvU32 queueOffset;
/*!
* The logical queue identifier for the queue which we use to index into
* the queue structures inside RM.
*/
NvU32 queueLogId;
/*!
* The physical queue index indicates the index of the queue pertaining to
* its type. We can use it to index into the head and tail registers of
     * a particular type (CMD or MSG) of queue.
     * For example, if 3 command queues and 2 message queues are allocated
     * for a particular falcon, their queueLogId and queuePhyId values will be:
* <Assuming the command queues are allocated first>
* CMDQ0 queuePhyId = 0, queueLogId = 0
* CMDQ1 queuePhyId = 1, queueLogId = 1
* CMDQ2 queuePhyId = 2, queueLogId = 2
*
* MSGQ0 queuePhyId = 0, queueLogId = 3
* MSGQ1 queuePhyId = 1, queueLogId = 4
*/
NvU32 queuePhyId;
    /*! The size of the queue: in bytes for a DMEM queue, in entries for an FB queue. */
NvU32 queueSize;
/*! The size of the command header in bytes. */
NvU32 cmdHdrSize;
/*!
* Maximum size for each command.
*/
NvU32 maxCmdSize;
/*! The open-flag that was specified when the queue was opened. */
FLCNQUEUE_OFLAG oflag;
/*!
     * 'NV_TRUE' when data is currently being written into the queue (only
* pertains to command queues).
*/
NvBool bOpened;
/*!
     * 'NV_TRUE' when locked, granting exclusive access to the lock owner.
*/
NvBool bLocked;
} FLCNQUEUE, *PFLCNQUEUE;
/*!
* @brief Enumeration to represent each discrete sequence state
*
* Each sequence stored in the Sequence Table must have a state associated
* with it to keep track of used vs. available sequences.
*/
typedef enum
{
    /*! Indicates the sequence is not being used and is available. */
FLCN_QMGR_SEQ_STATE_FREE = 0,
/*!
     * Indicates the sequence has been reserved for a command, but the command has
* not yet been queued in a command queue.
*/
FLCN_QMGR_SEQ_STATE_PENDING,
/*!
* Indicates the sequence has been reserved for a command and has been
* queued.
*/
FLCN_QMGR_SEQ_STATE_USED,
/*!
* Indicates that an event has occurred (shutdown/reset/...) that caused
* the sequence to be canceled.
*/
FLCN_QMGR_SEQ_STATE_CANCELED
} FLCN_QMGR_SEQ_STATE;
/*!
* @brief Common SEQ_INFO used by all falcons.
*/
typedef struct FLCN_QMGR_SEQ_INFO
{
/*!
* The unique identifier used by the FLCN ucode to distinguish sequences.
* The ID is unique to all sequences currently in-flight but may be reused
* as sequences are completed by the FLCN.
*/
NvU8 seqNum;
/*!
* Similar to 'seqNum' but unique for all sequences ever submitted (i.e.
* never reused).
*/
NvU32 seqDesc;
/*!
* The state of the sequence (@ref FLCN_QMGR_SEQ_STATE).
*/
FLCN_QMGR_SEQ_STATE seqState;
/*!
* The client function to be called when the sequence completes.
*/
FlcnQMgrClientCallback pCallback;
/*!
* Client-specified params that must be provided to the callback function.
*/
void *pCallbackParams;
/*!
* CMD Queue associated with this Seq.
*/
struct FLCNQUEUE *pCmdQueue;
} FLCN_QMGR_SEQ_INFO, *PFLCN_QMGR_SEQ_INFO;
NV_STATUS flcnQueueConstruct_common_nvswitch(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE **ppQueue, NvU32 queueId, NvU32 queuePhyId, NvU32 offset, NvU32 queueSize, NvU32 cmdHdrSize);
NV_STATUS flcnQueueConstruct_dmem_nvswitch (struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE **ppQueue, NvU32 queueId, NvU32 queuePhyId, NvU32 offset, NvU32 queueSize, NvU32 cmdHdrSize);
// Dumping queues for debugging purpose
NV_STATUS flcnRtosDumpCmdQueue_nvswitch(struct nvswitch_device *device, struct FLCN *pFlcn, NvU32 queueLogId, union RM_FLCN_CMD *FlcnCmd);
/*!
* Alignment to use for all head/tail pointer updates. Pointers are always
 * rounded up to the nearest multiple of this value.
*/
#define QUEUE_ALIGNMENT (4)
/*!
* Checks if the given queue is currently opened for read.
*/
#define QUEUE_OPENED_FOR_READ(pQueue) \
(((pQueue)->bOpened) && ((pQueue)->oflag == FLCNQUEUE_OFLAG_READ))
/*!
* Checks if the given queue is currently opened for write.
*/
#define QUEUE_OPENED_FOR_WRITE(pQueue) \
(((pQueue)->bOpened) && ((pQueue)->oflag == FLCNQUEUE_OFLAG_WRITE))
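/*!
 * Illustrative sketch (not part of the driver): guarding a queue write with
 * the open-state check above before pushing data. The status returned on
 * failure is representative only.
 *
 * @code
 *     if (!QUEUE_OPENED_FOR_WRITE(pQueue))
 *     {
 *         return NV_ERR_INVALID_STATE;
 *     }
 *     pQueue->push(device, pFlcn, pQueue, pData, sizeBytes);
 * @endcode
 */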
#endif // _FLCNQUEUE_NVSWITCH_H_

View File

@@ -0,0 +1,38 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FLCN_RTOS_DEBUG_NVSWITCH_H_
#define _FLCN_RTOS_DEBUG_NVSWITCH_H_
/*!
* @file flcnrtosdebug_nvswitch.h
* @copydoc flcnrtosdebug_nvswitch.c
*/
/* ------------------------- Includes --------------------------------------- */
/* ------------------------- Macros ----------------------------------------- */
/* ------------------------- Datatypes -------------------------------------- */
/* ------------------------- Function Prototypes ---------------------------- */
#endif // _FLCN_RTOS_DEBUG_NVSWITCH_H_

View File

@@ -0,0 +1,107 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _HALDEFS_FLCN_NVSWITCH_H_
#define _HALDEFS_FLCN_NVSWITCH_H_
#include "nvstatus.h"
#include "flcn/flcnqueue_nvswitch.h"
#include "flcnifcmn.h"
struct nvswitch_device;
struct NVSWITCH_TIMEOUT;
struct FLCN;
union RM_FLCN_MSG;
union RM_FLCN_CMD;
struct FLCNQUEUE;
struct FLCN_QMGR_SEQ_INFO;
typedef struct {
// OBJECT Interfaces
NV_STATUS (*queueReadData) (struct nvswitch_device *, struct FLCN *, NvU32 queueId, void *pData, NvBool bMsg);
NV_STATUS (*queueCmdWrite) (struct nvswitch_device *, struct FLCN *, NvU32 queueId, union RM_FLCN_CMD *pCmd, struct NVSWITCH_TIMEOUT *pTimeout);
NV_STATUS (*queueCmdCancel) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc);
NV_STATUS (*queueCmdPostNonBlocking) (struct nvswitch_device *, struct FLCN *, union RM_FLCN_CMD *pCmd, union RM_FLCN_MSG *pMsg, void *pPayload, NvU32 queueIdLogical, FlcnQMgrClientCallback pCallback, void *pCallbackParams, NvU32 *pSeqDesc, struct NVSWITCH_TIMEOUT *pTimeout);
NV_STATUS (*queueCmdWait) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc, struct NVSWITCH_TIMEOUT *pTimeout);
NvU8 (*coreRevisionGet) (struct nvswitch_device *, struct FLCN *);
void (*markNotReady) (struct nvswitch_device *, struct FLCN *);
NV_STATUS (*cmdQueueHeadGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pHead);
NV_STATUS (*msgQueueHeadGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pHead);
NV_STATUS (*cmdQueueTailGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pTail);
NV_STATUS (*msgQueueTailGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pTail);
NV_STATUS (*cmdQueueHeadSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 head);
NV_STATUS (*msgQueueHeadSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 head);
NV_STATUS (*cmdQueueTailSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 tail);
NV_STATUS (*msgQueueTailSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 tail);
struct FLCN_QMGR_SEQ_INFO *(*queueSeqInfoFind) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc);
struct FLCN_QMGR_SEQ_INFO *(*queueSeqInfoAcq) (struct nvswitch_device *, struct FLCN *);
void (*queueSeqInfoRel) (struct nvswitch_device *, struct FLCN *, struct FLCN_QMGR_SEQ_INFO *pSeqInfo);
void (*queueSeqInfoStateInit) (struct nvswitch_device *, struct FLCN *);
void (*queueSeqInfoCancelAll) (struct nvswitch_device *, struct FLCN *);
NV_STATUS (*queueSeqInfoFree) (struct nvswitch_device *, struct FLCN *, struct FLCN_QMGR_SEQ_INFO *);
NV_STATUS (*queueEventRegister) (struct nvswitch_device *, struct FLCN *, NvU32 unitId, NvU8 *pMsg, FlcnQMgrClientCallback pCallback, void *pParams, NvU32 *pEvtDesc);
NV_STATUS (*queueEventUnregister) (struct nvswitch_device *, struct FLCN *, NvU32 evtDesc);
NV_STATUS (*queueEventHandle) (struct nvswitch_device *, struct FLCN *, union RM_FLCN_MSG *pMsg, NV_STATUS evtStatus);
NV_STATUS (*queueResponseHandle) (struct nvswitch_device *, struct FLCN *, union RM_FLCN_MSG *pMsg);
NvU32 (*queueCmdStatus) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc);
NV_STATUS (*dmemCopyFrom) (struct nvswitch_device *, struct FLCN *, NvU32 src, NvU8 *pDst, NvU32 sizeBytes, NvU8 port);
NV_STATUS (*dmemCopyTo) (struct nvswitch_device *, struct FLCN *, NvU32 dst, NvU8 *pSrc, NvU32 sizeBytes, NvU8 port);
void (*postDiscoveryInit) (struct nvswitch_device *, struct FLCN *);
void (*dbgInfoDmemOffsetSet) (struct nvswitch_device *, struct FLCN *, NvU16 debugInfoDmemOffset);
//HAL Interfaces
NV_STATUS (*construct) (struct nvswitch_device *, struct FLCN *);
void (*destruct) (struct nvswitch_device *, struct FLCN *);
NvU32 (*regRead) (struct nvswitch_device *, struct FLCN *, NvU32 offset);
void (*regWrite) (struct nvswitch_device *, struct FLCN *, NvU32 offset, NvU32 data);
const char *(*getName) (struct nvswitch_device *, struct FLCN *);
NvU8 (*readCoreRev) (struct nvswitch_device *, struct FLCN *);
void (*getCoreInfo) (struct nvswitch_device *, struct FLCN *);
NV_STATUS (*dmemTransfer) (struct nvswitch_device *, struct FLCN *, NvU32 src, NvU8 *pDst, NvU32 sizeBytes, NvU8 port, NvBool bCopyFrom);
void (*intrRetrigger) (struct nvswitch_device *, struct FLCN *);
NvBool (*areEngDescsInitialized) (struct nvswitch_device *, struct FLCN *);
NV_STATUS (*waitForResetToFinish) (struct nvswitch_device *, struct FLCN *);
void (*dbgInfoCapturePcTrace) (struct nvswitch_device *, struct FLCN *);
void (*dbgInfoCaptureRiscvPcTrace) (struct nvswitch_device *, struct FLCN *);
NvU32 (*dmemSize) (struct nvswitch_device *, struct FLCN *);
NvU32 (*setImemAddr) (struct nvswitch_device *, struct FLCN *, NvU32 dst);
void (*imemCopyTo) (struct nvswitch_device *, struct FLCN *, NvU32 dst, NvU8 *pSrc, NvU32 sizeBytes, NvBool bSecure, NvU32 tag, NvU8 port);
NvU32 (*setDmemAddr) (struct nvswitch_device *, struct FLCN *, NvU32 dst);
NvU32 (*riscvRegRead) (struct nvswitch_device *, struct FLCN *, NvU32 offset);
void (*riscvRegWrite) (struct nvswitch_device *, struct FLCN *, NvU32 offset, NvU32 data);
} flcn_hal;
void flcnQueueSetupHal(struct FLCN *pFlcn);
void flcnRtosSetupHal(struct FLCN *pFlcn);
void flcnQueueRdSetupHal(struct FLCN *pFlcn);
void flcnSetupHal_LR10(struct FLCN *pFlcn);
void flcnSetupHal_v03_00(struct FLCN *pFlcn);
void flcnSetupHal_v04_00(struct FLCN *pFlcn);
void flcnSetupHal_v05_01(struct FLCN *pFlcn);
void flcnSetupHal_v06_00(struct FLCN *pFlcn);
#endif //_HALDEFS_FLCN_NVSWITCH_H_

View File

@@ -0,0 +1,112 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _HALDEFS_FLCNABLE_NVSWITCH_H_
#define _HALDEFS_FLCNABLE_NVSWITCH_H_
#include "nvstatus.h"
#include "flcnifcmn.h"
struct nvswitch_device;
struct NVSWITCH_TIMEOUT;
struct FLCNABLE;
struct FALCON_EXTERNAL_CONFIG;
struct FLCN_QMGR_SEQ_INFO;
union RM_FLCN_MSG;
union RM_FLCN_CMD;
struct ENGINE_DESCRIPTOR_TYPE;
typedef struct {
NvU8 (*readCoreRev)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable);
void (*getExternalConfig)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
struct FALCON_EXTERNAL_CONFIG *pConfig);
void (*ememCopyFrom)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
NvU32 src,
NvU8 *pDst,
NvU32 sizeBytes,
NvU8 port);
void (*ememCopyTo)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
NvU32 dst,
NvU8 *pSrc,
NvU32 sizeBytes,
NvU8 port);
NV_STATUS (*handleInitEvent)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
union RM_FLCN_MSG *pGenMsg);
struct FLCN_QMGR_SEQ_INFO* (*queueSeqInfoGet)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
NvU32 seqIndex);
void (*queueSeqInfoClear)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
struct FLCN_QMGR_SEQ_INFO *pSeqInfo);
void (*queueSeqInfoFree)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
struct FLCN_QMGR_SEQ_INFO *pSeqInfo);
NvBool (*queueCmdValidate)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
union RM_FLCN_CMD *pCmd,
union RM_FLCN_MSG *pMsg,
void *pPayload,
NvU32 queueIdLogical);
NV_STATUS (*queueCmdPostExtension)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
union RM_FLCN_CMD *pCmd,
union RM_FLCN_MSG *pMsg,
void *pPayload,
struct NVSWITCH_TIMEOUT *pTimeout,
struct FLCN_QMGR_SEQ_INFO *pSeqInfo);
void (*postDiscoveryInit)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable);
NV_STATUS (*construct)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable);
void (*destruct)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable);
void (*fetchEngines)(
struct nvswitch_device *device,
struct FLCNABLE *pFlcnable,
struct ENGINE_DESCRIPTOR_TYPE *pEngDescUc,
struct ENGINE_DESCRIPTOR_TYPE *pEngDescBc);
} flcnable_hal;
#endif //_HALDEFS_FLCNABLE_NVSWITCH_H_

View File

@@ -0,0 +1,262 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _HALDEF_NVSWITCH_H_
#define _HALDEF_NVSWITCH_H_
#include "ctrl_dev_nvswitch.h"
#include "inforom/ifrstruct.h"
#include "inforom/omsdef.h"
//
// List of functions halified in the NVSwitch Driver
//
// Note: All hal fns must be implemented for each chip.
// There is no automatic stubbing here.
//
// This 'xmacro' list is fed into generator macros which then use the
// _FUNCTION_LIST to generate HAL declarations, function prototypes, and HAL
// construction. Xmacros are a useful way to maintain consistency between
// parallel lists.
// The components of the _FUNCTION_LIST are similar to a function prototype
// declaration, with the addition of an '_arch' parameter suffixed on to it
// which is used on some _FUNCTION_LIST expansions to generate arch-specific
// information.
// The format of each line is:
// _op(return type, function name, (parameter list), _arch)
//
#define NVSWITCH_HAL_FUNCTION_LIST(_op, _arch) \
_op(NvlStatus, nvswitch_initialize_device_state, (nvswitch_device *device), _arch) \
_op(void, nvswitch_destroy_device_state, (nvswitch_device *device), _arch) \
_op(void, nvswitch_determine_platform, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_get_num_links, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_link_valid, (nvswitch_device *device, NvU32 link_id), _arch) \
_op(void, nvswitch_set_fatal_error, (nvswitch_device *device, NvBool device_fatal, NvU32 link_id), _arch) \
_op(NvU32, nvswitch_get_swap_clk_default, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_get_latency_sample_interval_msec, (nvswitch_device *device), _arch) \
_op(void, nvswitch_internal_latency_bin_log,(nvswitch_device *device), _arch) \
_op(void, nvswitch_ecc_writeback_task, (nvswitch_device *device), _arch) \
_op(void, nvswitch_monitor_thermal_alert, (nvswitch_device *device), _arch) \
_op(void, nvswitch_hw_counter_shutdown, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_get_rom_info, (nvswitch_device *device, NVSWITCH_EEPROM_TYPE *eeprom), _arch) \
_op(void, nvswitch_lib_enable_interrupts, (nvswitch_device *device), _arch) \
_op(void, nvswitch_lib_disable_interrupts, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_lib_check_interrupts, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_lib_service_interrupts, (nvswitch_device *device), _arch) \
_op(NvU64, nvswitch_hw_counter_read_counter, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_link_in_use, (nvswitch_device *device, NvU32 link_id), _arch) \
_op(NvlStatus, nvswitch_reset_and_drain_links, (nvswitch_device *device, NvU64 link_mask), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_info, (nvswitch_device *device, NVSWITCH_GET_INFO *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_nvlink_status, (nvswitch_device *device, NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_counters, (nvswitch_device *device, NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_switch_port_config, (nvswitch_device *device, NVSWITCH_SET_SWITCH_PORT_CONFIG *p), _arch) \
_op(NvlStatus, nvswitch_set_nport_port_config, (nvswitch_device *device, NVSWITCH_SET_SWITCH_PORT_CONFIG *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_ingress_request_table, (nvswitch_device *device, NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS *params), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_ingress_request_table, (nvswitch_device *device, NVSWITCH_SET_INGRESS_REQUEST_TABLE *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_ingress_request_valid, (nvswitch_device *device, NVSWITCH_SET_INGRESS_REQUEST_VALID *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_ingress_response_table, (nvswitch_device *device, NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS *params), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_ingress_response_table, (nvswitch_device *device, NVSWITCH_SET_INGRESS_RESPONSE_TABLE *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_ganged_link_table, (nvswitch_device *device, NVSWITCH_SET_GANGED_LINK_TABLE *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_remap_policy, (nvswitch_device *device, NVSWITCH_SET_REMAP_POLICY *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_remap_policy, (nvswitch_device *device, NVSWITCH_GET_REMAP_POLICY_PARAMS *params), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_remap_policy_valid, (nvswitch_device *device, NVSWITCH_SET_REMAP_POLICY_VALID *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_routing_id, (nvswitch_device *device, NVSWITCH_SET_ROUTING_ID *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_routing_id, (nvswitch_device *device, NVSWITCH_GET_ROUTING_ID_PARAMS *params), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_routing_id_valid, (nvswitch_device *device, NVSWITCH_SET_ROUTING_ID_VALID *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_routing_lan, (nvswitch_device *device, NVSWITCH_SET_ROUTING_LAN *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_routing_lan, (nvswitch_device *device, NVSWITCH_GET_ROUTING_LAN_PARAMS *params), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_routing_lan_valid, (nvswitch_device *device, NVSWITCH_SET_ROUTING_LAN_VALID *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_internal_latency, (nvswitch_device *device, NVSWITCH_GET_INTERNAL_LATENCY *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_latency_bins, (nvswitch_device *device, NVSWITCH_SET_LATENCY_BINS *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_ingress_reqlinkid, (nvswitch_device *device, NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params), _arch) \
_op(NvU32, nvswitch_i2c_get_port_info, (nvswitch_device *device, NvU32 port), _arch) \
_op(NvlStatus, nvswitch_ctrl_i2c_indexed, (nvswitch_device *device, NVSWITCH_CTRL_I2C_INDEXED_PARAMS *pParams), _arch) \
_op(NvlStatus, nvswitch_ctrl_therm_read_temperature, (nvswitch_device *device, NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info), _arch) \
_op(NvlStatus, nvswitch_ctrl_therm_get_temperature_limit, (nvswitch_device *device, NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS *info), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_throughput_counters, (nvswitch_device *device, NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_corelib_add_link, (nvlink_link *link), _arch) \
_op(NvlStatus, nvswitch_corelib_remove_link, (nvlink_link *link), _arch) \
_op(NvlStatus, nvswitch_corelib_set_dl_link_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \
_op(NvlStatus, nvswitch_corelib_get_dl_link_mode, (nvlink_link *link, NvU64 *mode), _arch) \
_op(NvlStatus, nvswitch_corelib_set_tl_link_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \
_op(NvlStatus, nvswitch_corelib_get_tl_link_mode, (nvlink_link *link, NvU64 *mode), _arch) \
_op(NvlStatus, nvswitch_corelib_set_tx_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \
_op(NvlStatus, nvswitch_corelib_get_tx_mode, (nvlink_link *link, NvU64 *mode, NvU32 *subMode), _arch) \
_op(NvlStatus, nvswitch_corelib_set_rx_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \
_op(NvlStatus, nvswitch_corelib_get_rx_mode, (nvlink_link *link, NvU64 *mode, NvU32 *subMode), _arch) \
_op(NvlStatus, nvswitch_corelib_set_rx_detect, (nvlink_link *link, NvU32 flags), _arch) \
_op(NvlStatus, nvswitch_corelib_get_rx_detect, (nvlink_link *link), _arch) \
_op(void, nvswitch_corelib_training_complete, (nvlink_link *link), _arch) \
_op(NvU32, nvswitch_get_device_dma_width, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_get_link_ip_version, (nvswitch_device *device, NvU32 link_id), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_fom_values, (nvswitch_device *device, NVSWITCH_GET_FOM_VALUES_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_deassert_link_reset, (nvswitch_device *device, nvlink_link *link), _arch) \
_op(NvBool, nvswitch_is_soe_supported, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_soe_set_ucode_core, (nvswitch_device *device, NvBool bFalcon), _arch) \
_op(NvlStatus, nvswitch_init_soe, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_inforom_supported, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_spi_supported, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_smbpbi_supported, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_soe_prepare_for_reset, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_post_init_device_setup, (nvswitch_device *device), _arch) \
_op(void, nvswitch_post_init_blacklist_device_setup, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_setup_link_system_registers, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_get_nvlink_ecc_errors, (nvswitch_device *device, NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_inforom_ecc_log_error_event, (nvswitch_device *device, INFOROM_ECC_OBJECT *pEccGeneric, INFOROM_NVS_ECC_ERROR_EVENT *error_event), _arch) \
_op(void, nvswitch_oms_set_device_disable, (INFOROM_OMS_STATE *pOmsState, NvBool bForceDeviceDisable), _arch) \
_op(NvBool, nvswitch_oms_get_device_disable, (INFOROM_OMS_STATE *pOmsState), _arch) \
_op(NvlStatus, nvswitch_inforom_nvl_log_error_event, (nvswitch_device *device, void *pNvlGeneric, void *error_event, NvBool *bDirty), _arch) \
_op(NvlStatus, nvswitch_inforom_nvl_update_link_correctable_error_info, (nvswitch_device *device, void *pNvlGeneric, void *pData, NvU8 linkId, NvU8 nvliptInstance, NvU8 localLinkIdx, void *pErrorCounts, NvBool *bDirty), _arch) \
_op(NvlStatus, nvswitch_inforom_nvl_get_max_correctable_error_rate, (nvswitch_device *device, NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_inforom_nvl_get_errors, (nvswitch_device *device, NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_inforom_ecc_get_errors, (nvswitch_device *device, NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *p), _arch) \
_op(void, nvswitch_load_uuid, (nvswitch_device *device), _arch) \
_op(void, nvswitch_i2c_set_hw_speed_mode, (nvswitch_device *device, NvU32 port, NvU32 speedMode), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_bios_info, (nvswitch_device *device, NVSWITCH_GET_BIOS_INFO_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_read_oob_blacklist_state, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_write_fabric_state, (nvswitch_device *device), _arch) \
_op(void, nvswitch_initialize_oms_state, (nvswitch_device *device, INFOROM_OMS_STATE *pOmsState), _arch) \
_op(NvlStatus, nvswitch_oms_inforom_flush, (nvswitch_device *device), _arch) \
_op(void, nvswitch_inforom_ecc_get_total_errors, (nvswitch_device *device, INFOROM_ECC_OBJECT *pEccGeneric, NvU64 *corCount, NvU64 *uncCount), _arch) \
_op(NvlStatus, nvswitch_bbx_setup_prologue, (nvswitch_device *device, void *pInforomBbxState), _arch) \
_op(NvlStatus, nvswitch_bbx_setup_epilogue, (nvswitch_device *device, void *pInforomBbxState), _arch) \
_op(NvlStatus, nvswitch_bbx_add_data_time, (nvswitch_device *device, void *pInforomBbxState, void *pInforomBbxData), _arch) \
_op(NvlStatus, nvswitch_bbx_add_sxid, (nvswitch_device *device, void *pInforomBbxState, void *pInforomBbxData), _arch) \
_op(NvlStatus, nvswitch_bbx_add_temperature, (nvswitch_device *device, void *pInforomBbxState, void *pInforomBbxData), _arch) \
_op(void, nvswitch_bbx_set_initial_temperature, (nvswitch_device *device, void *pInforomBbxState, void *pInforomBbxData), _arch) \
_op(NvlStatus, nvswitch_inforom_bbx_get_sxid, (nvswitch_device *device, NVSWITCH_GET_SXIDS_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_smbpbi_get_dem_num_messages, (nvswitch_device *device, NvU8 *pMsgCount), _arch) \
_op(NvlStatus, nvswitch_set_minion_initialized, (nvswitch_device *device, NvU32 idx_minion, NvBool initialized), _arch) \
_op(NvBool, nvswitch_is_minion_initialized, (nvswitch_device *device, NvU32 idx_minion), _arch) \
_op(NvlStatus, nvswitch_get_link_public_id, (nvswitch_device *device, NvU32 linkId, NvU32 *publicId), _arch) \
_op(NvlStatus, nvswitch_get_link_local_idx, (nvswitch_device *device, NvU32 linkId, NvU32 *localLinkIdx), _arch) \
_op(NvlStatus, nvswitch_set_training_error_info, (nvswitch_device *device, NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_fatal_error_scope, (nvswitch_device *device, NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams), _arch) \
_op(void, nvswitch_init_scratch, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_device_discovery, (nvswitch_device *device, NvU32 discovery_offset), _arch) \
_op(void, nvswitch_filter_discovery, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_process_discovery, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_init_minion, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_get_eng_base, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance), _arch) \
_op(NvU32, nvswitch_get_eng_count, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast), _arch) \
_op(NvU32, nvswitch_eng_rd, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 offset), _arch) \
_op(void, nvswitch_eng_wr, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 offset, NvU32 data), _arch) \
_op(NvU32, nvswitch_get_link_eng_inst, (nvswitch_device *device, NvU32 link_id, NVSWITCH_ENGINE_ID eng_id), _arch) \
_op(void *, nvswitch_alloc_chipdevice, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_init_thermal, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_init_pll_config, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_init_pll, (nvswitch_device *device), _arch) \
_op(void, nvswitch_init_clock_gating, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_read_physical_id, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_get_caps_nvlink_version, (nvswitch_device *device), _arch) \
_op(void, nvswitch_initialize_interrupt_tree, (nvswitch_device *device), _arch) \
_op(void, nvswitch_init_dlpl_interrupts, (nvlink_link *link), _arch) \
_op(NvlStatus, nvswitch_initialize_pmgr, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_initialize_ip_wrappers, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_initialize_route, (nvswitch_device *device), _arch) \
_op(void, nvswitch_soe_unregister_events, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_soe_register_event_callbacks, (nvswitch_device *device), _arch) \
_op(NVSWITCH_BIOS_NVLINK_CONFIG *, nvswitch_get_bios_nvlink_config, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_minion_send_command, (nvswitch_device *device, NvU32 linkNumber, NvU32 command, NvU32 scratch0), _arch) \
_op(NvlStatus, nvswitch_init_nport, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_init_nxbar, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_clear_nport_rams, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_pri_ring_init, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_get_soe_ucode_binaries, (nvswitch_device *device, const NvU32 **soe_ucode_data, const NvU32 **soe_ucode_header), _arch) \
_op(NvlStatus, nvswitch_get_remap_table_selector, (nvswitch_device *device, NVSWITCH_TABLE_SELECT_REMAP table_selector, NvU32 *remap_ram_sel), _arch) \
_op(NvU32, nvswitch_get_ingress_ram_size, (nvswitch_device *device, NvU32 ingress_ram_selector), _arch) \
_op(NvlStatus, nvswitch_minion_get_dl_status, (nvswitch_device *device, NvU32 linkId, NvU32 statusIdx, NvU32 statusArgs, NvU32 *statusData), _arch) \
_op(void, nvswitch_corelib_get_uphy_load, (nvlink_link *link, NvBool *bUnlocked), _arch) \
_op(NvBool, nvswitch_is_i2c_supported, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_poll_sublink_state, (nvswitch_device *device, nvlink_link *link), _arch)\
_op(void, nvswitch_setup_link_loopback_mode, (nvswitch_device *device, NvU32 linkNumber), _arch)\
_op(void, nvswitch_reset_persistent_link_hw_state, (nvswitch_device *device, NvU32 linkNumber), _arch)\
_op(void, nvswitch_store_topology_information, (nvswitch_device *device, nvlink_link *link), _arch) \
_op(void, nvswitch_init_lpwr_regs, (nvlink_link *link), _arch) \
_op(NvlStatus, nvswitch_set_training_mode, (nvswitch_device *device), _arch) \
_op(NvU32, nvswitch_get_sublink_width, (nvswitch_device *device, NvU32 linkNumber), _arch) \
_op(NvBool, nvswitch_i2c_is_device_access_allowed, (nvswitch_device *device, NvU32 port, NvU8 addr, NvBool bIsRead), _arch) \
_op(NvlStatus, nvswitch_parse_bios_image, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_link_in_reset, (nvswitch_device *device, nvlink_link *link), _arch) \
_op(void, nvswitch_init_buffer_ready, (nvswitch_device *device, nvlink_link * link, NvBool bNportBufferReady), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_nvlink_lp_counters, (nvswitch_device *device, NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_residency_bins, (nvswitch_device *device, NVSWITCH_SET_RESIDENCY_BINS *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_residency_bins, (nvswitch_device *device, NVSWITCH_GET_RESIDENCY_BINS *p), _arch) \
_op(void, nvswitch_apply_recal_settings, (nvswitch_device *device, nvlink_link *), _arch) \
_op(NvlStatus, nvswitch_service_nvldl_fatal_link, (nvswitch_device *device, NvU32 nvliptInstance, NvU32 link), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_rb_stall_busy, (nvswitch_device *device, NVSWITCH_GET_RB_STALL_BUSY *p), _arch) \
_op(NvlStatus, nvswitch_service_minion_link, (nvswitch_device *device, NvU32 link_id), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_sw_info, (nvswitch_device *device, NVSWITCH_GET_SW_INFO_PARAMS *p), _arch)
//
// Declare HAL function pointer table
//
// This macro takes the xmacro _FUNCTION_LIST and uses some components in it to
// automatically generate the HAL structure declaration in a form:
// NvU32 (*function_foo1)(nvswitch_device device);
// void (*function_foo2)(nvswitch_device device, NvU32 parameter1);
// NvlStatus (*function_foo3)(nvswitch_device device, NvU32 parameter1, void *parameter2);
//
#define DECLARE_HAL_FUNCTIONS(_return, _function, _params, _arch) \
_return (*_function)_params;
typedef struct nvswitch_hal_functions
{
NVSWITCH_HAL_FUNCTION_LIST(DECLARE_HAL_FUNCTIONS, HAL)
} nvswitch_hal;
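//
// Expansion sketch (illustrative, using one entry from the list above): the
// nvswitch_get_num_links entry passes through DECLARE_HAL_FUNCTIONS and
// becomes a function pointer member of nvswitch_hal roughly like
//
//     NvU32 (*nvswitch_get_num_links)(nvswitch_device *device);
//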
//
// Fill in HAL function pointer table
//
// This macro takes the xmacro _FUNCTION_LIST and uses some components in it to
// automatically generate all the HAL function fill-in assignments for a given
// architecture.
//
#define CREATE_HAL_FUNCTIONS(_return, _function, _params, _arch) \
    device->hal._function = _function##_##_arch;

#define NVSWITCH_INIT_HAL(device, arch) \
    NVSWITCH_HAL_FUNCTION_LIST(CREATE_HAL_FUNCTIONS, arch)
//
// Declare HAL function dispatch functions
//
// This macro takes the xmacro _FUNCTION_LIST and uses some components in it to
// automatically generate the function prototypes for the dispatcher functions
// that dereference the correct arch HAL function.
//
#define DECLARE_HAL_DISPATCHERS(_return, _function, _params, _arch) \
_return _function _params;
NVSWITCH_HAL_FUNCTION_LIST(DECLARE_HAL_DISPATCHERS, unused_argument)
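//
// Expansion sketch (illustrative, assuming the LR10 arch token used by
// nvswitch_setup_hal_lr10 below): NVSWITCH_INIT_HAL(device, LR10) runs the
// same entry through CREATE_HAL_FUNCTIONS and produces an assignment like
//
//     device->hal.nvswitch_get_num_links = nvswitch_get_num_links_LR10;
//
// while DECLARE_HAL_DISPATCHERS above emits the dispatcher prototype
//
//     NvU32 nvswitch_get_num_links(nvswitch_device *device);
//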
// HAL functions
void nvswitch_setup_hal_lr10(nvswitch_device *device);
#endif //_HALDEF_NVSWITCH_H_

View File

@@ -0,0 +1,162 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INFOROM_NVSWITCH_H_
#define _INFOROM_NVSWITCH_H_
#include "inforom/ifrstruct.h"
#include "inforom/omsdef.h"
#include "nv_list.h"
#include "smbpbi_shared_nvswitch.h"
#define INFOROM_MAX_PACKED_SIZE (32*1024)
#define INFOROM_FS_FILE_SIZE(pPackedFile) \
(((pPackedFile)[INFOROM_OBJECT_HEADER_V1_00_SIZE_OFFSET]) | \
((pPackedFile)[INFOROM_OBJECT_HEADER_V1_00_SIZE_OFFSET + 1] << 8))
#define INFOROM_FS_FILE_NAMES_MATCH(fileName1, fileName2) \
((((NvU8)((fileName1)[0])) == ((NvU8)((fileName2)[0]))) && \
(((NvU8)((fileName1)[1])) == ((NvU8)((fileName2)[1]))) && \
(((NvU8)((fileName1)[2])) == ((NvU8)((fileName2)[2]))))
#define INFOROM_FS_COPY_FILE_NAME(destName, srcName) \
{ \
(destName)[0] = (srcName)[0]; \
(destName)[1] = (srcName)[1]; \
(destName)[2] = (srcName)[2]; \
}
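//
// Note (illustrative): the helpers above treat file names as exactly 3 bytes,
// and INFOROM_FS_FILE_SIZE assembles a little-endian 16-bit size from the two
// bytes at INFOROM_OBJECT_HEADER_V1_00_SIZE_OFFSET. Object names such as
// "OBD", "OEM" and "IMG" (see struct inforom below) are presumably examples
// of such 3-character names.
//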
struct INFOROM_OBJECT_CACHE_ENTRY
{
INFOROM_OBJECT_HEADER_V1_00 header;
struct INFOROM_OBJECT_CACHE_ENTRY *pNext;
};
struct inforom
{
// InfoROM Objects
// RO objects - statically allocated as the InfoROM should always contain
// these objects.
struct
{
NvBool bValid;
NvU8 packedObject[INFOROM_OBD_OBJECT_V1_XX_PACKED_SIZE];
INFOROM_OBD_OBJECT_V1_XX object;
} OBD;
struct
{
NvBool bValid;
NvU8 packedObject[INFOROM_OEM_OBJECT_V1_00_PACKED_SIZE];
INFOROM_OEM_OBJECT_V1_00 object;
} OEM;
struct
{
NvBool bValid;
NvU8 packedObject[INFOROM_IMG_OBJECT_V1_00_PACKED_SIZE];
INFOROM_IMG_OBJECT_V1_00 object;
} IMG;
INFOROM_ECC_STATE *pEccState;
INFOROM_OMS_STATE *pOmsState;
//
// Descriptor cache for all the InfoROM objects; this is used to handle InfoROM objects in a generic way.
//
struct INFOROM_OBJECT_CACHE_ENTRY *pObjectCache;
};
// Generic InfoROM APIs
NvlStatus nvswitch_initialize_inforom(nvswitch_device *device);
NvlStatus nvswitch_inforom_read_object(nvswitch_device* device,
const char *objectName, const char *pObjectFormat,
NvU8 *pPackedObject, void *pObject);
NvlStatus nvswitch_inforom_write_object(nvswitch_device* device,
const char *objectName, const char *pObjectFormat,
void *pObject, NvU8 *pOldPackedObject);
void nvswitch_destroy_inforom(nvswitch_device *device);
NvlStatus nvswitch_inforom_add_object(struct inforom *pInforom,
INFOROM_OBJECT_HEADER_V1_00 *pHeader);
NvlStatus nvswitch_inforom_get_object_version_info(nvswitch_device *device,
const char *objectName, NvU8 *pVersion, NvU8 *pSubVersion);
void *nvswitch_add_halinfo_node(NVListPtr head, int type, int size);
void *nvswitch_get_halinfo_node(NVListPtr head, int type);
void nvswitch_inforom_post_init(nvswitch_device *device);
NvlStatus nvswitch_initialize_inforom_objects(nvswitch_device *device);
void nvswitch_destroy_inforom_objects(nvswitch_device *device);
NvlStatus nvswitch_inforom_load_object(nvswitch_device* device,
struct inforom *pInforom, const char *objectName,
const char *pObjectFormat, NvU8 *pPackedObject, void *pObject);
void nvswitch_inforom_read_static_data(nvswitch_device *device,
struct inforom *pInforom, RM_SOE_SMBPBI_INFOROM_DATA *pData);
// InfoROM RO APIs
NvlStatus nvswitch_inforom_read_only_objects_load(nvswitch_device *device);
// InfoROM NVL APIs
NvlStatus nvswitch_inforom_nvlink_load(nvswitch_device *device);
void nvswitch_inforom_nvlink_unload(nvswitch_device *device);
NvlStatus nvswitch_inforom_nvlink_flush(nvswitch_device *device);
NvlStatus nvswitch_inforom_nvlink_get_minion_data(nvswitch_device *device,
NvU8 linkId, NvU32 *seedData);
NvlStatus nvswitch_inforom_nvlink_set_minion_data(nvswitch_device *device,
NvU8 linkId, NvU32 *seedData, NvU32 size);
NvlStatus nvswitch_inforom_nvlink_log_error_event(nvswitch_device *device, void *error_event);
NvlStatus nvswitch_inforom_nvlink_get_max_correctable_error_rate(nvswitch_device *device,
NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params);
NvlStatus nvswitch_inforom_nvlink_get_errors(nvswitch_device *device,
NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params);
// InfoROM ECC APIs
NvlStatus nvswitch_inforom_ecc_load(nvswitch_device *device);
void nvswitch_inforom_ecc_unload(nvswitch_device *device);
NvlStatus nvswitch_inforom_ecc_flush(nvswitch_device *device);
NvlStatus nvswitch_inforom_ecc_log_err_event(nvswitch_device *device,
INFOROM_NVS_ECC_ERROR_EVENT *err_event);
NvlStatus nvswitch_inforom_ecc_get_errors(nvswitch_device *device,
NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params);
// InfoROM OMS APIs
NvlStatus nvswitch_inforom_oms_load(nvswitch_device *device);
void nvswitch_inforom_oms_unload(nvswitch_device *device);
NvlStatus nvswitch_inforom_oms_set_device_disable(nvswitch_device *device,
NvBool bDisable);
NvlStatus nvswitch_inforom_oms_get_device_disable(nvswitch_device *device,
NvBool *pBDisabled);
// InfoROM BBX APIs
NvlStatus nvswitch_inforom_bbx_load(nvswitch_device *device);
void nvswitch_inforom_bbx_unload(nvswitch_device * device);
NvlStatus nvswitch_inforom_bbx_add_sxid(nvswitch_device *device,
NvU32 exceptionType, NvU32 data0,
NvU32 data1, NvU32 data2);
void nvswitch_bbx_collect_current_time(nvswitch_device *device,
void *pBbxState);
NvlStatus nvswitch_inforom_bbx_get_sxid(nvswitch_device *device,
NVSWITCH_GET_SXIDS_PARAMS *params);
// InfoROM DEM APIs
NvlStatus nvswitch_inforom_dem_load(nvswitch_device *device);
void nvswitch_inforom_dem_unload(nvswitch_device * device);
#endif // _INFOROM_NVSWITCH_H_

View File

@@ -0,0 +1,211 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INTR_NVSWITCH_H_
#define _INTR_NVSWITCH_H_
#include "error_nvswitch.h"
//
// Wrapper to track interrupt servicing
//
#define NVSWITCH_UNHANDLED_INIT(val) (unhandled = (val))
#define NVSWITCH_HANDLED(mask) (unhandled &= ~(mask))
#define NVSWITCH_UNHANDLED_CHECK(_device, _unhandled) \
do \
{ \
if (_unhandled) \
{ \
NVSWITCH_PRINT(_device, ERROR, \
"%s:%d unhandled interrupt! %x\n", \
__FUNCTION__, __LINE__, _unhandled); \
NVSWITCH_PRINT_SXID(_device, \
NVSWITCH_ERR_HW_HOST_UNHANDLED_INTERRUPT, \
"Fatal, unhandled interrupt\n"); \
NVSWITCH_LOG_FATAL_DATA(_device, _HW, \
_HW_HOST_UNHANDLED_INTERRUPT, 0, 0, NV_FALSE, &_unhandled);\
} \
} while(0)
//
// Wrappers for basic leaf interrupt handling
//
#define NVSWITCH_PENDING(_bit) ((bit = (_bit)) && (pending & (_bit)))
#define NVSWITCH_FIRST() (bit & report.raw_first) ? " (First)" : ""
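//
// Illustrative usage sketch (hypothetical leaf handler, not taken from this
// file): the wrappers above expect caller-scope locals named 'pending',
// 'unhandled' and 'bit', e.g.
//
//     NvU32 pending = <read the interrupt status register>;
//     NvU32 unhandled, bit;
//     NVSWITCH_UNHANDLED_INIT(pending);
//     if (NVSWITCH_PENDING(0x1))
//     {
//         // ...service the source behind this bit...
//         NVSWITCH_HANDLED(bit);
//     }
//     NVSWITCH_UNHANDLED_CHECK(device, unhandled);
//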
//
// Report/log error interrupt helper.
//
//
// Print an intermediate point (non-leaf) in the interrupt tree.
//
#define NVSWITCH_REPORT_TREE(_logenum) \
do \
{ \
NVSWITCH_PRINT(device, ERROR, "Intermediate, Link %02d \n", link); \
} while(0)
// Log correctable errors
#define NVSWITCH_REPORT_CORRECTABLE(_logenum, _str) \
do \
{ \
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \
"Correctable, Link %02d %s%s\n", link, _str, NVSWITCH_FIRST()); \
NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, \
link, 0, NV_TRUE, &report); \
if (nvswitch_lib_notify_client_events(device, \
NVSWITCH_DEVICE_EVENT_NONFATAL) != NVL_SUCCESS) \
{ \
NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \
__FUNCTION__); \
} \
} while(0)
// Log uncorrectable error that is not fatal to the fabric
#define NVSWITCH_REPORT_NONFATAL(_logenum, _str) \
do \
{ \
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \
"Non-fatal, Link %02d %s%s\n", link, _str, NVSWITCH_FIRST()); \
NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, \
link, 0, NV_FALSE, &report); \
if (nvswitch_lib_notify_client_events(device, \
NVSWITCH_DEVICE_EVENT_NONFATAL) != NVL_SUCCESS) \
{ \
NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \
__FUNCTION__); \
} \
} while(0)
// Log uncorrectable error that is fatal to the fabric
#define NVSWITCH_REPORT_FATAL(_logenum, _str, device_fatal) \
do \
{ \
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \
"Fatal, Link %02d %s%s\n", link, _str, NVSWITCH_FIRST()); \
NVSWITCH_LOG_FATAL_DATA(device, _HW, _logenum, \
link, 0, NV_FALSE, &report); \
nvswitch_set_fatal_error(device, device_fatal, link); \
if (nvswitch_lib_notify_client_events(device, \
NVSWITCH_DEVICE_EVENT_FATAL) != NVL_SUCCESS) \
{ \
NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n",\
__FUNCTION__); \
} \
} while(0)
#define NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_logenum, _str, instance, chiplet, err_data) \
do \
{ \
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \
"Non-fatal, %s, instance=%d, chiplet=%d\n", _str, instance, chiplet); \
NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, \
instance, chiplet, NV_FALSE, &err_data); \
if (nvswitch_lib_notify_client_events(device, \
NVSWITCH_DEVICE_EVENT_NONFATAL) != NVL_SUCCESS) \
{ \
NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \
__FUNCTION__); \
} \
} while(0)
#define NVSWITCH_REPORT_PRI_ERROR_FATAL(_logenum, _str, device_fatal, instance, chiplet, err_data) \
do \
{ \
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \
"Fatal, %s, instance=%d, chiplet=%d\n", _str, instance, chiplet); \
NVSWITCH_LOG_FATAL_DATA(device, _HW, _logenum, \
instance, chiplet, NV_FALSE, &err_data); \
nvswitch_set_fatal_error(device, device_fatal, 0); \
if (nvswitch_lib_notify_client_events(device, \
NVSWITCH_DEVICE_EVENT_FATAL) != NVL_SUCCESS) \
{ \
NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \
__FUNCTION__); \
} \
} while(0)
/*
* Automatically determine if error is fatal to the fabric based on
* if it is contained and will lock the port.
*/
#define NVSWITCH_REPORT_CONTAIN(_logenum, _str, device_fatal) \
do \
{ \
if (bit & contain) \
{ \
NVSWITCH_REPORT_FATAL(_logenum, _str, device_fatal); \
} \
else \
{ \
NVSWITCH_REPORT_NONFATAL(_logenum, _str); \
} \
} while (0)
/*
* REPORT_*_DATA macros - optionally log data record for additional HW state. This
* is typically a captured packet, but there are a few other cases.
*
* Most interrupt controllers only latch additional data for errors tagged as first.
* For those cases use _FIRST to only log the data record when it is accurate. If
* two errors are detected in the same cycle, they will both be set in first.
*/
#define NVSWITCH_REPORT_DATA(_logenum, _data) \
NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, link, 0, NV_TRUE, &_data)
#define NVSWITCH_REPORT_DATA_FIRST(_logenum, _data) \
do \
{ \
if (report.raw_first & bit) \
{ \
NVSWITCH_REPORT_DATA(_logenum, _data); \
} \
} while(0)
#define NVSWITCH_REPORT_CONTAIN_DATA(_logenum, _data) \
do \
{ \
if (bit & contain) \
{ \
NVSWITCH_LOG_FATAL_DATA(device, _HW, _logenum, link, \
0, NV_FALSE, &_data); \
} \
else \
{ \
NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, link, \
                    0, NV_FALSE, &_data); \
} \
} while(0)
#define NVSWITCH_REPORT_CONTAIN_DATA_FIRST(_logenum, _data) \
do \
{ \
if (bit & report.raw_first) \
{ \
NVSWITCH_REPORT_CONTAIN_DATA(_logenum, _data); \
} \
} while(0)
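//
// Illustrative usage sketch (hypothetical _logenum and strings, not taken
// from this file): with 'bit', 'contain', 'link' and a 'report' structure in
// scope, a containable error is typically reported as
//
//     if (NVSWITCH_PENDING(<error bit>))
//     {
//         NVSWITCH_REPORT_CONTAIN(_HW_SOME_UNIT_SOME_ERROR, "some error", NV_FALSE);
//         NVSWITCH_REPORT_CONTAIN_DATA_FIRST(_HW_SOME_UNIT_SOME_ERROR, data);
//         NVSWITCH_HANDLED(bit);
//     }
//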
#endif //_INTR_NVSWITCH_H_

View File

@@ -0,0 +1,437 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _IO_NVSWITCH_H_
#define _IO_NVSWITCH_H_
// NVSWITCH_REG_* MMIO wrappers are to be used for absolute symbolic BAR0 offset
// register references like SMC, CLOCK, BUS, and PRIV_MASTER.
//
#define NVSWITCH_REG_RD32(_d, _dev, _reg) \
( \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_RD: %s, %s (+%04x)\n", \
__FUNCTION__, \
#_dev, #_reg, NV ## _dev ## _reg) \
, \
nvswitch_reg_read_32(_d, NV##_dev##_reg) \
); \
((void)(_d))
#define NVSWITCH_REG_WR32(_d, _dev, _reg, _data) \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_WR: %s, %s (+%04x) 0x%08x\n", \
__FUNCTION__, \
#_dev, #_reg, NV ## _dev ## _reg, _data); \
nvswitch_reg_write_32(_d, NV##_dev##_reg, _data); \
((void)(_d))
//
// NVSWITCH_OFF_* MMIO wrappers are used to access a fully formed BAR0 offset.
//
#define NVSWITCH_OFF_RD32(_d, _off) \
nvswitch_reg_read_32(_d, _off); \
((void)(_d))
#define NVSWITCH_OFF_WR32(_d, _off, _data) \
nvswitch_reg_write_32(_d, _off, _data); \
((void)(_d))
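//
// Illustrative usage (hypothetical register tokens): a legacy absolute-offset
// unit is accessed by pasting the device and register tokens together, e.g.
//
//     NvU32 boot = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42);
//
// which logs the access and then reads offset NV_PSMC_BOOT_42 through
// nvswitch_reg_read_32().
//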
#define NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE 64
#define NVSWITCH_ENGINE_DESCRIPTOR_MC_SIZE 3
#define NVSWITCH_ENGINE_INSTANCE_INVALID ((NvU32) (~0))
typedef struct engine_descriptor
{
const char *eng_name;
NvU32 eng_id; // REGISTER_RW_ENGINE_*
NvU32 eng_count;
NvU32 uc_addr[NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE];
NvU32 bc_addr;
NvU32 mc_addr[NVSWITCH_ENGINE_DESCRIPTOR_MC_SIZE];
NvU32 mc_addr_count;
} NVSWITCH_ENGINE_DESCRIPTOR_TYPE;
#define NVSWITCH_DECLARE_IO_DESCRIPTOR(_engine, _bcast) \
NVSWITCH_ENGINE_DESCRIPTOR_TYPE _engine;
#define NVSWITCH_BASE_ADDR_INVALID ((NvU32) (~0))
//
// All IP-based (0-based register manuals) engines that ever existed on *ANY*
// architecture(s) must be listed here in order to use the common IO wrappers.
// New engines need to be added here as well as in the chip-specific lists in
// their respective headers that generate chip-specific handlers.
// Absolute BAR0 offset-based units are legacy units in which the unit's offset
// in BAR0 is included in the register definition in the manuals. For these
// legacy units the discovered base is not used since it is already part of the
// register. Legacy units (e.g. PSMC, CLOCK, BUS, and PRIV_MASTER) should use
// NVSWITCH_REG_RD/WR IO wrappers.
//
#define NVSWITCH_LIST_ALL_ENGINES(_op) \
_op(XVE) \
_op(SAW) \
_op(SOE) \
_op(SMR) \
_op(GIN) \
_op(XAL) \
_op(XAL_FUNC) \
_op(XPL) \
_op(XTL) \
_op(XTL_CONFIG) \
_op(UXL) \
_op(GPU_PTOP) \
_op(PMC) \
_op(PBUS) \
_op(ROM2) \
_op(GPIO) \
_op(FSP) \
_op(SYSCTRL) \
_op(CLKS_SYS) \
_op(CLKS_SYSB) \
_op(CLKS_P0) \
_op(SAW_PM) \
_op(PCIE_PM) \
_op(PRT_PRI_HUB) \
_op(PRT_PRI_RS_CTRL) \
_op(SYS_PRI_HUB) \
_op(SYS_PRI_RS_CTRL) \
_op(SYSB_PRI_HUB) \
_op(SYSB_PRI_RS_CTRL) \
_op(PRI_MASTER_RS) \
_op(PTIMER) \
\
_op(NPG) \
_op(NPORT) \
\
_op(NVLW) \
_op(MINION) \
_op(NVLIPT) \
_op(NVLIPT_LNK) \
_op(NVLTLC) \
_op(NVLDL) \
_op(CPR) \
\
_op(NXBAR) \
_op(TILE) \
_op(TILEOUT) \
\
_op(NPG_PERFMON) \
_op(NPORT_PERFMON) \
\
_op(NVLW_PERFMON) \
_op(RX_PERFMON) \
_op(TX_PERFMON) \
\
_op(NXBAR_PERFMON) \
_op(TILE_PERFMON) \
    _op(TILEOUT_PERFMON)
#define ENGINE_ID_LIST(_eng) \
NVSWITCH_ENGINE_ID_##_eng,
//
// ENGINE_IDs are the complete list of all engines that are supported on
// *ANY* architecture(s) that may support them. Any one architecture may or
// may not understand how to operate on any one specific engine.
// Architectures that share a common ENGINE_ID are not guaranteed to have
// compatible manuals.
//
typedef enum nvswitch_engine_id
{
NVSWITCH_LIST_ALL_ENGINES(ENGINE_ID_LIST)
NVSWITCH_ENGINE_ID_SIZE,
} NVSWITCH_ENGINE_ID;
//
// NVSWITCH_ENG_* MMIO wrappers are to be used for top level discovered
// devices like SAW, FUSE, PMGR, XVE, etc.
//
#define NVSWITCH_GET_ENG_DESC_TYPE 0
#define NVSWITCH_GET_ENG_DESC_TYPE_UNICAST NVSWITCH_GET_ENG_DESC_TYPE
#define NVSWITCH_GET_ENG_DESC_TYPE_BCAST 1
#define NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST 2
#define NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx) \
((_d)->hal.nvswitch_get_eng_base( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \
_engidx))
#define NVSWITCH_ENG_COUNT(_d, _eng, _bcast) \
((_d)->hal.nvswitch_get_eng_count( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast))
#define NVSWITCH_ENG_IS_VALID(_d, _eng, _engidx) \
( \
NVSWITCH_GET_ENG(_d, _eng, , _engidx) != NVSWITCH_BASE_ADDR_INVALID \
)
#define NVSWITCH_ENG_WR32(_d, _eng, _bcast, _engidx, _dev, _reg, _data) \
{ \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_WR %s[%d]: %s, %s (%06x+%04x) 0x%08x\n", \
__FUNCTION__, \
#_eng#_bcast, _engidx, \
#_dev, #_reg, \
NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \
NV ## _dev ## _reg, _data); \
\
((_d)->hal.nvswitch_eng_wr( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \
_engidx, \
NV ## _dev ## _reg, _data)); \
}
#define NVSWITCH_ENG_RD32(_d, _eng, _bcast, _engidx, _dev, _reg) \
( \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_RD %s[%d]: %s, %s (%06x+%04x)\n", \
__FUNCTION__, \
#_eng#_bcast, _engidx, \
#_dev, #_reg, \
NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \
NV ## _dev ## _reg) \
, \
((_d)->hal.nvswitch_eng_rd( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \
_engidx, \
NV ## _dev ## _reg)) \
); \
((void)(_d))
#define NVSWITCH_ENG_WR32_IDX(_d, _eng, _bcast, _engidx, _dev, _reg, _idx, _data) \
{ \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_WR %s[%d]: %s, %s(%d) (%06x+%04x) 0x%08x\n", \
__FUNCTION__, \
#_eng#_bcast, _engidx, \
#_dev, #_reg, _idx, \
NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \
NV ## _dev ## _reg(_idx), _data); \
\
((_d)->hal.nvswitch_eng_wr( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \
_engidx, \
NV ## _dev ## _reg(_idx), _data)); \
}
#define NVSWITCH_ENG_RD32_IDX(_d, _eng, _bcast, _engidx, _dev, _reg, _idx) \
( \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_RD %s[%d]: %s, %s(%d) (%06x+%04x)\n", \
__FUNCTION__, \
#_eng#_bcast, _engidx, \
#_dev, #_reg, _idx, \
NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \
NV ## _dev ## _reg(_idx)) \
, \
((_d)->hal.nvswitch_eng_rd( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \
_engidx, \
NV ## _dev ## _reg(_idx))) \
); \
((void)(_d))
#define NVSWITCH_ENG_OFF_WR32(_d, _eng, _bcast, _engidx, _offset, _data) \
{ \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_WR %s[%d]: 0x%x (%06x+%04x) 0x%08x\n", \
__FUNCTION__, \
#_eng#_bcast, _engidx, \
_offset, \
NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \
_offset, _data); \
((_d)->hal.nvswitch_eng_wr( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \
_engidx, \
_offset, _data)); \
}
#define NVSWITCH_ENG_OFF_RD32(_d, _eng, _bcast, _engidx, _offset) \
( \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_RD %s[%d]: 0x%x (%06x+%04x)\n", \
__FUNCTION__, \
#_eng#_bcast, _engidx, \
_offset, \
NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \
_offset) \
, \
((_d)->hal.nvswitch_eng_rd( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \
_engidx, \
_offset)) \
)
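//
// Illustrative usage (hypothetical register tokens): a unicast read of SOE
// instance 0 and a broadcast write to all NPORTs might look like
//
//     data = NVSWITCH_ENG_RD32(device, SOE, , 0, _SOE, _FALCON_MAILBOX0);
//     NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _NPORT, _SOME_CTRL, data);
//
// where the empty _bcast argument selects NVSWITCH_GET_ENG_DESC_TYPE_UNICAST
// and _BCAST selects NVSWITCH_GET_ENG_DESC_TYPE_BCAST.
//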
//
// Per-link information
//
#define NVSWITCH_MAX_LINK_COUNT 64
#define NVSWITCH_MAX_SEED_BUFFER_SIZE NVSWITCH_MAX_SEED_NUM + 1
#define NVSWITCH_MAX_INBAND_BUFFER_SIZE 256*8
#define NVSWITCH_MAX_INBAND_BITS_SENT_AT_ONCE 32
#define NVSWITCH_MAX_INBAND_BUFFER_ENTRIES NVSWITCH_MAX_INBAND_BUFFER_SIZE/NVSWITCH_MAX_INBAND_BITS_SENT_AT_ONCE
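// Note: 256*8 = 2048 bits of inband buffer, sent 32 bits at a time, gives
// 2048/32 = 64 NvU32 entries per buffer below.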
//
// Inband data structure
//
struct nvswitch_inband_data
{
    // Inband buffer at the sender Minion
NvU32 sendBuffer[NVSWITCH_MAX_INBAND_BUFFER_ENTRIES];
// Inband buffer at receiver Minion
NvU32 receiveBuffer[NVSWITCH_MAX_INBAND_BUFFER_ENTRIES];
// Is the current Minion a sender or receiver of Inband Data?
NvBool bIsSenderMinion;
    // Indicates whether the transfer failed
    NvBool bTransferFail;
    // Count of transmissions done
// NvU32 txCount;
};
typedef struct
{
NvBool valid;
NvU32 link_clock_khz;
NvBool fatal_error_occurred;
NvBool ingress_packet_latched;
NvBool egress_packet_latched;
NvBool nea; // Near end analog
NvBool ned; // Near end digital
NvU32 lane_rxdet_status_mask;
NvBool bIsRepeaterMode;
// Minion Inband Data structure
struct nvswitch_inband_data inBandData;
} NVSWITCH_LINK_TYPE;
//
// Per link register access routines
// LINK_* MMIO wrappers are used to reference per-link engine instances
//
#define NVSWITCH_LINK_COUNT(_d) \
(nvswitch_get_num_links(_d))
#define NVSWITCH_GET_LINK_ENG_INST(_d, _linknum, _eng) \
nvswitch_get_link_eng_inst(_d, _linknum, NVSWITCH_ENGINE_ID_##_eng)
#define NVSWITCH_IS_LINK_ENG_VALID(_d, _linknum, _eng) \
( \
(NVSWITCH_GET_ENG(_d, _eng, , \
NVSWITCH_GET_LINK_ENG_INST(_d, _linknum, _eng)) \
!= NVSWITCH_BASE_ADDR_INVALID) && \
nvswitch_is_link_valid(_d, _linknum) \
)
#define NVSWITCH_LINK_OFFSET(_d, _physlinknum, _eng, _dev, _reg) \
( \
NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID(_d, _physlinknum, _eng)) \
, \
NVSWITCH_PRINT(_d, MMIO, \
"%s: LINK_OFFSET link[%d] %s: %s,%s (+%04x)\n", \
__FUNCTION__, \
_physlinknum, \
#_eng, #_dev, #_reg, NV ## _dev ## _reg) \
, \
NVSWITCH_GET_ENG(_d, _eng, , \
NVSWITCH_GET_LINK_ENG_INST(_d, _physlinknum, _eng)) + \
NV##_dev##_reg \
)
#define NVSWITCH_LINK_WR32(_d, _physlinknum, _eng, _dev, _reg, _data) \
NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID(_d, _physlinknum, _eng)); \
NVSWITCH_PRINT(_d, MMIO, \
"%s: LINK_WR link[%d] %s: %s,%s (+%04x) 0x%08x\n", \
__FUNCTION__, \
_physlinknum, \
#_eng, #_dev, #_reg, NV ## _dev ## _reg, _data); \
((_d)->hal.nvswitch_eng_wr( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE_UNICAST, \
NVSWITCH_GET_LINK_ENG_INST(_d, _physlinknum, _eng), \
NV ## _dev ## _reg, _data)); \
((void)(_d))
#define NVSWITCH_LINK_RD32(_d, _physlinknum, _eng, _dev, _reg) \
( \
NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID(_d, _physlinknum, _eng)) \
, \
NVSWITCH_PRINT(_d, MMIO, \
"%s: LINK_RD link[%d] %s: %s,%s (+%04x)\n", \
__FUNCTION__, \
_physlinknum, \
#_eng, #_dev, #_reg, NV ## _dev ## _reg) \
, \
((_d)->hal.nvswitch_eng_rd( \
_d, \
NVSWITCH_ENGINE_ID_##_eng, \
NVSWITCH_GET_ENG_DESC_TYPE_UNICAST, \
NVSWITCH_GET_LINK_ENG_INST(_d, _physlinknum, _eng), \
NV ## _dev ## _reg)) \
); \
((void)(_d))
#define NVSWITCH_LINK_WR32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx, _data) \
NVSWITCH_LINK_WR32(_d, _physlinknum, _eng, _dev, _reg(_idx), _data); \
((void)(_d))
#define NVSWITCH_LINK_RD32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx) \
NVSWITCH_LINK_RD32(_d, _physlinknum, _eng, _dev, _reg(_idx)); \
((void)(_d))
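//
// Illustrative usage (hypothetical register tokens): a per-link read names
// the engine and lets the wrapper resolve the per-link instance, e.g.
//
//     linkState = NVSWITCH_LINK_RD32(device, linkNumber, NVLDL, _NVLDL, _TOP_LINK_STATE);
//
// which resolves through NVSWITCH_GET_LINK_ENG_INST() and the unicast
// nvswitch_eng_rd HAL entry point.
//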
#endif //_IO_NVSWITCH_H_

View File

@@ -0,0 +1,84 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _FRU_NVSWITCH_H_
#define _FRU_NVSWITCH_H_
#include "common_nvswitch.h"
//
// FRU EEPROM board data
// Defined according to
// ipmi-platform-mgt-fru-infostorage-def-v1-0-rev-1-3-spec
//
#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE 7:6
#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_LENGTH 5:0
#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_6BIT (0x2)
#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_8BIT (0x3)
#define NVSWITCH_IPMI_FRU_SENTINEL (0xC1)
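//
// Decoding sketch (values follow from the field definitions above): a
// type/length byte of 0xC8 has TYPE = 0x3 (8-bit ASCII) and LENGTH = 8, so
// eight data bytes follow; a byte equal to NVSWITCH_IPMI_FRU_SENTINEL (0xC1)
// marks the end of the info area fields.
//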
// This length includes the null terminator
#define NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN 64
// mfgDateTime is in minutes from 0:00 hrs 1/1/1996
typedef struct
{
NvU32 mfgDateTime;
char mfg[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN];
char productName[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN];
char serialNum[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN];
char partNum[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN];
char fileId[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN];
char customMfgInfo[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN];
} NVSWITCH_IPMI_FRU_BOARD_INFO;
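//
// Conversion sketch (assumes the timestamp is UTC; boardInfo is a hypothetical
// NVSWITCH_IPMI_FRU_BOARD_INFO variable): the minutes-since-1/1/1996
// mfgDateTime can be turned into a Unix timestamp with
//
//     NvU64 unixSeconds = 820454400ull + ((NvU64)boardInfo.mfgDateTime * 60);
//
// where 820454400 is 0:00 hrs on 1/1/1996 expressed in seconds since the
// Unix epoch.
//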
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER, 1)
{
NvU8 version;
NvU8 internalUseOffset;
NvU8 chassisInfoOffset;
NvU8 boardInfoOffset;
NvU8 productInfoOffset;
NvU8 multirecordOffset;
NvU8 padding;
NvU8 checksum;
} NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
//
// Board Info area will be (size * 8) bytes. The last byte is a checksum byte
//
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO, 1)
{
NvU8 version;
NvU8 size;
NvU8 languageCode;
    NVSWITCH_IPMI_FRU_BOARD_INFO boardInfo; // The true size in ROM may be smaller, and the layout may differ
} NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
NvlStatus nvswitch_read_partition_fru_board_info(nvswitch_device *device,
NVSWITCH_IPMI_FRU_BOARD_INFO *pBoardInfo,
NvU8 *pRomImage);
#endif //_FRU_NVSWITCH_H_

View File

@@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CLOCK_LR10_H_
#define _CLOCK_LR10_H_
NvlStatus
nvswitch_init_pll_config_lr10
(
nvswitch_device *device
);
NvlStatus
nvswitch_init_pll_lr10
(
nvswitch_device *device
);
void
nvswitch_init_hw_counter_lr10
(
nvswitch_device *device
);
void
nvswitch_hw_counter_shutdown_lr10
(
nvswitch_device *device
);
NvU64
nvswitch_hw_counter_read_counter_lr10
(
nvswitch_device *device
);
void
nvswitch_init_clock_gating_lr10
(
nvswitch_device *device
);
#endif //_CLOCK_LR10_H_

View File

@@ -0,0 +1,163 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INFOROM_LR10_H_
#define _INFOROM_LR10_H_
NvlStatus nvswitch_inforom_nvl_log_error_event_lr10
(
nvswitch_device *device,
void *pNvlGeneric,
void *pNvlErrorEvent,
NvBool *bDirty
);
NvlStatus nvswitch_inforom_nvl_update_link_correctable_error_info_lr10
(
nvswitch_device *device,
void *pNvlGeneric,
void *pData,
NvU8 linkId,
NvU8 nvliptInstance,
NvU8 localLinkIdx,
void *pNvlErrorCounts,
NvBool *bDirty
);
NvlStatus
nvswitch_inforom_nvl_get_max_correctable_error_rate_lr10
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params
);
NvlStatus
nvswitch_inforom_nvl_get_errors_lr10
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params
);
NvlStatus
nvswitch_inforom_ecc_log_error_event_lr10
(
nvswitch_device *device,
INFOROM_ECC_OBJECT *pEccGeneric,
INFOROM_NVS_ECC_ERROR_EVENT *err_event
);
void
nvswitch_inforom_ecc_get_total_errors_lr10
(
nvswitch_device *device,
INFOROM_ECC_OBJECT *pEccGeneric,
NvU64 *pCorrectedTotal,
NvU64 *pUncorrectedTotal
);
NvlStatus
nvswitch_inforom_ecc_get_errors_lr10
(
nvswitch_device *device,
NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params
);
void
nvswitch_initialize_oms_state_lr10
(
nvswitch_device *device,
INFOROM_OMS_STATE *pOmsState
);
NvBool
nvswitch_oms_get_device_disable_lr10
(
INFOROM_OMS_STATE *pOmsState
);
void
nvswitch_oms_set_device_disable_lr10
(
INFOROM_OMS_STATE *pOmsState,
NvBool bForceDeviceDisable
);
NvlStatus
nvswitch_oms_inforom_flush_lr10
(
struct nvswitch_device *device
);
NvlStatus
nvswitch_bbx_setup_prologue_lr10
(
nvswitch_device *device,
void *pInforomBbxState
);
NvlStatus
nvswitch_bbx_setup_epilogue_lr10
(
nvswitch_device *device,
void *pInforomBbxState
);
NvlStatus
nvswitch_bbx_add_data_time_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
);
NvlStatus
nvswitch_bbx_add_sxid_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
);
NvlStatus
nvswitch_bbx_add_temperature_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
);
void
nvswitch_bbx_set_initial_temperature_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
);
NvlStatus
nvswitch_inforom_bbx_get_sxid_lr10
(
nvswitch_device *device,
NVSWITCH_GET_SXIDS_PARAMS *params
);
#endif //_INFOROM_LR10_H_

View File

@@ -0,0 +1,653 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _LR10_H_
#define _LR10_H_
#include "nvlink.h"
#include "nvCpuUuid.h"
#include "export_nvswitch.h"
#include "common_nvswitch.h"
#include "pmgr_nvswitch.h"
#include "rom_nvswitch.h"
#include "error_nvswitch.h"
#include "ctrl_dev_nvswitch.h"
#include "nvswitch/lr10/dev_nvs_master.h"
//
// Re-direction to use new common link access wrappers
//
#define NVSWITCH_IS_LINK_ENG_VALID_LR10(_d, _eng, _linknum) \
NVSWITCH_IS_LINK_ENG_VALID(_d, _linknum, _eng)
#define NVSWITCH_LINK_OFFSET_LR10(_d, _physlinknum, _eng, _dev, _reg) \
NVSWITCH_LINK_OFFSET(_d, _physlinknum, _eng, _dev, _reg)
#define NVSWITCH_LINK_WR32_LR10(_d, _physlinknum, _eng, _dev, _reg, _data) \
NVSWITCH_LINK_WR32(_d, _physlinknum, _eng, _dev, _reg, _data)
#define NVSWITCH_LINK_RD32_LR10(_d, _physlinknum, _eng, _dev, _reg) \
NVSWITCH_LINK_RD32(_d, _physlinknum, _eng, _dev, _reg)
#define NVSWITCH_LINK_WR32_IDX_LR10(_d, _physlinknum, _eng, _dev, _reg, _idx, _data) \
NVSWITCH_LINK_WR32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx, _data)
#define NVSWITCH_LINK_RD32_IDX_LR10(_d, _physlinknum, _eng, _dev, _reg, _idx) \
NVSWITCH_LINK_RD32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx)
//
// NVSWITCH_ENG_* MMIO wrappers are to be used for top level discovered
// devices like SAW, FUSE, PMGR, XVE, etc.
//
#define NVSWITCH_ENG_WR32_LR10(_d, _eng, _bcast, _engidx, _dev, _reg, _data) \
NVSWITCH_ENG_WR32(_d, _eng, _bcast, _engidx, _dev, _reg, _data)
#define NVSWITCH_ENG_RD32_LR10(_d, _eng, _engidx, _dev, _reg) \
NVSWITCH_ENG_RD32(_d, _eng, , _engidx, _dev, _reg)
#define NVSWITCH_ENG_WR32_IDX_LR10(_d, _eng, _bcast, _engidx, _dev, _reg, _idx, _data) \
NVSWITCH_ENG_WR32_IDX(_d, _eng, _bcast, _engidx, _dev, _reg, _idx, _data)
#define NVSWITCH_BCAST_WR32_LR10(_d, _eng, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, _eng, _BCAST, 0, _dev, _reg, _data)
#define NVSWITCH_BCAST_RD32_LR10(_d, _eng, _dev, _reg) \
NVSWITCH_ENG_RD32(_d, _eng, _BCAST, 0, bc, _dev, _reg)
#define NVSWITCH_CLK_NVLINK_RD32_LR10(_d, _reg, _idx) \
NVSWITCH_REG_RD32(_d, _PCLOCK, _NVSW_NVLINK##_reg(_idx))
#define NVSWITCH_CLK_NVLINK_WR32_LR10(_d, _reg, _idx, _data) \
if (IS_RTLSIM(_d) || IS_FMODEL(_d)) \
{ \
NVSWITCH_PRINT(_d, MMIO, \
"%s: Skip write NV_PCLOCK_NVSW_NVLINK%d %s (0x%06x) on FSF\n", \
__FUNCTION__, \
_idx, #_reg, \
NV_PCLOCK_NVSW_NVLINK##_reg(_idx)); \
} \
else \
{ \
NVSWITCH_REG_WR32(_d, _PCLOCK, _NVSW_NVLINK##_reg(_idx), _data); \
}
#define NVSWITCH_ENG_VALID_LR10(_d, _eng, _engidx) \
( \
((_engidx < NUM_##_eng##_ENGINE_LR10) && \
(NVSWITCH_GET_CHIP_DEVICE_LR10(_d)->eng##_eng[_engidx].valid)) ? \
NV_TRUE : NV_FALSE \
)
#define NVSWITCH_SAW_RD32_LR10(_d, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, SAW, 0, _dev, _reg)
#define NVSWITCH_SAW_WR32_LR10(_d, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, SAW, , 0, _dev, _reg, _data)
#define NVSWITCH_NPG_RD32_LR10(_d, _engidx, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, NPG, _engidx, _dev, _reg)
#define NVSWITCH_NPG_WR32_LR10(_d, _engidx, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, NPG, , _engidx, _dev, _reg, _data)
#define NVSWITCH_NPGPERF_WR32_LR10(_d, _engidx, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, NPG_PERFMON, , _engidx, _dev, _reg, _data)
#define NVSWITCH_NPORT_RD32_LR10(_d, _engidx, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, NPORT, _engidx, _dev, _reg)
#define NVSWITCH_NPORT_WR32_LR10(_d, _engidx, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, NPORT, , _engidx, _dev, _reg, _data)
#define NVSWITCH_NPORT_MC_BCAST_WR32_LR10(_d, _dev, _reg, _data) \
NVSWITCH_BCAST_WR32_LR10(_d, NPORT, _dev, _reg, _data)
#define NVSWITCH_NVLIPT_RD32_LR10(_d, _engidx, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, NVLIPT, _engidx, _dev, _reg)
#define NVSWITCH_NVLIPT_WR32_LR10(_d, _engidx, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, NVLIPT, , _engidx, _dev, _reg, _data)
typedef struct
{
NvBool valid;
NvU32 initialized;
NvU32 version;
NvU32 disc_type;
union
{
struct
{
NvU32 cluster;
NvU32 cluster_id;
NvU32 discovery; // Used for top level only
} top;
struct
{
NvU32 uc_addr;
} uc;
struct
{
NvU32 bc_addr;
NvU32 mc_addr[3];
} bc;
} info;
} ENGINE_DESCRIPTOR_TYPE_LR10;
#define NUM_PTOP_ENGINE_LR10 1
#define NUM_CLKS_ENGINE_LR10 1
#define NUM_FUSE_ENGINE_LR10 1
#define NUM_JTAG_ENGINE_LR10 1
#define NUM_PMGR_ENGINE_LR10 1
#define NUM_SAW_ENGINE_LR10 1
#define NUM_XP3G_ENGINE_LR10 1
#define NUM_XVE_ENGINE_LR10 1
#define NUM_ROM_ENGINE_LR10 1
#define NUM_EXTDEV_ENGINE_LR10 1
#define NUM_PRIVMAIN_ENGINE_LR10 1
#define NUM_PRIVLOC_ENGINE_LR10 10
#define NUM_PTIMER_ENGINE_LR10 1
#define NUM_SOE_ENGINE_LR10 1
#define NUM_SMR_ENGINE_LR10 2
#define NUM_I2C_ENGINE_LR10 1
#define NUM_SE_ENGINE_LR10 1
#define NUM_THERM_ENGINE_LR10 1
#define NUM_NPG_ENGINE_LR10 9
#define NUM_NPG_BCAST_ENGINE_LR10 1
#define NUM_NPG_PERFMON_ENGINE_LR10 9
#define NUM_NPG_PERFMON_BCAST_ENGINE_LR10 1
#define NUM_NPORT_ENGINE_LR10 36
#define NUM_NPORT_BCAST_ENGINE_LR10 4
#define NUM_NPORT_MULTICAST_ENGINE_LR10 9
#define NUM_NPORT_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_NPORT_PERFMON_ENGINE_LR10 36
#define NUM_NPORT_PERFMON_BCAST_ENGINE_LR10 4
#define NUM_NPORT_PERFMON_MULTICAST_ENGINE_LR10 9
#define NUM_NPORT_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_NXBAR_ENGINE_LR10 4
#define NUM_NXBAR_BCAST_ENGINE_LR10 1
#define NUM_NXBAR_PERFMON_ENGINE_LR10 4
#define NUM_NXBAR_PERFMON_BCAST_ENGINE_LR10 1
#define NUM_TILE_ENGINE_LR10 16
#define NUM_TILE_BCAST_ENGINE_LR10 4
#define NUM_TILE_MULTICAST_ENGINE_LR10 4
#define NUM_TILE_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_TILE_PERFMON_ENGINE_LR10 16
#define NUM_TILE_PERFMON_BCAST_ENGINE_LR10 4
#define NUM_TILE_PERFMON_MULTICAST_ENGINE_LR10 4
#define NUM_TILE_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1
//
// A Tile Column consists of 4 Tile blocks and 9 Tileout blocks.
// There are 4 Tile Columns, one per NXBAR.
//
#define NUM_NXBAR_TCS_LR10 NUM_NXBAR_ENGINE_LR10
#define NUM_NXBAR_TILEOUTS_PER_TC_LR10 9
#define NUM_NXBAR_TILES_PER_TC_LR10 4
#define TILE_TO_LINK(_device, _tc, _tile) \
( \
NVSWITCH_ASSERT((_tc < NUM_NXBAR_TCS_LR10)) \
, \
NVSWITCH_ASSERT((_tile < NUM_NXBAR_TILES_PER_TC_LR10)) \
, \
((_tc) * NUM_NXBAR_TILES_PER_TC_LR10 + (_tile)) \
)
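//
// Worked example (illustrative, derived from the defines above): with
// NUM_NXBAR_TILES_PER_TC_LR10 = 4, TILE_TO_LINK(device, 2, 3) evaluates to
// (2 * 4) + 3 = 11 for Tile 3 of Tile Column 2; the leading NVSWITCH_ASSERTs
// only range-check _tc and _tile.
//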
#define NV_NXBAR_TC_TILEOUT_ERR_FATAL_INTR_EN(i) (NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN + \
i * (NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN - NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN))
#define NV_NXBAR_TC_TILEOUT_ERR_STATUS(i) (NV_NXBAR_TC_TILEOUT0_ERR_STATUS + \
i * (NV_NXBAR_TC_TILEOUT1_ERR_STATUS - NV_NXBAR_TC_TILEOUT0_ERR_STATUS))
#define NV_NXBAR_TC_TILEOUT_ERR_FIRST(i) (NV_NXBAR_TC_TILEOUT0_ERR_FIRST + \
i * (NV_NXBAR_TC_TILEOUT1_ERR_FIRST - NV_NXBAR_TC_TILEOUT0_ERR_FIRST))
#define NV_NXBAR_TC_TILEOUT_ERR_CYA(i) (NV_NXBAR_TC_TILEOUT0_ERR_CYA + \
i * (NV_NXBAR_TC_TILEOUT1_ERR_CYA - NV_NXBAR_TC_TILEOUT0_ERR_CYA))
#define NVSWITCH_NXBAR_RD32_LR10(_d, _engidx, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, NXBAR, _engidx, _dev, _reg)
#define NVSWITCH_NXBAR_WR32_LR10(_d, _engidx, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, NXBAR, , _engidx, _dev, _reg, _data)
#define NVSWITCH_TILE_RD32_LR10(_d, _engidx, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, TILE, _engidx, _dev, _reg)
#define NVSWITCH_TILE_WR32_LR10(_d, _engidx, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, TILE, , _engidx, _dev, _reg, _data)
#define NV_PPRIV_PRT_PRT_PRIV_ERROR_ADR(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR + \
i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR - NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR))
#define NV_PPRIV_PRT_PRT_PRIV_ERROR_WRDAT(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_WRDAT + \
i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_WRDAT - NV_PPRIV_PRT_PRT0_PRIV_ERROR_WRDAT))
#define NV_PPRIV_PRT_PRT_PRIV_ERROR_INFO(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO + \
i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO - NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO))
#define NV_PPRIV_PRT_PRT_PRIV_ERROR_CODE(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_CODE + \
i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_CODE - NV_PPRIV_PRT_PRT0_PRIV_ERROR_CODE))
#define NUM_NVLW_ENGINE_LR10 9
#define NUM_NVLW_BCAST_ENGINE_LR10 1
#define NUM_NVLW_PERFMON_ENGINE_LR10 9
#define NUM_NVLW_PERFMON_BCAST_ENGINE_LR10 1
#define NUM_MINION_ENGINE_LR10 9
#define NUM_MINION_BCAST_ENGINE_LR10 1
#define NUM_NVLIPT_ENGINE_LR10 9
#define NUM_NVLIPT_BCAST_ENGINE_LR10 1
#define NUM_NVLIPT_SYS_PERFMON_ENGINE_LR10 9
#define NUM_NVLIPT_SYS_PERFMON_BCAST_ENGINE_LR10 1
#define NUM_NVLTLC_ENGINE_LR10 36
#define NUM_NVLTLC_BCAST_ENGINE_LR10 4
#define NUM_NVLTLC_MULTICAST_ENGINE_LR10 9
#define NUM_NVLTLC_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_TX_PERFMON_ENGINE_LR10 36
#define NUM_TX_PERFMON_BCAST_ENGINE_LR10 4
#define NUM_TX_PERFMON_MULTICAST_ENGINE_LR10 9
#define NUM_TX_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_RX_PERFMON_ENGINE_LR10 36
#define NUM_RX_PERFMON_BCAST_ENGINE_LR10 4
#define NUM_RX_PERFMON_MULTICAST_ENGINE_LR10 9
#define NUM_RX_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_PLL_ENGINE_LR10 9
#define NUM_PLL_BCAST_ENGINE_LR10 1
#define NUM_NVLDL_ENGINE_LR10 36
#define NUM_NVLDL_BCAST_ENGINE_LR10 4
#define NUM_NVLDL_MULTICAST_ENGINE_LR10 9
#define NUM_NVLDL_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_NVLIPT_LNK_ENGINE_LR10 36
#define NUM_NVLIPT_LNK_BCAST_ENGINE_LR10 4
#define NUM_NVLIPT_LNK_MULTICAST_ENGINE_LR10 9
#define NUM_NVLIPT_LNK_MULTICAST_BCAST_ENGINE_LR10 1
#define NUM_SYS_PERFMON_ENGINE_LR10 36
#define NUM_SYS_PERFMON_BCAST_ENGINE_LR10 4
#define NUM_SYS_PERFMON_MULTICAST_ENGINE_LR10 9
#define NUM_SYS_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1
#define NVSWITCH_NUM_PRIV_PRT_LR10 9
#define NVSWITCH_NPORT_PER_NPG (NUM_NPORT_ENGINE_LR10/NUM_NPG_ENGINE_LR10)
#define NPORT_TO_LINK(_device, _npg, _nport) \
( \
NVSWITCH_ASSERT((_npg < NUM_NPG_ENGINE_LR10)) \
, \
NVSWITCH_ASSERT((_nport < NVSWITCH_NPORT_PER_NPG))\
, \
((_npg) * NVSWITCH_NPORT_PER_NPG + (_nport)) \
)
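//
// Worked example (illustrative, derived from the defines above):
// NVSWITCH_NPORT_PER_NPG is 36/9 = 4, so NPORT_TO_LINK(device, 8, 3)
// evaluates to (8 * 4) + 3 = 35, the last link.
//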
#define NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(_nvlipt_idx) \
    ((NVBIT64(NVSWITCH_LINKS_PER_NVLIPT) - 1) << ((_nvlipt_idx) * NVSWITCH_LINKS_PER_NVLIPT))
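//
// Worked example (illustrative): NVSWITCH_LINKS_PER_NVLIPT is 36/9 = 4, so for
// _nvlipt_idx = 2 the mask is (NVBIT64(4) - 1) << (2 * 4) = 0xF << 8 = 0xF00,
// i.e. links 8..11.
//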
#define NVSWITCH_NUM_LINKS_LR10 (NUM_NPORT_ENGINE_LR10)
#define NVSWITCH_NUM_LANES_LR10 4
#define NVSWITCH_LINKS_PER_NVLW (NVSWITCH_NUM_LINKS_LR10/NUM_NVLW_ENGINE_LR10)
#define NVSWITCH_LINKS_PER_MINION (NVSWITCH_NUM_LINKS_LR10/NUM_MINION_ENGINE_LR10)
#define NVSWITCH_LINKS_PER_NVLIPT (NVSWITCH_NUM_LINKS_LR10/NUM_NVLIPT_ENGINE_LR10)
#define NVSWITCH_LINKS_PER_NPG (NVSWITCH_NUM_LINKS_LR10/NUM_NPG_ENGINE_LR10)
#define NVSWITCH_DECLARE_ENGINE_UC_LR10(_engine) \
ENGINE_DESCRIPTOR_TYPE_LR10 eng##_engine[NUM_##_engine##_ENGINE_LR10];
#define NVSWITCH_DECLARE_ENGINE_LR10(_engine) \
ENGINE_DESCRIPTOR_TYPE_LR10 eng##_engine[NUM_##_engine##_ENGINE_LR10]; \
ENGINE_DESCRIPTOR_TYPE_LR10 eng##_engine##_BCAST[NUM_##_engine##_BCAST_ENGINE_LR10];
#define NVSWITCH_NVLIPT_GET_PUBLIC_ID_LR10(_physlinknum) \
((_physlinknum)/NVSWITCH_LINKS_PER_NVLIPT)
#define NVSWITCH_NVLIPT_GET_LOCAL_LINK_ID_LR10(_physlinknum) \
((_physlinknum)%NVSWITCH_LINKS_PER_NVLIPT)
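//
// Worked example (illustrative): with NVSWITCH_LINKS_PER_NVLIPT = 4, physical
// link 13 has public NVLIPT id 13 / 4 = 3 and local link id 13 % 4 = 1.
//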
#define DISCOVERY_TYPE_UNDEFINED 0
#define DISCOVERY_TYPE_DISCOVERY 1
#define DISCOVERY_TYPE_UNICAST 2
#define DISCOVERY_TYPE_BROADCAST 3
//
// These field #defines describe which physical fabric address bits are
// relevant to the specific remap table address check/remap operation.
//
#define NV_INGRESS_REMAP_ADDR_PHYS_LR10 46:36
#define NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10 35:20
#define NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10 35:20
#define NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10 35:20
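//
// Sketch of the layout implied by the defines above (illustrative): the remap
// ADDR check looks at bits 46:36 of the physical fabric address, while the
// OFFSET/BASE/LIMIT comparisons operate on bits 35:20, i.e. at a 1 MB
// (2^20 byte) granularity.
//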
typedef NVSWITCH_LINK_TYPE NVSWITCH_LINK_TYPE_LR10;
//
// NPORT Portstat information
//
//
// LR10 supports CREQ0(0), DNGRD(1), ATR(2), ATSD(3), PROBE(4), RSP0(5), CREQ1(6), and RSP1(7) VCs.
// But DNGRD(1), ATR(2), ATSD(3), and PROBE(4) will never be used, as PowerPC ATS support is not a POR for LR10 HW.
//
#define NVSWITCH_NUM_VCS_LR10 8
typedef struct
{
NvU32 count;
NvU32 low;
NvU32 medium;
NvU32 high;
NvU32 panic;
}
NVSWITCH_LATENCY_BINS_LR10;
typedef struct
{
NvU32 count;
NvU64 start_time_nsec;
NvU64 last_read_time_nsec;
NVSWITCH_LATENCY_BINS_LR10 accum_latency[NVSWITCH_NUM_LINKS_LR10];
}
NVSWITCH_LATENCY_VC_LR10;
typedef struct
{
NvU32 sample_interval_msec;
NvU64 last_visited_time_nsec;
NVSWITCH_LATENCY_VC_LR10 latency[NVSWITCH_NUM_VCS_LR10];
} NVSWITCH_LATENCY_STATS_LR10;
#define NV_NPORT_PORTSTAT_LR10(_block, _reg, _vc, _index) (NV_NPORT_PORTSTAT ## _block ## _reg ## _0 ## _index + \
_vc * (NV_NPORT_PORTSTAT ## _block ## _reg ## _1 ## _index - NV_NPORT_PORTSTAT ## _block ## _reg ## _0 ## _index))
#define NVSWITCH_NPORT_PORTSTAT_RD32_LR10(_d, _engidx, _block, _reg, _vc) \
( \
NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID_LR10(_d, NPORT, _engidx)) \
, \
NVSWITCH_PRINT(_d, MMIO, \
"%s: MEM_RD NPORT_PORTSTAT[%d]: %s,%s (%06x+%04x)\n", \
__FUNCTION__, \
_engidx, \
#_block, #_reg, \
NVSWITCH_GET_ENG(_d, NPORT, , _engidx), \
NV_NPORT_PORTSTAT_LR10(_block, _reg, _vc, _0)) \
, \
nvswitch_reg_read_32(_d, \
NVSWITCH_GET_ENG(_d, NPORT, , _engidx) + \
NV_NPORT_PORTSTAT_LR10(_block, _reg, _vc, _0)) \
); \
((void)(_d))
#define NVSWITCH_PORTSTAT_BCAST_WR32_LR10(_d, _block, _reg, _idx, _data) \
{ \
NVSWITCH_PRINT(_d, MMIO, \
"%s: BCAST_WR NPORT_PORTSTAT: %s,%s (%06x+%04x) 0x%08x\n", \
__FUNCTION__, \
#_block, #_reg, \
NVSWITCH_GET_ENG(_d, NPORT, _BCAST, 0), \
NV_NPORT_PORTSTAT_LR10(_block, _reg, _idx, ), _data); \
NVSWITCH_OFF_WR32(_d, \
NVSWITCH_GET_ENG(_d, NPORT, _BCAST, 0) + \
NV_NPORT_PORTSTAT_LR10(_block, _reg, _idx, ), _data); \
}
//
// Per-chip device information
//
//
// The chip-specific engine list is used to generate the code to collect
// discovered unit information and coalesce it into the data structures used by
// the common IO library (see io_nvswitch.h).
//
// The PTOP discovery table presents the information on wrappers and sub-units
// in a hierarchical manner. The top level discovery contains information
// about top level UNICAST units and IP wrappers like NPG, NVLW, and NXBAR.
// Individual units within an IP wrapper are described in discovery sub-tables.
// Each IP wrapper may have MULTICAST descriptors to allow addressing sub-units
// within a wrapper and a cluster of IP wrappers will also have a BCAST
// discovery tables, which have MULTICAST descriptors within them.
// In order to collect all the useful unit information into a single container,
// we need to pick where to find each piece within the parsed discovery table.
// Top level IP wrappers like NPG have a BCAST range to broadcast reads/writes,
// but IP sub-units like NPORT have a MULTICAST range within the BCAST IP
// wrapper to broadcast to all the sub-units in all the IP wrappers.
// So in the lists below, top level IP wrappers (NPG, NVLW, and NXBAR) point
// to the _BCAST IP wrapper, but sub-units point to the _MULTICAST range inside
// the BCAST unit (_MULTICAST_BCAST).
//
// All IP-based (0-based register manuals) engines need to be listed here to
// generate chip-specific handlers as well as in the global common list of all
// engines that have ever existed on *ANY* architecture(s) in order for them to
// use the common IO wrappers.
//
#define NVSWITCH_LIST_LR10_ENGINES(_op) \
_op(XVE, ) \
_op(SAW, ) \
_op(SOE, ) \
_op(SMR, ) \
_op(NPG, _BCAST) \
_op(NPORT, _MULTICAST_BCAST) \
\
_op(NVLW, _BCAST) \
_op(MINION, _BCAST) \
_op(NVLIPT, _BCAST) \
_op(NVLIPT_LNK, _MULTICAST_BCAST) \
_op(NVLTLC, _MULTICAST_BCAST) \
_op(NVLDL, _MULTICAST_BCAST) \
\
_op(NXBAR, _BCAST) \
_op(TILE, _MULTICAST_BCAST) \
\
_op(NPG_PERFMON, _BCAST) \
_op(NPORT_PERFMON, _MULTICAST_BCAST) \
\
_op(NVLW_PERFMON, _BCAST) \
_op(RX_PERFMON, _MULTICAST_BCAST) \
_op(TX_PERFMON, _MULTICAST_BCAST) \
\
_op(NXBAR_PERFMON, _BCAST) \
    _op(TILE_PERFMON, _MULTICAST_BCAST)
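//
// Illustrative use of the NVSWITCH_LIST_LR10_ENGINES list above (the _op
// callback shown here is hypothetical, not part of this header):
//
//     #define EXPAND_ENGINE_NAME(_eng, _bcast)   #_eng #_bcast,
//     static const char *engine_names[] =
//     {
//         NVSWITCH_LIST_LR10_ENGINES(EXPAND_ENGINE_NAME)
//     };
//
// Each _op(<engine>, <suffix>) entry expands once per engine; the suffix picks
// the _BCAST wrapper for top level units or the _MULTICAST_BCAST range for
// sub-units, as described in the comment above the list.
//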
typedef struct
{
struct
{
NVSWITCH_ENGINE_DESCRIPTOR_TYPE common[NVSWITCH_ENGINE_ID_SIZE];
} io;
NVSWITCH_DECLARE_ENGINE_UC_LR10(PTOP)
NVSWITCH_DECLARE_ENGINE_UC_LR10(CLKS)
NVSWITCH_DECLARE_ENGINE_UC_LR10(FUSE)
NVSWITCH_DECLARE_ENGINE_UC_LR10(JTAG)
NVSWITCH_DECLARE_ENGINE_UC_LR10(PMGR)
NVSWITCH_DECLARE_ENGINE_UC_LR10(SAW)
NVSWITCH_DECLARE_ENGINE_UC_LR10(XP3G)
NVSWITCH_DECLARE_ENGINE_UC_LR10(XVE)
NVSWITCH_DECLARE_ENGINE_UC_LR10(ROM)
NVSWITCH_DECLARE_ENGINE_UC_LR10(EXTDEV)
NVSWITCH_DECLARE_ENGINE_UC_LR10(PRIVMAIN)
NVSWITCH_DECLARE_ENGINE_UC_LR10(PRIVLOC)
NVSWITCH_DECLARE_ENGINE_UC_LR10(PTIMER)
NVSWITCH_DECLARE_ENGINE_UC_LR10(SOE)
NVSWITCH_DECLARE_ENGINE_UC_LR10(SMR)
NVSWITCH_DECLARE_ENGINE_UC_LR10(I2C)
NVSWITCH_DECLARE_ENGINE_UC_LR10(SE)
NVSWITCH_DECLARE_ENGINE_UC_LR10(THERM)
NVSWITCH_DECLARE_ENGINE_LR10(NVLW)
NVSWITCH_DECLARE_ENGINE_LR10(NXBAR)
NVSWITCH_DECLARE_ENGINE_LR10(NPG)
NVSWITCH_DECLARE_ENGINE_LR10(MINION)
NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT)
NVSWITCH_DECLARE_ENGINE_LR10(NVLTLC)
NVSWITCH_DECLARE_ENGINE_LR10(NVLTLC_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT_SYS_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(TX_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(RX_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(TX_PERFMON_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(RX_PERFMON_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(PLL)
NVSWITCH_DECLARE_ENGINE_LR10(NVLW_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(NVLDL)
NVSWITCH_DECLARE_ENGINE_LR10(NVLDL_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT_LNK)
NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT_LNK_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(SYS_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(SYS_PERFMON_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(NPG_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(NPORT)
NVSWITCH_DECLARE_ENGINE_LR10(NPORT_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(NPORT_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(NPORT_PERFMON_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(NXBAR_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(TILE)
NVSWITCH_DECLARE_ENGINE_LR10(TILE_MULTICAST)
NVSWITCH_DECLARE_ENGINE_LR10(TILE_PERFMON)
NVSWITCH_DECLARE_ENGINE_LR10(TILE_PERFMON_MULTICAST)
// VBIOS configuration Data
NVSWITCH_BIOS_NVLINK_CONFIG bios_config;
// GPIO
const NVSWITCH_GPIO_INFO *gpio_pin;
NvU32 gpio_pin_size;
// Interrupts
NvU32 intr_enable_legacy;
NvU32 intr_enable_corr;
NvU32 intr_enable_fatal;
NvU32 intr_enable_nonfatal;
NvU32 intr_minion_dest;
//
// Book-keep interrupt masks to restore them after reset.
// Note: There is no need to book-keep interrupt masks for NVLink units like
    // DL, MINION, TLC, etc., because the NVLink init routines set them up.
//
struct
{
NVSWITCH_INTERRUPT_MASK route;
NVSWITCH_INTERRUPT_MASK ingress;
NVSWITCH_INTERRUPT_MASK egress;
NVSWITCH_INTERRUPT_MASK tstate;
NVSWITCH_INTERRUPT_MASK sourcetrack;
NVSWITCH_INTERRUPT_MASK tile;
NVSWITCH_INTERRUPT_MASK tileout;
} intr_mask;
// Latency statistics
NVSWITCH_LATENCY_STATS_LR10 *latency_stats;
// External TDIODE info
NVSWITCH_TDIODE_INFO_TYPE tdiode;
// Ganged Link table
NvU64 *ganged_link_table;
} lr10_device;
#define NVSWITCH_GET_CHIP_DEVICE_LR10(_device) \
( \
((_device)->chip_id == NV_PSMC_BOOT_42_CHIP_ID_LR10) ? \
((lr10_device *) _device->chip_device) : \
NULL \
)
//
// Internal function declarations
//
NvlStatus nvswitch_device_discovery_lr10(nvswitch_device *device, NvU32 discovery_offset);
void nvswitch_filter_discovery_lr10(nvswitch_device *device);
NvlStatus nvswitch_process_discovery_lr10(nvswitch_device *device);
nvswitch_device *nvswitch_get_device_by_pci_info_lr10(nvlink_pci_info *info);
NvlStatus nvswitch_ring_master_cmd_lr10(nvswitch_device *device, NvU32 cmd);
void nvswitch_initialize_interrupt_tree_lr10(nvswitch_device *device);
void nvswitch_lib_enable_interrupts_lr10(nvswitch_device *device);
void nvswitch_lib_disable_interrupts_lr10(nvswitch_device *device);
NvlStatus nvswitch_lib_service_interrupts_lr10(nvswitch_device *device);
NvlStatus nvswitch_lib_check_interrupts_lr10(nvswitch_device *device);
void nvswitch_set_ganged_link_table_lr10(nvswitch_device *device, NvU32 firstIndex, NvU64 *ganged_link_table, NvU32 numEntries);
NvlStatus nvswitch_pmgr_init_config_lr10(nvswitch_device *device);
NvlStatus nvswitch_minion_service_falcon_interrupts_lr10(nvswitch_device *device, NvU32 instance);
NvlStatus nvswitch_ctrl_i2c_indexed_lr10(nvswitch_device *device,
NVSWITCH_CTRL_I2C_INDEXED_PARAMS *pParams);
NvU32 nvswitch_i2c_get_port_info_lr10(nvswitch_device *device, NvU32 port);
void nvswitch_translate_error_lr10(NVSWITCH_ERROR_TYPE *error_entry,
NVSWITCH_NVLINK_ARCH_ERROR *arch_error,
NVSWITCH_NVLINK_HW_ERROR *hw_error);
NvlStatus nvswitch_corelib_add_link_lr10(nvlink_link *link);
NvlStatus nvswitch_corelib_remove_link_lr10(nvlink_link *link);
NvlStatus nvswitch_corelib_set_dl_link_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags);
NvlStatus nvswitch_corelib_get_dl_link_mode_lr10(nvlink_link *link, NvU64 *mode);
NvlStatus nvswitch_corelib_set_tl_link_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags);
NvlStatus nvswitch_corelib_get_tl_link_mode_lr10(nvlink_link *link, NvU64 *mode);
NvlStatus nvswitch_corelib_set_tx_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags);
NvlStatus nvswitch_corelib_get_tx_mode_lr10(nvlink_link *link, NvU64 *mode, NvU32 *subMode);
NvlStatus nvswitch_corelib_set_rx_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags);
NvlStatus nvswitch_corelib_get_rx_mode_lr10(nvlink_link *link, NvU64 *mode, NvU32 *subMode);
NvlStatus nvswitch_corelib_set_rx_detect_lr10(nvlink_link *link, NvU32 flags);
NvlStatus nvswitch_corelib_get_rx_detect_lr10(nvlink_link *link);
void nvswitch_corelib_training_complete_lr10(nvlink_link *link);
NvBool nvswitch_link_lane_reversed_lr10(nvswitch_device *device, NvU32 linkId);
NvBool nvswitch_is_link_in_reset_lr10(nvswitch_device *device, nvlink_link *link);
NvlStatus nvswitch_wait_for_tl_request_ready_lr10(nvlink_link *link);
NvlStatus nvswitch_request_tl_link_state_lr10(nvlink_link *link, NvU32 tlLinkState, NvBool bSync);
void nvswitch_execute_unilateral_link_shutdown_lr10(nvlink_link *link);
NvlStatus nvswitch_get_link_public_id_lr10(nvswitch_device *device, NvU32 linkId, NvU32 *publicId);
NvlStatus nvswitch_get_link_local_idx_lr10(nvswitch_device *device, NvU32 linkId, NvU32 *localLinkIdx);
NvlStatus nvswitch_set_training_error_info_lr10(nvswitch_device *device,
NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams);
NvlStatus nvswitch_ctrl_get_fatal_error_scope_lr10(nvswitch_device *device, NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams);
void nvswitch_init_scratch_lr10(nvswitch_device *device);
void nvswitch_init_dlpl_interrupts_lr10(nvlink_link *link);
NvlStatus nvswitch_init_nport_lr10(nvswitch_device *device);
NvlStatus nvswitch_get_soe_ucode_binaries_lr10(nvswitch_device *device, const NvU32 **soe_ucode_data, const NvU32 **soe_ucode_header);
NvlStatus nvswitch_poll_sublink_state_lr10(nvswitch_device *device, nvlink_link *link);
void nvswitch_setup_link_loopback_mode_lr10(nvswitch_device *device, NvU32 linkNumber);
void nvswitch_reset_persistent_link_hw_state_lr10(nvswitch_device *device, NvU32 linkNumber);
void nvswitch_store_topology_information_lr10(nvswitch_device *device, nvlink_link *link);
void nvswitch_init_lpwr_regs_lr10(nvlink_link *link);
NvlStatus nvswitch_set_training_mode_lr10(nvswitch_device *device);
NvBool nvswitch_i2c_is_device_access_allowed_lr10(nvswitch_device *device, NvU32 port, NvU8 addr, NvBool bIsRead);
NvU32 nvswitch_get_sublink_width_lr10(nvswitch_device *device,NvU32 linkNumber);
NvlStatus nvswitch_parse_bios_image_lr10(nvswitch_device *device);
NvlStatus nvswitch_ctrl_get_throughput_counters_lr10(nvswitch_device *device, NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p);
void nvswitch_corelib_get_uphy_load_lr10(nvlink_link *link, NvBool *bUnlocked);
void nvswitch_init_buffer_ready_lr10(nvswitch_device *device, nvlink_link *link, NvBool bNportBufferReady);
NvlStatus nvswitch_ctrl_get_nvlink_lp_counters_lr10(nvswitch_device *device, NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params);
NvlStatus nvswitch_service_nvldl_fatal_link_lr10(nvswitch_device *device, NvU32 nvliptInstance, NvU32 link);
NvlStatus nvswitch_service_minion_link_lr10(nvswitch_device *device, NvU32 nvliptInstance);
void nvswitch_apply_recal_settings_lr10(nvswitch_device *device, nvlink_link *link);
NvlStatus nvswitch_ctrl_get_sw_info_lr10(nvswitch_device *device, NVSWITCH_GET_SW_INFO_PARAMS *p);
#endif //_LR10_H_

View File

@@ -0,0 +1,95 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _MINION_LR10_H_
#define _MINION_LR10_H_
#include "lr10.h"
// TODO modify these for LR10
#define FALCON_IMEM_BLK_SIZE_BYTES_LR10 256
#define FALCON_CODE_HDR_OS_CODE_OFFSET_LR10 0
#define FALCON_CODE_HDR_OS_CODE_SIZE_LR10 1
#define FALCON_CODE_HDR_OS_DATA_OFFSET_LR10 2
#define FALCON_CODE_HDR_OS_DATA_SIZE_LR10 3
#define FALCON_CODE_HDR_NUM_APPS_LR10 4
#define FALCON_CODE_HDR_APP_CODE_START_LR10 5
#define FALCON_CODE_HDR_APP_DATA_START_LR10 ( FALCON_CODE_HDR_APP_CODE_START_LR10 + (FALCON_CODE_HDR_NUM_APPS_LR10 * 2))
#define FALCON_CODE_HDR_CODE_OFFSET_LR10 0
#define FALCON_CODE_HDR_CODE_SIZE_LR10 1
#define FALCON_CODE_HDR_DATA_OFFSET_LR10 0
#define FALCON_CODE_HDR_DATA_SIZE_LR10 1
#define NV_MINION_NVLINK_DL_STAT_ARGS_LANEID 15:12
#define NV_MINION_NVLINK_DL_STAT_ARGS_ADDRS 11:0
typedef const struct
{
NvU32 osCodeOffset;
NvU32 osCodeSize;
NvU32 osDataOffset;
NvU32 osDataSize;
NvU32 numApps;
NvU32 appCodeStart;
NvU32 appDataStart;
NvU32 codeOffset;
NvU32 codeSize;
NvU32 dataOffset;
NvU32 dataSize;
} FALCON_UCODE_HDR_INFO_LR10, *PFALCON_UCODE_HDR_INFO_LR10;
#define NVSWITCH_MINION_LINK_RD32_LR10(_d, _physlinknum, _dev, _reg) \
NVSWITCH_LINK_RD32_LR10(_d, _physlinknum, MINION, _dev, _reg)
#define NVSWITCH_MINION_LINK_WR32_LR10(_d, _physlinknum, _dev, _reg, _data) \
NVSWITCH_LINK_WR32_LR10(_d, _physlinknum, MINION, _dev, _reg, _data)
#define NVSWITCH_MINION_WR32_LR10(_d, _instance, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, MINION, , _instance, _dev, _reg, _data)
#define NVSWITCH_MINION_RD32_LR10(_d, _instance, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, MINION, _instance, _dev, _reg)
#define NVSWITCH_MINION_WR32_BCAST_LR10(_d, _dev, _reg, _data) \
NVSWITCH_BCAST_WR32_LR10(_d, MINION, _dev, _reg, _data)
#define NVSWITCH_MINION_GET_LOCAL_LINK_ID(_physlinknum) \
    ((_physlinknum) % NVSWITCH_LINKS_PER_MINION)
//
// Internal function declarations
//
NvlStatus nvswitch_init_minion_lr10(nvswitch_device *device);
NvlStatus nvswitch_minion_send_command_lr10(nvswitch_device *device, NvU32 linkNumber, NvU32 command, NvU32 scratch0);
NvlStatus nvswitch_minion_get_dl_status_lr10(nvswitch_device *device, NvU32 linkId, NvU32 statusIdx, NvU32 statusArgs, NvU32 *statusData);
NvlStatus nvswitch_minion_get_initoptimize_status_lr10(nvswitch_device *device, NvU32 linkId);
NvlStatus nvswitch_minion_get_initnegotiate_status_lr10(nvswitch_device *device, NvU32 linkId);
NvlStatus nvswitch_minion_get_rxdet_status_lr10(nvswitch_device *device, NvU32 linkId);
NvlStatus nvswitch_minion_set_rx_term_lr10(nvswitch_device *device, NvU32 linkId);
NvU32 nvswitch_minion_get_line_rate_Mbps_lr10(nvswitch_device *device, NvU32 linkId);
NvU32 nvswitch_minion_get_data_rate_KiBps_lr10(nvswitch_device *device, NvU32 linkId);
NvlStatus nvswitch_set_minion_initialized_lr10(nvswitch_device *device, NvU32 idx_minion, NvBool initialized);
NvBool nvswitch_is_minion_initialized_lr10(nvswitch_device *device, NvU32 idx_minion);
NvlStatus nvswitch_minion_clear_dl_error_counters_lr10(nvswitch_device *device, NvU32 linkId);
#endif //_MINION_LR10_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,68 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _PMGR_LR10_H_
#define _PMGR_LR10_H_
#include "lr10.h"
void
nvswitch_init_pmgr_lr10
(
nvswitch_device *device
);
void
nvswitch_init_pmgr_devices_lr10
(
nvswitch_device *device
);
NvU32
nvswitch_read_physical_id_lr10
(
nvswitch_device *device
);
NvlStatus
nvswitch_get_rom_info_lr10
(
nvswitch_device *device,
NVSWITCH_EEPROM_TYPE *eeprom
);
void
nvswitch_i2c_set_hw_speed_mode_lr10
(
nvswitch_device *device,
NvU32 port,
NvU32 speedMode
);
NvBool
nvswitch_is_i2c_supported_lr10
(
nvswitch_device *device
);
#endif //_PMGR_LR10_H_

View File

@@ -0,0 +1,34 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SMBPBI_LR10_H_
#define _SMBPBI_LR10_H_
NvlStatus
nvswitch_smbpbi_get_dem_num_messages_lr10
(
nvswitch_device *device,
NvU8 *pMsgCount
);
#endif //_SMBPBI_LR10_H_

View File

@@ -0,0 +1,67 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SOE_LR10_H_
#define _SOE_LR10_H_
#include "lr10.h"
typedef const struct
{
NvU32 appVersion;
NvU32 appCodeStartOffset;
NvU32 appCodeSize;
NvU32 appCodeImemOffset;
NvU32 appCodeIsSecure;
NvU32 appDataStartOffset;
NvU32 appDataSize;
NvU32 appDataDmemOffset;
} SOE_UCODE_APP_INFO_LR10, *PSOE_UCODE_APP_INFO_LR10;
typedef const struct
{
NvU32 version;
NvU32 numApps;
NvU32 codeEntryPoint;
SOE_UCODE_APP_INFO_LR10 apps[0];
} SOE_UCODE_HDR_INFO_LR10, *PSOE_UCODE_HDR_INFO_LR10;
#define NVSWITCH_SOE_WR32_LR10(_d, _instance, _dev, _reg, _data) \
NVSWITCH_ENG_WR32_LR10(_d, SOE, , _instance, _dev, _reg, _data)
#define NVSWITCH_SOE_RD32_LR10(_d, _instance, _dev, _reg) \
NVSWITCH_ENG_RD32_LR10(_d, SOE, _instance, _dev, _reg)
//
// Internal function declarations
//
NvlStatus nvswitch_init_soe_lr10(nvswitch_device *device);
NvlStatus nvswitch_soe_prepare_for_reset_lr10(nvswitch_device *device);
void nvswitch_soe_unregister_events_lr10(nvswitch_device *device);
void nvswitch_therm_soe_callback_lr10(nvswitch_device *device, union RM_FLCN_MSG *pMsg,
void *pParams, NvU32 seqDesc, NV_STATUS status);
NvlStatus nvswitch_soe_set_ucode_core_lr10(nvswitch_device *device, NvBool bFalcon);
NvlStatus nvswitch_soe_register_event_callbacks_lr10(nvswitch_device *device);
#endif //_SOE_LR10_H_

View File

@@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _THERM_LR10_H_
#define _THERM_LR10_H_
//
// LR10-specific fuse assignments
//
#define NVSWITCH_FUSE_OPT_TDIODE_LR10 NV_FUSE_OPT_CP2_TDIODE_OFFSET
NvlStatus
nvswitch_init_thermal_lr10
(
nvswitch_device *device
);
NvlStatus
nvswitch_ctrl_therm_read_temperature_lr10
(
nvswitch_device *device,
NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info
);
NvlStatus
nvswitch_ctrl_therm_get_temperature_limit_lr10
(
nvswitch_device *device,
NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS *info
);
void
nvswitch_monitor_thermal_alert_lr10
(
nvswitch_device *device
);
#endif //_THERM_LR10_H_

View File

@@ -0,0 +1,327 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _PMGR_NVSWITCH_H_
#define _PMGR_NVSWITCH_H_
#include "ctrl_dev_nvswitch.h"
#define NVSWITCH_BITS_PER_BYTE 8
#define NVSWITCH_HIGH NV_TRUE
#define NVSWITCH_LOW NV_FALSE
/*! Extract the first byte of a 10-bit address. */
#define NVSWITCH_GET_ADDRESS_10BIT_FIRST(a) ((NvU8)((((a) >> 8) & 0x6) | 0xF0))
/*! Extract the second byte of a 10-bit address. */
#define NVSWITCH_GET_ADDRESS_10BIT_SECOND(a) ((NvU8)(((a) >> 1) & 0xFF))
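/*!
 * Worked example (illustrative): for the 10-bit address 0x2A5,
 * NVSWITCH_GET_ADDRESS_10BIT_FIRST(0x2A5)  = ((0x2 & 0x6) | 0xF0) = 0xF2 and
 * NVSWITCH_GET_ADDRESS_10BIT_SECOND(0x2A5) = (0x152 & 0xFF)       = 0x52.
 */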
/*! Attaching read to read application interface */
#define NVSWITCH_I2C_READ(a,b) _nvswitch_i2c_i2cRead(device, a, b)
#define NVSWITCH_I2C_DELAY(a) NVSWITCH_NSEC_DELAY(a)
#define NVSWITCH_MAX_I2C_PORTS 4
/*! bit 0 of address set indicates read cycle to follow */
#define NVSWITCH_I2C_READCYCLE ((NvU8)0x01)
/*! Determine if an address is valid in the 7-bit address space. */
#define NVSWITCH_I2C_IS_7BIT_I2C_ADDRESS(a) ((a) <= 0xFF)
/*! Determine if an address is valid in the 10-bit address space. */
#define NVSWITCH_I2C_IS_10BIT_I2C_ADDRESS(a) ((a) <= 0x7FF)
// by-the-spec delay defaults (yields 100KHz)
#define NVSWITCH_I2C_PROFILE_STANDARD_tF 300
#define NVSWITCH_I2C_PROFILE_STANDARD_tR 1000
#define NVSWITCH_I2C_PROFILE_STANDARD_tSUDAT 1800 // actually, spec calls for (min) 250, but we've borrowed from tHDDAT
#define NVSWITCH_I2C_PROFILE_STANDARD_tHDDAT 1900 // actually, spec calls for (max) 3450, but we've loaned time to tSUDAT
#define NVSWITCH_I2C_PROFILE_STANDARD_tHIGH 4000
#define NVSWITCH_I2C_PROFILE_STANDARD_tSUSTO 4000
#define NVSWITCH_I2C_PROFILE_STANDARD_tHDSTA 4000
#define NVSWITCH_I2C_PROFILE_STANDARD_tSUSTA 4700
#define NVSWITCH_I2C_PROFILE_STANDARD_tBUF 4700
#define NVSWITCH_I2C_PROFILE_STANDARD_tLOW 4700 // NVSWITCH_I2C_PROFILE_STANDARD_tSUDAT + NVSWITCH_I2C_PROFILE_STANDARD_tR + NVSWITCH_I2C_PROFILE_STANDARD_tHDDAT
#define NVSWITCH_I2C_PROFILE_STANDARD_CYCLEPERIOD 10000 // NVSWITCH_I2C_PROFILE_STANDARD_tF + NVSWITCH_I2C_PROFILE_STANDARD_tLOW + NVSWITCH_I2C_PROFILE_STANDARD_tR + NVSWITCH_I2C_PROFILE_STANDARD_tHIGH
// by-the-spec delay defaults (yields 400KHz)
#define NVSWITCH_I2C_PROFILE_FAST_tF 300
#define NVSWITCH_I2C_PROFILE_FAST_tR 300
#define NVSWITCH_I2C_PROFILE_FAST_tSUDAT 200 // actually, spec calls for (min) 100, but we've borrowed from tHDDAT
#define NVSWITCH_I2C_PROFILE_FAST_tHDDAT 800 // actually, spec calls for (max) 900, but we've loaned time to tSUDAT
#define NVSWITCH_I2C_PROFILE_FAST_tHIGH 600
#define NVSWITCH_I2C_PROFILE_FAST_tSUSTO 600
#define NVSWITCH_I2C_PROFILE_FAST_tHDSTA 600
#define NVSWITCH_I2C_PROFILE_FAST_tSUSTA 600
#define NVSWITCH_I2C_PROFILE_FAST_tBUF 1300
#define NVSWITCH_I2C_PROFILE_FAST_tLOW              1300    // NVSWITCH_I2C_PROFILE_FAST_tSUDAT + NVSWITCH_I2C_PROFILE_FAST_tR + NVSWITCH_I2C_PROFILE_FAST_tHDDAT
#define NVSWITCH_I2C_PROFILE_FAST_CYCLEPERIOD       2500    // NVSWITCH_I2C_PROFILE_FAST_tF + NVSWITCH_I2C_PROFILE_FAST_tLOW + NVSWITCH_I2C_PROFILE_FAST_tR + NVSWITCH_I2C_PROFILE_FAST_tHIGH
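//
// Illustrative check of the numbers above (ns): standard mode gives
// tLOW = 1800 + 1000 + 1900 = 4700 and a cycle of 300 + 4700 + 1000 + 4000
// = 10000 (100KHz); fast mode gives tLOW = 200 + 300 + 800 = 1300 and a
// cycle of 300 + 1300 + 300 + 600 = 2500 (400KHz).
//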
/*!
* The I2C specification does not specify any timeout conditions for clock
 * stretching, i.e. any device can hold down SCL as long as it likes, so this
 * value needs to be adjusted on a case-by-case basis.
*/
#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_1200US 1200
#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_1000KHZ (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 4)
#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_400KHZ (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 4)
#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_300KHZ (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 3)
#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_200KHZ (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 2)
#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ (NVSWITCH_I2C_SCL_CLK_TIMEOUT_1200US / 10)
/* A reasonable SCL timeout is five cycles at 20 KHz. Full use should be rare
* in devices, occurring when in the middle of a real-time task. That comes to
* 25 clock cycles at 100 KHz, or 250 us. */
#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_250US 250
/* We don't want I2C to deal with traffic slower than 20 KHz (50 us cycle).
*/
#define NVSWITCH_I2C_MAX_CYCLE_US 50
/* The longest HW I2C transaction: S BYTE*2 S BYTE*4 P, at 1 each for S/P, and
* 9 for each byte (+ack). */
#define NVSWITCH_I2C_HW_MAX_CYCLES ((1 * 3) + (9 * 6))
/* We determine the HW operational timeout as the longest operation, plus two
* long SCL clock stretches. */
#define I2C_HW_IDLE_TIMEOUT_NS (1000 * \
((NVSWITCH_I2C_MAX_CYCLE_US * NVSWITCH_I2C_HW_MAX_CYCLES) + (NVSWITCH_I2C_SCL_CLK_TIMEOUT_1200US * 2)))
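/* Illustrative expansion of the above: NVSWITCH_I2C_HW_MAX_CYCLES is
 * (1 * 3) + (9 * 6) = 57 cycles, so the timeout works out to
 * 1000 * ((50 * 57) + (1200 * 2)) = 1000 * (2850 + 2400) = 5,250,000 ns. */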
//
// PMGR board configuration information
//
#define NVSWITCH_DESCRIBE_I2C_DEVICE(_port, _addr, _type, _rdWrAccessMask) \
{NVSWITCH_I2C_PORT ## _port, _addr, NVSWITCH_I2C_DEVICE ## _type, _rdWrAccessMask}
#define NVSWITCH_DESCRIBE_GPIO_PIN(_pin, _func, _hw_select, _misc_io) \
{_pin, NVSWITCH_GPIO_ENTRY_FUNCTION ## _func, _hw_select, \
NVSWITCH_GPIO_ENTRY_MISC_IO_ ## _misc_io}
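/*
 * Hypothetical usage sketch (the port and device-type tokens below are
 * placeholders, not definitions from this header): a board table entry such as
 *     NVSWITCH_DESCRIBE_I2C_DEVICE(_I2CA, 0x90, _SOME_SENSOR, rdWrMask)
 * token-pastes into
 *     {NVSWITCH_I2C_PORT_I2CA, 0x90, NVSWITCH_I2C_DEVICE_SOME_SENSOR, rdWrMask}.
 */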
/*! Structure containing a description of the I2C bus as needed by the software
* bit-banging implementation.
*/
typedef struct
{
NvU32 sclOut; // Bit number for SCL Output
NvU32 sdaOut; // Bit number for SDA Output
NvU32 sclIn; // Bit number for SCL Input
NvU32 sdaIn; // Bit number for SDA Input
NvU32 port; // Port number of the driving lines
NvU32 curLine; // Required for isLineHighFunction
NvU32 regCache; // Keeps the cache value of registers.
//
// The following timings are used as stand-ins for I2C spec timings, so
// that different speed modes may share the same code.
//
NvU16 tF;
NvU16 tR;
NvU16 tSuDat;
NvU16 tHdDat;
NvU16 tHigh;
NvU16 tSuSto;
NvU16 tHdSta;
NvU16 tSuSta;
NvU16 tBuf;
NvU16 tLow;
} NVSWITCH_I2C_SW_BUS;
/*! @brief Internal Command structure for HW I2C to perform I2C transaction */
typedef struct
{
NvU32 port;
NvU32 bRead;
NvU32 cntl;
NvU32 data;
NvU32 bytesRemaining;
NvS32 status;
NvU8 *pMessage;
NvBool bBlockProtocol;
} NVSWITCH_I2C_HW_CMD, *PNVSWITCH_I2C_HW_CMD;
typedef enum
{
NVSWITCH_I2C_ACQUIRER_NONE = 0,
NVSWITCH_I2C_ACQUIRER_UNKNOWN,
NVSWITCH_I2C_ACQUIRER_IOCTL, // e.g. MODS
NVSWITCH_I2C_ACQUIRER_EXTERNAL, // e.g. Linux Direct
} NVSWITCH_I2C_ACQUIRER;
typedef enum {
i2cProfile_Standard,
i2cProfile_Fast,
i2cProfile_End
} NVSWITCH_I2CPROFILE;
typedef enum
{
pmgrReg_i2cAddr,
pmgrReg_i2cCntl,
pmgrReg_i2cTiming,
pmgrReg_i2cOverride,
pmgrReg_i2cPoll,
pmgrReg_i2cData,
pmgrReg_unsupported
} NVSWITCH_PMGRREG_TYPE;
// I2C Speed limits
#define NVSWITCH_I2C_SPEED_LIMIT_NONE NV_U16_MAX //Close enough to not having a speed limit.
#define NVSWITCH_I2C_SPEED_1000KHZ 1000
#define NVSWITCH_I2C_SPEED_400KHZ 400
#define NVSWITCH_I2C_SPEED_300KHZ 300
#define NVSWITCH_I2C_SPEED_200KHZ 200
#define NVSWITCH_I2C_SPEED_100KHZ 100
enum
{
i2cSpeedLimit_dcb = 0,
i2cSpeedLimit_ctrl,
// Always leave as last element!
NVSWITCH_I2C_SPEED_LIMIT_MAX_DEVICES
};
// Timing for I2C cycles (allows for possibility of tweaking timing)
typedef struct __NVSWITCH_NVSWITCH_I2CTIMING
{
NvU32 tR; // at 100KHz, normally 1000ns
NvU32 tF; // at 100KHz, normally 300ns
NvU32 tHIGH; // at 100KHz, normally 4000ns
NvU32 tSUDAT; // at 100KHz, normally 250ns (min), but we borrow time from tHDDAT to improve clock phase
NvU32 tHDDAT; // at 100KHz, normally 3450ns (max), but we loan time to tSUDAT to improve clock phase
NvU32 tSUSTO; // at 100KHz, normally 4000ns
NvU32 tHDSTA; // at 100KHz, normally 4000ns
NvU32 tBUF; // at 100KHz, normally 4700ns
NvU32 tLOW; // computed to be: tSUDAT + tR + tHDDAT
NvU32 speed; // Port speed
} NVSWITCH_I2CTIMING;
#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL 2:0
#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL 5:3
#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PUBLIC 0x00000000
#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PRIVILEGED 0x00000001
#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INTERNAL 0x00000002
#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INACCESSIBLE 0x00000003
#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_PUBLIC NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PUBLIC
#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_PRIVILEGED NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PRIVILEGED
#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_INTERNAL NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INTERNAL
#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_INACCESSIBLE NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INACCESSIBLE
#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_PUBLIC NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PUBLIC
#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_PRIVILEGED NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PRIVILEGED
#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_INTERNAL NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INTERNAL
#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_INACCESSIBLE  NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INACCESSIBLE
typedef struct NVSWITCH_I2C_DEVICE_DESCRIPTOR
{
    NVSWITCH_I2C_PORT_TYPE   i2cPortLogical;   //!< Logical I2C port where the device sits
    NvU32                    i2cAddress;       //!< I2C slave address
NVSWITCH_I2C_DEVICE_TYPE i2cDeviceType;
NvU8 i2cRdWrAccessMask;
} NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE;
typedef struct NVSWITCH_OBJI2C *PNVSWITCH_OBJI2C;
#define NVSWITCH_I2C_SPEED_MODE_100KHZ 0
#define NVSWITCH_I2C_SPEED_MODE_200KHZ 1
#define NVSWITCH_I2C_SPEED_MODE_300KHZ 2
#define NVSWITCH_I2C_SPEED_MODE_400KHZ 3
#define NVSWITCH_I2C_SPEED_MODE_1000KHZ 4
typedef struct _nvswitch_tag_i2c_port
{
// Timing for I2C cycles (allows for possibility of tweaking timing)
NVSWITCH_I2CTIMING Timing;
NVSWITCH_I2C_HW_CMD hwCmd;
NvU32 defaultSpeedMode;
} NVSWITCH_I2CPORT, *PNVSWITCH_I2CPORT;
struct NVSWITCH_OBJI2C
{
//
// Addresses of I2C ports
//
// Note: The index of array is logical port number NOT physical
//
NVSWITCH_I2CPORT Ports[NVSWITCH_MAX_I2C_PORTS];
//
// Private data
//
// I2C Mutex/Synchronization state
NvU32 I2CAcquired;
NvU32 PortInfo[NVSWITCH_MAX_I2C_PORTS];
#define NV_I2C_PORTINFO_DEFINED 0:0
#define NV_I2C_PORTINFO_DEFINED_ABSENT 0x00000000
#define NV_I2C_PORTINFO_DEFINED_PRESENT 0x00000001
#define NV_I2C_PORTINFO_ACCESS_ALLOWED 1:1
#define NV_I2C_PORTINFO_ACCESS_ALLOWED_FALSE 0x00000000
#define NV_I2C_PORTINFO_ACCESS_ALLOWED_TRUE 0x00000001
NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE *device_list;
NvU32 device_list_size;
// I2C device allow list
NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE *i2c_allow_list;
NvU32 i2c_allow_list_size;
};
//
// Thermal
//
#define NVSWITCH_THERM_METHOD_UNKNOWN 0x00
#define NVSWITCH_THERM_METHOD_I2C 0x01
#define NVSWITCH_THERM_METHOD_MCU 0x02
typedef struct nvswitch_tdiode_info_type
{
NvU32 method;
struct NVSWITCH_I2C_DEVICE_DESCRIPTOR *method_i2c_info;
NvS32 A;
NvS32 B;
NvS32 offset;
} NVSWITCH_TDIODE_INFO_TYPE;
void nvswitch_i2c_destroy(nvswitch_device *device);
void nvswitch_i2c_init(nvswitch_device *device);
#endif //_PMGR_NVSWITCH_H_

View File

@@ -0,0 +1,548 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _REGKEY_NVSWITCH_H_
#define _REGKEY_NVSWITCH_H_
#include "export_nvswitch.h"
/*
 * NV_SWITCH_REGKEY_TXTRAIN_CONTROL - Select the TXTRAIN optimization algorithm
 *
 * NVLink 3.0 allows for multiple optimization algorithms A0-A7.
 * Documentation on the details of each algorithm can be found in
* the IAS section "4.4.3.3. Optimization Algorithms"
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL "TxTrainControl"
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_NOP 0x00000000
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_FOM_FORMAT 2:0
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_FOM_FORMAT_NOP 0x00000000
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_FOM_FORMAT_FOMA 0x00000001
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_FOM_FORMAT_FOMB 0x00000002
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM 10:3
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_NOP 0x00000000
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A0 0x00000001
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A1 0x00000002
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A2 0x00000004
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A3 0x00000008
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A4 0x00000010
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A5 0x00000020
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A6 0x00000040
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_A7 0x00000080
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_ADJUSTMENT_ALGORITHM 15:11
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_ADJUSTMENT_ALGORITHM_NOP 0x00000000
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_ADJUSTMENT_ALGORITHM_B0 0x00000001
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_ADJUSTMENT_ALGORITHM_B1 0x00000002
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_ADJUSTMENT_ALGORITHM_B2 0x00000004
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_ADJUSTMENT_ALGORITHM_B3 0x00000008
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_MINIMUM_TRAIN_TIME_MANTISSA 19:16
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_MINIMUM_TRAIN_TIME_MANTISSA_NOP 0x00000000
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_MINIMUM_TRAIN_TIME_EXPONENT 23:20
#define NV_SWITCH_REGKEY_TXTRAIN_CONTROL_MINIMUM_TRAIN_TIME_EXPONENT_NOP 0x00000000
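/*
 * Illustrative value composition (not a recommended setting): requesting FOMB
 * format with optimization algorithm A4 packs into the TxTrainControl value as
 * (_FOM_FORMAT_FOMB << 0) | (_OPTIMIZATION_ALGORITHM_A4 << 3)
 * = 0x2 | (0x10 << 3) = 0x82.
 */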
/*
* NV_SWITCH_REGKEY_EXTERNAL_FABRIC_MGMT - Toggle external fabric management.
*
* Switch driver currently uses nvlink core driver APIs which internally trigger
* link initialization and training. However, nvlink core driver now exposes a
* set of APIs for managing nvlink fabric externally (from user mode).
*
* When the regkey is enabled, switch driver will skip use of APIs which trigger
* link initialization and training. In that case, link training needs to be
* triggered externally.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_EXTERNAL_FABRIC_MGMT "ExternalFabricMgmt"
#define NV_SWITCH_REGKEY_EXTERNAL_FABRIC_MGMT_DISABLE 0x0
#define NV_SWITCH_REGKEY_EXTERNAL_FABRIC_MGMT_ENABLE 0x1
/*
* NV_SWITCH_REGKEY_CROSSBAR_DBI - Enable/disable crossbar DBI
* DBI - Data bus inversion provides some small power savings.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_CROSSBAR_DBI "CrossbarDBI"
#define NV_SWITCH_REGKEY_CROSSBAR_DBI_DISABLE 0x0
#define NV_SWITCH_REGKEY_CROSSBAR_DBI_ENABLE 0x1
/*
* NV_SWITCH_REGKEY_LINK_DBI - Enable/disable link DBI
* DBI - Data bus inversion provides some small power savings.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_LINK_DBI "LinkDBI"
#define NV_SWITCH_REGKEY_LINK_DBI_DISABLE 0x0
#define NV_SWITCH_REGKEY_LINK_DBI_ENABLE 0x1
/*
* NV_SWITCH_REGKEY_AC_COUPLING_MASK
*
* Value is a bitmask of which links are AC coupled and should be
* configured with SETACMODE.
* All links default to DC coupled.
*
* Mask contains links 0-31
* Mask2 contains links 32-63
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_AC_COUPLED_MASK "ACCoupledMask"
#define NV_SWITCH_REGKEY_AC_COUPLED_MASK2 "ACCoupledMask2"
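/*
 * Illustrative example: marking links 4 and 33 as AC coupled would use
 * ACCoupledMask = 0x00000010 (bit 4) and ACCoupledMask2 = 0x00000002
 * (bit 1 of the second mask, i.e. link 32 + 1).
 */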
/*
* NV_SWITCH_REGKEY_SWAP_CLK_OVERRIDE
*
* Value is a bitmask applied directly to _SWAP_CLK field.
* bit 0: select source for RXCLK_0P/N - ports 0-7
* bit 1: select source for RXCLK_1P/N - ports 16-17
* bit 2: select source for RXCLK_2P/N - ports 8-15
* bit 3: unconnected
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_SWAP_CLK_OVERRIDE "SwapClkOverride"
#define NV_SWITCH_REGKEY_SWAP_CLK_OVERRIDE_FIELD 3:0
/*
* NV_SWITCH_REGKEY_ENABLE_LINK_MASK - Mask of links to enable
*
* By default, all links are enabled
*
* [0]=1 - Enable link 0
* :
* [31]=1 - Enable link 31
*
* Mask contains links 0-31
* Mask2 contains links 32-63
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_ENABLE_LINK_MASK "LinkEnableMask"
#define NV_SWITCH_REGKEY_ENABLE_LINK_MASK2 "LinkEnableMask2"
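/*
 * Illustrative example: enabling only links 0-15 would use
 * LinkEnableMask = 0x0000FFFF and LinkEnableMask2 = 0x00000000.
 */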
/*
* NV_SWITCH_REGKEY_BANDWIDTH_SHAPER
*
* Selects among various transaction fairness modes affecting bandwidth
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_BANDWIDTH_SHAPER "BandwidthShaper"
#define NV_SWITCH_REGKEY_BANDWIDTH_SHAPER_PROD 0x0
#define NV_SWITCH_REGKEY_BANDWIDTH_SHAPER_XSD 0x1
#define NV_SWITCH_REGKEY_BANDWIDTH_SHAPER_BUCKET_BW 0x2
#define NV_SWITCH_REGKEY_BANDWIDTH_SHAPER_BUCKET_TX_FAIR 0x3
/*
* NV_SWITCH_REGKEY_SSG_CONTROL
*
* Internal use only (supported only on MODS)
* Allows SSG interface to tweak internal behavior for testing & debugging
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_SSG_CONTROL "SSGControl"
#define NV_SWITCH_REGKEY_SSG_CONTROL_BREAK_AFTER_UPHY_INIT 0:0
#define NV_SWITCH_REGKEY_SSG_CONTROL_BREAK_AFTER_UPHY_INIT_NO (0x00000000)
#define NV_SWITCH_REGKEY_SSG_CONTROL_BREAK_AFTER_UPHY_INIT_YES (0x00000001)
#define NV_SWITCH_REGKEY_SSG_CONTROL_BREAK_AFTER_DLPL_INIT 1:1
#define NV_SWITCH_REGKEY_SSG_CONTROL_BREAK_AFTER_DLPL_INIT_NO (0x00000000)
#define NV_SWITCH_REGKEY_SSG_CONTROL_BREAK_AFTER_DLPL_INIT_YES (0x00000001)
/*
* NV_SWITCH_REGKEY_SKIP_BUFFER_READY
*
* Used to optionally skip the initialization of NVLTLC_TX_CTRL_BUFFER_READY,
* NVLTLC_RX_CTRL_BUFFER_READY, and NPORT_CTRL_BUFFER_READY registers.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_SKIP_BUFFER_READY "SkipBufferReady"
#define NV_SWITCH_REGKEY_SKIP_BUFFER_READY_TLC 0:0
#define NV_SWITCH_REGKEY_SKIP_BUFFER_READY_TLC_NO (0x00000000)
#define NV_SWITCH_REGKEY_SKIP_BUFFER_READY_TLC_YES (0x00000001)
#define NV_SWITCH_REGKEY_SKIP_BUFFER_READY_NPORT 1:1
#define NV_SWITCH_REGKEY_SKIP_BUFFER_READY_NPORT_NO (0x00000000)
#define NV_SWITCH_REGKEY_SKIP_BUFFER_READY_NPORT_YES (0x00000001)
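/*
* Illustrative sketch (not part of the original header): the values above use
* the hi:lo bit-range notation, so the TLC and NPORT controls occupy bits 0
* and 1 of the regkey value respectively. A hedged example of testing the TLC
* bit by hand (the driver itself may rely on DRF-style helpers instead):
*/
static NvBool
_example_skip_buffer_ready_tlc
(
    NvU32 regval
)
{
    // _SKIP_BUFFER_READY_TLC is field 0:0; _NPORT would be field 1:1.
    return ((regval >> 0) & 0x1) == NV_SWITCH_REGKEY_SKIP_BUFFER_READY_TLC_YES;
}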
/*
* NV_SWITCH_REGKEY_SOE_DISABLE - Disables init and usage of SOE by the kernel driver
*
* The NVSwitch driver relies on SOE for some features, but can operate
* without it, with reduced functionality.
*
* When the regkey is set to YES, the NVSwitch driver disregards SOE and will not
* bootstrap it with the driver payload image. All interactions between
* the driver and SOE are disabled.
*
* Driver unload does not idle an already bootstrapped SOE. Hence it is
* recommended to reset the device in order to disable SOE completely. The pre-OS image
* will still be running even though SOE is disabled through the driver.
*
* If set to NO, the SOE will function as normal.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_SOE_DISABLE "SoeDisable"
#define NV_SWITCH_REGKEY_SOE_DISABLE_NO 0x0
#define NV_SWITCH_REGKEY_SOE_DISABLE_YES 0x1
/*
* NV_SWITCH_REGKEY_SOE_BOOT_CORE - Selects SOE core
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_SOE_BOOT_CORE "SoeBootCore"
#define NV_SWITCH_REGKEY_SOE_BOOT_CORE_FALCON 0x0
#define NV_SWITCH_REGKEY_SOE_BOOT_CORE_DEFAULT 0x2
/*
* NV_SWITCH_REGKEY_ENABLE_PM
*
* Used to optionally send the ENABLE_PM command to MINION on link training
* and DISABLE_PM on link teardown.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_ENABLE_PM "EnablePM"
#define NV_SWITCH_REGKEY_ENABLE_PM_NO 0x0
#define NV_SWITCH_REGKEY_ENABLE_PM_YES 0x1
/*
* NV_SWITCH_REGKEY_MINION_SET_UCODE*
*
* The following regkeys are used to override the MINION image in the driver.
*
* The ucode image is overridden by the .js file supplied along with the -nvswitch_set_minion_ucode regkey.
*
* Private: Debug use only
*/
/*
* Overrides the MINION image data with g_os_ucode_data_nvswitch_minion fetched from the .js file.
*/
#define NV_SWITCH_REGKEY_MINION_SET_UCODE_DATA "MinionSetUCodeData"
/*
* Overrides the MINION header with g_os_ucode_header_nvswitch_minion fetched from the .js file.
*/
#define NV_SWITCH_REGKEY_MINION_SET_UCODE_HDR "MinionSetUCodeHdr"
/*
* Overrides the MINION ucode data size with g_os_ucode_data_nvswitch_minion_size fetched from the .js file.
*/
#define NV_SWITCH_REGKEY_MINION_SET_UCODE_DATA_SIZE "MinionSetUCodeDataSize"
/*
* Overrides the MINION ucode header size fetched from the .js file.
*/
#define NV_SWITCH_REGKEY_MINION_SET_UCODE_HDR_SIZE "MinionSetUCodeHdrSize"
/*
* NV_SWITCH_REGKEY_CHIPLIB_FORCED_LINK_CONFIG_MASK
*
* Internal use only
* This notifies the driver that we are using a chiplib forced link config
* to initialize and train the links.
* Mask contains links 0-31
* Mask2 contains links 32-63
*
* This is intended for sim platforms only where MINION is not available
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_CHIPLIB_FORCED_LINK_CONFIG_MASK "ChiplibForcedLinkConfigMask"
#define NV_SWITCH_REGKEY_CHIPLIB_FORCED_LINK_CONFIG_MASK2 "ChiplibForcedLinkConfigMask2"
/*
* Initiates a DMA selftest on SOE during init. Default is disabled.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_SOE_DMA_SELFTEST "SoeDmaSelfTest"
#define NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_DISABLE 0x00
#define NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_ENABLE 0x01
/*
* Disables logging of latency counters
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_LATENCY_COUNTER_LOGGING "LatencyCounterLogging"
#define NV_SWITCH_REGKEY_LATENCY_COUNTER_LOGGING_DISABLE 0x00
#define NV_SWITCH_REGKEY_LATENCY_COUNTER_LOGGING_ENABLE 0x01
/*
* Knob to change NVLink link speed
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_SPEED_CONTROL "SpeedControl"
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED 4:0
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_DEFAULT 0x00
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_16G 0x01
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_20G 0x03
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_25G 0x08
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_25_78125G 0x08
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_32G 0x0E
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_40G 0x0F
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_50G 0x10
#define NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_53_12500G 0x11
/*
* Enable/Disable periodic flush to inforom. Default is disabled.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_INFOROM_BBX_ENABLE_PERIODIC_FLUSHING "InforomBbxPeriodicFlush"
#define NV_SWITCH_REGKEY_INFOROM_BBX_ENABLE_PERIODIC_FLUSHING_DISABLE 0x00
#define NV_SWITCH_REGKEY_INFOROM_BBX_ENABLE_PERIODIC_FLUSHING_ENABLE 0x01
/*
* The rate (in seconds) at which the lifetime data about the NVSwitch is written into the BBX object.
* This is gated by NV_SWITCH_REGKEY_INFOROM_BBX_ENABLE_PERIODIC_FLUSHING.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_PERIODICITY "InforomBbxWritePeriodicity"
#define NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_PERIODICITY_DEFAULT 600 // 600 seconds (10 min)
/*
* The minimum duration the driver must run before writing to the BlackBox Recorder (BBX) object
* on driver exit (in seconds).
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_MIN_DURATION "InforomBbxWriteMinDuration"
#define NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_MIN_DURATION_DEFAULT 30 // 30 seconds
/*
* Change ATO timer value
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_ATO_CONTROL "ATOControl"
#define NV_SWITCH_REGKEY_ATO_CONTROL_DEFAULT 0x0
#define NV_SWITCH_REGKEY_ATO_CONTROL_TIMEOUT 19:0
#define NV_SWITCH_REGKEY_ATO_CONTROL_TIMEOUT_DEFAULT 0x00
#define NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE 20:20
#define NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_FALSE 0x00
#define NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_TRUE 0x01
/*
* Change STO timer value
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_STO_CONTROL "STOControl"
#define NV_SWITCH_REGKEY_STO_CONTROL_DEFAULT 0x0
#define NV_SWITCH_REGKEY_STO_CONTROL_TIMEOUT 19:0
#define NV_SWITCH_REGKEY_STO_CONTROL_TIMEOUT_DEFAULT 0x00
#define NV_SWITCH_REGKEY_STO_CONTROL_DISABLE 20:20
#define NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_FALSE 0x00
#define NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_TRUE 0x01
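/*
* Illustrative sketch (not part of the original header): composing an
* ATOControl/STOControl value by hand. The timeout occupies bits 19:0 and the
* disable flag bit 20; the timeout units are hardware-defined and not
* documented here, so treat the helper as a packing example only.
*/
static NvU32
_example_build_ato_control
(
    NvU32 timeout,      // packed into _TIMEOUT (19:0)
    NvBool bDisable     // packed into _DISABLE (20:20)
)
{
    NvU32 regval = 0;

    regval |= (timeout & 0xFFFFF);          // 20-bit timeout field
    regval |= ((bDisable ? 1U : 0U) << 20); // disable bit

    return regval;
}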
/*
* NV_SWITCH_REGKEY_MINION_DISABLE - Disables init and usage of MINION by the kernel driver
*
* The NVSwitch driver relies on MINION for some features, but can operate
* without it. This regkey is required for Bug 2848340.
*
* When the regkey is set to YES, the NVSwitch driver disregards MINION and will not
* bootstrap it. All interactions between the driver and MINION are disabled.
*
* If set to NO, the MINION will function as normal.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_MINION_DISABLE "MinionDisable"
#define NV_SWITCH_REGKEY_MINION_DISABLE_NO 0x0
#define NV_SWITCH_REGKEY_MINION_DISABLE_YES 0x1
/*
* NV_SWITCH_REGKEY_MINION_SET_UCODE_TARGET - Selects the core on which Minion will run
*
* When the regkey is set to FALCON, the NVSwitch driver will run MINION on the Falcon core.
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_MINION_SET_UCODE_TARGET "MinionSetUcodeTarget"
#define NV_SWITCH_REGKEY_MINION_SET_UCODE_TARGET_DEFAULT 0x0
#define NV_SWITCH_REGKEY_MINION_SET_UCODE_TARGET_FALCON 0x1
/*
* NV_SWITCH_REGKEY_MINION_SET_SIMMODE - Selects simmode settings to send to MINION
*
* Regkey is set to either SLOW, MEDIUM, or FAST depending on the environment and the timing
* needed by MINION to set up alarms during the training sequence
*
* In the default option, no SIMMODE is selected
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_MINION_SET_SIMMODE "MinionSetSimmode"
#define NV_SWITCH_REGKEY_MINION_SET_SIMMODE_DEFAULT 0x0
#define NV_SWITCH_REGKEY_MINION_SET_SIMMODE_FAST 0x1
#define NV_SWITCH_REGKEY_MINION_SET_SIMMODE_MEDIUM 0x2
#define NV_SWITCH_REGKEY_MINION_SET_SIMMODE_SLOW 0x3
/*
* NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS - Selects SMF settings to send to MINION
*
* Regkey is set to either SLOW, MEDIUM, or FAST depending on the environment and the timing
* needed by MINION to set up alarms during the training sequence
*
* In the default option, no SMF settings are selected
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS "MinionSmfSettings"
#define NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS_DEFAULT 0x0
#define NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS_FAST 0x1
#define NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS_MEDIUM 0x2
#define NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS_SLOW 0x3
#define NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS_MEDIUM_SERIAL 0x4
/*
* NV_SWITCH_REGKEY_MINION_SELECT_UPHY_TABLES - Selects uphy tables to send to MINION
*
* Regkey is set to either SHORT or FAST depending on the environment and the timing
* needed by MINION to set up alarms during the training sequence
*
* In the default option, no UPHY table is selected
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_MINION_SELECT_UPHY_TABLES "MinionSelectUphyTables"
#define NV_SWITCH_REGKEY_MINION_SELECT_UPHY_TABLES_DEFAULT 0x0
#define NV_SWITCH_REGKEY_MINION_SELECT_UPHY_TABLES_SHORT 0x1
#define NV_SWITCH_REGKEY_MINION_SELECT_UPHY_TABLES_FAST 0x2
/*
* NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS - Programs the L1_RECAL fields
*
* Regkey is used to program the following:
*
* MIN_RECAL_TIME_MANTISSA
* MIN_RECAL_TIME_EXPONENT
* MAX_RECAL_PERIOD_MANTISSA
* MAX_RECAL_PERIOD_EXPONENT
*
* In the default option, no L1_RECAL fields are programmed
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS "LinkRecalSettings"
#define NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS_NOP 0x0
#define NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS_MIN_RECAL_TIME_MANTISSA 3:0
#define NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS_MIN_RECAL_TIME_EXPONENT 7:4
#define NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS_MAX_RECAL_PERIOD_MANTISSA 11:8
#define NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS_MAX_RECAL_PERIOD_EXPONENT 15:12
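/*
* Illustrative sketch (not part of the original header): packing the four
* 4-bit L1_RECAL fields into a LinkRecalSettings value. Legal ranges and units
* for the mantissa/exponent pairs are hardware-defined and not documented
* here; the masks below only reflect the field widths.
*/
static NvU32
_example_build_link_recal_settings
(
    NvU32 min_recal_time_man,     // bits  3:0
    NvU32 min_recal_time_exp,     // bits  7:4
    NvU32 max_recal_period_man,   // bits 11:8
    NvU32 max_recal_period_exp    // bits 15:12
)
{
    return ((min_recal_time_man   & 0xF) << 0) |
           ((min_recal_time_exp   & 0xF) << 4) |
           ((max_recal_period_man & 0xF) << 8) |
           ((max_recal_period_exp & 0xF) << 12);
}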
/*
* Used to disable private, internal-use-only regkeys in release build drivers
*/
#define NV_SWITCH_REGKEY_PRIVATE 1
#define NV_SWITCH_REGKEY_PUBLIC 0
#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
#define NV_SWITCH_REGKEY_PRIVATE_ALLOWED 1
#else
#define NV_SWITCH_REGKEY_PRIVATE_ALLOWED 0
#endif //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
/*
* NV_SWITCH_REGKEY_LINK_TRAINING_SELECT - Select the link training to be done
*
* This regkey allows overriding the system defaults and can force either
* training method when desired.
*/
#define NV_SWITCH_REGKEY_LINK_TRAINING_SELECT "LinkTrainingMode"
#define NV_SWITCH_REGKEY_LINK_TRAINING_SELECT_DEFAULT 0x0
/*
* NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL - Enable access to all I2C Ports/Devices
*
* Private: Debug use only
*/
#define NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL "I2cAccessControl"
#define NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_DEFAULT 0x0
#define NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_ENABLE 0x1
#define NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_DISABLE 0x0
/*
* NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT - Configure the CRC bit error rate for the short interrupt
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT "CRCBitErrorRateShort"
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_OFF 0x0
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN 2:0
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP 3:3
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN 6:4
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP 11:8
/*
* NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG - Configure the CRC bit error rate for the long interrupt
*
* Public: Available in release drivers
*/
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG "CRCBitErrorRateLong"
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_OFF 0x000
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_BUG_3365481_CASE_1 0x803
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_BUG_3365481_CASE_2 0x703
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_BUG_3365481_CASE_5 0x34D
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_BUG_3365481_CASE_6 0x00F
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_MAN 2:0
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_EXP 3:3
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_MAN 6:4
#define NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_EXP 12:8
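/*
* Illustrative sketch (not part of the original header): decoding a regkey
* value against the long-interrupt field layout above. For example,
* _BUG_3365481_CASE_1 (0x803) decodes to THRESHOLD_MAN=0x3, THRESHOLD_EXP=0x0,
* TIMESCALE_MAN=0x0, TIMESCALE_EXP=0x8. How these encodings map to an actual
* bit error rate is hardware-defined and not documented here.
*/
static void
_example_decode_crc_ber_long
(
    NvU32 regval,
    NvU32 *pThresholdMan,
    NvU32 *pThresholdExp,
    NvU32 *pTimescaleMan,
    NvU32 *pTimescaleExp
)
{
    *pThresholdMan = (regval >> 0) & 0x7;    // _THRESHOLD_MAN  2:0
    *pThresholdExp = (regval >> 3) & 0x1;    // _THRESHOLD_EXP  3:3
    *pTimescaleMan = (regval >> 4) & 0x7;    // _TIMESCALE_MAN  6:4
    *pTimescaleExp = (regval >> 8) & 0x1F;   // _TIMESCALE_EXP  12:8
}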
#endif //_REGKEY_NVSWITCH_H_

View File

@@ -0,0 +1,863 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _ROM_NVSWITCH_H_
#define _ROM_NVSWITCH_H_
#include "pmgr_nvswitch.h"
#include "io_nvswitch.h"
//
// When parsing BIOS tables these wrappers help protect against reading and using
// fields that may not be present in the ROM image by checking the offset against
// the structure size.
//
#define NV_OFFSETOF_MEMBER(_basePtr, _member) \
((NvUPtr)(((NvU8 *)(&((_basePtr)->_member))) - ((NvU8 *)(_basePtr))))
#define NVSWITCH_ELEMENT_PRESENT(_ptr, _element, _size) \
(NV_OFFSETOF_MEMBER((_ptr), _element) + sizeof((_ptr)->_element) <= (_size))
#define NVSWITCH_ELEMENT_READ(_ptr, _element, _size, _default) \
(NVSWITCH_ELEMENT_PRESENT(_ptr, _element, _size) ? \
((_ptr)->_element) : (_default))
#define NVSWITCH_ELEMENT_VALIDATE(_ptr, _element, _size, _default, _expected) \
do \
{ \
NvU32 data = NVSWITCH_ELEMENT_READ(_ptr, _element, _size, _default); \
if (data != (_expected)) \
{ \
NVSWITCH_PRINT(device, SETUP, \
"Element '%s->%s'=0x%x but expected 0x%x\n", \
#_ptr, #_element, data, (NvU32) (_expected)); \
} \
} while(0)
#define NVSWITCH_ELEMENT_CHECK(_ptr, _element, _size, _default) \
NVSWITCH_ELEMENT_VALIDATE(_ptr, _element, _size, _default, _default)
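/*
* Illustrative usage sketch (not part of the original header): the wrappers
* above let a parser read optional trailing fields safely when a ROM table
* only advertises its size. The struct and names below are hypothetical.
*/
typedef struct
{
    NvU8  version;
    NvU8  header_size;
    NvU16 optional_field;    // only present in newer table revisions
} _EXAMPLE_ROM_TABLE;

static NvU32
_example_read_optional_field
(
    _EXAMPLE_ROM_TABLE *pTable,
    NvU32 reported_size      // size actually present in the ROM image
)
{
    // Falls back to 0 when the ROM image predates optional_field.
    return NVSWITCH_ELEMENT_READ(pTable, optional_field, reported_size, 0);
}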
#define NVSWITCH_STRUCT_PACKED_ALIGNED(typeName, bytes) \
typedef struct __attribute__((packed, aligned(bytes)))
#define NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
//
// AT24CM02 EEPROM
// http://ww1.microchip.com/downloads/en/DeviceDoc/Atmel-8828-SEEPROM-AT24CM02-Datasheet.pdf
//
#define AT24CM02_INDEX_SIZE 18 // Addressing bits
#define AT24CM02_BLOCK_SIZE 256 // R/W block size (bytes)
//
// AT24C02C EEPROM
// http://ww1.microchip.com/downloads/en/DeviceDoc/Atmel-8700-SEEPROM-AT24C01C-02C-Datasheet.pdf
//
#define AT24C02C_INDEX_SIZE 8 // Addressing bits
#define AT24C02C_BLOCK_SIZE 8 // R/W block size (bytes)
//
// AT24C02D EEPROM
// http://ww1.microchip.com/downloads/en/devicedoc/atmel-8871f-seeprom-at24c01d-02d-datasheet.pdf
// 2kb EEPROM used on LR10 P4790 B00 platform
//
#define AT24C02D_INDEX_SIZE 8 // Addressing bits
#define AT24C02D_BLOCK_SIZE 8 // R/W block size (bytes)
typedef struct
{
NvU32 i2c_port;
NvU32 i2c_address;
NvU32 device_type;
NvU32 index_size;
NvU32 block_size;
NvU32 block_count;
NvU32 eeprom_size;
} NVSWITCH_EEPROM_TYPE;
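/*
* Illustrative sketch (not part of the original header): describing the
* AT24CM02 part above with an NVSWITCH_EEPROM_TYPE. The i2c port, address and
* device_type are placeholders, and deriving eeprom_size/block_count from the
* addressing bits is an assumption made for illustration only.
*/
static void
_example_describe_at24cm02
(
    NVSWITCH_EEPROM_TYPE *pEeprom
)
{
    pEeprom->i2c_port    = 0;                          // placeholder
    pEeprom->i2c_address = 0xA0;                       // placeholder
    pEeprom->device_type = 0;                          // placeholder
    pEeprom->index_size  = AT24CM02_INDEX_SIZE;        // 18 addressing bits
    pEeprom->block_size  = AT24CM02_BLOCK_SIZE;        // 256-byte R/W blocks
    pEeprom->eeprom_size = 1 << AT24CM02_INDEX_SIZE;   // 256 KB if 1 byte per address
    pEeprom->block_count = pEeprom->eeprom_size / pEeprom->block_size;
}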
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_EEPROM_HEADER, 1)
{
char signature[4];
NvU16 version;
NvU16 header_size;
NvU16 pci_vendor_id;
NvU16 pci_device_id;
NvU16 pci_system_vendor_id;
NvU16 pci_system_device_id;
NvU16 firmware_size;
NvU8 reserved[13];
NvU8 checksum;
} NVSWITCH_EEPROM_HEADER;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_HEADER, 1)
{
NvU16 id;
char signature[4];
NvU16 bcd_version;
NvU8 header_size;
NvU8 token_size;
NvU8 token_entries;
NvU8 checksum;
} NVSWITCH_BIT_HEADER;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_BIT_TOKEN_CLOCK_PTRS 0x43
#define NVSWITCH_BIT_TOKEN_NVINIT_PTRS 0x49
#define NVSWITCH_BIT_TOKEN_NOP 0x4E
#define NVSWITCH_BIT_TOKEN_PERF_PTRS 0x50
#define NVSWITCH_BIT_TOKEN_BRIDGE_FW_DATA 0x52
#define NVSWITCH_BIT_TOKEN_DCB_PTRS 0x6E
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_TOKEN, 1)
{
NvU8 id;
NvU8 data_version;
NvU16 data_size;
NvU16 data_offset;
} NVSWITCH_BIT_TOKEN;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
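/*
* Illustrative sketch (not part of the original header): once the BIT header
* has been located inside the ROM image, the token list that follows it can
* be walked to find a particular token (e.g. NVSWITCH_BIT_TOKEN_NVINIT_PTRS).
* Bounds checking of the buffer and unaligned-access concerns are omitted.
*/
static NvBool
_example_find_bit_token
(
    const NvU8 *pRom,              // raw ROM image bytes
    NvU32 bit_header_offset,       // offset of the NVSWITCH_BIT_HEADER
    NvU8 wanted_id,                // e.g. NVSWITCH_BIT_TOKEN_NVINIT_PTRS
    NVSWITCH_BIT_TOKEN *pToken     // out: matching token, if any
)
{
    const NVSWITCH_BIT_HEADER *pHdr =
        (const NVSWITCH_BIT_HEADER *)(pRom + bit_header_offset);
    NvU32 i;

    for (i = 0; i < pHdr->token_entries; i++)
    {
        const NVSWITCH_BIT_TOKEN *pCur = (const NVSWITCH_BIT_TOKEN *)
            (pRom + bit_header_offset + pHdr->header_size + (i * pHdr->token_size));

        if (pCur->id == wanted_id)
        {
            *pToken = *pCur;
            return NV_TRUE;
        }
    }

    return NV_FALSE;
}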
// 0x43: BIT_TOKEN_CLOCK_PTRS
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_CLOCK_PTRS, 1)
{
NvU32 pll_info_table;
NvU32 vbe_mode_pclk;
NvU32 clocks_table;
NvU32 clocks_programming;
NvU32 nafll;
NvU32 adc_table;
NvU32 freq_control;
} NVSWITCH_BIT_CLOCK_PTRS;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_CLOCK_PTRS_PLL_INFO_VERSION 0x50
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_PLL_INFO_HEADER, 1)
{
NvU8 version;
NvU8 header_size;
NvU8 entry_size;
NvU8 entry_count;
} NVSWITCH_PLL_INFO_HEADER;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_PLL_INFO_ENTRY, 1)
{
NvU8 pll_id;
NvU16 ref_min_mhz;
NvU16 ref_max_mhz;
NvU16 vco_min_mhz;
NvU16 vco_max_mhz;
NvU16 update_min_mhz;
NvU16 update_max_mhz;
NvU8 m_min;
NvU8 m_max;
NvU8 n_min;
NvU8 n_max;
NvU8 pl_min;
NvU8 pl_max;
} NVSWITCH_PLL_INFO_ENTRY;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_PLL_ID_SYSPLL 0x07
// 0x49: BIT_TOKEN_NVINIT_PTRS
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_NVINIT_PTRS, 1)
{
NvU16 init_script;
NvU16 macro_index;
NvU16 macro_table;
NvU16 condition;
NvU16 io_condition;
NvU16 io_flag_condition;
NvU16 init_function;
NvU16 private_boot;
NvU16 data_arrays;
NvU16 pcie_settings;
NvU16 devinit;
NvU16 devinit_size;
NvU16 boot_script;
NvU16 boot_script_size;
NvU16 nvlink_config;
NvU16 boot_script_nonGC6;
NvU16 boot_script_nonGC6_size;
} NVSWITCH_BIT_NVINIT_PTRS;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_NVLINK_CONFIG, 1)
{
NvU8 version;
NvU8 size;
NvU16 reserved;
NvU64 link_disable_mask; // 1 = disable
NvU64 link_speed_mask; // 1 = safe mode
NvU64 link_refclk_mask; // 0 = 100MHz, 1 = 133MHz
NvU8 flags;
NvU64 ac_coupled_mask; // 0 = DC, 1 = AC
} NVSWITCH_NVLINK_CONFIG;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
// 0x52: BIT_TOKEN_BRIDGE_FW_DATA
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_BRIDGE_FW_DATA, 1)
{
NvU32 firmware_version;
NvU8 oem_version;
NvU16 firmware_size;
char BIOS_MOD_date[8];
NvU32 firmware_flags;
NvU16 eng_product_name;
NvU8 eng_product_name_size;
NvU16 nvswitch_instance_id;
} NVSWITCH_BIT_BRIDGE_FW_DATA;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_BUILD 0:0
#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_BUILD_REL 0
#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_BUILD_ENG 1
#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_I2C 1:1
#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_I2C_MASTER 0
#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_I2C_NOT_MASTER 1
// 0x6E: BIT_TOKEN_DCB_PTRS
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_DCB_PTRS, 1)
{
NvU16 dcb_header_ptr;
} NVSWITCH_BIT_DCB_PTRS;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_DCB_HEADER_VERSION_41 0x41
#define NVSWITCH_DCB_HEADER_SIGNATURE 0x4edcbdcb
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_DCB_HEADER, 1)
{
NvU8 version;
NvU8 header_size;
NvU8 entry_count;
NvU8 entry_size;
NvU16 ccb_block_ptr;
NvU32 dcb_signature;
NvU16 gpio_table;
NvU16 input_devices;
NvU16 personal_cinema;
NvU16 spread_spectrum;
NvU16 i2c_devices;
NvU16 connectors;
NvU8 flags;
NvU16 hdtv;
NvU16 switched_outputs;
NvU32 display_patch;
NvU32 connector_patch;
} NVSWITCH_DCB_HEADER;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_GPIO_TABLE_VERSION_42 0x42
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_GPIO_TABLE, 1)
{
NvU8 version;
NvU8 header_size;
NvU8 entry_count;
NvU8 entry_size;
NvU16 ext_gpio_master;
} NVSWITCH_GPIO_TABLE;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_GPIO_ENTRY, 1)
{
NvU8 pin;
NvU8 function;
NvU8 output;
NvU8 input;
NvU8 misc;
} NVSWITCH_GPIO_ENTRY;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_GPIO_ENTRY_PIN_NUM 5:0
#define NVSWITCH_GPIO_ENTRY_PIN_IO_TYPE 6:6
#define NVSWITCH_GPIO_ENTRY_PIN_INIT_STATE 7:7
#define NVSWITCH_GPIO_ENTRY_FUNCTION 7:0
#define NVSWITCH_GPIO_ENTRY_FUNCTION_THERMAL_EVENT 17
#define NVSWITCH_GPIO_ENTRY_FUNCTION_OVERTEMP 35
#define NVSWITCH_GPIO_ENTRY_FUNCTION_THERMAL_ALERT 52
#define NVSWITCH_GPIO_ENTRY_FUNCTION_THERMAL_CRITICAL 53
#define NVSWITCH_GPIO_ENTRY_FUNCTION_POWER_ALERT 76
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID0 209
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID1 210
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID2 211
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID3 212
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID4 213
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID5 214
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID6 215
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID7 216
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID8 217
#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID9 218
#define NVSWITCH_GPIO_ENTRY_FUNCTION_SKIP_ENTRY 255
#define NVSWITCH_GPIO_ENTRY_OUTPUT 7:0
#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT 4:0
#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT_NONE 0
#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT_THERMAL_ALERT 22
#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT_POWER_ALERT 23
#define NVSWITCH_GPIO_ENTRY_INPUT_GSYNC 5:5
#define NVSWITCH_GPIO_ENTRY_INPUT_OPEN_DRAIN 6:6
#define NVSWITCH_GPIO_ENTRY_INPUT_PWM 7:7
//#define NVSWITCH_GPIO_ENTRY_INPUT_3V3 ?:?
#define NVSWITCH_GPIO_ENTRY_MISC_LOCK 3:0
#define NVSWITCH_GPIO_ENTRY_MISC_IO 7:4
#define NVSWITCH_GPIO_ENTRY_MISC_IO_UNUSED 0x0
#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_OUT 0x1
#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_OUT_TRISTATE 0x3
#define NVSWITCH_GPIO_ENTRY_MISC_IO_OUT 0x4
#define NVSWITCH_GPIO_ENTRY_MISC_IO_IN_STEREO_TRISTATE 0x6
#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_OUT_TRISTATE_LO 0x9
#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_IN 0xB
#define NVSWITCH_GPIO_ENTRY_MISC_IO_OUT_TRISTATE 0xC
#define NVSWITCH_GPIO_ENTRY_MISC_IO_IN 0xE
#define NVSWITCH_I2C_VERSION 0x40
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_I2C_TABLE, 1)
{
NvU8 version;
NvU8 header_size;
NvU8 entry_count;
NvU8 entry_size;
NvU8 flags;
} NVSWITCH_I2C_TABLE;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_I2C_ENTRY, 1)
{
NvU32 device;
} NVSWITCH_I2C_ENTRY;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_I2C_ENTRY_TYPE 7:0
#define NVSWITCH_I2C_ENTRY_ADDRESS 15:8
#define NVSWITCH_I2C_ENTRY_RESERVED1 19:16
#define NVSWITCH_I2C_ENTRY_PORT_1 20:20
#define NVSWITCH_I2C_ENTRY_WR_ACCESS 23:21
#define NVSWITCH_I2C_ENTRY_RD_ACCESS 26:24
#define NVSWITCH_I2C_ENTRY_PORT_2 27:27
#define NVSWITCH_I2C_ENTRY_RESERVED2 31:28
#define NVSWITCH_CCB_VERSION 0x41
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_CCB_TABLE, 1)
{
NvU8 version;
NvU8 header_size;
NvU8 entry_count;
NvU8 entry_size;
NvU8 comm_port[4];
} NVSWITCH_CCB_TABLE;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_CCB_ENTRY, 1)
{
NvU32 device;
} NVSWITCH_CCB_ENTRY;
NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX
#define NVSWITCH_CCB_DEVICE_I2C_PORT 4:0
#define NVSWITCH_CCB_DEVICE_DPAUX 9:5
#define NVSWITCH_CCB_DEVICE_VOLTAGE 10:10
#define NVSWITCH_CCB_DEVICE_RESERVED 27:11
#define NVSWITCH_CCB_DEVICE_I2C_SPEED 31:28
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_DEFAULT 0x0
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_100KHZ 0x1
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_200KHZ 0x2
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_400KHZ 0x3
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_800KHZ 0x4
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_1600KHZ 0x5
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_3400KHZ 0x6
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_60KHZ 0x7
#define NVSWITCH_CCB_DEVICE_I2C_SPEED_300KHZ 0x8
//
// Firmware data
//
#define NVSWITCH_PRODUCT_NAME_MAX_LEN 64
typedef struct
{
NvBool valid;
NvU32 ref_min_mhz;
NvU32 ref_max_mhz;
NvU32 vco_min_mhz;
NvU32 vco_max_mhz;
NvU32 update_min_mhz;
NvU32 update_max_mhz;
NvU32 m_min;
NvU32 m_max;
NvU32 n_min;
NvU32 n_max;
NvU32 pl_min;
NvU32 pl_max;
} NVSWITCH_PLL_LIMITS;
typedef struct
{
NvBool valid;
NvU32 i2c_speed;
NvBool i2c_33v;
} NVSWITCH_I2C_PORT;
#define NVSWITCH_MAX_I2C_DEVICES 16
typedef struct
{
NvU32 pin;
NvU32 function;
NvU32 hw_select;
NvU32 misc;
} NVSWITCH_GPIO_INFO;
#define NVSWITCH_MAX_GPIO_PINS 25
typedef struct
{
NvU32 firmware_size;
// ROM Header
NvU16 pci_vendor_id;
NvU16 pci_device_id;
NvU16 pci_system_vendor_id;
NvU16 pci_system_device_id;
// Firmware data
struct
{
NvBool bridge_fw_found;
NvU32 firmware_version;
NvU8 oem_version;
char BIOS_MOD_date[8];
NvBool fw_release_build;
char product_name[NVSWITCH_PRODUCT_NAME_MAX_LEN+1];
NvU16 instance_id;
} bridge;
// Clocks
struct
{
NvBool clocks_found;
NVSWITCH_PLL_LIMITS sys_pll;
} clocks;
// NVLink init
struct
{
NvBool link_config_found;
NvU64 link_enable_mask; // 1 = enabled
NvU64 link_ac_coupled_mask; // 0 = DC, 1 = AC
} nvlink;
// DCB
struct
{
NvBool dcb_found;
NVSWITCH_I2C_PORT i2c[NVSWITCH_MAX_I2C_PORTS];
NvU32 i2c_device_count;
NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE i2c_device[NVSWITCH_MAX_I2C_DEVICES];
NvU32 gpio_pin_count;
NVSWITCH_GPIO_INFO gpio_pin[NVSWITCH_MAX_GPIO_PINS];
} dcb;
} NVSWITCH_FIRMWARE;
#define NVSWITCH_FIRMWARE_BRIDGE_INSTANCE_ID_UNKNOWN 0xFFFF
#define NVSWITCH_FIRMWARE_BRIDGE_INSTANCE_ID_NORMAL 0xFFFE
void
nvswitch_read_rom_tables
(
nvswitch_device *device,
NVSWITCH_FIRMWARE *firmware
);
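/*
* Illustrative usage sketch (not part of the original header): consuming the
* parsed ROM data. Zero-initializing the structure and checking the per-section
* "found" flags afterwards mirrors how the fields above are laid out; error
* handling is omitted.
*/
static void
_example_use_rom_tables
(
    nvswitch_device *device
)
{
    NVSWITCH_FIRMWARE firmware;

    nvswitch_os_memset(&firmware, 0, sizeof(firmware));
    nvswitch_read_rom_tables(device, &firmware);

    if (firmware.bridge.bridge_fw_found)
    {
        NVSWITCH_PRINT(device, INFO, "Bridge firmware 0x%08x (%s)\n",
                       firmware.bridge.firmware_version,
                       firmware.bridge.product_name);
    }
}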
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
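/*
* Illustrative usage sketch (not part of the original header): the two macros
* above are intended to be used together in a single format string, e.g.:
*
*   NVSWITCH_PRINT(device, INFO, "flags = " BYTE_TO_BINARY_PATTERN "\n",
*                  BYTE_TO_BINARY(flags));
*/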
#if !defined(BIOSTYPES_H_FILE)
#define bios_U008 NvU32
#define bios_U016 NvU32
#define bios_U032 NvU32
#define bios_S008 NvS32
#define bios_S016 NvS32
#define bios_S032 NvS32
#endif // !defined(BIOSTYPES_H_FILE)
/**************************************************************************************************************
* Description:
* Definitions of BIOS BIT structures as defined starting in Core 5
*
**************************************************************************************************************/
#if !defined(_BIT_H_)
#define BIT_HEADER_ID 0xB8FF
#define BIT_HEADER_SIGNATURE 0x00544942 // "BIT\0"
#define BIT_HEADER_SIZE_OFFSET 8
#define BIT_HEADER_LATEST_KNOWN_VERSION 0x100
#endif // !defined(_BIT_H_)
#define PCI_ROM_HEADER_SIZE 0x18
#define PCI_DATA_STRUCT_SIZE 0x1c
#define PCI_ROM_HEADER_PCI_DATA_SIZE (PCI_ROM_HEADER_SIZE + PCI_DATA_STRUCT_SIZE) // ROM Header + PCI Data Structure size
#define PCI_EXP_ROM_SIGNATURE 0xaa55
#define PCI_DATA_STRUCT_SIGNATURE 0x52494350 // "PCIR" in dword format
#define NVLINK_CONFIG_DATA_HEADER_VER_20 0x2
#define NVLINK_CONFIG_DATA_HEADER_20_SIZE 8
#define NVLINK_CONFIG_DATA_HEADER_20_FMT "6b1w"
typedef struct _PCI_DATA_STRUCT
{
bios_U032 sig; // 00h: Signature, the string "PCIR" or NVIDIA's alternate "NPDS"
bios_U016 vendorID; // 04h: Vendor Identification
bios_U016 deviceID; // 06h: Device Identification
bios_U016 deviceListPtr; // 08h: Device List Pointer
bios_U016 pciDataStructLen; // 0Ah: PCI Data Structure Length
bios_U008 pciDataStructRev; // 0Ch: PCI Data Structure Revision
bios_U008 classCode[3]; // 0Dh: Class Code
bios_U016 imageLen; // 10h: Image Length (units of 512 bytes)
bios_U016 vendorRomRev; // 12h: Revision Level of the Vendor's ROM
bios_U008 codeType; // 14h: holds NBSI_OBJ_CODE_TYPE (0x70) and others
bios_U008 lastImage; // 15h: Last Image Indicator: bit7=1 is lastImage
bios_U016 maxRunTimeImageLen; // 16h: Maximum Run-time Image Length (units of 512 bytes)
bios_U016 configUtilityCodePtr; // 18h: Pointer to Configurations Utility Code Header
bios_U016 CMDTFCLPEntryPointPtr; // 1Ah: Pointer to DMTF CLP Entry Point
} PCI_DATA_STRUCT, *PPCI_DATA_STRUCT;
#define PCI_DATA_STRUCT_FMT "1d4w4b2w2b3w"
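/*
* Illustrative sketch (not part of the original header): validating a PCI
* expansion ROM image with the signatures above. Note that the bios_* structs
* in this header are unpacked (decoded via the *_FMT strings), so the raw
* bytes are read directly here rather than overlaying PCI_DATA_STRUCT on the
* image. Bounds checking against the image size is omitted.
*/
static NvBool
_example_rom_image_is_valid
(
    const NvU8 *pImage
)
{
    NvU16 rom_sig = (NvU16)(pImage[0] | (pImage[1] << 8));
    NvU32 pci_data_offset;
    NvU32 pcir_sig;

    if (rom_sig != PCI_EXP_ROM_SIGNATURE)
    {
        return NV_FALSE;
    }

    // The 16-bit pointer to the PCI Data Structure sits at offset 0x18.
    pci_data_offset = pImage[PCI_ROM_HEADER_SIZE] |
                      (pImage[PCI_ROM_HEADER_SIZE + 1] << 8);

    pcir_sig = (NvU32)pImage[pci_data_offset]            |
               ((NvU32)pImage[pci_data_offset + 1] << 8)  |
               ((NvU32)pImage[pci_data_offset + 2] << 16) |
               ((NvU32)pImage[pci_data_offset + 3] << 24);

    return (pcir_sig == PCI_DATA_STRUCT_SIGNATURE) ? NV_TRUE : NV_FALSE;
}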
// BIT_TOKEN_NVINIT_PTRS 0x49 // 'I' Initialization Table Pointers
struct BIT_DATA_NVINIT_PTRS_V1
{
bios_U016 InitScriptTablePtr; // Init script table pointer
bios_U016 MacroIndexTablePtr; // Macro index table pointer
bios_U016 MacroTablePtr; // Macro table pointer
bios_U016 ConditionTablePtr; // Condition table pointer
bios_U016 IoConditionTablePtr; // IO Condition table pointer
bios_U016 IoFlagConditionTablePtr; // IO Flag Condition table pointer
bios_U016 InitFunctionTablePtr; // Init Function table pointer
bios_U016 VBIOSPrivateTablePtr; // VBIOS private table pointer
bios_U016 DataArraysTablePtr; // Data arrays table pointer
bios_U016 PCIESettingsScriptPtr; // PCI-E settings script pointer
bios_U016 DevinitTablesPtr; // Pointer to tables required by Devinit opcodes
bios_U016 DevinitTablesSize; // Size of tables required by Devinit opcodes
bios_U016 BootScriptsPtr; // Pointer to Devinit Boot Scripts
bios_U016 BootScriptsSize; // Size of Devinit Boot Scripts
bios_U016 NvlinkConfigDataPtr; // Pointer to NVLink Config Data
};
#define BIT_DATA_NVINIT_PTRS_V1_30_FMT "15w"
typedef struct BIT_DATA_NVINIT_PTRS_V1 BIT_DATA_NVINIT_PTRS_V1;
#define BIT_TOKEN_BIOSDATA 0x42 // 'B' BIOS Data
#define BIT_TOKEN_NVINIT_PTRS 0x49 // 'I'
struct BIT_HEADER_V1_00
{
bios_U016 Id; // BMP=0x7FFF/BIT=0xB8FF
bios_U032 Signature; // 0x00544942 - BIT Data Structure Signature
bios_U016 BCD_Version; // BIT Version - 0x0100 for 1.00
bios_U008 HeaderSize; // This version is 12 bytes long
bios_U008 TokenSize; // This version has 6 byte long Tokens
bios_U008 TokenEntries; // Number of Entries
bios_U008 HeaderChksum; // 0 Checksum of the header
};
#define BIT_HEADER_V1_00_FMT "1w1d1w4b"
typedef struct BIT_HEADER_V1_00 BIT_HEADER_V1_00;
struct BIT_TOKEN_V1_00
{
bios_U008 TokenId;
bios_U008 DataVersion;
bios_U016 DataSize;
bios_U016 DataPtr;
};
#define BIT_TOKEN_V1_00_FMT "2b2w"
typedef struct BIT_TOKEN_V1_00 BIT_TOKEN_V1_00;
// BIT_TOKEN_BIOSDATA 0x42 // 'B' BIOS Data
struct BIT_DATA_BIOSDATA_V1
{
bios_U032 Version; // BIOS Binary Version Ex. 5.40.00.01.12 = 0x05400001
bios_U008 OemVersion; // OEM Version Number Ex. 5.40.00.01.12 = 0x12
// OEM can override the two fields above
bios_U008 Checksum; // Filled by MakeVGA
bios_U016 Int15CallbacksPost; //
bios_U016 Int15CallbacksSystem; //
bios_U016 BoardId; //
bios_U016 FrameCount; // Frame count for signon message delay
bios_U008 BiosmodDate[8]; // '00/00/04' Date BIOSMod was last run
};
#define BIT_DATA_BIOSDATA_V1_FMT "1d2b4w8b"
typedef struct BIT_DATA_BIOSDATA_V1 BIT_DATA_BIOSDATA_V1;
struct BIT_DATA_BIOSDATA_V2
{
bios_U032 Version; // BIOS Binary Version Ex. 5.40.00.01.12 = 0x05400001
bios_U008 OemVersion; // OEM Version Number Ex. 5.40.00.01.12 = 0x12
// OEM can override the two fields above
bios_U008 Checksum; // Filled by MakeVGA
bios_U016 Int15CallbacksPost; //
bios_U016 Int15CallbacksSystem; //
bios_U016 FrameCount; // Frame count for signon message delay
bios_U032 Reserved1;
bios_U032 Reserved2;
bios_U008 MaxHeadsAtPost;
bios_U008 MemorySizeReport;
bios_U008 HorizontalScaleFactor;
bios_U008 VerticalScaleFactor;
bios_U016 DataTablePtr;
bios_U016 RomPackPtr;
bios_U016 AppliedRomPacksPtr;
bios_U008 AppliedRomPackMax;
bios_U008 AppliedRomPackCount;
bios_U008 ModuleMapExternal;
bios_U032 CompressionInfoPtr;
};
#define BIT_DATA_BIOSDATA_V2_FMT "1d2b3w2d4b3w3b1d"
typedef struct BIT_DATA_BIOSDATA_V2 BIT_DATA_BIOSDATA_V2;
#ifndef PCI_VENDOR_ID_NVIDIA
#define PCI_VENDOR_ID_NVIDIA 0x10DE
#endif
typedef struct _nvlink_Config_Data_Header_20
{
bios_U008 Version; // NVLink Config Data Structure version
bios_U008 HeaderSize; // Size of header
bios_U008 BaseEntrySize;
bios_U008 BaseEntryCount;
bios_U008 LinkEntrySize;
bios_U008 LinkEntryCount;
bios_U016 Reserved; // Reserved
} NVLINK_CONFIG_DATA_HEADER_20, *PNVLINK_CONFIG_DATA_HEADER_20;
#define NV_NVLINK_VBIOS_PARAM0_LINK 0:0
#define NV_NVLINK_VBIOS_PARAM0_LINK_ENABLE 0x0
#define NV_NVLINK_VBIOS_PARAM0_LINK_DISABLE 0x1
#define NV_NVLINK_VBIOS_PARAM0_RESERVED1 1:1
#define NV_NVLINK_VBIOS_PARAM0_ACDC_MODE 2:2
#define NV_NVLINK_VBIOS_PARAM0_ACDC_MODE_DC 0x0
#define NV_NVLINK_VBIOS_PARAM0_ACDC_MODE_AC 0x1
#define NV_NVLINK_VBIOS_PARAM0_RECEIVER_DETECT 3:3
#define NV_NVLINK_VBIOS_PARAM0_RECEIVER_DETECT_DISABLE 0x0
#define NV_NVLINK_VBIOS_PARAM0_RECEIVER_DETECT_ENABLE 0x1
#define NV_NVLINK_VBIOS_PARAM0_RESTORE_PHY_TRAINING 4:4
#define NV_NVLINK_VBIOS_PARAM0_RESTORE_PHY_TRAINING_DISABLE 0x0
#define NV_NVLINK_VBIOS_PARAM0_RESTORE_PHY_TRAINING_ENABLE 0x1
#define NV_NVLINK_VBIOS_PARAM0_SLM 5:5
#define NV_NVLINK_VBIOS_PARAM0_SLM_DISABLE 0x0
#define NV_NVLINK_VBIOS_PARAM0_SLM_ENABLE 0x1
#define NV_NVLINK_VBIOS_PARAM0_L2 6:6
#define NV_NVLINK_VBIOS_PARAM0_L2_DISABLE 0x0
#define NV_NVLINK_VBIOS_PARAM0_L2_ENABLE 0x1
#define NV_NVLINK_VBIOS_PARAM0_RESERVED2 7:7
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE 7:0
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_50_00000 0x00
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_16_00000 0x01
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_20_00000 0x02
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_25_00000 0x03
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_25_78125 0x04
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_32_00000 0x05
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_40_00000 0x06
#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_53_12500 0x07
#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE 7:0
#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE_NRZ 0x00
#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE_NRZ_128B130 0x01
#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE_NRZ_PAM4 0x03
#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE 1:0
#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_COMMON 0x0
#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_RSVD 0x1
#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_NON_COMMON_NO_SS 0x2
#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_NON_COMMON_SS 0x3
#define NV_NVLINK_VBIOS_PARAM3_RESERVED1 3:2
#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE 5:4
#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE_OFF 0x0
#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE_ECC96 0x1
#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE_ECC88 0x2
#define NV_NVLINK_VBIOS_PARAM3_RESERVED2 7:6
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM 7:0
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_RSVD 0x00
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A0_SINGLE_PRESENT 0x01
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A1_PRESENT_ARRAY 0x02
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A2_FINE_GRAINED_EXHAUSTIVE 0x04
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A3_RSVD 0x08
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A4_FOM_CENTRIOD 0x10
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A5_RSVD 0x20
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A6_RSVD 0x40
#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A7_RSVD 0x80
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM 4:0
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B0_NO_ADJUSTMENT 0x1
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B1_FIXED_ADJUSTMENT 0x2
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B2_RSVD 0x4
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B3_RSVD 0x8
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT 7:5
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT_FOM_A 0x1
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT_FOM_B 0x2
#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT_FOM_C 0x4
#define NV_NVLINK_VBIOS_PARAM6_TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA 3:0
#define NV_NVLINK_VBIOS_PARAM6_TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT 7:4
#define NVLINK_CONFIG_DATA_BASEENTRY_FMT "1b"
#define NVLINK_CONFIG_DATA_LINKENTRY_FMT "7b"
// Version 2.0 Link Entry and Base Entry
typedef struct _nvlink_config_data_baseentry_20
{
NvU8 positionId;
} NVLINK_CONFIG_DATA_BASEENTRY;
typedef struct _nvlink_config_data_linkentry_20
{
// VBIOS configuration Data
NvU8 nvLinkparam0;
NvU8 nvLinkparam1;
NvU8 nvLinkparam2;
NvU8 nvLinkparam3;
NvU8 nvLinkparam4;
NvU8 nvLinkparam5;
NvU8 nvLinkparam6;
} NVLINK_CONFIG_DATA_LINKENTRY;
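/*
* Illustrative sketch (not part of the original header): decoding a per-link
* VBIOS entry against the NV_NVLINK_VBIOS_PARAM* layouts above. Only a few
* fields are shown; the remaining parameters follow the same pattern.
*/
static void
_example_decode_link_entry
(
    const NVLINK_CONFIG_DATA_LINKENTRY *pLink,
    NvBool *pbDisabled,
    NvBool *pbAcCoupled,
    NvU8   *pLineRate
)
{
    // PARAM0 bit 0: link enable/disable, bit 2: DC vs. AC coupling
    *pbDisabled  = (((pLink->nvLinkparam0 >> 0) & 0x1) ==
                    NV_NVLINK_VBIOS_PARAM0_LINK_DISABLE);
    *pbAcCoupled = (((pLink->nvLinkparam0 >> 2) & 0x1) ==
                    NV_NVLINK_VBIOS_PARAM0_ACDC_MODE_AC);

    // PARAM1 bits 7:0: line rate encoding (e.g. _LINE_RATE_25_78125 == 0x04)
    *pLineRate = pLink->nvLinkparam1;
}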
// Union of different VBIOS configuration table formats
typedef union __nvlink_Config_Data_Header
{
NVLINK_CONFIG_DATA_HEADER_20 ver_20;
} NVLINK_CONFIG_DATA_HEADER, *PNVLINK_CONFIG_DATA_HEADER;
typedef struct _nvlink_vbios_config_data_baseentry_20
{
bios_U008 positionId;
} NVLINK_VBIOS_CONFIG_DATA_BASEENTRY;
typedef struct _nvlink_vbios_config_data_linkentry_20
{
// VBIOS configuration Data
bios_U008 nvLinkparam0;
bios_U008 nvLinkparam1;
bios_U008 nvLinkparam2;
bios_U008 nvLinkparam3;
bios_U008 nvLinkparam4;
bios_U008 nvLinkparam5;
bios_U008 nvLinkparam6;
} NVLINK_VBIOS_CONFIG_DATA_LINKENTRY, *PNVLINK_VBIOS_CONFIG_DATA_LINKENTRY;
//
// NVSwitch driver structures
//
#define NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY 12
typedef struct
{
NVLINK_CONFIG_DATA_BASEENTRY link_vbios_base_entry[NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY];
NVLINK_CONFIG_DATA_LINKENTRY link_vbios_entry[NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY][NVSWITCH_MAX_LINK_COUNT];
NvU32 identified_Link_entries[NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY];
NvU32 link_base_entry_assigned;
NvU64 vbios_disabled_link_mask;
NvU32 bit_address;
NvU32 pci_image_address;
NvU32 nvlink_config_table_address;
} NVSWITCH_BIOS_NVLINK_CONFIG;
#define NVSWITCH_DCB_PTR_OFFSET 0x36
typedef struct _nvswitch_vbios_dcb_header_41
{
bios_U008 version;
bios_U008 header_size;
bios_U008 entry_count;
bios_U008 entry_size;
bios_U016 ccb_block_ptr;
bios_U032 dcb_signature;
bios_U016 gpio_table;
bios_U016 input_devices;
bios_U016 personal_cinema;
bios_U016 spread_spectrum;
bios_U016 i2c_devices;
bios_U016 connectors;
bios_U008 flags;
bios_U016 hdtv;
bios_U016 switched_outputs;
bios_U032 display_patch;
bios_U032 connector_patch;
} NVSWITCH_VBIOS_DCB_HEADER;
#define NVSWITCH_VBIOS_DCB_HEADER_FMT "4b1w1d6w1b2w2d"
typedef struct _nvswitch_vbios_ccb_table_41
{
bios_U008 version;
bios_U008 header_size;
bios_U008 entry_count;
bios_U008 entry_size;
bios_U008 comm_port[4];
} NVSWITCH_VBIOS_CCB_TABLE;
#define NVSWITCH_VBIOS_CCB_TABLE_FMT "8b"
typedef struct _nvswitch_vbios_i2c_table_40
{
bios_U008 version;
bios_U008 header_size;
bios_U008 entry_count;
bios_U008 entry_size;
bios_U008 flags;
} NVSWITCH_VBIOS_I2C_TABLE;
#define NVSWITCH_I2C_TABLE_FMT "5b"
typedef struct _nvswitch_vbios_i2c_entry
{
bios_U032 device;
} NVSWITCH_VBIOS_I2C_ENTRY;
#define NVSWITCH_I2C_ENTRY_FMT "1d"
#endif //_ROM_NVSWITCH_H_

View File

@@ -0,0 +1,60 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SMBPBI_NVSWITCH_H_
#define _SMBPBI_NVSWITCH_H_
#include "soe/soeifsmbpbi.h"
#include "smbpbi_shared_nvswitch.h"
#include "oob/smbpbi_priv.h"
typedef struct
{
NvBool isValid;
NvU64 attemptedTrainingMask0;
NvU64 trainingErrorMask0;
} NVSWITCH_LINK_TRAINING_ERROR_INFO;
typedef struct
{
NvBool isValid;
NvU64 mask0;
} NVSWITCH_LINK_RUNTIME_ERROR_INFO;
struct smbpbi
{
SOE_SMBPBI_SHARED_SURFACE *sharedSurface;
NvU64 dmaHandle;
};
NvlStatus nvswitch_smbpbi_init(nvswitch_device *);
NvlStatus nvswitch_smbpbi_post_init(nvswitch_device *);
NvlStatus nvswitch_smbpbi_set_link_error_info(nvswitch_device *,
NVSWITCH_LINK_TRAINING_ERROR_INFO *pLinkTrainingErrorInfo,
NVSWITCH_LINK_RUNTIME_ERROR_INFO *pLinkRuntimeError);
void nvswitch_smbpbi_unload(nvswitch_device *);
void nvswitch_smbpbi_destroy(nvswitch_device *);
NvlStatus nvswitch_smbpbi_refresh_ecc_counts(nvswitch_device *);
void nvswitch_smbpbi_log_message(nvswitch_device *device, NvU32 num, NvU32 msglen, NvU8 *osErrorString);
#endif //_SMBPBI_NVSWITCH_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,120 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _HALDEFS_SOE_NVSWITCH_H_
#define _HALDEFS_SOE_NVSWITCH_H_
#include "nvstatus.h"
#include "flcnifcmn.h"
#include "flcn/haldefs_flcnable_nvswitch.h"
struct SOE;
typedef struct {
// needs to be the first thing in this struct so that a soe_hal* can be
// re-interpreted as a flcnable_hal* and vice-versa.
flcnable_hal base;
//add any hal functions specific to SOE here
NV_STATUS (*processMessages)(
struct nvswitch_device *device,
struct SOE *pSoe);
NV_STATUS (*waitForInitAck)(
struct nvswitch_device *device,
struct SOE *pSoe);
NvU32 (*service)(
struct nvswitch_device *device,
struct SOE *pSoe);
void (*serviceHalt)(
struct nvswitch_device *device,
struct SOE *pSoe);
void (*ememTransfer)(
struct nvswitch_device *device,
struct SOE *pSoe,
NvU32 dmemAddr,
NvU8 *pBuf,
NvU32 sizeBytes,
NvU8 port,
NvBool bCopyFrom);
NvU32 (*getEmemSize)(
struct nvswitch_device *device,
struct SOE *pSoe);
NvU32 (*getEmemStartOffset)(
struct nvswitch_device *device,
struct SOE *pSoe);
NV_STATUS (*ememPortToRegAddr)(
struct nvswitch_device *device,
struct SOE *pSoe,
NvU32 port,
NvU32 *pEmemCAddr,
NvU32 *pEmemDAddr);
void (*serviceExterr)(
struct nvswitch_device *device,
struct SOE *pSoe);
NV_STATUS (*getExtErrRegAddrs)(
struct nvswitch_device *device,
struct SOE *pSoe,
NvU32 *pExtErrAddr,
NvU32 *pExtErrStat);
NvU32 (*ememPortSizeGet)(
struct nvswitch_device *device,
struct SOE *pSoe);
NvBool (*isCpuHalted)(
struct nvswitch_device *device,
struct SOE *pSoe);
NvlStatus (*testDma)(
struct nvswitch_device *device);
NvlStatus (*setPexEOM)(
struct nvswitch_device *device,
NvU8 mode,
NvU8 nblks,
NvU8 nerrs,
NvU8 berEyeSel);
NvlStatus (*getPexEomStatus)(
struct nvswitch_device *device,
NvU8 mode,
NvU8 nblks,
NvU8 nerrs,
NvU8 berEyeSel,
NvU32 laneMask,
NvU16 *pEomStatus);
NvlStatus (*getUphyDlnCfgSpace)(
struct nvswitch_device *device,
NvU32 regAddress,
NvU32 laneSelectMask,
NvU16 *pRegValue);
NvlStatus (*forceThermalSlowdown)(
struct nvswitch_device *device,
NvBool slowdown,
NvU32 periodUs);
NvlStatus (*setPcieLinkSpeed)(
struct nvswitch_device *device,
NvU32 linkSpeed);
} soe_hal;
// HAL functions
void soeSetupHal_LR10(struct SOE *pSoe);
#endif //_HALDEFS_SOE_NVSWITCH_H_

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SOE_NVSWITCH_H_
#define _SOE_NVSWITCH_H_
#include "nvlink_errors.h"
#include "nvtypes.h"
#include "nvstatus.h"
typedef struct SOE SOE, *PSOE;
struct FLCNABLE;
struct nvswitch_device;
SOE *soeAllocNew(void);
NvlStatus soeInit(struct nvswitch_device *device, PSOE pSoe, NvU32 pci_device_id);
void soeDestroy(struct nvswitch_device *device, PSOE pSoe);
//HAL functions
NV_STATUS soeProcessMessages (struct nvswitch_device *device, PSOE pSoe);
NV_STATUS soeWaitForInitAck (struct nvswitch_device *device, PSOE pSoe);
NvU32 soeService_HAL (struct nvswitch_device *device, PSOE pSoe);
void soeServiceHalt_HAL (struct nvswitch_device *device, PSOE pSoe);
void soeEmemTransfer_HAL (struct nvswitch_device *device, PSOE pSoe, NvU32 dmemAddr, NvU8 *pBuf, NvU32 sizeBytes, NvU8 port, NvBool bCopyFrom);
NvU32 soeGetEmemSize_HAL (struct nvswitch_device *device, PSOE pSoe);
NvU32 soeGetEmemStartOffset_HAL (struct nvswitch_device *device, PSOE pSoe);
NV_STATUS soeEmemPortToRegAddr_HAL (struct nvswitch_device *device, PSOE pSoe, NvU32 port, NvU32 *pEmemCAddr, NvU32 *pEmemDAddr);
void soeServiceExterr_HAL (struct nvswitch_device *device, PSOE pSoe);
NV_STATUS soeGetExtErrRegAddrs_HAL (struct nvswitch_device *device, PSOE pSoe, NvU32 *pExtErrAddr, NvU32 *pExtErrStat);
NvU32 soeEmemPortSizeGet_HAL (struct nvswitch_device *device, PSOE pSoe);
NvBool soeIsCpuHalted_HAL (struct nvswitch_device *device, PSOE pSoe);
NvlStatus soeTestDma_HAL (struct nvswitch_device *device, PSOE pSoe);
NvlStatus soeSetPexEOM_HAL (struct nvswitch_device *device, NvU8 mode, NvU8 nblks, NvU8 nerrs, NvU8 berEyeSel);
NvlStatus soeGetPexEomStatus_HAL (struct nvswitch_device *device, NvU8 mode, NvU8 nblks, NvU8 nerrs, NvU8 berEyeSel, NvU32 laneMask, NvU16 *pEomStatus);
NvlStatus soeGetUphyDlnCfgSpace_HAL (struct nvswitch_device *device, NvU32 regAddress, NvU32 laneSelectMask, NvU16 *pRegValue);
NvlStatus soeForceThermalSlowdown_HAL (struct nvswitch_device *device, NvBool slowdown, NvU32 periodUs);
NvlStatus soeSetPcieLinkSpeed_HAL (struct nvswitch_device *device, NvU32 linkSpeed);
#endif //_SOE_NVSWITCH_H_

View File

@@ -0,0 +1,60 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SOE_PRIV_NVSWITCH_H_
#define _SOE_PRIV_NVSWITCH_H_
#include "soe/haldefs_soe_nvswitch.h"
#include "soe/soeifcmn.h"
#include "flcn/flcnqueue_nvswitch.h"
#include "flcn/flcnable_nvswitch.h"
#define SOE_DMEM_ALIGNMENT (4)
struct SOE
{
// needs to be the first thing in this struct so that a PSOE can be
// re-interpreted as a PFLCNABLE and vice-versa. While it is possible
// to remove this restriction by using (&pSoe->parent) instead of a cast,
// 1) the reverse (getting a PSOE from a PFLCNABLE) would be difficult and
// spooky 2) that would force anybody wanting to do the conversion
// to know the layout of an SOE object (not a big deal, but still annoying)
union {
// pointer to our function table - should always be the first thing in any object (including parent)
soe_hal *pHal;
FLCNABLE parent;
} base;
// Other member variables specific to SOE go here
/*!
* Structure tracking all information for active and inactive SOE sequences.
*/
FLCN_QMGR_SEQ_INFO seqInfo[RM_SOE_MAX_NUM_SEQUENCES];
/*! The event descriptor for the Thermal event handler */
NvU32 thermEvtDesc;
};
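/*
* Illustrative sketch (not part of the original header): because the base
* union is the first member, the same object can be viewed either as an SOE
* or as its parent FLCNABLE, and chip-specific entry points are reached
* through base.pHal. The wrapper name below is hypothetical; the driver's
* real wrappers are declared in soe_nvswitch.h.
*/
static NV_STATUS
_example_soe_process_messages
(
    struct nvswitch_device *device,
    struct SOE *pSoe
)
{
    // Legal because 'base' is the first member of struct SOE.
    FLCNABLE *pFlcnable = &pSoe->base.parent;
    (void)pFlcnable;

    // Dispatch through the per-chip HAL function table.
    return pSoe->base.pHal->processMessages(device, pSoe);
}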
#endif //_SOE_PRIV_NVSWITCH_H_

View File

@@ -0,0 +1,28 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SPI_NVSWITCH_H_
#define _SPI_NVSWITCH_H_
NvlStatus nvswitch_spi_init(nvswitch_device *);
#endif //_SPI_NVSWITCH_H_

View File

@@ -0,0 +1,78 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "regkey_nvswitch.h"
#include "nvVer.h"
#include "inforom/inforom_nvswitch.h"
void
nvswitch_bbx_collect_current_time
(
nvswitch_device *device,
void *pBbxState
)
{
return;
}
NvlStatus
nvswitch_inforom_bbx_add_sxid
(
nvswitch_device *device,
NvU32 exceptionType,
NvU32 data0,
NvU32 data1,
NvU32 data2
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
void
nvswitch_inforom_bbx_unload
(
nvswitch_device *device
)
{
return;
}
NvlStatus
nvswitch_inforom_bbx_load
(
nvswitch_device *device
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_bbx_get_sxid
(
nvswitch_device *device,
NVSWITCH_GET_SXIDS_PARAMS *params
)
{
return -NVL_ERR_NOT_SUPPORTED;
}

View File

@@ -0,0 +1,267 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "inforom/inforom_nvswitch.h"
NvlStatus
nvswitch_inforom_ecc_load
(
nvswitch_device *device
)
{
NvlStatus status;
NvU8 version = 0;
NvU8 subversion = 0;
INFOROM_ECC_STATE *pEccState = NULL;
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
status = nvswitch_inforom_get_object_version_info(device, "ECC", &version,
&subversion);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, WARN, "no ECC object found, rc:%d\n", status);
return NVL_SUCCESS;
}
if (!INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(subversion))
{
NVSWITCH_PRINT(device, WARN, "ECC v%u.%u not supported\n",
version, subversion);
return -NVL_ERR_NOT_SUPPORTED;
}
NVSWITCH_PRINT(device, INFO, "ECC v%u.%u found\n", version, subversion);
pEccState = nvswitch_os_malloc(sizeof(INFOROM_ECC_STATE));
if (pEccState == NULL)
{
return -NVL_NO_MEM;
}
nvswitch_os_memset(pEccState, 0, sizeof(INFOROM_ECC_STATE));
switch (version)
{
case 6:
pEccState->pFmt = INFOROM_ECC_OBJECT_V6_S0_FMT;
pEccState->pPackedObject = nvswitch_os_malloc(INFOROM_ECC_OBJECT_V6_S0_PACKED_SIZE);
if (pEccState->pPackedObject == NULL)
{
status = -NVL_NO_MEM;
goto nvswitch_inforom_ecc_version_fail;
}
pEccState->pEcc = nvswitch_os_malloc(sizeof(INFOROM_ECC_OBJECT));
if (pEccState->pEcc == NULL)
{
status = -NVL_NO_MEM;
nvswitch_os_free(pEccState->pPackedObject);
goto nvswitch_inforom_ecc_version_fail;
}
break;
default:
NVSWITCH_PRINT(device, WARN, "ECC v%u.%u not supported\n",
version, subversion);
status = -NVL_ERR_NOT_SUPPORTED;
goto nvswitch_inforom_ecc_version_fail;
break;
}
status = nvswitch_inforom_read_object(device, "ECC", pEccState->pFmt,
pEccState->pPackedObject,
pEccState->pEcc);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to read ECC object, rc:%d\n", status);
goto nvswitch_inforom_read_fail;
}
status = nvswitch_inforom_add_object(pInforom, &pEccState->pEcc->header);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to cache ECC object header, rc:%d\n",
status);
goto nvswitch_inforom_read_fail;
}
pInforom->pEccState = pEccState;
// Update shared surface counts, non-fatal if we encounter a failure
status = nvswitch_smbpbi_refresh_ecc_counts(device);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, WARN, "Failed to update ECC counts on SMBPBI "
"shared surface rc:%d\n", status);
}
return NVL_SUCCESS;
nvswitch_inforom_read_fail:
nvswitch_os_free(pEccState->pPackedObject);
nvswitch_os_free(pEccState->pEcc);
nvswitch_inforom_ecc_version_fail:
nvswitch_os_free(pEccState);
return status;
}
void
nvswitch_inforom_ecc_unload
(
nvswitch_device *device
)
{
INFOROM_ECC_STATE *pEccState;
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL)
{
return;
}
pEccState = pInforom->pEccState;
if (pEccState == NULL)
{
return;
}
//
// Flush the data to InfoROM before unloading the object
// Currently the driver doesn't support deferred processing and so the
// error logging path in the interrupt handler cannot defer the flush.
// This is a WAR until the driver adds support for deferred processing.
//
nvswitch_inforom_ecc_flush(device);
nvswitch_os_free(pEccState->pPackedObject);
nvswitch_os_free(pEccState->pEcc);
nvswitch_os_free(pEccState);
pInforom->pEccState = NULL;
}
NvlStatus
nvswitch_inforom_ecc_flush
(
struct nvswitch_device *device
)
{
NvlStatus status = NVL_SUCCESS;
struct inforom *pInforom = device->pInforom;
INFOROM_ECC_STATE *pEccState;
if (pInforom == NULL || pInforom->pEccState == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
pEccState = pInforom->pEccState;
if (pEccState->bDirty)
{
status = nvswitch_inforom_write_object(device, "ECC",
pEccState->pFmt, pEccState->pEcc,
pEccState->pPackedObject);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"Failed to flush ECC object to InfoROM, rc: %d\n", status);
}
else
{
pEccState->bDirty = NV_FALSE;
}
}
return status;
}
NvlStatus
nvswitch_inforom_ecc_log_err_event
(
struct nvswitch_device *device,
INFOROM_NVS_ECC_ERROR_EVENT *err_event
)
{
NvlStatus status;
INFOROM_ECC_STATE *pEccState;
NvU64 time_ns;
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL || pInforom->pEccState == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
if (err_event == NULL)
{
return -NVL_BAD_ARGS;
}
pEccState = pInforom->pEccState;
time_ns = nvswitch_os_get_platform_time();
err_event->timestamp = (NvU32)(time_ns / NVSWITCH_INTERVAL_1SEC_IN_NS);
// Scrub the incoming address field if it is invalid
if (!(err_event->bAddressValid))
{
err_event->address = 0;
}
// Invoke the chip dependent inforom logging routine
status = device->hal.nvswitch_inforom_ecc_log_error_event(device, pEccState->pEcc,
err_event);
if (status == NVL_SUCCESS)
{
//
// If the error was logged successfully, mark the object as dirty to be
// written on the subsequent flush.
//
pEccState->bDirty = NV_TRUE;
}
return status;
}
NvlStatus
nvswitch_inforom_ecc_get_errors
(
nvswitch_device *device,
NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params
)
{
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL || pInforom->pEccState == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
return device->hal.nvswitch_inforom_ecc_get_errors(device, params);
}

View File

@@ -0,0 +1,107 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "inforom/inforom_nvswitch.h"
NvlStatus
nvswitch_inforom_nvlink_flush
(
struct nvswitch_device *device
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_nvlink_load
(
nvswitch_device *device
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
void
nvswitch_inforom_nvlink_unload
(
nvswitch_device *device
)
{
return;
}
NvlStatus
nvswitch_inforom_nvlink_get_minion_data
(
nvswitch_device *device,
NvU8 linkId,
NvU32 *seedData
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_nvlink_set_minion_data
(
nvswitch_device *device,
NvU8 linkId,
NvU32 *seedData,
NvU32 size
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_nvlink_log_error_event
(
nvswitch_device *device,
void *error_event
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_nvlink_get_max_correctable_error_rate
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_nvlink_get_errors
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params
)
{
return -NVL_ERR_NOT_SUPPORTED;
}

View File

@@ -0,0 +1,209 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "inforom/inforom_nvswitch.h"
NvlStatus
nvswitch_inforom_oms_get_device_disable
(
nvswitch_device *device,
NvBool *pBDisabled
)
{
struct inforom *pInforom = device->pInforom;
INFOROM_OMS_STATE *pOmsState;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
pOmsState = pInforom->pOmsState;
if (pOmsState == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
*pBDisabled = device->hal.nvswitch_oms_get_device_disable(pOmsState);
return NVL_SUCCESS;
}
NvlStatus
nvswitch_inforom_oms_set_device_disable
(
nvswitch_device *device,
NvBool bForceDeviceDisable
)
{
struct inforom *pInforom = device->pInforom;
INFOROM_OMS_STATE *pOmsState;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
pOmsState = pInforom->pOmsState;
if (pOmsState == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
device->hal.nvswitch_oms_set_device_disable(pOmsState, bForceDeviceDisable);
return NVL_SUCCESS;
}
NvlStatus
nvswitch_inforom_oms_load
(
nvswitch_device *device
)
{
NvlStatus status;
NvU8 version = 0;
NvU8 subversion = 0;
INFOROM_OMS_STATE *pOmsState = NULL;
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
status = nvswitch_inforom_get_object_version_info(device, "OMS", &version,
&subversion);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, INFO, "no OMS object found, rc:%d\n", status);
return NVL_SUCCESS;
}
if (!INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(subversion))
{
NVSWITCH_PRINT(device, WARN, "OMS v%u.%u not supported\n",
version, subversion);
return -NVL_ERR_NOT_SUPPORTED;
}
NVSWITCH_PRINT(device, INFO, "OMS v%u.%u found\n", version, subversion);
pOmsState = nvswitch_os_malloc(sizeof(INFOROM_OMS_STATE));
if (pOmsState == NULL)
{
return -NVL_NO_MEM;
}
nvswitch_os_memset(pOmsState, 0, sizeof(INFOROM_OMS_STATE));
switch (version)
{
case 1:
pOmsState->pFmt = INFOROM_OMS_OBJECT_V1S_FMT;
pOmsState->pPackedObject = nvswitch_os_malloc(INFOROM_OMS_OBJECT_V1_PACKED_SIZE);
if (pOmsState->pPackedObject == NULL)
{
status = -NVL_NO_MEM;
goto nvswitch_inforom_oms_version_fail;
}
pOmsState->pOms = nvswitch_os_malloc(sizeof(INFOROM_OMS_OBJECT));
if (pOmsState->pOms == NULL)
{
status = -NVL_NO_MEM;
nvswitch_os_free(pOmsState->pPackedObject);
goto nvswitch_inforom_oms_version_fail;
}
break;
default:
NVSWITCH_PRINT(device, WARN, "OMS v%u.%u not supported\n",
version, subversion);
goto nvswitch_inforom_oms_version_fail;
break;
}
status = nvswitch_inforom_load_object(device, pInforom, "OMS",
pOmsState->pFmt,
pOmsState->pPackedObject,
pOmsState->pOms);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to load OMS object, rc: %d\n",
status);
goto nvswitch_inforom_oms_load_fail;
}
pInforom->pOmsState = pOmsState;
device->hal.nvswitch_initialize_oms_state(device, pOmsState);
return NVL_SUCCESS;
nvswitch_inforom_oms_load_fail:
nvswitch_os_free(pOmsState->pOms);
nvswitch_os_free(pOmsState->pPackedObject);
nvswitch_inforom_oms_version_fail:
nvswitch_os_free(pOmsState);
return status;
}
void
nvswitch_inforom_oms_unload
(
nvswitch_device *device
)
{
struct inforom *pInforom = device->pInforom;
INFOROM_OMS_STATE *pOmsState;
NvlStatus status;
if (pInforom == NULL)
{
return;
}
pOmsState = pInforom->pOmsState;
if (pOmsState == NULL)
{
return;
}
(void)device->hal.nvswitch_read_oob_blacklist_state(device);
status = device->hal.nvswitch_oms_inforom_flush(device);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"Flushing OMS failed during unload, rc:%d\n", status);
}
nvswitch_os_free(pOmsState->pPackedObject);
nvswitch_os_free(pOmsState->pOms);
nvswitch_os_free(pOmsState);
pInforom->pOmsState = NULL;
}

View File

@@ -0,0 +1,84 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "inforom/inforom_nvswitch.h"
NvlStatus
nvswitch_inforom_read_only_objects_load
(
nvswitch_device *device
)
{
NvlStatus status;
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
status = nvswitch_inforom_load_object(device, pInforom, "OBD",
INFOROM_OBD_OBJECT_V1_XX_FMT,
pInforom->OBD.packedObject,
&pInforom->OBD.object);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to load OBD object, rc:%d\n",
status);
}
else
{
pInforom->OBD.bValid = NV_TRUE;
}
status = nvswitch_inforom_load_object(device, pInforom, "OEM",
INFOROM_OEM_OBJECT_V1_00_FMT,
pInforom->OEM.packedObject,
&pInforom->OEM.object);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to load OEM object, rc:%d\n",
status);
}
else
{
pInforom->OEM.bValid = NV_TRUE;
}
status = nvswitch_inforom_load_object(device, pInforom, "IMG",
INFOROM_IMG_OBJECT_V1_00_FMT,
pInforom->IMG.packedObject,
&pInforom->IMG.object);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to load IMG object, rc:%d\n",
status);
}
else
{
pInforom->IMG.bValid = NV_TRUE;
}
return NVL_SUCCESS;
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,158 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "ipmi/fru_nvswitch.h"
#define ASCII_6BIT_TO_8BIT(b) ((b) + 0x20)
#define OFFSET_SCALE (8)
static NvU8
_nvswitch_calculate_checksum
(
NvU8 *data,
NvU32 size
)
{
NvU32 i;
NvU8 checksum = 0;
for (i = 0; i < size; ++i)
{
checksum += data[i];
}
return checksum;
}
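/*
 * Illustrative note (byte values assumed, not taken from a real EEPROM): the
 * FRU areas use a "zero checksum" -- the final byte is chosen so that the
 * byte sum of the whole area is 0 modulo 256. For example, if the preceding
 * header bytes sum to 0x02, the checksum byte would be 0xFE, and
 * _nvswitch_calculate_checksum() over the whole header then returns
 * (0x02 + 0xFE) & 0xFF == 0, which is how the callers below validate an area.
 */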
/*
* @brief Retrieves field bytes from src and stores them into dest.
*
* @return The size of the field including the type/length byte.
*/
static NvU8
_nvswitch_get_field_bytes
(
NvU8 *pFieldSrc,
NvU8 *pFieldDest
)
{
NvU32 i;
NvU8 type;
NvU8 length;
NvU8 byte;
if (*pFieldSrc == NVSWITCH_IPMI_FRU_SENTINEL)
{
return 0;
}
type = DRF_VAL(SWITCH_IPMI, _FRU_TYPE_LENGTH_BYTE, _TYPE, *pFieldSrc);
length = DRF_VAL(SWITCH_IPMI, _FRU_TYPE_LENGTH_BYTE, _LENGTH, *pFieldSrc);
pFieldSrc++;
for (i = 0; i < length; ++i)
{
switch (type)
{
case NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_6BIT:
byte = ASCII_6BIT_TO_8BIT(pFieldSrc[i]);
break;
case NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_8BIT:
byte = pFieldSrc[i];
break;
default:
byte = 0;
break;
}
pFieldDest[i] = byte;
}
return (length + 1);
}
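/*
 * Illustrative example (field layout assumed from the common IPMI FRU
 * convention of a type code in the upper bits and a length in the lower
 * bits): a type/length byte of 0xC5 would decode as type = 8-bit ASCII and
 * length = 5, so the next 5 bytes are copied verbatim into pFieldDest and the
 * function returns 6 (the field bytes plus the type/length byte itself),
 * which is how the caller advances pInfoSrc from one board-info field to the
 * next.
 */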
/*
* @brief Parse FRU board info from the given rom image.
*
* @return NVL_SUCCESS if board field is valid
*/
NvlStatus
nvswitch_read_partition_fru_board_info
(
nvswitch_device *device,
NVSWITCH_IPMI_FRU_BOARD_INFO *pBoardInfo,
NvU8 *pRomImage
)
{
NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER *pEepromHeader;
NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO *pEepromBoardInfo;
NvU8 *pInfoSrc;
if (pBoardInfo == NULL || pRomImage == NULL)
{
return -NVL_ERR_GENERIC;
}
pEepromHeader = (NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER *)pRomImage;
// Verify the common header zero checksum (byte sum must be 0)
if (_nvswitch_calculate_checksum((NvU8 *)pEepromHeader,
sizeof(NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER)) != 0)
{
NVSWITCH_PRINT(device, SETUP,
"%s: Common header checksum error.\n", __FUNCTION__);
return -NVL_ERR_GENERIC;
}
pEepromBoardInfo = (NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO *)(pRomImage +
(pEepromHeader->boardInfoOffset * OFFSET_SCALE));
if (_nvswitch_calculate_checksum((NvU8 *)pEepromBoardInfo,
pEepromBoardInfo->size * OFFSET_SCALE) != 0)
{
NVSWITCH_PRINT(device, SETUP,
"%s: Board info checksum error.\n", __FUNCTION__);
return -NVL_ERR_GENERIC;
}
if (pEepromBoardInfo->version != 0x1 || pEepromBoardInfo->languageCode != 0x0)
{
return -NVL_ERR_NOT_SUPPORTED;
}
nvswitch_os_memset(pBoardInfo, 0, sizeof(NVSWITCH_IPMI_FRU_BOARD_INFO));
pInfoSrc = (NvU8 *)&pEepromBoardInfo->boardInfo;
// LS byte first
pBoardInfo->mfgDateTime = pInfoSrc[0] | (pInfoSrc[1] << 8) | (pInfoSrc[2] << 16);
pInfoSrc += 3;
pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->mfg);
pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->productName);
pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->serialNum);
pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->partNum);
pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->fileId);
_nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->customMfgInfo);
return NVL_SUCCESS;
}

View File

@@ -0,0 +1,374 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "lr10/lr10.h"
#include "lr10/clock_lr10.h"
#include "lr10/soe_lr10.h"
#include "nvswitch/lr10/dev_soe_ip.h"
#include "nvswitch/lr10/dev_pri_ringstation_sys.h"
#include "nvswitch/lr10/dev_trim.h"
#include "nvswitch/lr10/dev_nvs.h"
#include "nvswitch/lr10/dev_nvlperf_ip.h"
#include "nvswitch/lr10/dev_npgperf_ip.h"
#include "nvswitch/lr10/dev_nvlctrl_ip.h"
#include "nvswitch/lr10/dev_nv_xp.h"
#include "nvswitch/lr10/dev_nv_xve.h"
#include "nvswitch/lr10/dev_nport_ip.h"
#include "nvswitch/lr10/dev_minion_ip.h"
#include "nvswitch/lr10/dev_timer.h"
#include "nvswitch/lr10/dev_pri_ringmaster.h"
#include "nvswitch/lr10/dev_pri_ringstation_prt.h"
//
// Initialize the software state of the switch PLL
//
NvlStatus
nvswitch_init_pll_config_lr10
(
nvswitch_device *device
)
{
NVSWITCH_PLL_LIMITS pll_limits;
NVSWITCH_PLL_INFO pll;
NvlStatus retval = NVL_SUCCESS;
//
// These parameters could come from a schmoo'ing API, a settings file, or a ROM.
// If no configuration ROM settings are present, use the values from the PLL
// documentation.
//
// Refer to the PLL35G_DYN_PRB_ESD_B2 cell VBIOS table in the PLL datasheet for
// restrictions on MDIV, NDIV, and PLDIV that satisfy the PLL's frequency limits.
//
// PLL35G_DYN_PRB_ESD_B1.doc
//
pll_limits.ref_min_mhz = 100;
pll_limits.ref_max_mhz = 100;
pll_limits.vco_min_mhz = 1750;
pll_limits.vco_max_mhz = 3800;
pll_limits.update_min_mhz = 13; // 13.5MHz
pll_limits.update_max_mhz = 38; // 38.4MHz
pll_limits.m_min = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV_MIN;
pll_limits.m_max = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV_MAX;
pll_limits.n_min = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV_MIN;
pll_limits.n_max = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV_MAX;
pll_limits.pl_min = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV_MIN;
pll_limits.pl_max = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV_MAX;
pll_limits.valid = NV_TRUE;
//
// Set well-known coefficients to achieve the target frequency.
// MDIV: must be > 1 to achieve an update rate < 38.4 MHz;
// 100 / 5 = 20 MHz update rate, therefore MDIV = 5.
// NDIV needs to take us all the way to 1640 MHz:
// 1640 / 20 = 82, but 100*82/5 is below the 1.75 GHz VCO minimum,
// therefore double NDIV to 164 and set PDIV to 2.
//
pll.src_freq_khz = 100000; // 100MHz
pll.M = 5;
pll.N = 164;
pll.PL = 1;
pll.dist_mode = NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE_2XCLK;
pll.refclk_div = 15;
retval = nvswitch_validate_pll_config(device, &pll, pll_limits);
if (retval != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, WARN,
"Selecting default PLL setting.\n");
// Select default, safe clock (1.64GHz)
pll.src_freq_khz = 100000; // 100MHz
pll.M = 5;
pll.N = 164;
pll.PL = 2;
pll.dist_mode =
NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE_1XCLK;
pll.refclk_div = NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DIV_INIT;
retval = nvswitch_validate_pll_config(device, &pll, pll_limits);
if (retval != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"Default PLL setting failed.\n");
return retval;
}
}
device->switch_pll = pll;
return NVL_SUCCESS;
}
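/*
 * Worked example of the coefficient selection above (a sketch of the
 * arithmetic only; the actual limits come from the table set up in this
 * function): with ref = 100 MHz, M = 5, and N = 164, the update rate is
 * ref / M = 20 MHz, inside the 13.5-38.4 MHz window, and the VCO runs at
 * ref * N / M = 3280 MHz, inside the 1750-3800 MHz range. Dividing that VCO
 * frequency by 2 (via PL and/or the 2XCLK distribution mode) yields the
 * 1640 MHz switch clock target mentioned in the comment above.
 */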
//
// Check that the PLLs are initialized. VBIOS is expected to configure PLLs
//
NvlStatus
nvswitch_init_pll_lr10
(
nvswitch_device *device
)
{
NvU32 pllRegVal;
//
// Clocks should only be initialized on silicon or a clocks netlist on emulation
// Unfortunately, we don't have a fully robust infrastructure for detecting the
// runtime environment as we do on GPU.
//
if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
{
NVSWITCH_PRINT(device, WARN,
"%s: Skipping setup of NVSwitch clocks\n",
__FUNCTION__);
return NVL_SUCCESS;
}
pllRegVal = NVSWITCH_REG_RD32(device, _PCLOCK, _NVSW_SWITCHPLL_CFG);
if (!FLD_TEST_DRF(_PCLOCK, _NVSW_SWITCHPLL_CFG, _PLL_LOCK, _TRUE, pllRegVal))
{
NVSWITCH_PRINT(device, ERROR,
"%s: _PLL_LOCK failed\n",
__FUNCTION__);
return -NVL_INITIALIZATION_TOTAL_FAILURE;
}
if (!FLD_TEST_DRF(_PCLOCK, _NVSW_SWITCHPLL_CFG, _PLL_FREQLOCK, _YES, pllRegVal))
{
NVSWITCH_PRINT(device, ERROR,
"%s: _PLL_FREQLOCK failed\n",
__FUNCTION__);
return -NVL_INITIALIZATION_TOTAL_FAILURE;
}
pllRegVal = NVSWITCH_REG_RD32(device, _PCLOCK, _NVSW_SWITCHCLK);
if (!FLD_TEST_DRF_NUM(_PCLOCK, _NVSW_SWITCHCLK, _RDY_SWITCHPLL, 1, pllRegVal))
{
NVSWITCH_PRINT(device, ERROR,
"%s: _RDY_SWITCHPLL failed\n",
__FUNCTION__);
return -NVL_INITIALIZATION_TOTAL_FAILURE;
}
pllRegVal = NVSWITCH_REG_RD32(device, _PCLOCK, _NVSW_SYSTEMCLK);
if (!FLD_TEST_DRF_NUM(_PCLOCK, _NVSW_SYSTEMCLK, _SYSTEMCLK_RDY_SWITCHPLL, 1, pllRegVal))
{
NVSWITCH_PRINT(device, ERROR,
"%s: _RDY_SWITCHPLL for SYSTEMCLK failed\n",
__FUNCTION__);
return -NVL_INITIALIZATION_TOTAL_FAILURE;
}
return NVL_SUCCESS;
}
//
// Timer functions
//
void
nvswitch_init_hw_counter_lr10
(
nvswitch_device *device
)
{
return;
}
void
nvswitch_hw_counter_shutdown_lr10
(
nvswitch_device *device
)
{
return;
}
//
// Reads the 36-bit free running counter
//
NvU64
nvswitch_hw_counter_read_counter_lr10
(
nvswitch_device *device
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
//
// Initialize clock gating.
//
void
nvswitch_init_clock_gating_lr10
(
nvswitch_device *device
)
{
NvU32 regval;
NvU32 i;
// BUS
NVSWITCH_REG_WR32(device, _PBUS, _EXT_CG1,
DRF_DEF(_PBUS, _EXT_CG1, _SLCG, __PROD) |
DRF_DEF(_PBUS, _EXT_CG1, _SLCG_C11, __PROD) |
DRF_DEF(_PBUS, _EXT_CG1, _SLCG_PRI, __PROD) |
DRF_DEF(_PBUS, _EXT_CG1, _SLCG_UNROLL, __PROD) |
DRF_DEF(_PBUS, _EXT_CG1, _SLCG_ROLL, __PROD) |
DRF_DEF(_PBUS, _EXT_CG1, _SLCG_IFR, __PROD) |
DRF_DEF(_PBUS, _EXT_CG1, _SLCG_PMC, __PROD));
// PRI
NVSWITCH_REG_WR32(device, _PPRIV_MASTER, _CG1,
DRF_DEF(_PPRIV_MASTER, _CG1, _SLCG, __PROD));
regval =
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _SLOWCLK, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_CONFIG_REGS, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_FUNNEL_DECODER, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_FUNNEL_ARB, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_HISTORY_BUFFER, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_MASTER, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_SLAVE, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_UCODE_TRAP, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _LOC_PRIV, __PROD) |
DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PM, __PROD);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT0, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT1, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT2, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT3, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT4, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT5, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT6, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT7, _CG1, regval);
NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT8, _CG1, regval);
// XP3G
NVSWITCH_REG_WR32(device, _XP, _PRI_XP3G_CG,
DRF_DEF(_XP, _PRI_XP3G_CG, _IDLE_CG_DLY_CNT, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _IDLE_CG_EN, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _STATE_CG_EN, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _STALL_CG_DLY_CNT, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _STALL_CG_EN, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _QUIESCENT_CG_EN, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _WAKEUP_DLY_CNT, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _THROT_CLK_CNT, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _DI_DT_SKEW_VAL, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _THROT_CLK_EN, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _THROT_CLK_SW_OVER, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _PAUSE_CG_EN, __PROD) |
DRF_DEF(_XP, _PRI_XP3G_CG, _HALT_CG_EN, __PROD));
NVSWITCH_REG_WR32(device, _XP, _PRI_XP3G_CG1,
DRF_DEF(_XP, _PRI_XP3G_CG1, _MONITOR_CG_EN, __PROD));
// XVE
NVSWITCH_ENG_WR32_LR10(device, XVE, , 0, _XVE, _PRI_XVE_CG,
DRF_DEF(_XVE, _PRI_XVE_CG, _IDLE_CG_DLY_CNT, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _IDLE_CG_EN, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _STATE_CG_EN, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _STALL_CG_DLY_CNT, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _STALL_CG_EN, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _QUIESCENT_CG_EN, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _WAKEUP_DLY_CNT, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _THROT_CLK_CNT, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _DI_DT_SKEW_VAL, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _THROT_CLK_EN, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _THROT_CLK_SW_OVER, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _PAUSE_CG_EN, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG, _HALT_CG_EN, __PROD));
NVSWITCH_ENG_WR32_LR10(device, XVE, , 0, _XVE, _PRI_XVE_CG1,
DRF_DEF(_XVE, _PRI_XVE_CG1, _MONITOR_CG_EN, __PROD) |
DRF_DEF(_XVE, _PRI_XVE_CG1, _SLCG, __PROD));
// NPORT
NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _CTRL_SLCG,
DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _INGRESS, __PROD) |
DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _ROUTE, __PROD) |
DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _EGRESS, __PROD) |
DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _STRACK, __PROD) |
DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _TAGSTATE, __PROD) |
DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _TREX, __PROD));
// NPG_PERFMON
NVSWITCH_BCAST_WR32_LR10(device, NPG_PERFMON, _NPGPERF, _CTRL_CLOCK_GATING,
DRF_DEF(_NPGPERF, _CTRL_CLOCK_GATING, _CG1_SLCG, __PROD));
NVSWITCH_BCAST_WR32_LR10(device, NPG_PERFMON, _NPGPERF, _PERF_CTRL_CLOCK_GATING,
DRF_DEF(_NPGPERF, _PERF_CTRL_CLOCK_GATING, _CG1_SLCG, __PROD) |
DRF_DEF(_NPGPERF, _PERF_CTRL_CLOCK_GATING, _CONTEXT_FREEZE, __PROD));
//
// NVLW_PERFMON
//
// These registers are protected by PRIV_LEVEL_MASK6.
// PLM6 will not be blown on Production fuses.
//
NVSWITCH_BCAST_WR32_LR10(device, NVLW_PERFMON, _NVLPERF, _CTRL_CLOCK_GATING,
DRF_DEF(_NVLPERF, _CTRL_CLOCK_GATING, _CG1_SLCG, __PROD) |
DRF_DEF(_NVLPERF, _CTRL_CLOCK_GATING, _CG1_SLCG_CTRL, __PROD));
NVSWITCH_BCAST_WR32_LR10(device, NVLW_PERFMON, _NVLPERF, _PERF_CTRL_CLOCK_GATING,
DRF_DEF(_NVLPERF, _PERF_CTRL_CLOCK_GATING, _CG1_SLCG, __PROD) |
DRF_DEF(_NVLPERF, _PERF_CTRL_CLOCK_GATING, _CONTEXT_FREEZE, __PROD));
// NVLCTRL
NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL, _PLL_PRI_CLOCK_GATING,
DRF_DEF(_NVLCTRL, _PLL_PRI_CLOCK_GATING, _CG1_SLCG, __PROD));
// MINION
for (i = 0; i < NVSWITCH_ENG_COUNT(device, MINION, ); i++)
{
regval = NVSWITCH_ENG_RD32_LR10(device, MINION, i, _CMINION_FALCON, _CG2);
NVSWITCH_ENG_WR32_LR10(device, MINION, , i, _CMINION_FALCON, _CG2,
FLD_SET_DRF(_CMINION_FALCON, _CG2, _SLCG, __PROD, regval));
}
// PTIMER
NVSWITCH_REG_WR32(device, _PTIMER, _PRI_TMR_CG1,
DRF_DEF(_PTIMER, _PRI_TMR_CG1, _MONITOR_CG_EN, __PROD) |
DRF_DEF(_PTIMER, _PRI_TMR_CG1, _SLCG, __PROD));
// SOE
regval = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE, _FBIF_CG1);
regval = FLD_SET_DRF(_SOE, _FBIF_CG1, _SLCG, __PROD, regval);
NVSWITCH_SOE_WR32_LR10(device, 0, _SOE, _FBIF_CG1, regval);
regval = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE, _FALCON_CG2);
regval = FLD_SET_DRF(_SOE, _FALCON_CG2, _SLCG, __PROD, regval);
NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_FALCON, _CG2, regval);
regval = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_MISC, _CG1);
regval = FLD_SET_DRF(_SOE, _MISC_CG1, _SLCG, __PROD, regval);
NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_MISC, _CG1, regval);
NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_MISC, _TOP_CG,
DRF_DEF(_SOE_MISC, _TOP_CG, _IDLE_CG_DLY_CNT, __PROD));
return;
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,336 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "lr10/lr10.h"
#include "flcn/flcn_nvswitch.h"
#include "nvswitch/lr10/dev_falcon_v4.h"
static NvU32
_flcnRegRead_LR10
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset
)
{
// Probably should perform some checks on the offset, the device, and the engine descriptor
return nvswitch_reg_read_32(device, pFlcn->engDescUc.base + offset);
}
static void
_flcnRegWrite_LR10
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset,
NvU32 data
)
{
// Probably should perform some checks on the offset, the device, and the engine descriptor
nvswitch_reg_write_32(device, pFlcn->engDescUc.base + offset, data);
}
/*
* @brief Retrigger an interrupt message from the engine to the NV_CTRL tree
*
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn FLCN pointer
*/
static void
_flcnIntrRetrigger_LR10
(
nvswitch_device *device,
FLCN *pFlcn
)
{
NvU32 val = DRF_DEF(_PFALCON, _FALCON_INTR_RETRIGGER, _TRIGGER, _TRUE);
flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_INTR_RETRIGGER(0), val);
}
static NvBool
_flcnAreEngDescsInitialized_LR10
(
nvswitch_device *device,
FLCN *pFlcn
)
{
// If pFlcn->engDescUc.base is 0, discovery hasn't finished yet; return false.
// If engDescUc.base is non-zero and engDescBc.base is 0, this is a unicast-only engine.
return pFlcn->engDescUc.base != 0 && pFlcn->engDescUc.initialized &&
(pFlcn->engDescBc.base == 0 || pFlcn->engDescBc.initialized);
}
/*
* @brief Waits for falcon to finish scrubbing IMEM/DMEM.
*
* @param[in] device switch device
* @param[in] pFlcn FLCN pointer
*
* @returns nothing
*/
static NV_STATUS
_flcnWaitForResetToFinish_LR10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NVSWITCH_TIMEOUT timeout;
NvU32 dmaCtrl;
// Add a dummy write (of anything) to trigger scrubbing
flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_MAILBOX0, 0);
// TODO: Adapt timeout to our model; this should be centralized.
if (IS_EMULATION(device))
{
nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
}
else
{
nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
}
while (1)
{
dmaCtrl = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_DMACTL);
if (FLD_TEST_DRF(_PFALCON, _FALCON_DMACTL, _DMEM_SCRUBBING, _DONE, dmaCtrl) &&
FLD_TEST_DRF(_PFALCON, _FALCON_DMACTL, _IMEM_SCRUBBING, _DONE, dmaCtrl))
{
// Operation successful, IMEM and DMEM scrubbing has finished.
return NV_OK;
}
if (nvswitch_timeout_check(&timeout))
{
NVSWITCH_PRINT(device, ERROR,
"%s: Timeout waiting for scrubbing to finish!!!\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
return NV_ERR_TIMEOUT;
}
}
}
/*!
* @brief Capture and dump the falconPC trace.
*
* @param[in] device nvswitch device pointer
* @param[in] pFlcn FLCN object pointer
*
* @returns nothing
*/
void
_flcnDbgInfoCapturePcTrace_LR10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NvU32 regTraceIdx;
NvU32 idx;
NvU32 maxIdx;
// Dump entire PC trace buffer
regTraceIdx = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_TRACEIDX);
maxIdx = DRF_VAL(_PFALCON_FALCON, _TRACEIDX, _MAXIDX, regTraceIdx);
NVSWITCH_PRINT(device, ERROR,
"PC TRACE (TOTAL %d ENTRIES. Entry 0 is the most recent branch):\n",
maxIdx);
for (idx = 0; idx < maxIdx; idx++)
{
regTraceIdx =
FLD_SET_DRF_NUM(_PFALCON, _FALCON_TRACEIDX, _IDX, idx, regTraceIdx);
flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_TRACEIDX, regTraceIdx);
NVSWITCH_PRINT(device, ERROR, "FALCON_TRACEPC(%d) : 0x%08x\n", idx,
DRF_VAL(_PFALCON, _FALCON_TRACEPC, _PC,
flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_TRACEPC)));
}
}
/*!
* @brief Read falcon core revision
*
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn FLCN pointer
*
* @return @ref NV_FLCN_CORE_REV_X_Y.
*/
NvU8
_flcnReadCoreRev_LR10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NvU32 hwcfg1 = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_HWCFG1);
return ((DRF_VAL(_PFALCON, _FALCON_HWCFG1, _CORE_REV, hwcfg1) << 4) |
DRF_VAL(_PFALCON, _FALCON_HWCFG1, _CORE_REV_SUBVERSION, hwcfg1));
}
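/*
 * Example of the packed return value (register field values assumed for
 * illustration): with HWCFG1 reporting CORE_REV = 6 and
 * CORE_REV_SUBVERSION = 1, this returns (6 << 4) | 1 = 0x61, matching the
 * NV_FLCN_CORE_REV_X_Y "major in the high nibble, minor in the low nibble"
 * encoding.
 */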
//
// Store pointers to ucode header and data.
// Preload ucode from registry if available.
//
NV_STATUS
_flcnConstruct_LR10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NV_STATUS status;
PFLCNABLE pFlcnable = pFlcn->pFlcnable;
PFALCON_QUEUE_INFO pQueueInfo;
pFlcn->bConstructed = NV_TRUE;
if (pFlcn->engArch == NV_UPROC_ENGINE_ARCH_DEFAULT)
{
// Default the arch to Falcon if it's not set
pFlcn->engArch = NV_UPROC_ENGINE_ARCH_FALCON;
}
// Allocate the memory for Queue Data Structure if needed.
if (pFlcn->bQueuesEnabled)
{
pQueueInfo = pFlcn->pQueueInfo = nvswitch_os_malloc(sizeof(*pQueueInfo));
if (pQueueInfo == NULL)
{
status = NV_ERR_NO_MEMORY;
NVSWITCH_ASSERT(0);
goto _flcnConstruct_LR10_fail;
}
nvswitch_os_memset(pQueueInfo, 0, sizeof(FALCON_QUEUE_INFO));
// Assert if Number of Queues are zero
NVSWITCH_ASSERT(pFlcn->numQueues != 0);
pQueueInfo->pQueues = nvswitch_os_malloc(sizeof(FLCNQUEUE) * pFlcn->numQueues);
if (pQueueInfo->pQueues == NULL)
{
status = NV_ERR_NO_MEMORY;
NVSWITCH_ASSERT(0);
goto _flcnConstruct_LR10_fail;
}
nvswitch_os_memset(pQueueInfo->pQueues, 0, sizeof(FLCNQUEUE) * pFlcn->numQueues);
// Sequences can be optional
if (pFlcn->numSequences != 0)
{
if ((pFlcn->numSequences - 1) > ((NvU32)NV_U8_MAX))
{
status = NV_ERR_OUT_OF_RANGE;
NVSWITCH_PRINT(device, ERROR,
"Max numSequences index = %d cannot fit into byte\n",
(pFlcn->numSequences - 1));
NVSWITCH_ASSERT(0);
goto _flcnConstruct_LR10_fail;
}
flcnQueueSeqInfoStateInit(device, pFlcn);
}
}
// DEBUG
NVSWITCH_PRINT(device, INFO, "Falcon: %s\n", flcnGetName_HAL(device, pFlcn));
NVSWITCH_ASSERT(pFlcnable != NULL);
flcnableGetExternalConfig(device, pFlcnable, &pFlcn->extConfig);
return NV_OK;
_flcnConstruct_LR10_fail:
// call flcnDestruct to free the memory allocated in this construct function
flcnDestruct_HAL(device, pFlcn);
return status;
}
void
_flcnDestruct_LR10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
PFALCON_QUEUE_INFO pQueueInfo;
PFLCNABLE pFlcnable = pFlcn->pFlcnable;
if (!pFlcn->bConstructed)
{
return;
}
pFlcn->bConstructed = NV_FALSE;
if (pFlcnable == NULL) {
NVSWITCH_ASSERT(pFlcnable != NULL);
return;
}
if (pFlcn->bQueuesEnabled && (pFlcn->pQueueInfo != NULL))
{
pQueueInfo = pFlcn->pQueueInfo;
if (NULL != pQueueInfo->pQueues)
{
nvswitch_os_free(pQueueInfo->pQueues);
pQueueInfo->pQueues = NULL;
}
nvswitch_os_free(pFlcn->pQueueInfo);
pFlcn->pQueueInfo = NULL;
}
}
const char *
_flcnGetName_LR10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
if (pFlcn->name == NULL)
{
return "UNKNOWN";
}
return pFlcn->name;
}
/**
* @brief set hal function pointers for functions defined in LR10 (i.e. this file)
*
* this function has to be at the end of the file so that all the
* other functions are already defined.
*
* @param[in] pFlcn The flcn for which to set hals
*/
void
flcnSetupHal_LR10
(
PFLCN pFlcn
)
{
flcn_hal *pHal = pFlcn->pHal;
pHal->readCoreRev = _flcnReadCoreRev_LR10;
pHal->regRead = _flcnRegRead_LR10;
pHal->regWrite = _flcnRegWrite_LR10;
pHal->construct = _flcnConstruct_LR10;
pHal->destruct = _flcnDestruct_LR10;
pHal->getName = _flcnGetName_LR10;
pHal->intrRetrigger = _flcnIntrRetrigger_LR10;
pHal->areEngDescsInitialized = _flcnAreEngDescsInitialized_LR10;
pHal->waitForResetToFinish = _flcnWaitForResetToFinish_LR10;
pHal->dbgInfoCapturePcTrace = _flcnDbgInfoCapturePcTrace_LR10;
}

View File

@@ -0,0 +1,818 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "lr10/lr10.h"
#include "lr10/inforom_lr10.h"
#include "inforom/ifrstruct.h"
#include "nvswitch/lr10/dev_nvlsaw_ip.h"
#include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h"
#include "nvswitch/lr10/dev_pmgr.h"
//
// TODO: Split individual object hals to their own respective files
//
static void _oms_parse(nvswitch_device *device, INFOROM_OMS_STATE *pOmsState);
static void _oms_refresh(nvswitch_device *device, INFOROM_OMS_STATE *pOmsState);
NvlStatus
nvswitch_inforom_nvl_log_error_event_lr10
(
nvswitch_device *device,
void *pNvlGeneric,
void *pNvlErrorEvent,
NvBool *bDirty
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_nvl_get_max_correctable_error_rate_lr10
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_inforom_nvl_get_errors_lr10
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus nvswitch_inforom_nvl_update_link_correctable_error_info_lr10
(
nvswitch_device *device,
void *pNvlGeneric,
void *pData,
NvU8 linkId,
NvU8 nvliptInstance,
NvU8 localLinkIdx,
void *pNvlErrorCounts,
NvBool *bDirty
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
static
NvlStatus
_inforom_ecc_find_useable_entry_index
(
INFOROM_ECC_OBJECT_V6_S0 *pEccObj,
INFOROM_NVS_ECC_ERROR_EVENT *error_event,
NvU8 *pEntryIndex
)
{
NvU8 entry;
//
// The size of the "entry" variable needs to be updated if the InfoROM ECC
// error log ever grows past 256
//
ct_assert(INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT <= NV_U8_MAX);
for (entry = 0; entry < INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT; entry++)
{
INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pErrorEntry = &(pEccObj->errorEntries[entry]);
//
// Check if the entry already exists
// Ideally the address should be verified only if it is valid, however
// we scrub an invalid address early on so expect them to match the
// recorded value in either case
//
if ((pErrorEntry->errId == error_event->sxid) &&
FLD_TEST_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER,
_ADDR_VALID, error_event->bAddressValid, pErrorEntry->header) &&
(pErrorEntry->address == error_event->address) &&
FLD_TEST_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _LOCATION,
_LINK_ID, error_event->linkId, pErrorEntry->location))
break;
//
// Encountering an empty entry indicates this is the first instance of the error
// The ECC error log on the InfoROM is never sparse so we can terminate
// the search early
//
else if (FLD_TEST_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER,
_VALID, _FALSE, pErrorEntry->header))
break;
}
if (entry == INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT)
return -NVL_NOT_FOUND;
*pEntryIndex = entry;
return NVL_SUCCESS;
}
static
NvlStatus
_inforom_ecc_calc_timestamp_delta
(
INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pErrorEntry,
INFOROM_NVS_ECC_ERROR_EVENT *error_event,
NvU64 existingCount
)
{
//
// Subtract 1 from existingCount to drop the first error event's counter.
// Unfortunately we cannot track the first error event's count, so assume 1.
//
NvlStatus status = NVL_SUCCESS;
NvU32 currTime = error_event->timestamp;
NvU64 tmp = ((NvU64) pErrorEntry->averageEventDelta) * (existingCount - 1);
NvU64 ovfTmp = tmp + (currTime - pErrorEntry->lastErrorTimestamp);
NvU64 totCnt, delta;
if (ovfTmp < tmp)
{
status = -NVL_NO_MEM;
goto _updateEntryTimeFailed;
}
totCnt = error_event->errorCount + existingCount - 1;
delta = ovfTmp / totCnt;
if (delta > NV_U32_MAX)
{
status = -NVL_NO_MEM;
goto _updateEntryTimeFailed;
}
pErrorEntry->averageEventDelta = (NvU32) delta;
_updateEntryTimeFailed:
return status;
}
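/*
 * Worked example of the running average above (numbers are illustrative):
 * with averageEventDelta = 10 s, existingCount = 3, a new event arriving
 * 30 s after lastErrorTimestamp, and errorCount = 2:
 *   tmp    = 10 * (3 - 1) = 20
 *   ovfTmp = 20 + 30      = 50
 *   totCnt = 2 + 3 - 1    = 4
 *   delta  = 50 / 4       = 12 (integer division)
 * so averageEventDelta becomes 12. The overflow checks fail the update (and
 * the caller marks the time data corrupt) rather than wrapping silently.
 */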
static
NvlStatus
_inforom_ecc_record_entry
(
INFOROM_ECC_OBJECT_V6_S0 *pEccObj,
INFOROM_NVS_ECC_ERROR_EVENT *error_event,
NvU8 entry
)
{
NvBool bNewEntry;
NvU32 *pErrCnt;
INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pErrorEntry = &(pEccObj->errorEntries[entry]);
bNewEntry = FLD_TEST_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER,
_VALID, _FALSE, pErrorEntry->header);
pErrCnt = ((error_event->bUncErr) ? &(pErrorEntry->uncorrectedCount) :
&(pErrorEntry->correctedCount));
if (bNewEntry)
{
pErrorEntry->errId = error_event->sxid;
pErrorEntry->location = FLD_SET_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER,
_LOCATION, _LINK_ID, error_event->linkId, pErrorEntry->location);
pErrorEntry->header = FLD_SET_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER,
_HEADER, _ADDR_VALID, error_event->bAddressValid, pErrorEntry->header);
pErrorEntry->address = error_event->address;
pErrorEntry->sublocation = 0;
*pErrCnt = error_event->errorCount;
pErrorEntry->averageEventDelta = 0;
pErrorEntry->header = FLD_SET_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER,
_VALID, _TRUE, pErrorEntry->header);
}
else
{
NvlStatus status;
NvU64 tmpCnt;
NvU64 existingCnt = (NvU64) (pErrorEntry->uncorrectedCount + pErrorEntry->correctedCount);
status = _inforom_ecc_calc_timestamp_delta(pErrorEntry, error_event, existingCnt);
if (status != NVL_SUCCESS)
{
pErrorEntry->header = FLD_SET_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER,
_HEADER, _CORRUPT_TIMEDATA, _TRUE, pErrorEntry->header);
}
// Update error counts by summing them up
tmpCnt = (NvU64) *pErrCnt + error_event->errorCount;
// Saturate at NvU32 limit
if (tmpCnt > NV_U32_MAX)
{
tmpCnt = NV_U32_MAX;
}
*pErrCnt = (NvU32) tmpCnt;
}
pErrorEntry->lastErrorTimestamp = error_event->timestamp;
return NVL_SUCCESS;
}
NvlStatus
nvswitch_inforom_ecc_log_error_event_lr10
(
nvswitch_device *device,
INFOROM_ECC_OBJECT *pEccGeneric,
INFOROM_NVS_ECC_ERROR_EVENT *err_event
)
{
NvU8 entry;
NvU64_ALIGN32 *pInforomTotalCount;
NvU64 tmpCount;
NvlStatus status;
INFOROM_ECC_OBJECT_V6_S0 *pEccObj;
if ((err_event == NULL) || (pEccGeneric == NULL))
return -NVL_BAD_ARGS;
pEccObj = &(pEccGeneric->v6s);
//
// Find the appropriate entry to log the error event
// If the function returns "out of memory" error, indicates no free entries
//
status = _inforom_ecc_find_useable_entry_index(pEccObj, err_event, &entry);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "InfoROM ECC: Unable to find logging entry rc: %d\n", status);
goto _ecc_log_error_event_lr10_failed;
}
//
// Record the error data into appropriate members of the error entry struct
// Also mark the entry as in-use if it is a new entry
//
status = _inforom_ecc_record_entry(pEccObj, err_event, entry);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "InfoROM ECC: Unable to record entry:%u rc:%d\n",
entry, status);
goto _ecc_log_error_event_lr10_failed;
}
// Log the error count to the InfoROM total values
if (err_event->bUncErr)
{
pInforomTotalCount = &(pEccObj->uncorrectedTotal);
}
else
{
pInforomTotalCount = &(pEccObj->correctedTotal);
}
NvU64_ALIGN32_UNPACK(&tmpCount, pInforomTotalCount);
tmpCount += err_event->errorCount;
if (tmpCount < err_event->errorCount)
{
tmpCount = NV_U64_MAX;
}
NvU64_ALIGN32_PACK(pInforomTotalCount, &tmpCount);
// Update shared surface counts, non-fatal if we encounter a failure
status = nvswitch_smbpbi_refresh_ecc_counts(device);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, WARN, "Failed to update ECC counts on SMBPBI "
"shared surface rc:%d\n", status);
}
return NVL_SUCCESS;
_ecc_log_error_event_lr10_failed:
NVSWITCH_PRINT(device, ERROR, "Missed recording sxid=%u, linkId=%u, address=0x%04x, "
"timestamp=%u, errorCount=%u\n", err_event->sxid,
err_event->linkId, err_event->address, err_event->timestamp,
err_event->errorCount);
return status;
}
void
nvswitch_inforom_ecc_get_total_errors_lr10
(
nvswitch_device *device,
INFOROM_ECC_OBJECT *pEccGeneric,
NvU64 *pCorrectedTotal,
NvU64 *pUncorrectedTotal
)
{
INFOROM_ECC_OBJECT_V6_S0 *pEccObj = &(pEccGeneric->v6s);
NvU64_ALIGN32_UNPACK(pCorrectedTotal, &pEccObj->correctedTotal);
NvU64_ALIGN32_UNPACK(pUncorrectedTotal, &pEccObj->uncorrectedTotal);
}
static void _nvswitch_inforom_map_ecc_error_to_userspace_error
(
INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pEccError,
NVSWITCH_ECC_ERROR_ENTRY *pErrorLog
)
{
pErrorLog->sxid = pEccError->errId;
pErrorLog->linkId = DRF_VAL(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _LOCATION, _LINK_ID, pEccError->location);
pErrorLog->lastErrorTimestamp = pEccError->lastErrorTimestamp;
pErrorLog->bAddressValid = DRF_VAL(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, _ADDR_VALID, pEccError->header);
pErrorLog->address = pEccError->address;
pErrorLog->correctedCount = pEccError->correctedCount;
pErrorLog->uncorrectedCount = pEccError->uncorrectedCount;
return;
}
NvlStatus
nvswitch_inforom_ecc_get_errors_lr10
(
nvswitch_device *device,
NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params
)
{
struct inforom *pInforom = device->pInforom;
PINFOROM_ECC_STATE pEccState;
INFOROM_ECC_OBJECT *pEcc;
NvU32 errIndx;
/*
 * A compile-time check is needed here to make sure the ECC_ERROR API query
 * size stays in sync with its internal counterpart. When the definition of
 * the internal InfoROM error size limit changes, it will force an API
 * interface change as well, or a switch to a retry-style query using err_index.
 */
ct_assert(NVSWITCH_ECC_ERRORS_MAX_READ_COUNT == INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT);
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
pEccState = pInforom->pEccState;
if (pEccState == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
pEcc = pEccState->pEcc;
if (pEcc == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
nvswitch_os_memset(params->errorLog, 0, sizeof(params->errorLog));
nvswitch_os_memcpy(&params->correctedTotal, &pEcc->v6s.correctedTotal, sizeof(params->correctedTotal));
nvswitch_os_memcpy(&params->uncorrectedTotal, &pEcc->v6s.uncorrectedTotal, sizeof(params->uncorrectedTotal));
for (errIndx = 0; errIndx < NVSWITCH_ECC_ERRORS_MAX_READ_COUNT; errIndx++)
{
if (FLD_TEST_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, _VALID, _FALSE,
pEcc->v6s.errorEntries[errIndx].header))
{
break; // the last entry
}
_nvswitch_inforom_map_ecc_error_to_userspace_error(&pEcc->v6s.errorEntries[errIndx],
&params->errorLog[errIndx]);
}
params->errorCount = errIndx;
return NVL_SUCCESS;
}
static NvU8 _oms_dword_byte_sum(NvU16 dword)
{
NvU8 i, sum = 0;
for (i = 0; i < sizeof(dword); i++)
sum += (NvU8)((dword >> (8*i)) & 0xFF);
return sum;
}
static void _oms_update_entry_checksum
(
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pEntry
)
{
NvU8 datasum = 0;
// Upper byte is the checksum
datasum += _oms_dword_byte_sum(pEntry->data & ~0xFF00);
pEntry->data = FLD_SET_REF_NUM(
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_ENTRY_CHECKSUM,
0x00u - datasum, pEntry->data);
}
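/*
 * Illustrative example (data value assumed): if the non-checksum byte of the
 * 16-bit entry is 0x05, _oms_dword_byte_sum(data & ~0xFF00) = 0x05, so the
 * checksum byte is set to (0x00 - 0x05) & 0xFF = 0xFB. The full byte sum is
 * then 0x05 + 0xFB = 0x100, which truncates to 0 in _oms_dword_byte_sum(),
 * and that zero sum is exactly what _oms_entry_valid() checks for below.
 */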
static void
_oms_reset_entry_iter
(
INFOROM_OMS_STATE *pOmsState,
NvBool bStart
)
{
INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s;
INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s;
if (bStart)
{
pVerData->pIter = &pOms->settings[0];
}
else
{
pVerData->pIter = &pOms->settings[
INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES - 1];
}
}
static NvBool
_oms_entry_available
(
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pEntry = pOmsState->omsData.v1s.pIter;
if (pEntry == NULL)
return NV_FALSE;
return FLD_TEST_REF(INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE,
_YES, pEntry->data);
}
static NvBool
_oms_entry_valid
(
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pEntry = pOmsState->omsData.v1s.pIter;
NvU8 sum;
if (pEntry == NULL)
return NV_FALSE;
sum = _oms_dword_byte_sum(pEntry->data);
return (sum == 0);
}
/*
 * Advances the settings entry iterator to the next entry. Returns NV_TRUE if
 * the iterator is still valid, NV_FALSE otherwise.
 */
static NvBool
_oms_entry_iter_next
(
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s;
INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s;
if (pVerData->pIter >= pOms->settings +
INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES)
{
pVerData->pIter = NULL;
}
else
{
pVerData->pIter++;
}
return (pVerData->pIter != NULL);
}
static void
_oms_refresh
(
nvswitch_device *device,
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s;
nvswitch_os_memset(pOms->settings, 0xFF, sizeof(pOms->settings));
pOms->lifetimeRefreshCount++;
// This is guaranteed to find and set an UpdateEntry now
_oms_parse(device, pOmsState);
}
static void
_oms_set_current_entry
(
INFOROM_OMS_STATE *pOmsState
)
{
pOmsState->omsData.v1s.prev = *pOmsState->omsData.v1s.pIter;
}
static void
_oms_set_update_entry
(
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s;
pVerData->pNext = pVerData->pIter;
// Next settings always start out the same as the previous
*pVerData->pNext = pVerData->prev;
}
static NvBool
_oms_entry_iter_prev
(
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s;
INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s;
if (pVerData->pIter <= pOms->settings)
{
pVerData->pIter = NULL;
}
else
{
pVerData->pIter--;
}
return (pVerData->pIter != NULL);
}
static void
_oms_parse
(
nvswitch_device *device,
INFOROM_OMS_STATE *pOmsState
)
{
NvBool bCurrentValid = NV_FALSE;
NvBool bIterValid = NV_TRUE;
//
// To find the "latest" entry - the one with the settings that were last
// flushed to the InfoROM - scan from the end of the array until we find
// an entry that is not available and is valid.
//
_oms_reset_entry_iter(pOmsState, NV_FALSE);
while (bIterValid)
{
if (!_oms_entry_available(pOmsState) &&
_oms_entry_valid(pOmsState))
{
_oms_set_current_entry(pOmsState);
bCurrentValid = NV_TRUE;
break;
}
bIterValid = _oms_entry_iter_prev(pOmsState);
}
//
// To find the "next" entry - one that we will write to if a setting is
// updated - start scanning from the entry after the latest entry to find
// an available one. This will skip entries that were previously written
// to but are invalid.
//
if (bCurrentValid)
{
bIterValid = _oms_entry_iter_next(pOmsState);
}
else
{
_oms_reset_entry_iter(pOmsState, NV_TRUE);
bIterValid = NV_TRUE;
}
while (bIterValid)
{
if (_oms_entry_available(pOmsState))
{
_oms_set_update_entry(pOmsState);
break;
}
bIterValid = _oms_entry_iter_next(pOmsState);
}
if (!bIterValid)
{
//
// No more entries available, we will need to refresh the object.
// We should have at least one valid recent entry in this case
// (otherwise every entry is corrupted).
//
NVSWITCH_ASSERT(bCurrentValid);
_oms_refresh(device, pOmsState);
}
}
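/*
 * Illustrative walk-through of the scan above (entry states assumed): for a
 * settings array laid out as
 *   [ written, written, written (valid), available, available, ... ]
 * the backward scan stops at index 2, the highest-indexed written-and-valid
 * entry, and records it as the "latest" entry; the forward scan then starts
 * at index 3 and records it as the "next" entry to be written. Only when no
 * available entry remains does _oms_refresh() wipe the array and re-run the
 * parse.
 */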
static NvBool
_oms_is_content_dirty
(
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s;
if (pVerData->pNext == NULL)
return NV_FALSE;
return (pVerData->pNext->data != pVerData->prev.data);
}
NvlStatus
nvswitch_oms_inforom_flush_lr10
(
nvswitch_device *device
)
{
NvlStatus status = NVL_SUCCESS;
struct inforom *pInforom = device->pInforom;
INFOROM_OMS_STATE *pOmsState;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
pOmsState = pInforom->pOmsState;
if (pOmsState != NULL && _oms_is_content_dirty(pOmsState))
{
status = nvswitch_inforom_write_object(device, "OMS",
pOmsState->pFmt, pOmsState->pOms,
pOmsState->pPackedObject);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"Failed to flush OMS object to InfoROM, rc: %d\n", status);
}
else
{
_oms_parse(device, pOmsState);
}
}
return status;
}
void
nvswitch_initialize_oms_state_lr10
(
nvswitch_device *device,
INFOROM_OMS_STATE *pOmsState
)
{
pOmsState->omsData.v1s.pIter = pOmsState->omsData.v1s.pNext = NULL;
pOmsState->omsData.v1s.prev.data =
REF_DEF(INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE, _NO) |
REF_DEF(INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE, _NO);
_oms_update_entry_checksum(&pOmsState->omsData.v1s.prev);
_oms_parse(device, pOmsState);
}
NvBool
nvswitch_oms_get_device_disable_lr10
(
INFOROM_OMS_STATE *pOmsState
)
{
INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s;
return FLD_TEST_REF(
INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE,
_YES, pVerData->pNext->data);
}
void
nvswitch_oms_set_device_disable_lr10
(
INFOROM_OMS_STATE *pOmsState,
NvBool bForceDeviceDisable
)
{
INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s;
pVerData->pNext->data = FLD_SET_REF_NUM(
INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE,
bForceDeviceDisable, pVerData->pNext->data);
_oms_update_entry_checksum(pVerData->pNext);
}
NvlStatus
nvswitch_bbx_setup_prologue_lr10
(
nvswitch_device *device,
void *pInforomBbxState
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_bbx_setup_epilogue_lr10
(
nvswitch_device *device,
void *pInforomBbxState
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_bbx_add_data_time_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_bbx_add_sxid_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_bbx_add_temperature_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
void
nvswitch_bbx_set_initial_temperature_lr10
(
nvswitch_device *device,
void *pInforomBbxState,
void *pInforomBbxData
)
{
return;
}
NvlStatus
nvswitch_inforom_bbx_get_sxid_lr10
(
nvswitch_device *device,
NVSWITCH_GET_SXIDS_PARAMS *params
)
{
return -NVL_ERR_NOT_SUPPORTED;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,335 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "export_nvswitch.h"
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "rom_nvswitch.h"
#include "lr10/lr10.h"
#include "lr10/pmgr_lr10.h"
#include "nvswitch/lr10/dev_pmgr.h"
void _nvswitch_i2c_set_port_pmgr(nvswitch_device *device, NvU32 port);
/*! The number of nanoseconds we will wait for slave clock stretching.
* Previously, this was set to 100us, but that proved too
* short (see bug 630691), so it was increased to 2ms.
*/
#define I2C_STRETCHED_LOW_TIMEOUT_NS_LR10 2000000
NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE nvswitch_i2c_device_allow_list_lr10[] =
{
};
const NvU32 nvswitch_i2c_device_allow_list_size_lr10 =
NV_ARRAY_ELEMENTS(nvswitch_i2c_device_allow_list_lr10);
//
// PMGR functions
//
/*!
* @brief Return I2c port info used in PMGR implementation.
*/
NvU32
nvswitch_i2c_get_port_info_lr10
(
nvswitch_device *device,
NvU32 port
)
{
PNVSWITCH_OBJI2C pI2c = device->pI2c;
if (port >= NVSWITCH_MAX_I2C_PORTS)
{
return 0;
}
else
{
return pI2c->PortInfo[port];
}
}
//
// Pre-initialize the software & hardware state of the switch I2C & GPIO interface
//
void
nvswitch_init_pmgr_lr10
(
nvswitch_device *device
)
{
PNVSWITCH_OBJI2C pI2c;
// Initialize I2C object
nvswitch_i2c_init(device);
pI2c = device->pI2c;
//
// TODO: Dynamically allocate the I2C device allowlist once VBIOS table
// reads are implemented. For now, use the static LR10 allowlist.
//
pI2c->i2c_allow_list = nvswitch_i2c_device_allow_list_lr10;
pI2c->i2c_allow_list_size = nvswitch_i2c_device_allow_list_size_lr10;
// Setup the 3 I2C ports
_nvswitch_i2c_set_port_pmgr(device, NVSWITCH_I2C_PORT_I2CA);
_nvswitch_i2c_set_port_pmgr(device, NVSWITCH_I2C_PORT_I2CB);
_nvswitch_i2c_set_port_pmgr(device, NVSWITCH_I2C_PORT_I2CC);
}
static const NVSWITCH_GPIO_INFO nvswitch_gpio_pin_Default[] =
{
NVSWITCH_DESCRIBE_GPIO_PIN( 0, _INSTANCE_ID0, 0, IN), // Instance ID bit 0
NVSWITCH_DESCRIBE_GPIO_PIN( 1, _INSTANCE_ID1, 0, IN), // Instance ID bit 1
NVSWITCH_DESCRIBE_GPIO_PIN( 2, _INSTANCE_ID2, 0, IN), // Instance ID bit 2
NVSWITCH_DESCRIBE_GPIO_PIN( 3, _INSTANCE_ID3, 0, IN), // Instance ID bit 3
NVSWITCH_DESCRIBE_GPIO_PIN( 4, _INSTANCE_ID4, 0, IN), // Instance ID bit 4
NVSWITCH_DESCRIBE_GPIO_PIN( 5, _INSTANCE_ID5, 0, IN), // Instance ID bit 5
NVSWITCH_DESCRIBE_GPIO_PIN( 6, _INSTANCE_ID6, 0, IN), // Instance ID bit 6
};
static const NvU32 nvswitch_gpio_pin_Default_size = NV_ARRAY_ELEMENTS(nvswitch_gpio_pin_Default);
//
// Initialize the software state of the switch I2C & GPIO interface
// Temporarily forcing default GPIO values.
//
// TODO: This function should be updated with the board values from DCB.
void
nvswitch_init_pmgr_devices_lr10
(
nvswitch_device *device
)
{
lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
PNVSWITCH_OBJI2C pI2c = device->pI2c;
chip_device->gpio_pin = nvswitch_gpio_pin_Default;
chip_device->gpio_pin_size = nvswitch_gpio_pin_Default_size;
pI2c->device_list = NULL;
pI2c->device_list_size = 0;
}
/*!
* RM Control command to determine the physical id of the device.
*/
NvU32
nvswitch_read_physical_id_lr10
(
nvswitch_device *device
)
{
lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
NvU32 physical_id = 0;
NvU32 data;
NvU32 idx_gpio;
NvU32 input_inv;
NvU32 function_offset;
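//
// Sample each instance-ID GPIO through GPIO_INPUT_CNTL_1 and assemble
// the returned bits into the device's physical (position) ID.
//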
for (idx_gpio = 0; idx_gpio < chip_device->gpio_pin_size; idx_gpio++)
{
if ((chip_device->gpio_pin[idx_gpio].function >= NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID0) &&
(chip_device->gpio_pin[idx_gpio].function <= NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID6))
{
if (chip_device->gpio_pin[idx_gpio].misc == NVSWITCH_GPIO_ENTRY_MISC_IO_INV_IN)
{
input_inv = NV_PMGR_GPIO_INPUT_CNTL_1_INV_YES;
}
else
{
input_inv = NV_PMGR_GPIO_INPUT_CNTL_1_INV_NO;
}
NVSWITCH_REG_WR32(device, _PMGR, _GPIO_INPUT_CNTL_1,
DRF_NUM(_PMGR, _GPIO_INPUT_CNTL_1, _PINNUM, chip_device->gpio_pin[idx_gpio].pin) |
DRF_NUM(_PMGR, _GPIO_INPUT_CNTL_1, _INV, input_inv) |
DRF_DEF(_PMGR, _GPIO_INPUT_CNTL_1, _BYPASS_FILTER, _NO));
data = NVSWITCH_REG_RD32(device, _PMGR, _GPIO_INPUT_CNTL_1);
function_offset = chip_device->gpio_pin[idx_gpio].function -
NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID0;
physical_id |=
(DRF_VAL(_PMGR, _GPIO_INPUT_CNTL_1, _READ, data) << function_offset);
}
}
NVSWITCH_PRINT(device, SETUP, "%s Device position Id = 0x%x\n", __FUNCTION__, physical_id);
return physical_id;
}
/*!
* RM Control command to perform indexed I2C.
*/
NvlStatus
nvswitch_ctrl_i2c_indexed_lr10
(
nvswitch_device *device,
NVSWITCH_CTRL_I2C_INDEXED_PARAMS *pParams
)
{
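// Indexed I2C transactions are not implemented in this path; always
// return a generic error.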
NvlStatus status = (-NVL_ERR_GENERIC);
return status;
}
NvlStatus
nvswitch_get_rom_info_lr10
(
nvswitch_device *device,
NVSWITCH_EEPROM_TYPE *eeprom
)
{
if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
{
NVSWITCH_PRINT(device, SETUP,
"ROM configuration not supported on Fmodel/RTL/emulation\n");
return -NVL_ERR_NOT_SUPPORTED;
}
return -NVL_ERR_NOT_SUPPORTED;
}
/*!
* Set the speed of the HW I2C controller on a given port.
*
* @param[in] port The port identifying the controller.
*
* @param[in] speedMode The speed mode to run at.
*/
void
nvswitch_i2c_set_hw_speed_mode_lr10
(
nvswitch_device *device,
NvU32 port,
NvU32 speedMode
)
{
NvU32 timing = DRF_DEF(_PMGR, _I2C_TIMING, _IGNORE_ACK, _DISABLE) |
DRF_DEF(_PMGR, _I2C_TIMING, _TIMEOUT_CHECK, _ENABLE);
switch (speedMode)
{
// Default should not be hit if above layers work correctly.
default:
NVSWITCH_PRINT(device, ERROR,
"%s: undefined speed\n",
__FUNCTION__);
// Deliberate fallthrough
case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_100KHZ:
timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _100KHZ, timing);
timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ, timing);
break;
case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_200KHZ:
timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _200KHZ, timing);
timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_200KHZ, timing);
break;
case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_300KHZ:
timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _300KHZ, timing);
timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_300KHZ, timing);
break;
case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_400KHZ:
timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _400KHZ, timing);
timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_400KHZ, timing);
break;
case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_1000KHZ:
timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _1000KHZ, timing);
timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_1000KHZ, timing);
break;
}
NVSWITCH_REG_WR32(device, _PMGR, _I2C_TIMING(port), timing);
}
/*!
* Return if I2C transactions are supported.
*
* @param[in] device The NvSwitch Device.
*
*/
NvBool
nvswitch_is_i2c_supported_lr10
(
nvswitch_device *device
)
{
return NV_TRUE;
}
/*!
* Return if I2C device and port is allowed access
*
* @param[in] device The NvSwitch Device.
* @param[in] port The I2C Port.
* @param[in] addr The I2C device to access.
* @param[in] bIsRead Boolean if I2C transaction is a read.
*
*/
NvBool
nvswitch_i2c_is_device_access_allowed_lr10
(
nvswitch_device *device,
NvU32 port,
NvU8 addr,
NvBool bIsRead
)
{
NvU32 i;
NvU32 device_allow_list_size;
NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE *device_allow_list;
NvBool bAllow = NV_FALSE;
PNVSWITCH_OBJI2C pI2c = device->pI2c;
device_allow_list = pI2c->i2c_allow_list;
device_allow_list_size = pI2c->i2c_allow_list_size;
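//
// Access is granted only if the (port, address) pair matches an
// allowlist entry whose access mask marks the requested direction
// (read or write) as PUBLIC.
//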
for (i = 0; i < device_allow_list_size; i++)
{
NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE i2c_device = device_allow_list[i];
if ((port == i2c_device.i2cPortLogical) &&
(addr == i2c_device.i2cAddress))
{
bAllow = bIsRead ?
FLD_TEST_DRF(_NVSWITCH, _I2C_DEVICE, _READ_ACCESS_LEVEL,
_PUBLIC, i2c_device.i2cRdWrAccessMask) :
FLD_TEST_DRF(_NVSWITCH, _I2C_DEVICE, _WRITE_ACCESS_LEVEL,
_PUBLIC, i2c_device.i2cRdWrAccessMask);
break;
}
}
return bAllow;
}

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "lr10/lr10.h"
#include "lr10/smbpbi_lr10.h"
#include "nvswitch/lr10/dev_nvlsaw_ip.h"
#include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h"
NvlStatus
nvswitch_smbpbi_get_dem_num_messages_lr10
(
nvswitch_device *device,
NvU8 *pMsgCount
)
{
NvU32 reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_12);
*pMsgCount = DRF_VAL(_NVLSAW_SW, _SCRATCH_12, _EVENT_MESSAGE_COUNT, reg);
return NVL_SUCCESS;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,299 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "export_nvswitch.h"
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "lr10/lr10.h"
#include "lr10/therm_lr10.h"
#include "soe/soeiftherm.h"
#include "rmflcncmdif_nvswitch.h"
#include "soe/soe_nvswitch.h"
#include "nvswitch/lr10/dev_therm.h"
#include "nvswitch/lr10/dev_nvlsaw_ip.h"
//
// Thermal functions
//
//
// Initialize thermal offsets for External Tdiode.
//
NvlStatus
nvswitch_init_thermal_lr10
(
nvswitch_device *device
)
{
lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device);
// Mark everything invalid
chip_device->tdiode.method = NVSWITCH_THERM_METHOD_UNKNOWN;
return NVL_SUCCESS;
}
static void
_nvswitch_read_max_tsense_temperature
(
nvswitch_device *device,
NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info,
NvU32 channel
)
{
NvU32 offset;
NvU32 temperature;
temperature = nvswitch_reg_read_32(device, NV_THERM_TSENSE_MAXIMUM_TEMPERATURE);
temperature = DRF_VAL(_THERM_TSENSE, _MAXIMUM_TEMPERATURE, _MAXIMUM_TEMPERATURE, temperature);
if (channel == NVSWITCH_THERM_CHANNEL_LR10_TSENSE_MAX)
{
offset = nvswitch_reg_read_32(device, NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS);
offset = DRF_VAL(_THERM_TSENSE, _U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS, _TEMPERATURE_OFFSET, offset);
// The temperature reported by the TSENSE HUB is the maximum sensor
// temperature plus the temperature offset programmed by SW. This offset
// needs to be subtracted to get the actual temperature of the sensor.
temperature -= offset;
}
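// Convert from the sensor's 9.5 fixed-point format to the 24.8
// fixed-point format returned to the caller.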
info->temperature[channel] = NV_TSENSE_FXP_9_5_TO_24_8(temperature);
info->status[channel] = NVL_SUCCESS;
}
static void
_nvswitch_read_external_tdiode_temperature
(
nvswitch_device *device,
NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info,
NvU32 channel
)
{
}
NvlStatus
nvswitch_ctrl_therm_read_temperature_lr10
(
nvswitch_device *device,
NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info
)
{
NvU32 channel;
if (!info->channelMask)
{
NVSWITCH_PRINT(device, ERROR,
"%s: No channel given in the input.\n",
__FUNCTION__);
return -NVL_BAD_ARGS;
}
nvswitch_os_memset(info->temperature, 0x0, sizeof(info->temperature));
channel = NVSWITCH_THERM_CHANNEL_LR10_TSENSE_MAX;
if (info->channelMask & NVBIT(channel))
{
_nvswitch_read_max_tsense_temperature(device, info, channel);
info->channelMask &= ~NVBIT(channel);
}
channel = NVSWITCH_THERM_CHANNEL_LR10_TSENSE_OFFSET_MAX;
if (info->channelMask & NVBIT(channel))
{
_nvswitch_read_max_tsense_temperature(device, info, channel);
info->channelMask &= ~NVBIT(channel);
}
channel = NVSWITCH_THERM_CHANNEL_LR10_TDIODE;
if (info->channelMask & NVBIT(channel))
{
_nvswitch_read_external_tdiode_temperature(device, info, channel);
info->channelMask &= ~NVBIT(channel);
}
channel = NVSWITCH_THERM_CHANNEL_LR10_TDIODE_OFFSET;
if (info->channelMask & NVBIT(channel))
{
_nvswitch_read_external_tdiode_temperature(device, info, channel);
info->channelMask &= ~NVBIT(channel);
}
if (info->channelMask)
{
NVSWITCH_PRINT(device, ERROR,
"%s: ChannelMask %x absent on LR10.\n",
__FUNCTION__, info->channelMask);
return -NVL_BAD_ARGS;
}
return NVL_SUCCESS;
}
NvlStatus
nvswitch_ctrl_therm_get_temperature_limit_lr10
(
nvswitch_device *device,
NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS *info
)
{
NvU32 threshold;
NvU32 temperature;
threshold = nvswitch_reg_read_32(device, NV_THERM_TSENSE_THRESHOLD_TEMPERATURES);
switch (info->thermalEventId)
{
case NVSWITCH_CTRL_THERMAL_EVENT_ID_WARN:
{
// Get Slowdown temperature
temperature = DRF_VAL(_THERM_TSENSE, _THRESHOLD_TEMPERATURES,
_WARNING_TEMPERATURE, threshold);
break;
}
case NVSWITCH_CTRL_THERMAL_EVENT_ID_OVERT:
{
// Get Shutdown temperature
temperature = DRF_VAL(_THERM_TSENSE, _THRESHOLD_TEMPERATURES,
_OVERTEMP_TEMPERATURE, threshold);
break;
}
default:
{
NVSWITCH_PRINT(device, ERROR, "Invalid Thermal Event Id: 0x%x\n", info->thermalEventId);
return -NVL_BAD_ARGS;
}
}
info->temperatureLimit = NV_TSENSE_FXP_9_5_TO_24_8(temperature);
return NVL_SUCCESS;
}
// Background task to monitor thermal warn and adjust link mode
void
nvswitch_monitor_thermal_alert_lr10
(
nvswitch_device *device
)
{
return;
}
/*
* @brief Callback function to receive thermal messages from SOE.
*/
void
nvswitch_therm_soe_callback_lr10
(
nvswitch_device *device,
RM_FLCN_MSG *pGenMsg,
void *pParams,
NvU32 seqDesc,
NV_STATUS status
)
{
RM_SOE_THERM_MSG_SLOWDOWN_STATUS slowdown_status;
RM_SOE_THERM_MSG_SHUTDOWN_STATUS shutdown_status;
RM_FLCN_MSG_SOE *pMsg = (RM_FLCN_MSG_SOE *)pGenMsg;
NvU32 temperature;
NvU32 threshold;
switch (pMsg->msg.soeTherm.msgType)
{
case RM_SOE_THERM_MSG_ID_SLOWDOWN_STATUS:
{
slowdown_status = pMsg->msg.soeTherm.slowdown;
if (slowdown_status.bSlowdown)
{
if (slowdown_status.source.bTsense) // TSENSE_THERM_ALERT
{
temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.maxTemperature);
threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.warnThreshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"NVSWITCH Temperature %dC | TSENSE WARN Threshold %dC\n",
temperature, threshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"Thermal Slowdown Engaged | Temp higher than WARN Threshold\n");
}
if (slowdown_status.source.bPmgr) // PMGR_THERM_ALERT
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"Thermal Slowdown Engaged | PMGR WARN Threshold reached\n");
}
}
else // REVERT_SLOWDOWN
{
temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.maxTemperature);
threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.warnThreshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END,
"NVSWITCH Temperature %dC | TSENSE WARN Threshold %dC\n",
temperature, threshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END,
"Thermal slowdown Disengaged\n");
}
break;
}
case RM_SOE_THERM_MSG_ID_SHUTDOWN_STATUS:
{
shutdown_status = pMsg->msg.soeTherm.shutdown;
if (shutdown_status.source.bTsense) // TSENSE_THERM_SHUTDOWN
{
temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(shutdown_status.maxTemperature);
threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(shutdown_status.overtThreshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN,
"NVSWITCH Temperature %dC | OVERT Threshold %dC\n",
temperature, threshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN,
"TSENSE OVERT Threshold reached. Shutting Down\n");
}
if (shutdown_status.source.bPmgr) // PMGR_THERM_SHUTDOWN
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"PMGR OVERT Threshold reached. Shutting Down\n");
}
break;
}
default:
{
NVSWITCH_PRINT(device, ERROR, "%s Unknown message Id\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "pmgr_nvswitch.h"
void
nvswitch_i2c_init
(
nvswitch_device *device
)
{
PNVSWITCH_OBJI2C pI2c = nvswitch_os_malloc(sizeof(struct NVSWITCH_OBJI2C));
nvswitch_os_memset(pI2c, 0, sizeof(struct NVSWITCH_OBJI2C));
device->pI2c = pI2c;
}
void
nvswitch_i2c_destroy
(
nvswitch_device *device
)
{
if (device->pI2c == NULL)
return;
nvswitch_os_free(device->pI2c);
device->pI2c = NULL;
}
/*! @brief Set up a port to use a PMGR implementation.
*
* @param[in] device NvSwitch device
* @param[in] port The port identifier for the bus.
*/
void
_nvswitch_i2c_set_port_pmgr
(
nvswitch_device *device,
NvU32 port
)
{
NvU32 i;
NvU32 device_allow_list_size;
NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE *device_allow_list;
PNVSWITCH_OBJI2C pI2c = device->pI2c;
NVSWITCH_ASSERT(port < NVSWITCH_MAX_I2C_PORTS);
pI2c->PortInfo[port] = FLD_SET_DRF(_I2C, _PORTINFO, _DEFINED, _PRESENT, pI2c->PortInfo[port]);
pI2c->Ports[port].defaultSpeedMode = NVSWITCH_I2C_SPEED_MODE_100KHZ;
device_allow_list = pI2c->i2c_allow_list;
device_allow_list_size = pI2c->i2c_allow_list_size;
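// Mark the port as accessible only if at least one allowlisted I2C
// device resides on it.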
for (i = 0; i < device_allow_list_size; i++)
{
if (port == device_allow_list[i].i2cPortLogical)
{
pI2c->PortInfo[port] = FLD_SET_DRF(_I2C, _PORTINFO,
_ACCESS_ALLOWED, _TRUE,
pI2c->PortInfo[port]);
break;
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,734 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvfixedtypes.h"
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "rmsoecmdif.h"
#include "smbpbi_nvswitch.h"
#include "nvswitch/lr10/dev_ext_devices.h"
#include "flcn/flcn_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
#define GET_PFIFO_FROM_DEVICE(dev) (&(dev)->pSmbpbi->sharedSurface->inforomObjects.DEM.object.v1)
#define DEM_FIFO_SIZE INFOROM_DEM_OBJECT_V1_00_FIFO_SIZE
#define DEM_FIFO_PTR(x) ((x) % DEM_FIFO_SIZE)
#define DEM_PTR_DIFF(cur, next) (((next) > (cur)) ? ((next) - (cur)) : \
(DEM_FIFO_SIZE - ((cur) - (next))))
#define DEM_BYTES_OCCUPIED(pf) DEM_PTR_DIFF((pf)->readOffset, (pf)->writeOffset)
//
// See how much space is available in the FIFO.
// Must leave 1 word free so the write pointer does not
// catch up with the read pointer. That would be indistinguishable
// from an empty FIFO.
//
#define DEM_BYTES_AVAILABLE(pf) (DEM_PTR_DIFF((pf)->writeOffset, (pf)->readOffset) - \
sizeof(NvU32))
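//
// Illustrative example (values are not the real DEM_FIFO_SIZE): with a
// 0x100-byte FIFO, readOffset = 0xF0 and writeOffset = 0x10,
// DEM_BYTES_OCCUPIED() = 0x100 - (0xF0 - 0x10) = 0x20 bytes and
// DEM_BYTES_AVAILABLE() = (0xF0 - 0x10) - sizeof(NvU32) = 0xDC bytes.
//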
#define DEM_RECORD_SIZE_MAX (sizeof(NV_MSGBOX_DEM_RECORD) \
+ NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE)
#define DEM_RECORD_SIZE_MIN (sizeof(NV_MSGBOX_DEM_RECORD) + 1)
#define FIFO_REC_LOOP_ITERATOR _curPtr
#define FIFO_REC_LOOP_REC_PTR _recPtr
#define FIFO_REC_LOOP_REC_SIZE _recSize
#define FIFO_REC_LOOP_START(pf, cond) \
{ \
NvU16 _nextPtr; \
for (FIFO_REC_LOOP_ITERATOR = (pf)->readOffset; cond; FIFO_REC_LOOP_ITERATOR = _nextPtr) \
{ \
NV_MSGBOX_DEM_RECORD *FIFO_REC_LOOP_REC_PTR = (NV_MSGBOX_DEM_RECORD *) \
((pf)->fifoBuffer + FIFO_REC_LOOP_ITERATOR); \
NvU16 FIFO_REC_LOOP_REC_SIZE = \
FIFO_REC_LOOP_REC_PTR->recordSize * sizeof(NvU32);
#define FIFO_REC_LOOP_END \
_nextPtr = DEM_FIFO_PTR(FIFO_REC_LOOP_ITERATOR + FIFO_REC_LOOP_REC_SIZE); \
} \
}
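//
// Usage sketch (mirrors the record walks in _smbpbiDemInit and
// nvswitch_smbpbi_log_message below):
//
//     NvU16 FIFO_REC_LOOP_ITERATOR;
//     FIFO_REC_LOOP_START(pFifo, bytesSeen < bytesOccupied)
//         // FIFO_REC_LOOP_REC_PTR points at the current record and
//         // FIFO_REC_LOOP_REC_SIZE is its size in bytes.
//         bytesSeen += FIFO_REC_LOOP_REC_SIZE;
//     FIFO_REC_LOOP_END
//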
static void _smbpbiDemInit(nvswitch_device *device, struct smbpbi *pSmbpbi, struct INFOROM_DEM_OBJECT_V1_00 *pFifo);
static void _nvswitch_smbpbi_dem_flush(nvswitch_device *device);
NvlStatus
nvswitch_smbpbi_init
(
nvswitch_device *device
)
{
NV_STATUS status;
NvU64 dmaHandle;
void *cpuAddr;
if (!device->pSoe)
{
return -NVL_ERR_INVALID_STATE;
}
// Create DMA mapping for SMBPBI transactions
status = nvswitch_os_alloc_contig_memory(device->os_handle, &cpuAddr,
sizeof(SOE_SMBPBI_SHARED_SURFACE),
(device->dma_addr_width == 32));
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to allocate contig memory, rc:%d\n",
status);
return status;
}
nvswitch_os_memset(cpuAddr, 0, sizeof(SOE_SMBPBI_SHARED_SURFACE));
status = nvswitch_os_map_dma_region(device->os_handle, cpuAddr, &dmaHandle,
sizeof(SOE_SMBPBI_SHARED_SURFACE),
NVSWITCH_DMA_DIR_BIDIRECTIONAL);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"Failed to map dma region for SMBPBI shared surface, rc:%d\n",
status);
goto os_map_dma_region_fail;
}
device->pSmbpbi = nvswitch_os_malloc(sizeof(struct smbpbi));
if (!device->pSmbpbi)
{
status = -NVL_NO_MEM;
goto smbpbi_init_fail;
}
device->pSmbpbi->sharedSurface = cpuAddr;
device->pSmbpbi->dmaHandle = dmaHandle;
return NVL_SUCCESS;
smbpbi_init_fail:
nvswitch_os_unmap_dma_region(device->os_handle, cpuAddr, dmaHandle,
sizeof(SOE_SMBPBI_SHARED_SURFACE), NVSWITCH_DMA_DIR_BIDIRECTIONAL);
os_map_dma_region_fail:
nvswitch_os_free_contig_memory(device->os_handle, cpuAddr, sizeof(SOE_SMBPBI_SHARED_SURFACE));
return status;
}
NvlStatus
nvswitch_smbpbi_post_init
(
nvswitch_device * device
)
{
struct smbpbi *pSmbpbi = device->pSmbpbi;
FLCN *pFlcn;
NvU64 dmaHandle;
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
NvU32 cmdSeqDesc;
RM_SOE_SMBPBI_CMD_INIT *pInitCmd = &cmd.cmd.smbpbiCmd.init;
NvlStatus status;
if (!device->pSmbpbi || !device->pInforom)
{
return -NVL_ERR_NOT_SUPPORTED;
}
// Populate shared surface with static InfoROM data
nvswitch_inforom_read_static_data(device, device->pInforom,
&device->pSmbpbi->sharedSurface->inforomObjects);
pFlcn = device->pSoe->pFlcn;
dmaHandle = pSmbpbi->dmaHandle;
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_SMBPBI;
cmd.hdr.size = RM_SOE_CMD_SIZE(SMBPBI, INIT);
cmd.cmd.smbpbiCmd.cmdType = RM_SOE_SMBPBI_CMD_ID_INIT;
RM_FLCN_U64_PACK(&pInitCmd->dmaHandle, &dmaHandle);
//
// Make the interval twice the heartbeat period to avoid
// skew between driver and soe threads
//
pInitCmd->driverPollingPeriodUs = (NVSWITCH_HEARTBEAT_INTERVAL_NS / 1000) * 2;
nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg - not used for now
NULL, // pPayload - not used for now
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR, "%s: SMBPBI Init command failed. rc:%d\n",
__FUNCTION__, status);
return status;
}
nvswitch_lib_smbpbi_log_sxid(device, NVSWITCH_ERR_NO_ERROR,
"NVSWITCH SMBPBI server is online.");
NVSWITCH_PRINT(device, INFO, "%s: SMBPBI POST INIT completed\n", __FUNCTION__);
return NVL_SUCCESS;
}
static void
_nvswitch_smbpbi_send_unload
(
nvswitch_device *device
)
{
FLCN *pFlcn;
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
NvU32 cmdSeqDesc;
NvlStatus status;
pFlcn = device->pSoe->pFlcn;
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_SMBPBI;
cmd.hdr.size = RM_SOE_CMD_SIZE(SMBPBI, UNLOAD);
cmd.cmd.smbpbiCmd.cmdType = RM_SOE_SMBPBI_CMD_ID_UNLOAD;
nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg - not used for now
NULL, // pPayload - not used for now
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR, "%s: SMBPBI unload command failed. rc:%d\n",
__FUNCTION__, status);
}
}
void
nvswitch_smbpbi_unload
(
nvswitch_device *device
)
{
if (device->pSmbpbi)
{
_nvswitch_smbpbi_send_unload(device);
_nvswitch_smbpbi_dem_flush(device);
}
}
void
nvswitch_smbpbi_destroy
(
nvswitch_device *device
)
{
if (device->pSmbpbi)
{
nvswitch_os_unmap_dma_region(device->os_handle,
device->pSmbpbi->sharedSurface,
device->pSmbpbi->dmaHandle,
sizeof(SOE_SMBPBI_SHARED_SURFACE),
NVSWITCH_DMA_DIR_BIDIRECTIONAL);
nvswitch_os_free_contig_memory(device->os_handle, device->pSmbpbi->sharedSurface,
sizeof(SOE_SMBPBI_SHARED_SURFACE));
nvswitch_os_free(device->pSmbpbi);
device->pSmbpbi = NULL;
}
}
NvlStatus
nvswitch_smbpbi_refresh_ecc_counts
(
nvswitch_device *device
)
{
PRM_SOE_SMBPBI_INFOROM_DATA pObjs;
struct inforom *pInforom = device->pInforom;
NvU64 corCnt;
NvU64 uncCnt;
if ((device->pSmbpbi == NULL) || (device->pSmbpbi->sharedSurface == NULL))
{
return -NVL_ERR_NOT_SUPPORTED;
}
if (pInforom == NULL || pInforom->pEccState == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
device->hal.nvswitch_inforom_ecc_get_total_errors(device, pInforom->pEccState->pEcc,
&corCnt, &uncCnt);
pObjs = &device->pSmbpbi->sharedSurface->inforomObjects;
NvU64_ALIGN32_PACK(&(pObjs->ECC.correctedTotal), &corCnt);
NvU64_ALIGN32_PACK(&(pObjs->ECC.uncorrectedTotal), &uncCnt);
return NVL_SUCCESS;
}
NvlStatus
nvswitch_inforom_dem_load
(
nvswitch_device *device
)
{
NvlStatus status;
NvU8 version = 0;
NvU8 subversion = 0;
struct inforom *pInforom = device->pInforom;
NvU8 *pPackedObject = NULL;
struct INFOROM_DEM_OBJECT_V1_00 *pFifo;
if ((pInforom == NULL) || (device->pSmbpbi == NULL) ||
(device->pSmbpbi->sharedSurface == NULL))
{
return -NVL_ERR_NOT_SUPPORTED;
}
pFifo = GET_PFIFO_FROM_DEVICE(device);
status = nvswitch_inforom_get_object_version_info(device, "DEM", &version,
&subversion);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, INFO, "no DEM object found, rc:%d\n", status);
goto nvswitch_inforom_dem_load_fail;
}
if (!INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(subversion))
{
NVSWITCH_PRINT(device, WARN, "DEM v%u.%u not supported\n",
version, subversion);
status = -NVL_ERR_NOT_SUPPORTED;
goto nvswitch_inforom_dem_load_fail;
}
NVSWITCH_PRINT(device, INFO, "DEM v%u.%u found\n", version, subversion);
if (version != 1)
{
NVSWITCH_PRINT(device, WARN, "DEM v%u.%u not supported\n",
version, subversion);
status = -NVL_ERR_NOT_SUPPORTED;
goto nvswitch_inforom_dem_load_fail;
}
pPackedObject = nvswitch_os_malloc(INFOROM_DEM_OBJECT_V1_00_PACKED_SIZE);
if (pPackedObject == NULL)
{
status = -NVL_NO_MEM;
goto nvswitch_inforom_dem_load_fail;
}
status = nvswitch_inforom_load_object(device, pInforom, "DEM",
INFOROM_DEM_OBJECT_V1_00_FMT,
pPackedObject,
pFifo);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to load DEM object, rc: %d\n",
status);
goto nvswitch_inforom_dem_load_fail;
}
nvswitch_inforom_dem_load_fail:
if (pPackedObject)
{
nvswitch_os_free(pPackedObject);
}
//
// Mark the cached DEM as usable for Xid logging, even if we were
// unable to find it in the InfoROM image.
//
device->pSmbpbi->sharedSurface->inforomObjects.DEM.bValid = NV_TRUE;
_smbpbiDemInit(device, device->pSmbpbi, pFifo);
return status;
}
/*!
* Validate/Initialize the Driver Event Message (SXid) FIFO buffer
*
* @param[in] device device object pointer
* @param[in] pSmbpbi SMBPBI object pointer
* @param[in,out] pFifo DEM object pointer
*
* @return void
*/
static void
_smbpbiDemInit
(
nvswitch_device *device,
struct smbpbi *pSmbpbi,
struct INFOROM_DEM_OBJECT_V1_00 *pFifo
)
{
NvU8 msgLeft;
unsigned recordsHeld = 0;
NvU16 FIFO_REC_LOOP_ITERATOR;
NvU16 bytesOccupied;
NvU16 bytesSeen;
NvBool status = NV_FALSE;
// validate the FIFO buffer
if ((DEM_FIFO_PTR(pFifo->writeOffset) != pFifo->writeOffset) ||
(DEM_FIFO_PTR(pFifo->readOffset) != pFifo->readOffset) ||
((pFifo->writeOffset % sizeof(NvU32)) != 0) ||
((pFifo->readOffset % sizeof(NvU32)) != 0))
{
goto smbpbiDemInit_exit;
}
if (pFifo->writeOffset == pFifo->readOffset)
{
// The FIFO is empty
status = NV_TRUE;
goto smbpbiDemInit_exit;
}
//
// This HAL extracts from a scratch register the count of DEM messages
// in the FIFO that have not yet been requested by the SMBPBI client.
// If the FIFO holds more messages than that, the messages in excess
// of this count have already been delivered to the client by the PreOS app.
//
if (device->hal.nvswitch_smbpbi_get_dem_num_messages(device, &msgLeft) != NVL_SUCCESS)
{
// assume the maximum
msgLeft = ~0;
}
if (msgLeft == 0)
{
// Nothing of value in the FIFO. Let's reset it explicitly.
status = NV_TRUE;
pFifo->writeOffset = 0;
pFifo->readOffset = 0;
goto smbpbiDemInit_exit;
}
//
// Count the messages in the FIFO, while also checking the structure
// for integrity. Reset the FIFO in case any corruption is found.
//
bytesOccupied = DEM_BYTES_OCCUPIED(pFifo);
bytesSeen = 0;
FIFO_REC_LOOP_START(pFifo, bytesSeen < bytesOccupied)
if ((FIFO_REC_LOOP_REC_SIZE > DEM_RECORD_SIZE_MAX) ||
(FIFO_REC_LOOP_REC_SIZE < DEM_RECORD_SIZE_MIN))
{
goto smbpbiDemInit_exit;
}
bytesSeen += FIFO_REC_LOOP_REC_SIZE;
++recordsHeld;
FIFO_REC_LOOP_END
if ((bytesSeen != bytesOccupied) || (msgLeft > recordsHeld))
{
goto smbpbiDemInit_exit;
}
//
// Advance the FIFO read ptr in order to remove those messages that
// have already been delivered to the client.
//
FIFO_REC_LOOP_START(pFifo, recordsHeld > msgLeft)
--recordsHeld;
FIFO_REC_LOOP_END
pFifo->readOffset = FIFO_REC_LOOP_ITERATOR;
status = NV_TRUE;
smbpbiDemInit_exit:
if (!status)
{
// Reset the FIFO
pFifo->writeOffset = 0;
pFifo->readOffset = 0;
pFifo->seqNumber = 0;
}
}
static void
_nvswitch_smbpbi_dem_flush(nvswitch_device *device)
{
NvU8 *pPackedObject = NULL;
struct INFOROM_DEM_OBJECT_V1_00 *pFifo;
NvlStatus status = NVL_SUCCESS;
pPackedObject = nvswitch_os_malloc(INFOROM_DEM_OBJECT_V1_00_PACKED_SIZE);
if (pPackedObject == NULL)
{
status = -NVL_NO_MEM;
goto _nvswitch_smbpbi_dem_flush_exit;
}
pFifo = GET_PFIFO_FROM_DEVICE(device);
status = nvswitch_inforom_write_object(device, "DEM",
INFOROM_DEM_OBJECT_V1_00_FMT,
pFifo,
pPackedObject);
_nvswitch_smbpbi_dem_flush_exit:
nvswitch_os_free(pPackedObject);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "DEM object write failed, status=%d\n",
status);
}
}
/*!
* A helper to create a new DEM FIFO record
*
* @param[in,out] pFifo DEM object pointer
* @param[in] num Xid number
* @param[in] osErrorString text message to store
* @param[in] msglen message size
* @param[out] pRecSize new record size in bytes
*
* @return ptr to the new record
* @return NULL if there's no room in the FIFO
* or dynamic allocation error
*/
static NV_MSGBOX_DEM_RECORD *
_makeNewRecord
(
INFOROM_DEM_OBJECT_V1_00 *pFifo,
NvU32 num,
NvU8 *osErrorString,
NvU32 msglen,
NvU32 *pRecSize
)
{
NV_MSGBOX_DEM_RECORD *pNewRec;
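//
// Cap the record at DEM_RECORD_SIZE_MAX; an over-long message text is
// truncated below and flagged with the _TRUNC bit for the client.
//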
*pRecSize = NV_MIN(sizeof(NV_MSGBOX_DEM_RECORD) + msglen,
DEM_RECORD_SIZE_MAX);
if ((*pRecSize > DEM_BYTES_AVAILABLE(pFifo)) ||
((pNewRec = nvswitch_os_malloc(*pRecSize)) == NULL))
{
return NULL;
}
// Fill the new record.
nvswitch_os_memset(pNewRec, 0, *pRecSize);
pNewRec->recordSize = NV_UNSIGNED_DIV_CEIL(*pRecSize, sizeof(NvU32));
pNewRec->xidId = num;
pNewRec->seqNumber = pFifo->seqNumber++;
pNewRec->timeStamp = nvswitch_os_get_platform_time() / NVSWITCH_NSEC_PER_SEC;
if (msglen > NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE)
{
// The text string is too long. Truncate and notify the client.
pNewRec->flags = FLD_SET_DRF(_MSGBOX, _DEM_RECORD_FLAGS,
_TRUNC, _SET, pNewRec->flags);
msglen = NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE - 1;
}
nvswitch_os_memcpy(pNewRec->textMessage, osErrorString, msglen);
return pNewRec;
}
/*!
* A helper to add the new record to the DEM FIFO
*
* @param[in,out] pFifo DEM object pointer
* @param[in] pNewRec the new record
* @param[in] recSize new record size in bytes
*
* @return void
*/
static void
_addNewRecord
(
INFOROM_DEM_OBJECT_V1_00 *pFifo,
NV_MSGBOX_DEM_RECORD *pNewRec,
NvU32 recSize
)
{
NvU16 rem;
NvU16 curPtr;
NvU16 copySz;
NvU8 *srcPtr;
// Copy the new record into the FIFO, handling a possible wrap-around.
rem = recSize;
curPtr = pFifo->writeOffset;
srcPtr = (NvU8 *)pNewRec;
while (rem > 0)
{
copySz = NV_MIN(rem, DEM_FIFO_SIZE - curPtr);
nvswitch_os_memcpy(pFifo->fifoBuffer + curPtr, srcPtr, copySz);
rem -= copySz;
srcPtr += copySz;
curPtr = DEM_FIFO_PTR(curPtr + copySz);
}
// Advance the FIFO write ptr.
pFifo->writeOffset = DEM_FIFO_PTR(pFifo->writeOffset +
(pNewRec->recordSize * sizeof(NvU32)));
}
/*!
* Add a Driver Event Message (SXid) to the InfoROM DEM FIFO buffer
*
* @param[in] device device object pointer
* @param[in] num Xid number
* @param[in] msglen message size
* @param[in] osErrorString text message to store
*
* @return void
*/
void
nvswitch_smbpbi_log_message
(
nvswitch_device *device,
NvU32 num,
NvU32 msglen,
NvU8 *osErrorString
)
{
INFOROM_DEM_OBJECT_V1_00 *pFifo;
NvU32 recSize;
NvU16 FIFO_REC_LOOP_ITERATOR;
NV_MSGBOX_DEM_RECORD *pNewRec;
if ((device->pSmbpbi == NULL) ||
(device->pSmbpbi->sharedSurface == NULL))
{
return;
}
pFifo = GET_PFIFO_FROM_DEVICE(device);
pNewRec = _makeNewRecord(pFifo, num, osErrorString, msglen, &recSize);
if (pNewRec != NULL)
{
_addNewRecord(pFifo, pNewRec, recSize);
nvswitch_os_free(pNewRec);
}
else
{
//
// We are unable to log this message. Mark the latest record
// with a flag telling the client that message(s) were dropped.
//
NvU16 bytesOccupied = DEM_BYTES_OCCUPIED(pFifo);
NvU16 bytesSeen;
NV_MSGBOX_DEM_RECORD *pLastRec = NULL;
// Find the newest record
bytesSeen = 0;
FIFO_REC_LOOP_START(pFifo, bytesSeen < bytesOccupied)
pLastRec = FIFO_REC_LOOP_REC_PTR;
bytesSeen += FIFO_REC_LOOP_REC_SIZE;
FIFO_REC_LOOP_END
if (pLastRec != NULL)
{
pLastRec->flags = FLD_SET_DRF(_MSGBOX, _DEM_RECORD_FLAGS,
_OVFL, _SET, pLastRec->flags);
}
}
return;
}
NvlStatus
nvswitch_smbpbi_set_link_error_info
(
nvswitch_device *device,
NVSWITCH_LINK_TRAINING_ERROR_INFO *pLinkTrainingErrorInfo,
NVSWITCH_LINK_RUNTIME_ERROR_INFO *pLinkRuntimeErrorInfo
)
{
FLCN *pFlcn;
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
NvU32 cmdSeqDesc;
RM_SOE_SMBPBI_CMD_SET_LINK_ERROR_INFO *pSetCmd = &cmd.cmd.smbpbiCmd.linkErrorInfo;
NvlStatus status;
if (!device->pSmbpbi)
{
return -NVL_ERR_NOT_SUPPORTED;
}
pFlcn = device->pSoe->pFlcn;
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_SMBPBI;
cmd.hdr.size = RM_SOE_CMD_SIZE(SMBPBI, SET_LINK_ERROR_INFO);
cmd.cmd.smbpbiCmd.cmdType = RM_SOE_SMBPBI_CMD_ID_SET_LINK_ERROR_INFO;
pSetCmd->trainingErrorInfo.isValid = pLinkTrainingErrorInfo->isValid;
pSetCmd->runtimeErrorInfo.isValid = pLinkRuntimeErrorInfo->isValid;
RM_FLCN_U64_PACK(&pSetCmd->trainingErrorInfo.attemptedTrainingMask0,
&pLinkTrainingErrorInfo->attemptedTrainingMask0);
RM_FLCN_U64_PACK(&pSetCmd->trainingErrorInfo.trainingErrorMask0,
&pLinkTrainingErrorInfo->trainingErrorMask0);
RM_FLCN_U64_PACK(&pSetCmd->runtimeErrorInfo.mask0, &pLinkRuntimeErrorInfo->mask0);
nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg - not used for now
NULL, // pPayload - not used for now
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR, "%s SMBPBI Set Link Error Info command failed. rc:%d\n",
__FUNCTION__, status);
return status;
}
return NVL_SUCCESS;
}

View File

@@ -0,0 +1,345 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "soe/haldefs_soe_nvswitch.h"
#include "soe/soe_nvswitch.h"
#include "soe/soe_priv_nvswitch.h"
#include "export_nvswitch.h"
NV_STATUS
soeProcessMessages
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->processMessages == NULL)
{
NVSWITCH_ASSERT(0);
return NV_ERR_INVALID_ARGUMENT;
}
return pSoe->base.pHal->processMessages(device, pSoe);
}
NV_STATUS
soeWaitForInitAck
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->waitForInitAck == NULL)
{
NVSWITCH_ASSERT(0);
return NV_ERR_INVALID_ARGUMENT;
}
return pSoe->base.pHal->waitForInitAck(device, pSoe);
}
NvU32
soeService_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->service == NULL)
{
NVSWITCH_ASSERT(0);
return 0;
}
return pSoe->base.pHal->service(device, pSoe);
}
void
soeServiceHalt_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->serviceHalt == NULL)
{
NVSWITCH_ASSERT(0);
return;
}
pSoe->base.pHal->serviceHalt(device, pSoe);
}
void
soeEmemTransfer_HAL
(
nvswitch_device *device,
PSOE pSoe,
NvU32 dmemAddr,
NvU8 *pBuf,
NvU32 sizeBytes,
NvU8 port,
NvBool bCopyFrom
)
{
if (pSoe->base.pHal->ememTransfer == NULL)
{
NVSWITCH_ASSERT(0);
return;
}
pSoe->base.pHal->ememTransfer(device, pSoe, dmemAddr, pBuf, sizeBytes, port, bCopyFrom);
}
NvU32
soeGetEmemSize_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->getEmemSize == NULL)
{
NVSWITCH_ASSERT(0);
return 0;
}
return pSoe->base.pHal->getEmemSize(device, pSoe);
}
NvU32
soeGetEmemStartOffset_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->getEmemStartOffset == NULL)
{
NVSWITCH_ASSERT(0);
return 0;
}
return pSoe->base.pHal->getEmemStartOffset(device, pSoe);
}
NV_STATUS
soeEmemPortToRegAddr_HAL
(
nvswitch_device *device,
PSOE pSoe,
NvU32 port,
NvU32 *pEmemCAddr,
NvU32 *pEmemDAddr
)
{
if (pSoe->base.pHal->ememPortToRegAddr == NULL)
{
NVSWITCH_ASSERT(0);
return NV_ERR_INVALID_ARGUMENT;
}
return pSoe->base.pHal->ememPortToRegAddr(device, pSoe, port, pEmemCAddr, pEmemDAddr);
}
void
soeServiceExterr_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->serviceExterr == NULL)
{
NVSWITCH_ASSERT(0);
return;
}
pSoe->base.pHal->serviceExterr(device, pSoe);
}
NV_STATUS
soeGetExtErrRegAddrs_HAL
(
nvswitch_device *device,
PSOE pSoe,
NvU32 *pExtErrAddr,
NvU32 *pExtErrStat
)
{
if (pSoe->base.pHal->getExtErrRegAddrs == NULL)
{
NVSWITCH_ASSERT(0);
return NV_ERR_INVALID_ARGUMENT;
}
return pSoe->base.pHal->getExtErrRegAddrs(device, pSoe, pExtErrAddr, pExtErrStat);
}
NvU32
soeEmemPortSizeGet_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->ememPortSizeGet == NULL)
{
NVSWITCH_ASSERT(0);
return 0;
}
return pSoe->base.pHal->ememPortSizeGet(device, pSoe);
}
NvBool
soeIsCpuHalted_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->isCpuHalted == NULL)
{
NVSWITCH_ASSERT(0);
return NV_FALSE;
}
return pSoe->base.pHal->isCpuHalted(device, pSoe);
}
NvlStatus
soeTestDma_HAL
(
nvswitch_device *device,
PSOE pSoe
)
{
if (pSoe->base.pHal->testDma == NULL)
{
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
return pSoe->base.pHal->testDma(device);
}
NvlStatus
soeSetPexEOM_HAL
(
nvswitch_device *device,
NvU8 mode,
NvU8 nblks,
NvU8 nerrs,
NvU8 berEyeSel
)
{
PSOE pSoe = (PSOE)device->pSoe;
if (pSoe->base.pHal->setPexEOM == NULL)
{
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
return pSoe->base.pHal->setPexEOM(device, mode, nblks, nerrs, berEyeSel);
}
NvlStatus
soeGetPexEomStatus_HAL
(
nvswitch_device *device,
NvU8 mode,
NvU8 nblks,
NvU8 nerrs,
NvU8 berEyeSel,
NvU32 laneMask,
NvU16 *pEomStatus
)
{
PSOE pSoe = (PSOE)device->pSoe;
if (pSoe->base.pHal->getPexEomStatus == NULL)
{
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
return pSoe->base.pHal->getPexEomStatus(device, mode, nblks, nerrs, berEyeSel, laneMask, pEomStatus);
}
NvlStatus
soeGetUphyDlnCfgSpace_HAL
(
nvswitch_device *device,
NvU32 regAddress,
NvU32 laneSelectMask,
NvU16 *pRegValue
)
{
PSOE pSoe = (PSOE)device->pSoe;
if (pSoe->base.pHal->getUphyDlnCfgSpace == NULL)
{
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
return pSoe->base.pHal->getUphyDlnCfgSpace(device, regAddress, laneSelectMask, pRegValue);
}
NvlStatus
soeForceThermalSlowdown_HAL
(
nvswitch_device *device,
NvBool slowdown,
NvU32 periodUs
)
{
PSOE pSoe = (PSOE)device->pSoe;
if (pSoe->base.pHal->forceThermalSlowdown == NULL)
{
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
return pSoe->base.pHal->forceThermalSlowdown(device, slowdown, periodUs);
}
NvlStatus
soeSetPcieLinkSpeed_HAL
(
nvswitch_device *device,
NvU32 linkSpeed
)
{
PSOE pSoe = (PSOE)device->pSoe;
if (pSoe->base.pHal->setPcieLinkSpeed == NULL)
{
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
return pSoe->base.pHal->setPcieLinkSpeed(device, linkSpeed);
}

View File

@@ -0,0 +1,665 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "soe/soe_nvswitch.h"
#include "soe/soe_priv_nvswitch.h"
#include "flcn/haldefs_flcnable_nvswitch.h"
#include "flcn/haldefs_flcn_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
#include "common_nvswitch.h"
static NV_STATUS _soeGetInitMessage(nvswitch_device *device, PSOE pSoe, RM_FLCN_MSG_SOE *pMsg);
/*!
* Use the SOE INIT Message to construct and initialize all SOE Queues.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE object pointer
* @param[in] pMsg Pointer to the INIT Message
*
* @return 'NV_OK' upon successful creation of all SOE Queues
*/
static NV_STATUS
_soeQMgrCreateQueuesFromInitMsg
(
nvswitch_device *device,
PFLCNABLE pSoe,
RM_FLCN_MSG_SOE *pMsg
)
{
RM_SOE_INIT_MSG_SOE_INIT *pInit;
NvU32 i;
NvU32 queueLogId;
NV_STATUS status;
FLCNQUEUE *pQueue;
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
PFALCON_QUEUE_INFO pQueueInfo;
NVSWITCH_ASSERT(pFlcn != NULL);
pQueueInfo = pFlcn->pQueueInfo;
NVSWITCH_ASSERT(pQueueInfo != NULL);
pInit = &pMsg->msg.init.soeInit;
NVSWITCH_ASSERT(pInit->numQueues <= pFlcn->numQueues);
for (i = 0; i < pFlcn->numQueues; i++)
{
queueLogId = pInit->qInfo[i].queueLogId;
NVSWITCH_ASSERT(queueLogId < pFlcn->numQueues);
pQueue = &pQueueInfo->pQueues[queueLogId];
status = flcnQueueConstruct_dmem_nvswitch(
device,
pFlcn,
&pQueue, // ppQueue
queueLogId, // Logical ID of the queue
pInit->qInfo[i].queuePhyId, // Physical ID of the queue
pInit->qInfo[i].queueOffset, // offset
pInit->qInfo[i].queueSize, // size
RM_FLCN_QUEUE_HDR_SIZE); // cmdHdrSize
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Error constructing SOE Queue (status="
"0x%08x).\n", __FUNCTION__, status);
NVSWITCH_ASSERT(0);
return status;
}
}
return NV_OK;
}
/*!
* Purges all the messages from the SOE's message queue. Each message will
* be analyzed, clients will be notified of status, and events will be routed
* to all registered event listeners.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE object pointer
*
* @return 'NV_OK' if the message queue was successfully purged.
*/
static NV_STATUS
_soeProcessMessages_IMPL
(
nvswitch_device *device,
PSOE pSoe
)
{
RM_FLCN_MSG_SOE soeMessage;
NV_STATUS status;
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
// keep processing messages until no more exist in the message queue
while (NV_OK == (status = flcnQueueReadData(
device,
pFlcn,
SOE_RM_MSGQ_LOG_ID,
(RM_FLCN_MSG *)&soeMessage, NV_TRUE)))
{
NVSWITCH_PRINT(device, INFO,
"%s: unitId=0x%02x, size=0x%02x, ctrlFlags=0x%02x, " \
"seqNumId=0x%02x\n",
__FUNCTION__,
soeMessage.hdr.unitId,
soeMessage.hdr.size,
soeMessage.hdr.ctrlFlags,
soeMessage.hdr.seqNumId);
// check to see if the message is a reply or an event.
if ((soeMessage.hdr.ctrlFlags &= RM_FLCN_QUEUE_HDR_FLAGS_EVENT) != 0)
{
flcnQueueEventHandle(device, pFlcn, (RM_FLCN_MSG *)&soeMessage, NV_OK);
}
// the message is a response from a previously queued command
else
{
flcnQueueResponseHandle(device, pFlcn, (RM_FLCN_MSG *)&soeMessage);
}
}
//
// Status NV_ERR_NOT_READY implies the queue is empty.
// Log the message in other error cases.
//
if (status != NV_ERR_NOT_READY)
{
NVSWITCH_PRINT(device, ERROR,
"%s: unexpected error while purging message queue (status=0x%x).\n",
__FUNCTION__, (status));
}
return status;
}
/*!
* This function exists to solve a natural chicken-and-egg problem that arises
* due to the fact that queue information (location, size, id, etc...) is
* relayed to the RM as a message in a queue. Queue construction is done when
* the message arrives and the normal queue read/write functions are not
* available until construction is complete. Construction cannot be done until
* the message is read from the queue. Therefore, the very first message read
* from the Message Queue must be considered as a special-case and must NOT use
* any functionality provided by the SOE's queue manager.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE object pointer
*
* @return 'NV_OK'
* Upon successful extraction and processing of the first SOE message.
*/
static NV_STATUS
_soeProcessMessagesPreInit_IMPL
(
nvswitch_device *device,
PSOE pSoe
)
{
RM_FLCN_MSG_SOE msg;
NV_STATUS status;
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
// extract the "INIT" message (this is never expected to fail)
status = _soeGetInitMessage(device, pSoe, &msg);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to extract the INIT message "
"from the SOE Message Queue (status=0x%08x).",
__FUNCTION__, status);
NVSWITCH_ASSERT(0);
return status;
}
//
// Now hookup the "real" message-processing function and handle the "INIT"
// message.
//
pSoe->base.pHal->processMessages = _soeProcessMessages_IMPL;
return flcnQueueEventHandle(device, pFlcn, (RM_FLCN_MSG *)&msg, NV_OK);
}
/*!
* @brief Process the "INIT" message sent from the SOE ucode application.
*
* When the SOE ucode is done initializing, it will post an INIT message in
* the Message Queue that contains all the attributes needed for enqueuing
* commands and extracting messages from the queues. The packet will also
* contain the offset and size of the portion of DMEM that the RM must
* manage. Upon receiving this message it is assumed that the SOE is ready
* to start accepting commands.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE object pointer
* @param[in] pMsg Pointer to the event's message data
*
* @return 'NV_OK' if the event was successfully handled.
*/
static NV_STATUS
_soeHandleInitEvent_IMPL
(
nvswitch_device *device,
PFLCNABLE pSoe,
RM_FLCN_MSG *pGenMsg
)
{
NV_STATUS status;
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
RM_FLCN_MSG_SOE *pMsg = (RM_FLCN_MSG_SOE *)pGenMsg;
if (pFlcn == NULL)
{
NVSWITCH_ASSERT(pFlcn != NULL);
return NV_ERR_INVALID_POINTER;
}
NVSWITCH_PRINT(device, INFO,
"%s: Received INIT message from SOE\n",
__FUNCTION__);
//
// Pass the INIT message to the queue manager to allow it to create the
// queues.
//
status = _soeQMgrCreateQueuesFromInitMsg(device, pSoe, pMsg);
if (status != NV_OK)
{
NVSWITCH_ASSERT(0);
return status;
}
flcnDbgInfoDmemOffsetSet(device, pFlcn,
pMsg->msg.init.soeInit.osDebugEntryPoint);
// the SOE ucode is now initialized and ready to accept commands
pFlcn->bOSReady = NV_TRUE;
return NV_OK;
}
/*!
* @brief Read the INIT message directly out of the Message Queue.
*
* This function accesses the Message Queue directly using the HAL. It does
* NOT and may NOT use the queue manager as it has not yet been constructed and
* initialized. The Message Queue must not be empty when this function is called
* and the first message in the queue MUST be the INIT message.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE object pointer
* @param[out] pMsg Message structure to fill with the INIT message data
*
* @return 'NV_OK' upon successful extraction of the INIT message.
* @return
* 'NV_ERR_INVALID_STATE' if the first message found was not an INIT
* message or if the message was improperly formatted.
*/
static NV_STATUS
_soeGetInitMessage
(
nvswitch_device *device,
PSOE pSoe,
RM_FLCN_MSG_SOE *pMsg
)
{
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
NV_STATUS status = NV_OK;
NvU32 tail = 0;
PFALCON_QUEUE_INFO pQueueInfo;
// on the GPU, rmEmemPortId = sec2RmEmemPortIdGet_HAL(...);
NvU8 rmEmemPortId = 0;
if (pFlcn == NULL)
{
NVSWITCH_ASSERT(pFlcn != NULL);
return NV_ERR_INVALID_POINTER;
}
pQueueInfo = pFlcn->pQueueInfo;
if (pQueueInfo == NULL)
{
NVSWITCH_ASSERT(pQueueInfo != NULL);
return NV_ERR_INVALID_POINTER;
}
//
// Message queue 0 is used by SOE to communicate with RM
// Check SOE_CMDMGMT_MSG_QUEUE_RM in //uproc/soe/inc/soe_cmdmgmt.h
//
pQueueInfo->pQueues[SOE_RM_MSGQ_LOG_ID].queuePhyId = 0;
// read the header starting at the current tail position
(void)flcnMsgQueueTailGet(device, pFlcn,
&pQueueInfo->pQueues[SOE_RM_MSGQ_LOG_ID], &tail);
if (pFlcn->bEmemEnabled)
{
//
// We use the offset in DMEM for the src address, since
// EmemCopyFrom automatically converts it to the offset in EMEM
//
flcnableEmemCopyFrom(
device, pFlcn->pFlcnable,
tail, // src
(NvU8 *)&pMsg->hdr, // pDst
RM_FLCN_QUEUE_HDR_SIZE, // numBytes
rmEmemPortId); // port
}
else
{
status = flcnDmemCopyFrom(device,
pFlcn,
tail, // src
(NvU8 *)&pMsg->hdr, // pDst
RM_FLCN_QUEUE_HDR_SIZE, // numBytes
0); // port
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to copy from SOE DMEM\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
goto _soeGetInitMessage_exit;
}
}
if (pMsg->hdr.unitId != RM_SOE_UNIT_INIT)
{
status = NV_ERR_INVALID_STATE;
NVSWITCH_ASSERT(0);
goto _soeGetInitMessage_exit;
}
// read the message body and update the tail position
if (pFlcn->bEmemEnabled)
{
//
// We use the offset in DMEM for the src address, since
// EmemCopyFrom automatically converts it to the offset in EMEM
//
flcnableEmemCopyFrom(
device, pFlcn->pFlcnable,
tail + RM_FLCN_QUEUE_HDR_SIZE, // src
(NvU8 *)&pMsg->msg, // pDst
pMsg->hdr.size - RM_FLCN_QUEUE_HDR_SIZE, // numBytes
rmEmemPortId); // port
}
else
{
status = flcnDmemCopyFrom(device,
pFlcn,
tail + RM_FLCN_QUEUE_HDR_SIZE, // src
(NvU8 *)&pMsg->msg, // pDst
pMsg->hdr.size - RM_FLCN_QUEUE_HDR_SIZE, // numBytes
0); // port
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to copy from SOE DMEM\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
goto _soeGetInitMessage_exit;
}
}
tail += NV_ALIGN_UP(pMsg->hdr.size, SOE_DMEM_ALIGNMENT);
flcnMsgQueueTailSet(device, pFlcn,
&pQueueInfo->pQueues[SOE_RM_MSGQ_LOG_ID], tail);
_soeGetInitMessage_exit:
return status;
}
/*!
* Copies 'sizeBytes' from DMEM address 'src' to 'pDst' using EMEM access port.
*
* The address must be located in the EMEM region located directly above the
* maximum virtual address of DMEM.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE pointer
* @param[in] src The DMEM address for the source of the copy
* @param[out] pDst Pointer to write with copied data from EMEM
* @param[in] sizeBytes The number of bytes to copy from EMEM
* @param[in] port EMEM port
*/
static void
_soeEmemCopyFrom_IMPL
(
nvswitch_device *device,
FLCNABLE *pSoe,
NvU32 src,
NvU8 *pDst,
NvU32 sizeBytes,
NvU8 port
)
{
soeEmemTransfer_HAL(device, (PSOE)pSoe, src, pDst, sizeBytes, port, NV_TRUE);
}
/*!
* Copies 'sizeBytes' from 'pDst' to DMEM address 'dst' using EMEM access port.
*
* The address must be located in the EMEM region located directly above the
* maximum virtual address of DMEM.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE pointer
* @param[in] dst The DMEM address for the copy destination.
* @param[in] pSrc The pointer to the buffer containing the data to copy
* @param[in] sizeBytes The number of bytes to copy into EMEM
* @param[in] port EMEM port
*/
static void
_soeEmemCopyTo_IMPL
(
nvswitch_device *device,
FLCNABLE *pSoe,
NvU32 dst,
NvU8 *pSrc,
NvU32 sizeBytes,
NvU8 port
)
{
soeEmemTransfer_HAL(device, (PSOE)pSoe, dst, pSrc, sizeBytes, port, NV_FALSE);
}
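/*
 * Minimal round-trip sketch for the two wrappers above (illustrative only;
 * 'pFlcnable', 'offset', and 'buf' are hypothetical, and 'offset' must fall
 * inside the EMEM window described in the comments above):
 *
 *     NvU8  buf[RM_FLCN_QUEUE_HDR_SIZE];
 *     NvU32 offset = 0;   // hypothetical EMEM-window offset
 *
 *     _soeEmemCopyTo_IMPL(device, pFlcnable, offset, buf, sizeof(buf), 0);
 *     _soeEmemCopyFrom_IMPL(device, pFlcnable, offset, buf, sizeof(buf), 0);
 *
 * In practice these are not called directly; they are installed as the
 * ememCopyTo/ememCopyFrom HAL hooks in soeSetupHal() below and reached via
 * dispatchers such as flcnableEmemCopyFrom() used earlier in this file.
 */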
/*!
* Loop until SOE RTOS is loaded and gives us an INIT message
*
* @param[in] device nvswitch_device object pointer
* @param[in] pSoe SOE object pointer
*/
static NV_STATUS
_soeWaitForInitAck_IMPL
(
nvswitch_device *device,
PSOE pSoe
)
{
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
// POBJMC pMc = GPU_GET_MC(device);
NVSWITCH_TIMEOUT timeout;
nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, &timeout);
while (!pFlcn->bOSReady && !nvswitch_timeout_check(&timeout))
{
// Once interrupt handling is ready, might need to replace this with
        // mcServiceSingle_HAL(device, pMc, MC_ENGINE_IDX_SOE, NV_FALSE);
soeService_HAL(device, pSoe);
nvswitch_os_sleep(1);
}
if (!pFlcn->bOSReady)
{
NVSWITCH_PRINT(device, ERROR,
"%s Timeout while waiting for SOE bootup\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
return NV_ERR_TIMEOUT;
}
return NV_OK;
}
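/*
 * Illustrative bring-up sketch (assumption: callers reach this routine
 * through the HAL pointer installed by soeSetupHal() below rather than by
 * calling the _IMPL directly):
 *
 *     soe_hal *pHal = device->pSoe->base.pHal;
 *     if (pHal->waitForInitAck(device, device->pSoe) != NV_OK)
 *     {
 *         // The SOE never set bOSReady within the 5 second window;
 *         // treat the engine as unusable.
 *     }
 */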
/*!
* @brief Retrieves a pointer to the engine specific SEQ_INFO structure.
*
* @param[in] device nvswitch_device pointer
* @param[in] pSoe SOE pointer
* @param[in] seqIndex Index of the structure to retrieve
*
* @return Pointer to the SEQ_INFO structure or NULL on invalid index.
*/
static PFLCN_QMGR_SEQ_INFO
_soeQueueSeqInfoGet_IMPL
(
nvswitch_device *device,
FLCNABLE *pSoe,
NvU32 seqIndex
)
{
FLCN *pFlcn = ENG_GET_FLCN(pSoe);
if (seqIndex < pFlcn->numSequences)
{
return &(((PSOE)pSoe)->seqInfo[seqIndex]);
}
return NULL;
}
/*!
* @copydoc flcnableQueueCmdValidate_IMPL
*/
static NvBool
_soeQueueCmdValidate_IMPL
(
nvswitch_device *device,
FLCNABLE *pSoe,
PRM_FLCN_CMD pCmd,
PRM_FLCN_MSG pMsg,
void *pPayload,
NvU32 queueIdLogical
)
{
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
FLCNQUEUE *pQueue = &pFlcn->pQueueInfo->pQueues[queueIdLogical];
NvU32 cmdSize = pCmd->cmdGen.hdr.size;
// Verify that the target queue ID represents a valid RM queue.
if (queueIdLogical != SOE_RM_CMDQ_LOG_ID)
{
NVSWITCH_PRINT(device, ERROR,
"%s: invalid SOE command queue ID = 0x%x\n",
__FUNCTION__, queueIdLogical);
return NV_FALSE;
}
//
// Command size cannot be larger than queue size / 2. Otherwise, it is
// impossible to send two commands back to back if we start from the
// beginning of the queue.
//
if (cmdSize > (pQueue->queueSize / 2))
{
NVSWITCH_PRINT(device, ERROR,
"%s: invalid command (illegal size = 0x%x)\n",
__FUNCTION__, cmdSize);
return NV_FALSE;
}
// Validate the command's unit identifier.
if (!RM_SOE_UNITID_IS_VALID(pCmd->cmdGen.hdr.unitId))
{
NVSWITCH_PRINT(device, ERROR,
"%s: invalid unitID = %d\n",
__FUNCTION__, pCmd->cmdGen.hdr.unitId);
return NV_FALSE;
}
return NV_TRUE;
}
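/*
 * Worked example of the size rule above (numbers are illustrative, not the
 * real LR10 queue geometry): with pQueue->queueSize == 0x100, any command
 * whose hdr.size exceeds 0x80 bytes is rejected, so that two back-to-back
 * maximum-size commands always fit when writing restarts at offset 0.
 */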
/* -------------------- Object construction/initialization ------------------- */
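/*!
 * Select the chip-specific SOE HAL and install the SOE overrides for the
 * generic FLCNABLE hooks (init-event handling, EMEM copies, sequence lookup,
 * command validation) plus the SOE-only entry points.
 *
 * @param[in] pSoe           SOE pointer
 * @param[in] pci_device_id  PCI device ID used to pick the HAL flavor
 */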
static void
soeSetupHal
(
SOE *pSoe,
NvU32 pci_device_id
)
{
soe_hal *pHal = NULL;
flcnable_hal *pParentHal = NULL;
if (nvswitch_is_lr10_device_id(pci_device_id))
{
soeSetupHal_LR10(pSoe);
}
else
{
// we're on a device which doesn't support SOE
NVSWITCH_PRINT(NULL, ERROR, "Tried to initialize SOE on device with no SOE\n");
NVSWITCH_ASSERT(0);
}
pHal = pSoe->base.pHal;
pParentHal = (flcnable_hal *)pHal;
    // Set any functions we want to override
pParentHal->handleInitEvent = _soeHandleInitEvent_IMPL;
pParentHal->ememCopyTo = _soeEmemCopyTo_IMPL;
pParentHal->ememCopyFrom = _soeEmemCopyFrom_IMPL;
pParentHal->queueSeqInfoGet = _soeQueueSeqInfoGet_IMPL;
pParentHal->queueCmdValidate = _soeQueueCmdValidate_IMPL;
    // Set any functions specific to SOE
pHal->processMessages = _soeProcessMessagesPreInit_IMPL;
pHal->waitForInitAck = _soeWaitForInitAck_IMPL;
}
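/*!
 * Allocate and zero a new SOE object.
 *
 * @return Pointer to the new SOE, or NULL if allocation fails.
 */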
SOE *
soeAllocNew(void)
{
SOE *pSoe = nvswitch_os_malloc(sizeof(*pSoe));
if (pSoe != NULL)
{
nvswitch_os_memset(pSoe, 0, sizeof(*pSoe));
}
return pSoe;
}
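/*!
 * Initialize an SOE object: allocate its HAL (unless a child class already
 * did), initialize the FLCNABLE parent, and wire up the SOE HAL overrides.
 * On any failure the object is torn down via soeDestroy().
 *
 * @return NVL_SUCCESS on success, -NVL_NO_MEM or the parent-class error
 *         otherwise.
 */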
NvlStatus
soeInit
(
nvswitch_device *device,
SOE *pSoe,
NvU32 pci_device_id
)
{
NvlStatus retval;
// allocate hal if a child class hasn't already
if (pSoe->base.pHal == NULL)
{
soe_hal *pHal = pSoe->base.pHal = nvswitch_os_malloc(sizeof(*pHal));
if (pHal == NULL)
{
            NVSWITCH_PRINT(device, ERROR, "SOE HAL allocation failed!\n");
retval = -NVL_NO_MEM;
goto soe_init_fail;
}
nvswitch_os_memset(pHal, 0, sizeof(*pHal));
}
// init parent class
retval = flcnableInit(device, (PFLCNABLE)pSoe, pci_device_id);
if (retval != NVL_SUCCESS)
{
goto soe_init_fail;
}
soeSetupHal(pSoe, pci_device_id);
return retval;
soe_init_fail:
soeDestroy(device, pSoe);
return retval;
}
// reverse of soeInit()
void
soeDestroy
(
nvswitch_device *device,
SOE *pSoe
)
{
// destroy parent class
flcnableDestroy(device, (PFLCNABLE)pSoe);
if (pSoe->base.pHal != NULL)
{
nvswitch_os_free(pSoe->base.pHal);
pSoe->base.pHal = NULL;
}
}
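/*
 * Object lifecycle sketch (illustrative; the real call sites live elsewhere
 * in the nvswitch driver):
 *
 *     SOE *pSoe = soeAllocNew();
 *     if (pSoe != NULL)
 *     {
 *         if (soeInit(device, pSoe, pci_device_id) == NVL_SUCCESS)
 *         {
 *             // ... use the engine ...
 *             soeDestroy(device, pSoe);
 *         }
 *         nvswitch_os_free(pSoe);   // soeInit() already tore down on failure
 *     }
 */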


@@ -0,0 +1,69 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "common_nvswitch.h"
#include "error_nvswitch.h"
#include "rmsoecmdif.h"
#include "spi_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
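/*!
 * Ask the SOE to initialize its SPI driver (RM_SOE_SPI_INIT), blocking on the
 * command queue for up to 30 ms.
 *
 * @param[in] device  nvswitch_device pointer
 *
 * @return Zero (NV_OK) on success, -NVL_ERR_INVALID_STATE if the SOE is
 *         absent, or the failure code returned by the command post.
 */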
NvlStatus
nvswitch_spi_init
(
nvswitch_device *device
)
{
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
NvU32 cmdSeqDesc;
NV_STATUS status;
FLCN *pFlcn;
if (!device->pSoe)
{
return -NVL_ERR_INVALID_STATE;
}
pFlcn = device->pSoe->pFlcn;
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_SPI;
cmd.hdr.size = sizeof(cmd);
cmd.cmd.spi.cmdType = RM_SOE_SPI_INIT;
nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS * 30, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg - not used for now
NULL, // pPayload - not used for now
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR, "%s: SPI INIT failed. rc:%d\n",
__FUNCTION__, status);
}
return status;
}
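/*
 * Illustrative call site (sketch; the exact point in device bring-up where
 * this runs is an assumption, not something this file shows):
 *
 *     if (nvswitch_spi_init(device) != NVL_SUCCESS)
 *     {
 *         // SPI/EEPROM access via the SOE will not be available.
 *     }
 */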