515.43.04

Commit 1739a20efc by Andy Ritger, 2022-05-09 13:18:59 -07:00
2519 changed files with 1060036 additions and 0 deletions


@@ -0,0 +1,86 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
 * @brief Defines for simplifying SW accesses to the dev_ctrl interrupt tree.
 *        These are generic defines used in addition to the
 *        architecture-specific defines in dev_vm_addendum.h.
 */
//
// Notes on the terms used below:
// Subtree: The HW tree is a 64-way tree rooted in 2 TOP level interrupt
// registers, 32 bits each. Each of those 64 TOP level bits is referred to as
// a subtree.
// Leaf: Each of the 64 subtrees is associated with a pair of LEAF registers,
// giving us a total of 128 LEAF registers.
// GPU vector: Each LEAF register covers 32 GPU interrupt vectors, so the 128
// LEAF registers give us a total of (128*32) = 4096 GPU vectors.
//
//
// Given a subtree index, the below macros give us the index of the TOP level
// register and the bit within the TOP level register to program for that
// subtree.
//
#define NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(i) ((i) / 32)
#define NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i) ((i) % 32)
//
// Given a subtree index, the below macros give us the two LEAF register indices
// that correspond to that subtree.
//
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(i) ((i)*2)
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(i) (((i)*2) + 1)
//
// Given a LEAF register index, the below macros give us the range of GPU
// interrupt vectors that correspond to that leaf.
//
#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_START(i) ((i)*32)
#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_END(i) (((i)*32) + 31)
//
// Given a GPU interrupt vector, the below macros give us the index of the
// LEAF register and the bit within the LEAF register to program for that
// GPU interrupt vector.
//
#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i) ((i) / 32)
#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(i) ((i) % 32)
//
// Given a GPU interrupt vector, the below macro gives us the subtree in which
// it belongs.
//
#define NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(i) ((NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2)
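//
// Worked example (illustrative): GPU vector 70 lands in LEAF register
// 70/32 = 2 at bit 70%32 = 6; that leaf belongs to subtree 2/2 = 1, which
// maps to TOP register 1/32 = 0, bit 1%32 = 1. In macro form:
//   NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(70) == 2
//   NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(70) == 6
//   NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(70)  == 1
//   NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(1)      == 0
//   NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(1)      == 1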
//
// The maximum number of LEAF registers we expect. This is enforced to be
// greater than or equal to
// (NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_STALL_SUBTREE_LAST) + 1)
// for the largest NV_CPU_INTR_STALL_SUBTREE_LAST.
//
#define NV_MAX_INTR_LEAVES 12
// In SW, this specifies an invalid interrupt vector
#define NV_INTR_VECTOR_INVALID (NV_U32_MAX)


@@ -0,0 +1,149 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* flcn Command/Message Interfaces - Common
*/
#ifndef FLCNIFCMN_H
#define FLCNIFCMN_H
#include "nvtypes.h"
#include "flcnretval.h"
#ifndef NV_SIZEOF32
#define NV_SIZEOF32(v) (sizeof(v))
#endif
#ifndef NV_ARRAY_ELEMENTS
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
#endif
/*!
 * @ref NvU64_ALIGN32, NvU64_ALIGN32_PACK, NvU64_ALIGN32_UNPACK
* TODO: Use NV types directly
*/
typedef NvU64_ALIGN32 RM_FLCN_U64;
typedef NvU64_ALIGN32 *PRM_FLCN_U64;
#define RM_FLCN_U64_IS_ZERO NvU64_ALIGN32_IS_ZERO
#define RM_FLCN_U64_PACK NvU64_ALIGN32_PACK
#define RM_FLCN_U64_UNPACK NvU64_ALIGN32_UNPACK
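/*!
 * Illustrative note: NvU64_ALIGN32 carries a 64-bit value as a pair of
 * 32-bit words so it can be exchanged with the falcon using only 32-bit
 * alignment; the _PACK/_UNPACK aliases above convert between that
 * representation and a native NvU64.
 */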
/*!
* @brief Header preceding each CMD/MSG exchanged through falcon's queues.
*
 * @note Data packages sent to the falcon are referred to as CMDs (commands).
 *       Data packages sent by the falcon are referred to as MSGs (messages).
*/
typedef struct
{
/*!
* Unit ID identifies falcon's task/unit receiving/issuing this message.
*/
NvU8 unitId;
/*!
* Total CMD/MSG size (including header).
*/
NvU8 size;
/*!
* Flags identifying state of CMD/MSG.
*/
NvU8 ctrlFlags;
/*!
* Sequence # ID to track each request sent to falcon (where applicable).
*/
NvU8 seqNumId;
} RM_FLCN_QUEUE_HDR,
*PRM_FLCN_QUEUE_HDR;
/*!
* Convenience macro for determining the size of the falcon's queue header:
*/
#define RM_FLCN_QUEUE_HDR_SIZE sizeof(RM_FLCN_QUEUE_HDR)
/*!
* Generic Falcon rewind unit ID.
*/
#define RM_FLCN_UNIT_ID_REWIND (0x00U)
/*!
* Generic CMD structure to hold the header.
*/
typedef struct
{
RM_FLCN_QUEUE_HDR hdr;
NvU32 cmd;
} RM_FLCN_CMD_GEN;
/*!
* Generic MSG structure to hold the header.
*/
typedef struct
{
RM_FLCN_QUEUE_HDR hdr;
NvU32 msg;
} RM_FLCN_MSG_GEN;
/*!
* Convenience macros for determining the size of body for a command or message:
*/
#define RM_FLCN_CMD_BODY_SIZE(u,t) sizeof(RM_FLCN_##u##_CMD_##t)
#define RM_FLCN_MSG_BODY_SIZE(u,t) sizeof(RM_FLCN_##u##_MSG_##t)
/*!
* Convenience macros for determining the size of a command or message:
*/
#define RM_FLCN_CMD_SIZE(u,t) \
(RM_FLCN_QUEUE_HDR_SIZE + RM_FLCN_CMD_BODY_SIZE(u,t))
#define RM_FLCN_MSG_SIZE(u,t) \
(RM_FLCN_QUEUE_HDR_SIZE + RM_FLCN_MSG_BODY_SIZE(u,t))
/*!
* Convenience macros for determining the type of a command or message
* (intended to be used symmetrically with the CMD and MSG _SIZE macros):
*/
#define RM_FLCN_CMD_TYPE(u,t) RM_FLCN_##u##_CMD_ID_##t
#define RM_FLCN_MSG_TYPE(u,t) RM_FLCN_##u##_MSG_ID_##t
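/*!
 * Usage sketch (the unit/type names here are hypothetical): for a unit FOO
 * with command type BAR, RM_FLCN_CMD_SIZE(FOO, BAR) expands to
 * (RM_FLCN_QUEUE_HDR_SIZE + sizeof(RM_FLCN_FOO_CMD_BAR)), and
 * RM_FLCN_CMD_TYPE(FOO, BAR) expands to RM_FLCN_FOO_CMD_ID_BAR.
 */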
/*!
* @brief Falcons' queue header flags (@ref RM_FLCN_QUEUE_HDR::ctrlFlags).
*
 * RM_FLCN_QUEUE_HDR_FLAGS_STATUS
 *   Set by the command's sender to request a response message confirming
 *   completion of the command's execution. In RM->FLCN communication, a
 *   response is required to free command-related data tracked within RM
 *   (***_SEQ_INFO).
 *
 * RM_FLCN_QUEUE_HDR_FLAGS_EVENT
 *   Set by the falcon to distinguish messages sent to RM from command
 *   responses.
 *
 * RM_FLCN_QUEUE_HDR_FLAGS_UNIT_ACK
 *   Used internally within the falcon to track completed commands when
 *   updating the get (tail) command queue pointers.
*/
#define RM_FLCN_QUEUE_HDR_FLAGS_STATUS NVBIT(0)
#define RM_FLCN_QUEUE_HDR_FLAGS_EVENT NVBIT(2)
#define RM_FLCN_QUEUE_HDR_FLAGS_UNIT_ACK NVBIT(5)
#endif // FLCNIFCMN_H


@@ -0,0 +1,279 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file flcnretval.h
 * @brief Defines various status codes that are convenient for relaying
 *        status information from functions.
*/
#ifndef FLCNRETVAL_H
#define FLCNRETVAL_H
#include "nvtypes.h"
/*!
* Defines a generic type that may be used to convey status information. This
* is very similar to the RM_STATUS type but smaller in width to save DMEM and
* stack space.
*/
typedef NvU8 FLCN_STATUS;
#define FLCN_OK (0x00U)
#define FLCN_ERR_BINARY_NOT_STARTED (0xFEU)
#define FLCN_ERROR (0xFFU)
#define FLCN_ERR_NOT_SUPPORTED (0x01U)
#define FLCN_ERR_INVALID_ARGUMENT (0x02U)
#define FLCN_ERR_ILLEGAL_OPERATION (0x03U)
#define FLCN_ERR_TIMEOUT (0x04U)
#define FLCN_ERR_NO_FREE_MEM (0x05U)
#define FLCN_ERR_HDCP_INVALID_SRM (0x06U)
#define FLCN_ERR_HDCP_RECV_REVOKED (0x07U)
#define FLCN_ERR_RPC_INVALID_INPUT (0x08U)
#define FLCN_ERR_DMA_SUSPENDED (0x09U)
#define FLCN_ERR_MUTEX_ACQUIRED (0x10U)
#define FLCN_ERR_MUTEX_ID_NOT_AVAILABLE (0x11U)
#define FLCN_ERR_OBJECT_NOT_FOUND (0x12U)
#define FLCN_ERR_MSGBOX_TIMEOUT (0x13U)
#define FLCN_ERR_INVALID_INDEX (0x14U)
#define FLCN_ERR_INVALID_FUNCTION (0x15U)
#define FLCN_ERR_INSUFFICIENT_PMB_PLM_PROTECTION (0x16U)
#define FLCN_ERR_DMA_NACK (0x17U)
#define FLCN_ERR_CHIP_NOT_SUPPORTED_FOR_PR (0x18U)
#define FLCN_ERR_BAR0_PRIV_READ_ERROR (0x19U)
#define FLCN_ERR_BAR0_PRIV_WRITE_ERROR (0x1AU)
#define FLCN_ERR_HDCP22_ABORT_AUTHENTICATION (0x1BU)
#define FLCN_ERR_DPU_IS_BUSY (0x1CU)
#define FLCN_ERR_DPU_TIMEOUT_FOR_HDCP_TYPE1_LOCK_REQUEST (0x1DU)
#define FLCN_ERR_HDCP_TYPE1_LOCK_FAILED (0x1EU)
#define FLCN_ERR_HDCP_TYPE1_LOCK_UNKNOWN (0x1FU)
#define FLCN_ERR_WAIT_FOR_BAR0_IDLE_FAILED (0x20U)
#define FLCN_ERR_CSB_PRIV_READ_ERROR (0x21U)
#define FLCN_ERR_CSB_PRIV_WRITE_ERROR (0x22U)
#define FLCN_ERR_DMA_UNEXPECTED_DMAIDX (0x23U)
#define FLCN_ERR_PRIV_SEC_VIOLATION (0x24U)
#define FLCN_ERR_INVALID_VERSION (0x25U)
#define FLCN_ERR_PR_SHARED_STRUCT_INIT_FAILED (0x26U)
#define FLCN_ERR_GPU_IN_DEBUG_MODE (0x27U)
#define FLCN_ERR_HPD_UNPLUG (0x28U)
#define FLCN_ERR_HDCP22_DELAY_ABORT_AUTHENTICATION (0x29U)
#define FLCN_ERR_SECUREBUS_REGISTER_READ_ERROR (0x2AU)
#define FLCN_ERR_SECUREBUS_REGISTER_WRITE_ERROR (0x2BU)
#define FLCN_ERR_HDCP22_FLUSH_TYPE_LOCK_ACTIVE (0x2CU)
#define FLCN_ERR_HDCP22_FLUSH_TYPE_IN_PROGRESS (0x2DU)
#define FLCN_ERR_FEATURE_NOT_ENABLED (0x2EU)
#define FLCN_ERR_OUT_OF_RANGE (0x2FU)
// I2C Errors
#define FLCN_ERR_I2C_BUSY (0x30U)
#define FLCN_ERR_I2C_NACK_ADDRESS (0x31U)
#define FLCN_ERR_I2C_NACK_BYTE (0x32U)
#define FLCN_ERR_I2C_SIZE (0x33U)
#define FLCN_ERR_I2C_BUS_INVALID (0x34U)
#define FLCN_ERR_INVALID_STATE (0x35U)
#define FLCN_ERR_RECURSION_LIMIT_EXCEEDED (0x36U)
#define FLCN_ERR_INVALID_CAST (0x37U)
// AUX Errors
#define FLCN_ERR_AUX_ERROR (0x3AU)
#define FLCN_ERR_AUX_SEMA_ACQUIRED (0x3BU)
#define FLCN_ERR_AUX_SEMA_INVALID_RELEASE (0x3CU)
#define FLCN_ERR_MORE_PROCESSING_REQUIRED (0x3EU)
#define FLCN_ERR_DMA_ALIGN (0x3FU)
// Power-Device Errors
#define FLCN_ERR_PWR_DEVICE_TAMPERED (0x40U)
#define FLCN_ERR_ITERATION_END (0x41U)
// Perf change sequence Errors
#define FLCN_ERR_LOCK_NOT_AVAILABLE (0x42U)
#define FLCN_ERR_STATE_RESET_NEEDED (0x43U)
#define FLCN_ERR_DMA_GENERIC (0x44U)
#define FLCN_ERR_LS_CHK_UCODE_REVOKED (0x45U)
#define FLCN_ERR_ACC_SEQ_MISMATCH (0x46U)
#define FLCN_ERR_SSP_STACK_CHECK_FAILED (0x47U)
#define FLCN_ERR_SE_TRNG_FAILED (0x48U)
#define FLCN_ERR_PROD_MODE_NOT_YET_SUPPORTED (0x49U)
// SHA HW errors
#define FLCN_ERR_SHA_HW_CHECK_INT_STATUS (0x4AU)
#define FLCN_ERR_SHA_HW_SOFTRESET_REQUIRED (0x4BU)
#define FLCN_ERR_SHA_HW_SOFTRESET_FAILED (0x4CU)
#define FLCN_ERR_SHA_HW_BUSY (0x4DU)
//
// Add new generic error codes here; do not change the values of existing
// error codes, because that will affect other binaries and their signatures.
//
#define FLCN_ERR_CTXSW_ERROR (0x4EU)
// VPR SEC2 task errors
#define FLCN_ERR_VPR_APP_INVALID_REQUEST_END_ADDR (0x51U)
#define FLCN_ERR_VPR_APP_INVALID_REQUEST_START_ADDR (0x52U)
#define FLCN_ERR_VPR_APP_SCRUB_VERIF_FAILED (0x53U)
#define FLCN_ERR_VPR_APP_MEMLOCK_ALREADY_SET (0x54U)
#define FLCN_ERR_VPR_APP_INVALID_INDEX (0x55U)
#define FLCN_ERR_VPR_APP_UNEXPECTED_VPR_HANDOFF_FROM_SCRUBBER (0x56U)
#define FLCN_ERR_VPR_APP_CBC_RANGE_CLASH (0x57U)
#define FLCN_ERR_VPR_APP_NOT_SUPPORTED_BY_HW (0x58U)
#define FLCN_ERR_VPR_APP_NOT_SUPPORTED_BY_SW (0x59U)
#define FLCN_ERR_VPR_APP_DISPLAY_VERSION_NOT_SUPPORTED (0x5AU)
#define FLCN_ERR_VPR_APP_VPR_WPR_WRITE_FAILED (0x5BU)
#define FLCN_ERR_VPR_APP_NOTHING_TO_DO (0x5CU)
#define FLCN_ERR_VPR_APP_DISPLAY_NOT_PRESENT (0x5DU)
#define FLCN_ERR_VPR_APP_PREVIOUS_CMD_FAILED_AS_MAX_VPR_IS_0 (0x5EU)
#define FLCN_ERR_VPR_APP_PLM_PROTECTION_NOT_RAISED (0x5FU)
#define FLCN_ERR_VPR_APP_PLM_PROTECTION_ALREADY_RAISED (0x60U)
#define FLCN_ERR_VPR_APP_DISP_FALCON_IS_NOT_IN_LS_MODE (0x61U)
#define FLCN_ERR_VPR_APP_VPR_IS_ALREADY_ENABLED (0x62U)
#define FLCN_ERR_VPR_APP_UNEXPECTEDLY_RUNNING_ON_RISCV (0x63U)
// Clocks Errors
#define FLCN_ERR_CYCLE_DETECTED (0x70U)
#define FLCN_ERR_INVALID_PATH (0x71U)
#define FLCN_ERR_MISMATCHED_TARGET (0x72U)
#define FLCN_ERR_FREQ_NOT_SUPPORTED (0x73U)
#define FLCN_ERR_INVALID_SOURCE (0x74U)
#define FLCN_ERR_NOT_INITIALIZED (0x75U)
// HDCP2.2 Errors
#define FLCN_ERR_HDCP22_GETDKEY_FAILED (0x80U)
#define FLCN_ERR_HDCP22_H_PRIME (0x81U)
#define FLCN_ERR_HDCP22_CERT_RX (0x82U)
#define FLCN_ERR_HDCP22_PAIRING (0x83U)
#define FLCN_ERR_HDCP22_L_PRIME (0x84U)
#define FLCN_ERR_HDCP22_V_PRIME (0x85U)
#define FLCN_ERR_HDCP22_INVALID_RXIDLIST (0x86U)
#define FLCN_ERR_HDCP22_M_PRIME (0x87U)
#define FLCN_ERR_HDCP22_SEQ_ROLLOVER (0x88U)
#define FLCN_ERR_HDCP22_RSA_HW (0x89U)
#define FLCN_ERR_HDCP22_ECF_TIMESLOT_MISMATCH (0x90U)
// LibCCC Errors
#define FLCN_ERR_INIT_CRYPTO_DEVICE_FAILED (0x91U)
#define FLCN_ERR_NVPKA_SELECT_ENGINE_FAILED (0x92U)
#define FLCN_ERR_NVPKA_ACQUIRE_MUTEX_FAILED (0x93U)
#define FLCN_ERR_NVPKA_MODULAR_EXP_LOCK_FAILED (0x94U)
#define FLCN_ERR_NVRNG_INIT_CRYPTO_DEVICE_FAILED (0x95U)
#define FLCN_ERR_NVRNG_SELECT_ENGINE_FAILED (0x96U)
#define FLCN_ERR_NVRNG_GENERATE_FAILED (0x97U)
// Heavy Secure Errors
#define FLCN_ERR_HS_CHK_INVALID_INPUT (0xA0U)
#define FLCN_ERR_HS_CHK_CHIP_NOT_SUPPORTED (0xA1U)
#define FLCN_ERR_HS_CHK_UCODE_REVOKED (0xA2U)
#define FLCN_ERR_HS_CHK_NOT_IN_LSMODE (0xA3U)
#define FLCN_ERR_HS_CHK_INVALID_LS_PRIV_LEVEL (0xA4U)
#define FLCN_ERR_HS_CHK_INVALID_REGIONCFG (0xA5U)
#define FLCN_ERR_HS_CHK_PRIV_SEC_DISABLED_ON_PROD (0xA6U)
#define FLCN_ERR_HS_CHK_SW_FUSING_ALLOWED_ON_PROD (0xA7U)
#define FLCN_ERR_HS_CHK_INTERNAL_SKU_ON_PROD (0xA8U)
#define FLCN_ERR_HS_CHK_DEVID_OVERRIDE_ENABLED_ON_PROD (0xA9U)
#define FLCN_ERR_HS_CHK_INCONSISTENT_PROD_MODES (0xAAU)
#define FLCN_ERR_HS_CHK_HUB_ENCRPTION_DISABLED (0xABU)
#define FLCN_ERR_HS_PR_ILLEGAL_LASSAHS_STATE_AT_HS_ENTRY (0xACU)
#define FLCN_ERR_HS_PR_ILLEGAL_LASSAHS_STATE_AT_MPK_DECRYPT (0xADU)
#define FLCN_ERR_HS_PR_ILLEGAL_LASSAHS_STATE_AT_HS_EXIT (0xAEU)
#define FLCN_ERR_HS_PROD_MODE_NOT_YET_SUPPORTED (0xAFU)
#define FLCN_ERR_HS_DEV_VERSION_ON_PROD (0xB0U)
#define FLCN_ERR_HS_PR_LASSAHS_LS_SIG_GRP_MISMATCH (0xB1U)
#define FLCN_ERR_HS_PR_LASSAHS_LS_SIG_GRP_OVERLAYS_CNT (0xB2U)
#define FLCN_ERR_HS_PR_LASSAHS_LS_SIG_GRP_INVALID_VA (0xB3U)
#define FLCN_ERR_HS_MUTEX_ACQUIRE_FAILED (0xB4U)
#define FLCN_ERR_HS_MUTEX_RELEASE_FAILED (0xB5U)
#define FLCN_ERR_HS_PR_MPK_DEC_NEEDS_NEWER_ACR_UDE_SCRUBBER (0xB6U)
#define FLCN_ERR_HS_CHK_ENGID_MISMATCH (0xB7U)
#define FLCN_ERR_HS_OPT_INTERNAL_SKU_CHECK_FAILED (0xB8U)
#define FLCN_ERR_HS_CHK_BOARD_MISMATCH (0xB9U)
#define FLCN_ERR_HS_CHK_DISP_ENG_DISABLED (0xBAU)
#define FLCN_ERR_HS_GEN_RANDOM (0xBBU)
#define FLCN_ERR_HS_CHK_IMPROPERLY_FUSED_BOARD (0xBCU)
#define FLCN_ERR_HS_CHK_HDCP_DISABLED (0xBDU)
#define FLCN_ERR_HS_CHK_HDCP_BLACKLISTED_SKU (0xBEU)
#define FLCN_ERR_HS_SECURE_ACTION_ARG_CHECK_FAILED (0xBFU)
#define FLCN_ERR_HS_CHK_RETURN_PC_AT_HS_ENTRY_IS_OF_HS (0xC0U)
#define FLCN_ERR_HS_CHK_HS_LIB_ENTRY_CALLED_BY_NON_HS (0xC1U)
#define FLCN_ERR_HS_DECODE_TRAP_ALREADY_IN_USE (0xC2U)
#define FLCN_ERR_HS_REGISTER_READ_WRITE_ERROR (0xC3U)
#define FLCN_ERR_HS_CHK_CPUCTL_ALIAS_FALSE (0xC4U)
#define FLCN_ERR_HS_UPDATE_RESET_PLM_ERROR (0xC5U)
#define FLCN_ERR_HS_RNG_CONFIG_ERROR (0xC6U)
#define FLCN_ERR_HS_CHK_NOT_IN_HSMODE (0xC7U)
#define FLCN_ERR_HS_CHK_GFW_CHAIN_OF_TRUST_BROKEN (0xC8U)
#define FLCN_ERR_HS_HDCP22_WRONG_SEQUENCE (0xC9U)
#define FLCN_ERR_HS_HDCP22_INTEGRITY_ERROR (0xCAU)
#define FLCN_ERR_HS_HDCP22_WRONG_TYPE (0xCBU)
#define FLCN_ERR_HS_APM_NOT_ENABLED (0xCCU)
#define FLCN_ERR_HS_APM_SMC_ENABLED (0xCDU)
#define FLCN_ERR_HS_APM_FECS_NOT_HALTED (0xCEU)
#define FLCN_ERR_HS_APM_SCRATCH_PLM_INVALID (0xCFU)
#define FLCN_ERR_HS_APM_SCRATCH_INIT_INVALID (0xD0U)
//
// BAR0/CSB Priv Read/Write Error Handling Defines
// These need to be defined by HW - NV Bug 200198584
//
#define FLCN_BAR0_PRIV_PRI_ERROR_MASK 0xFFF00000U
#define FLCN_BAR0_PRIV_PRI_ERROR_CODE 0xBAD00000U
#define FLCN_BAR0_PRIV_PRI_RETURN_VAL 0x00BADBADU
#define FLCN_CSB_PRIV_PRI_ERROR_MASK 0xFFFF0000U
#define FLCN_CSB_PRIV_PRI_ERROR_CODE 0xBADF0000U
//
// Macro to check FALCON return status
//
#define CHECK_FLCN_STATUS(expr) do {   \
    flcnStatus = (expr);               \
    if (flcnStatus != FLCN_OK)         \
    {                                  \
        goto ErrorExit;                \
    }                                  \
} while (NV_FALSE)
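//
// Typical usage (illustrative sketch; the caller must declare a local
// FLCN_STATUS named flcnStatus and an ErrorExit label, and fooOperation
// is a hypothetical helper):
//
//   FLCN_STATUS flcnStatus = FLCN_OK;
//   CHECK_FLCN_STATUS(fooOperation());
//   ...
// ErrorExit:
//   return flcnStatus;
//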
// Warnings.
#define FLCN_WARN_NOTHING_TO_DO (0xD0U)
#define FLCN_WARN_NOT_QUERIED (0xD1U)
// Queue handling Errors
#define FLCN_ERR_QUEUE_MGMT_INVALID_UNIT_ID (0xE0U)
#define FLCN_ERR_QUEUE_MGMT_HEAP_MIRROR_ERR (0xE1U)
#define FLCN_ERR_QUEUE_TASK_INVALID_EVENT_TYPE (0xE2U)
#define FLCN_ERR_QUEUE_TASK_INVALID_UNIT_ID (0xE3U)
#define FLCN_ERR_QUEUE_TASK_INVALID_CMD_TYPE (0xE4U)
// Posted write errors
#define FLCN_ERR_POSTED_WRITE_FAILURE (0xF0U)
#define FLCN_ERR_POSTED_WRITE_INTERRUPTS_ENABLED (0xF1U)
#define FLCN_ERR_POSTED_WRITE_PRI_CLUSTER_COUNT_MISMATCH (0xF2U)
#define FLCN_ERR_POSTED_WRITE_INCORRECT_PARAMS (0xF3U)
// Lane Margining errors
#define FLCN_ERR_LM_INVALID_RECEIVER_NUMBER (0xF5U)
#endif // FLCNRETVAL_H


@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef GSP_ERROR_H
#define GSP_ERROR_H
// Definitions for GSP-RM to report errors to CPU-RM via mailbox
#define NV_GSP_ERROR_CODE 7:0
#define NV_GSP_ERROR_REASON 15:8
#define NV_GSP_ERROR_TASK 23:16
#define NV_GSP_ERROR_SKIPPED 27:24
#define NV_GSP_ERROR_TAG 31:28
#define NV_GSP_ERROR_TAG_VAL 0xE
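//
// Illustrative sketch: these fields use the RM HI:LO bit-range notation,
// so a 32-bit mailbox value decomposes as (decoding shown by hand):
//   code    = (mailbox >> 0)  & 0xFF; // NV_GSP_ERROR_CODE     7:0
//   reason  = (mailbox >> 8)  & 0xFF; // NV_GSP_ERROR_REASON   15:8
//   task    = (mailbox >> 16) & 0xFF; // NV_GSP_ERROR_TASK     23:16
//   skipped = (mailbox >> 24) & 0xF;  // NV_GSP_ERROR_SKIPPED  27:24
//   tag     = (mailbox >> 28) & 0xF;  // NV_GSP_ERROR_TAG      31:28
//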
#endif // GSP_ERROR_H


@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) <year> NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _IFR_DEM_H_
#define _IFR_DEM_H_
#define INFOROM_DEM_OBJECT_V1_00_PACKED_SIZE 4144
#define INFOROM_DEM_OBJECT_V1_00_FIFO_SIZE 4096
struct INFOROM_DEM_OBJECT_V1_00
{
INFOROM_OBJECT_HEADER_V1_00 header;
inforom_U032 seqNumber;
inforom_U016 writeOffset;
inforom_U016 readOffset;
inforom_X008 fifoBuffer[INFOROM_DEM_OBJECT_V1_00_FIFO_SIZE];
inforom_U032 reserved[8];
};
#define INFOROM_DEM_OBJECT_V1_00_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d2w4096x8d"
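// (Illustrative decode of the format string: "d" = one U032 seqNumber,
// "2w" = two U016 offsets, "4096x" = the 4096-byte X008 FIFO buffer, and
// "8d" = eight U032 reserved words, matching the struct fields above.)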
typedef struct INFOROM_DEM_OBJECT_V1_00 INFOROM_DEM_OBJECT_V1_00;
#endif // _IFR_DEM_H_


@@ -0,0 +1,119 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _IFRECC_H_
#define _IFRECC_H_
#include "nvtypes.h"
#include "inforom/types.h"
// NVSwitch ECC v6 object definition
#define INFOROM_ECC_OBJECT_V6_S0_PACKED_SIZE 3808
// Used to determine if the entry is empty or not
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_VALID 0:0
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_VALID_FALSE 0
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_VALID_TRUE 1
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_CORRUPT_TIMEDATA 1:1
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_CORRUPT_TIMEDATA_FALSE 0
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_CORRUPT_TIMEDATA_TRUE 1
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_ADDR_VALID 2:2
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_ADDR_VALID_FALSE 0
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_ADDR_VALID_TRUE 1
#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_LOCATION_LINK_ID 7:0
typedef struct INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER
{
inforom_U008 header;
inforom_U032 errId;
inforom_U032 lastErrorTimestamp;
inforom_U032 averageEventDelta;
inforom_U016 location;
inforom_U016 sublocation;
inforom_U032 correctedCount;
inforom_U032 uncorrectedCount;
inforom_U032 address;
} INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER;
#define INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT 128
typedef struct INFOROM_ECC_OBJECT_V6_S0
{
INFOROM_OBJECT_HEADER_V1_00 header;
NvU64_ALIGN32 uncorrectedTotal;
NvU64_ALIGN32 correctedTotal;
inforom_U032 lastClearedTimestamp;
INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER errorEntries[INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT];
inforom_U008 padding[68];
} INFOROM_ECC_OBJECT_V6_S0;
#define INFOROM_ECC_OBJECT_V6_S0_HEADER_FMT INFOROM_OBJECT_HEADER_V1_00_FMT
#define INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_FMT "b3d2w3d"
#define INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_ARRAY_FMT \
INFOROM_FMT_REP128(INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_FMT)
#define INFOROM_ECC_OBJECT_V6_S0_PADDING_FMT "68b"
#define INFOROM_ECC_OBJECT_V6_S0_FMT INFOROM_ECC_OBJECT_V6_S0_HEADER_FMT "qqd" \
INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_ARRAY_FMT \
INFOROM_ECC_OBJECT_V6_S0_PADDING_FMT
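//
// Illustrative decode of INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_FMT
// ("b3d2w3d"): one U008 (header), three U032 (errId, lastErrorTimestamp,
// averageEventDelta), two U016 (location, sublocation), and three U032
// (correctedCount, uncorrectedCount, address), in struct order.
//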
// Error event structure for NVSwitch ECC errors
typedef struct
{
NvU32 sxid;
NvU32 linkId;
NvBool bAddressValid;
NvU32 address;
// The timestamp is filled in by the inforom ECC error logging API
NvU32 timestamp;
NvBool bUncErr;
NvU32 errorCount;
} INFOROM_NVS_ECC_ERROR_EVENT;
typedef union
{
INFOROM_OBJECT_HEADER_V1_00 header;
INFOROM_ECC_OBJECT_V6_S0 v6s;
} INFOROM_ECC_OBJECT;
typedef struct
{
const char *pFmt;
NvU8 *pPackedObject;
INFOROM_ECC_OBJECT *pEcc;
// Signals if there are pending updates to be flushed to InfoROM
NvBool bDirty;
} INFOROM_ECC_STATE, *PINFOROM_ECC_STATE;
#endif // _IFRECC_H_


@@ -0,0 +1,83 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _IFRSTRUCT_H_
#define _IFRSTRUCT_H_
#include "inforom/types.h"
#include "inforom/ifrecc.h"
#include "inforom/ifrdem.h"
#define INFOROM_OBD_OBJECT_V1_XX_PACKED_SIZE 128
struct INFOROM_OBD_OBJECT_V1_XX
{
INFOROM_OBJECT_HEADER_V1_00 header;
inforom_U032 buildDate;
inforom_U008 marketingName[24];
inforom_U008 serialNumber[16];
inforom_U008 memoryManufacturer;
inforom_U008 memoryPartID[20];
inforom_U008 memoryDateCode[6];
inforom_U008 productPartNumber[20];
inforom_U008 boardRevision[3];
inforom_U008 boardType;
inforom_U008 board699PartNumber[20];
inforom_U008 reserved[5];
};
#define INFOROM_OBD_OBJECT_V1_XX_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d116b"
typedef struct INFOROM_OBD_OBJECT_V1_XX INFOROM_OBD_OBJECT_V1_XX;
//
// OEM 1.0
//
#define INFOROM_OEM_OBJECT_V1_00_PACKED_SIZE 512
#define INFOROM_OEM_OBJECT_HEADER_VERSION 1
struct INFOROM_OEM_OBJECT_V1_00
{
INFOROM_OBJECT_HEADER_V1_00 header;
inforom_U008 oemInfo[504];
};
#define INFOROM_OEM_OBJECT_V1_00_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "504b"
typedef struct INFOROM_OEM_OBJECT_V1_00 INFOROM_OEM_OBJECT_V1_00;
#define INFOROM_IMG_OBJECT_V1_00_PACKED_SIZE 64
#define INFOROM_IMG_OBJECT_V1_00_VERSION_LENGTH 16
struct INFOROM_IMG_OBJECT_V1_00
{
INFOROM_OBJECT_HEADER_V1_00 header;
inforom_U008 version[INFOROM_IMG_OBJECT_V1_00_VERSION_LENGTH];
inforom_U016 pciDeviceId;
inforom_U016 pciVendorId;
inforom_U016 pciSubsystemId;
inforom_U016 pciSubsystemVendorId;
inforom_U008 reserved[32];
};
#define INFOROM_IMG_OBJECT_V1_00_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "16b4w32b"
typedef struct INFOROM_IMG_OBJECT_V1_00 INFOROM_IMG_OBJECT_V1_00;
#endif // _IFRSTRUCT_H_


@@ -0,0 +1,84 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INFOROM_OMSDEF_H_
#define _INFOROM_OMSDEF_H_
#include "inforom/types.h"
#define INFOROM_OMS_OBJECT_V1_PACKED_SIZE 112
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE 0:0
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE_NO 0
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE_YES 1
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE 1:1
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE_NO 0
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE_YES 1
#define INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_RESERVED 7:2
#define INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_ENTRY_CHECKSUM 15:8
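//
// Illustrative sketch of how an entry's U016 data field decomposes
// (field extraction shown by hand):
//   available    = (data >> 0) & 0x1;  // ..._ENTRY_AVAILABLE       0:0
//   forceDisable = (data >> 1) & 0x1;  // ..._FORCE_DEVICE_DISABLE  1:1
//   checksum     = (data >> 8) & 0xFF; // ..._ENTRY_CHECKSUM        15:8
//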
typedef struct INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY
{
inforom_U016 data;
} INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY;
#define INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES 50
typedef struct INFOROM_OMS_OBJECT_V1S
{
INFOROM_OBJECT_HEADER_V1_00 header;
inforom_U032 lifetimeRefreshCount;
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY settings[
INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES];
} INFOROM_OMS_OBJECT_V1S;
#define INFOROM_OMS_OBJECT_V1S_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d50w"
typedef struct INFOROM_OMS_V1S_DATA
{
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pIter;
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY prev;
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pNext;
} INFOROM_OMS_V1S_DATA;
typedef union
{
INFOROM_OBJECT_HEADER_V1_00 header;
INFOROM_OMS_OBJECT_V1S v1s;
} INFOROM_OMS_OBJECT;
typedef union
{
INFOROM_OMS_V1S_DATA v1s;
} INFOROM_OMS_DATA;
typedef struct
{
const char *pFmt;
NvU8 *pPackedObject;
INFOROM_OMS_OBJECT *pOms;
INFOROM_OMS_DATA omsData;
} INFOROM_OMS_STATE;
#endif /* _INFOROM_OMSDEF_H_ */


@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INFOROM_TYPES_H_
#define _INFOROM_TYPES_H_
/*!
* @file types.h
* @brief Common types and definitions used by InfoROM objects
*/
typedef NvS32 inforom_S008;
typedef NvU32 inforom_U004;
typedef NvU32 inforom_U008;
typedef NvU32 inforom_U016;
typedef NvU32 inforom_U024;
typedef NvU32 inforom_U032;
typedef NvU64 inforom_U064;
typedef NvU8 inforom_X008;
#define INFOROM_FMT_S08 's'
#define INFOROM_FMT_U04 'n'
#define INFOROM_FMT_U08 'b'
#define INFOROM_FMT_U16 'w'
#define INFOROM_FMT_U24 't'
#define INFOROM_FMT_U32 'd'
#define INFOROM_FMT_U64 'q'
#define INFOROM_FMT_BINARY 'x'
// Helper macros for generating repeating format sequences
#define INFOROM_FMT_REP02(fmt) fmt fmt
#define INFOROM_FMT_REP04(fmt) INFOROM_FMT_REP02(fmt) INFOROM_FMT_REP02(fmt)
#define INFOROM_FMT_REP08(fmt) INFOROM_FMT_REP04(fmt) INFOROM_FMT_REP04(fmt)
#define INFOROM_FMT_REP16(fmt) INFOROM_FMT_REP08(fmt) INFOROM_FMT_REP08(fmt)
#define INFOROM_FMT_REP32(fmt) INFOROM_FMT_REP16(fmt) INFOROM_FMT_REP16(fmt)
#define INFOROM_FMT_REP64(fmt) INFOROM_FMT_REP32(fmt) INFOROM_FMT_REP32(fmt)
#define INFOROM_FMT_REP128(fmt) INFOROM_FMT_REP64(fmt) INFOROM_FMT_REP64(fmt)
#define INFOROM_FMT_REP256(fmt) INFOROM_FMT_REP128(fmt) INFOROM_FMT_REP128(fmt)
#define INFOROM_FMT_REP512(fmt) INFOROM_FMT_REP256(fmt) INFOROM_FMT_REP256(fmt)
#define INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(sv) \
((((sv) & 0xF0) == 0) || (((sv) & 0xF0) == 0x20))
#define INFOROM_OBJECT_HEADER_V1_00_SIZE_OFFSET 0x05
#define INFOROM_OBJECT_HEADER_V1_00_CHECKSUM_OFFSET 0x07
#define INFOROM_OBJECT_HEADER_V1_00_PACKED_SIZE 8
typedef struct INFOROM_OBJECT_HEADER_V1_00
{
inforom_S008 type[3];
inforom_U008 version;
inforom_U008 subversion;
inforom_U016 size;
inforom_U008 checksum;
} INFOROM_OBJECT_HEADER_V1_00;
#define INFOROM_OBJECT_HEADER_V1_00_FMT "3s2bwb"
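//
// Reading the format string above (illustrative): "3s2bwb" packs three
// S008 type characters, two U008 fields (version, subversion), one U016
// size ('w'), and one U008 checksum ('b'), matching the struct fields in
// declaration order.
//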
#endif // _INFOROM_TYPES_H_


@@ -0,0 +1,356 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVCST_H
#define NVCST_H
#include <platform/chipset/chipset.h>
#include <platform/chipset/chipset_info.h>
#include <nvpcie.h>
#define CHIPSET_SETUP_FUNC(name) static NV_STATUS name(OBJCL *pCl);
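//
// Illustrative expansion: CHIPSET_SETUP_FUNC(Intel_25XX_setupFunc) declares
// the static prototype
//   static NV_STATUS Intel_25XX_setupFunc(OBJCL *pCl);
// so each line below forward-declares one chipset setup routine.
//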
CHIPSET_SETUP_FUNC(Intel_25XX_setupFunc)
CHIPSET_SETUP_FUNC(Intel_27XX_setupFunc)
CHIPSET_SETUP_FUNC(Intel_2A40_setupFunc)
CHIPSET_SETUP_FUNC(Intel_0040_setupFunc)
CHIPSET_SETUP_FUNC(Intel_2E00_setupFunc)
CHIPSET_SETUP_FUNC(Intel_25E0_setupFunc)
CHIPSET_SETUP_FUNC(Intel_29XX_setupFunc)
CHIPSET_SETUP_FUNC(Intel_29X0_setupFunc)
CHIPSET_SETUP_FUNC(Intel_29E0_setupFunc)
CHIPSET_SETUP_FUNC(Intel_359E_setupFunc)
CHIPSET_SETUP_FUNC(Intel_4000_setupFunc)
CHIPSET_SETUP_FUNC(Intel_4003_setupFunc)
CHIPSET_SETUP_FUNC(Intel_3400_setupFunc)
CHIPSET_SETUP_FUNC(Intel_3B42_setupFunc)
CHIPSET_SETUP_FUNC(Intel_1C46_setupFunc)
CHIPSET_SETUP_FUNC(Intel_1C10_setupFunc)
CHIPSET_SETUP_FUNC(Intel_1C4B_setupFunc)
CHIPSET_SETUP_FUNC(Intel_1C49_setupFunc)
CHIPSET_SETUP_FUNC(Intel_1D40_setupFunc)
CHIPSET_SETUP_FUNC(Intel_8D47_setupFunc)
CHIPSET_SETUP_FUNC(Intel_8D44_setupFunc)
CHIPSET_SETUP_FUNC(Intel_1E10_setupFunc)
CHIPSET_SETUP_FUNC(Intel_8C4B_setupFunc)
CHIPSET_SETUP_FUNC(Intel_8CC4_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A145_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A2C5_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A242_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A2D2_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A2C9_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A301_setupFunc)
CHIPSET_SETUP_FUNC(Intel_0685_setupFunc)
CHIPSET_SETUP_FUNC(Intel_4381_setupFunc)
CHIPSET_SETUP_FUNC(Intel_7A82_setupFunc)
CHIPSET_SETUP_FUNC(SiS_656_setupFunc)
CHIPSET_SETUP_FUNC(ATI_RS400_setupFunc)
CHIPSET_SETUP_FUNC(ATI_RS480_setupFunc)
CHIPSET_SETUP_FUNC(ATI_RD870_setupFunc)
CHIPSET_SETUP_FUNC(ATI_RD890_setupFunc)
CHIPSET_SETUP_FUNC(ATI_RX780_setupFunc)
CHIPSET_SETUP_FUNC(ATI_FX990_setupFunc)
CHIPSET_SETUP_FUNC(AMD_RS780_setupFunc)
CHIPSET_SETUP_FUNC(AMD_FX790_setupFunc)
CHIPSET_SETUP_FUNC(AMD_FX890_setupFunc)
CHIPSET_SETUP_FUNC(AMD_X370_setupFunc)
CHIPSET_SETUP_FUNC(VIA_VX900_setupFunc)
CHIPSET_SETUP_FUNC(APM_Storm_setupFunc)
CHIPSET_SETUP_FUNC(ARMV8_generic_setupFunc)
CHIPSET_SETUP_FUNC(Marvell_ThunderX2_setupFunc)
CHIPSET_SETUP_FUNC(QEMU_setupFunc)
CHIPSET_SETUP_FUNC(Ampere_eMag_setupFunc)
CHIPSET_SETUP_FUNC(Huawei_Kunpeng920_setupFunc)
CHIPSET_SETUP_FUNC(Mellanox_BlueField_setupFunc)
CHIPSET_SETUP_FUNC(Amazon_Gravitron2_setupFunc)
CHIPSET_SETUP_FUNC(Fujitsu_A64FX_setupFunc)
CHIPSET_SETUP_FUNC(Phytium_FT2000_setupFunc)
CHIPSET_SETUP_FUNC(Ampere_Altra_setupFunc)
CHIPSET_SETUP_FUNC(Arm_NeoverseN1_setupFunc)
CHIPSET_SETUP_FUNC(Nvidia_T210_setupFunc)
CHIPSET_SETUP_FUNC(Nvidia_T194_setupFunc)
// Keep string length <=32 (including termination) to avoid string copy overflow
CSINFO chipsetInfo[] =
{
// PCI Express chipset
{PCI_VENDOR_ID_INTEL, 0x2580, CS_INTEL_2580, "Grantsdale", Intel_25XX_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2584, CS_INTEL_2584, "Alderwood", Intel_25XX_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2588, CS_INTEL_2588, "Intel2588", Intel_25XX_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2590, CS_INTEL_2590, "Alviso", Intel_25XX_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x25C0, CS_INTEL_25E0, "Greencreek", Intel_25E0_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x25E0, CS_INTEL_25E0, "Greencreek", Intel_25E0_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x29B0, CS_INTEL_29X0, "IntelQ35", Intel_29X0_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x29C0, CS_INTEL_29X0, "BearlakeB", Intel_29X0_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x29D0, CS_INTEL_29X0, "IntelQ33", Intel_29X0_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x29E0, CS_INTEL_29E0, "BearlakeX", Intel_29E0_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x359E, CS_INTEL_359E, "Tumwater", Intel_359E_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x4000, CS_INTEL_4000, "Stoakley", Intel_4000_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x4003, CS_INTEL_4003, "SkullTrail", Intel_4003_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x3400, CS_INTEL_3400, "IntelX58", Intel_3400_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x3403, CS_INTEL_3400, "IntelX58", Intel_3400_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x3405, CS_INTEL_3400, "IntelX58", Intel_3400_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x3406, CS_INTEL_3400, "Tylersburg", Intel_3400_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2770, CS_INTEL_2770, "Lakeport", Intel_25XX_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2774, CS_INTEL_2774, "Glenwood", Intel_27XX_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x277C, CS_INTEL_277C, "Glenwood", Intel_27XX_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2A40, CS_INTEL_2A40, "Montevina", Intel_2A40_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2E00, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2E10, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2E20, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x2E30, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x0044, CS_INTEL_0040, "Arrandale/Auburndale", Intel_0040_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x0062, CS_INTEL_0040, "Arrandale/Auburndale", Intel_0040_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xD130, CS_INTEL_3B42, "Clarksfield", Intel_3B42_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xD132, CS_INTEL_3B42, "Clarksfield", Intel_3B42_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x3B42, CS_INTEL_3B42, "P55/PM55/H57", Intel_3B42_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1C46, CS_INTEL_1C46, "IntelP67-CougarPoint", Intel_1C46_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1C4B, CS_INTEL_1C46, "HuronRiver-HM67", Intel_1C4B_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1C4F, CS_INTEL_1C46, "HuronRiver-QM67", Intel_1C4B_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1C49, CS_INTEL_1C49, "HuronRiver-HM65", Intel_1C49_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1C44, CS_INTEL_1C46, "IntelZ68", Intel_1C46_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1C10, CS_INTEL_1C10, "IntelP67", Intel_1C10_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1D40, CS_INTEL_1D40, "IntelX79", Intel_1D40_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1D41, CS_INTEL_1D40, "IntelX79", Intel_1D40_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1E10, CS_INTEL_1E10, "IntelZ75", Intel_1E10_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x0150, CS_INTEL_1E10, "IntelZ77A-GD55", Intel_1E10_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x0151, CS_INTEL_1E10, "IntelZ77A-GD55", Intel_1E10_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x0100, CS_INTEL_1E10, "IntelZ77A-GD55", Intel_1E10_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8C4B, CS_INTEL_8C4B, "SharkBay-HM87", Intel_8C4B_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8C44, CS_INTEL_8C4B, "SharkBay-Z87", Intel_8C4B_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8C41, CS_INTEL_8C4B, "SharkBay-H8x/P8x", Intel_8C4B_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8C49, CS_INTEL_8C4B, "SharkBay-HM86", Intel_8C4B_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8C52, CS_INTEL_8C4B, "SharkBay-E3", Intel_8C4B_setupFunc}, // Does not support SLI
{PCI_VENDOR_ID_INTEL, 0x8CC4, CS_INTEL_8CC4, "IntelZ97", Intel_8CC4_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8CC3, CS_INTEL_8CC4, "IntelHM97", Intel_8CC4_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA145, CS_INTEL_A145, "IntelZ170", Intel_A145_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA14E, CS_INTEL_A145, "IntelHM170", Intel_A145_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA150, CS_INTEL_A145, "IntelHM170", Intel_A145_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA149, CS_INTEL_A145, "SkyLake C236", Intel_A145_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA14A, CS_INTEL_A145, "SkyLake C232", Intel_A145_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA14D, CS_INTEL_A145, "SkyLake-H", Intel_A145_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA244, CS_INTEL_A145, "SkyLake C620", Intel_A145_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8D47, CS_INTEL_8D47, "IntelX99", Intel_8D47_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x8D44, CS_INTEL_8D47, "IntelC612", Intel_8D44_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA2C5, CS_INTEL_A2C5, "IntelZ270", Intel_A2C5_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA154, CS_INTEL_A2C5, "IntelZ270", Intel_A2C5_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA152, CS_INTEL_A2C5, "IntelRX9S", Intel_A2C5_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA242, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA241, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA243, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA2D2, CS_INTEL_A2D2, "IntelX299", Intel_A2D2_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA2D3, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1C1, CS_INTEL_A242, "IntelC621", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1C2, CS_INTEL_A242, "IntelC622", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1C3, CS_INTEL_A242, "IntelC624", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1C4, CS_INTEL_A242, "IntelC625", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1C5, CS_INTEL_A242, "IntelC626", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1C6, CS_INTEL_A242, "IntelC627", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1C7, CS_INTEL_A242, "IntelC628", Intel_A242_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA2C9, CS_INTEL_A2C9, "IntelZ370", Intel_A2C9_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA310, CS_INTEL_A2C9, "IntelZ370", Intel_A2C9_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA30E, CS_INTEL_A2C9, "IntelZ370", Intel_A2C9_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA305, CS_INTEL_A2C9, "IntelZ390", Intel_A2C9_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA30D, CS_INTEL_A2C9, "IntelH370", Intel_A2C9_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA301, CS_INTEL_A301, "Intel-CannonLake", Intel_A301_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x0685, CS_INTEL_0685, "Intel-CometLake", Intel_0685_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1CB, CS_INTEL_C620, "Intel-IceLake", NULL},
{PCI_VENDOR_ID_INTEL, 0x4381, CS_INTEL_4381, "Intel-RocketLake", Intel_4381_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x4385, CS_INTEL_4381, "Intel-RocketLake", Intel_4381_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x7A82, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x7A84, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x0FAF, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x10E5, CS_NVIDIA_T186, "T186", Nvidia_T210_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x10E6, CS_NVIDIA_T186, "T186", Nvidia_T210_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x1AD0, CS_NVIDIA_T194, "T194", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x1AD1, CS_NVIDIA_T194, "T194", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x1AD2, CS_NVIDIA_T194, "T194", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x229A, CS_NVIDIA_T234, "T234", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x229C, CS_NVIDIA_T234, "T234", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x229E, CS_NVIDIA_T234, "T234", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_SIS, 0x0649, CS_SIS_649, "649", SiS_656_setupFunc},
{PCI_VENDOR_ID_SIS, 0x0656, CS_SIS_656, "656", SiS_656_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5A31, CS_ATI_RS400, "RS400", ATI_RS400_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5A33, CS_ATI_RS400, "RS400", ATI_RS400_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5950, CS_ATI_RS480, "RS480", ATI_RS480_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5951, CS_ATI_RS480, "RS480", ATI_RS480_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5956, CS_ATI_FX790, "FX790", AMD_FX790_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5A11, CS_ATI_FX890, "FX890", AMD_FX890_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5A13, CS_ATI_RD850, "RD850", ATI_RD870_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5A12, CS_ATI_RD870, "RD870", ATI_RD870_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5A10, CS_ATI_RD890, "RD890", ATI_RD890_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5957, CS_ATI_RX780, "RX780", ATI_RX780_setupFunc},
{PCI_VENDOR_ID_ATI, 0x5A14, CS_ATI_FX990, "FX990/X990/970", ATI_FX990_setupFunc},
{PCI_VENDOR_ID_AMD, 0x9601, CS_AMD_GX890, "GX890", AMD_FX890_setupFunc},
{PCI_VENDOR_ID_AMD, 0x9600, CS_AMD_RS780, "RS780", AMD_RS780_setupFunc},
{PCI_VENDOR_ID_AMD, 0x790E, CS_AMD_X370, "X370/X399/X470/TRX40/X570/WRX80", AMD_X370_setupFunc},
{PCI_VENDOR_ID_VIA, 0x0308, CS_VIA_VT8369B, "VT8369B", NULL},
{PCI_VENDOR_ID_VIA, 0x0410, CS_VIA_VX900, "VX900", VIA_VX900_setupFunc},
{PCI_VENDOR_ID_APM, 0xe004, CS_APM_STORM, "X-Gene Storm", APM_Storm_setupFunc},
{PCI_VENDOR_ID_IBM, 0x03DC, CS_IBM_VENICE, "Venice", NULL},
{PCI_VENDOR_ID_MARVELL, 0xAF00, CS_MARVELL_THUNDERX2, "Marvell ThunderX2", Marvell_ThunderX2_setupFunc},
{PCI_VENDOR_ID_REDHAT, 0x0008, CS_REDHAT_QEMU, "QEMU Redhat", QEMU_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE005, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE006, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE007, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE008, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE009, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE00A, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE00B, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE00C, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc},
{PCI_VENDOR_ID_HUAWEI, 0xA120, CS_HUAWEI_KUNPENG920, "Huawei Kunpeng920", Huawei_Kunpeng920_setupFunc},
{PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD, "Mellanox BlueField", Mellanox_BlueField_setupFunc},
{PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2", NULL},
{PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2 Crypto disabled", NULL},
{PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2, "Amazon Gravitron2", Amazon_Gravitron2_setupFunc},
{PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX, "Fujitsu A64FX", Fujitsu_A64FX_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xFC16, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
{PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
{PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
{PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
{PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
{PCI_VENDOR_ID_CADENCE, 0xFC16, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
{PCI_VENDOR_ID_AMPERE, 0xE000, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE00D, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE00E, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE010, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE100, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE110, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc},
{PCI_VENDOR_ID_ARM, 0x0100, CS_ARM_NEOVERSEN1, "Arm Neoverse N1", Arm_NeoverseN1_setupFunc},
{PCI_VENDOR_ID_HYGON, 0x790E, CS_HYGON_C86, "Hygon-C86-7151", NULL},
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN96XX, "Marvell Octeon CN96xx", ARMV8_generic_setupFunc},
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN98XX, "Marvell Octeon CN98xx", ARMV8_generic_setupFunc},
///////////////////////////////////////////////////////////////////////////////////////////////////
// last element must have chipset CS_UNKNOWN (zero)
{0, 0, CS_UNKNOWN, "Unknown", NULL}
};
VENDORNAME vendorName[] =
{
{PCI_VENDOR_ID_NVIDIA, "NVIDIA"},
{PCI_VENDOR_ID_INTEL, "Intel"},
{PCI_VENDOR_ID_VIA, "VIA"},
{PCI_VENDOR_ID_RCC, "ServerWorks"},
{PCI_VENDOR_ID_MICRON_1, "Micron"},
{PCI_VENDOR_ID_MICRON_2, "Micron"},
{PCI_VENDOR_ID_APPLE, "Apple"},
{PCI_VENDOR_ID_SIS, "SiS"},
{PCI_VENDOR_ID_ATI, "ATI"},
{PCI_VENDOR_ID_TRANSMETA, "Transmeta"},
{PCI_VENDOR_ID_HP, "HP"},
{PCI_VENDOR_ID_AMD, "AMD"},
{PCI_VENDOR_ID_ALI, "ALi"},
{PCI_VENDOR_ID_APM, "AppliedMicro"},
{PCI_VENDOR_ID_IBM, "IBM"},
{PCI_VENDOR_ID_MARVELL, "MarvellThunderX2"},
{PCI_VENDOR_ID_REDHAT, "QemuRedhat"},
{PCI_VENDOR_ID_AMPERE, "AmpereComputing"},
{PCI_VENDOR_ID_HUAWEI, "Huawei"},
{PCI_VENDOR_ID_MELLANOX, "Mellanox"},
{PCI_VENDOR_ID_AMAZON, "Amazon"},
{PCI_VENDOR_ID_FUJITSU, "Fujitsu"},
{PCI_VENDOR_ID_CADENCE, "Cadence"},
{PCI_VENDOR_ID_ARM, "ARM"},
{0, "Unknown"} // Indicates end of the table
};
//
// Allowlist all chipsets with which dGPU over PCIe is supported on ARM
// (both v7 and v8) platforms
//
ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{
{PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210}, // NVIDIA Tegra X1 RP0
{PCI_VENDOR_ID_NVIDIA, 0x0FAF, CS_NVIDIA_T210}, // NVIDIA Tegra X1 RP1
{PCI_VENDOR_ID_NVIDIA, 0x10E5, CS_NVIDIA_T186}, // NVIDIA Tegra P1 RP0
{PCI_VENDOR_ID_NVIDIA, 0x10E6, CS_NVIDIA_T186}, // NVIDIA Tegra P1 RP1
{PCI_VENDOR_ID_NVIDIA, 0x1AD0, CS_NVIDIA_T194}, // NVIDIA Tegra V1 RP0
{PCI_VENDOR_ID_NVIDIA, 0x1AD1, CS_NVIDIA_T194}, // NVIDIA Tegra V1 RP1
{PCI_VENDOR_ID_NVIDIA, 0x1AD2, CS_NVIDIA_T194}, // NVIDIA Tegra V1 RP2
{PCI_VENDOR_ID_NVIDIA, 0x229A, CS_NVIDIA_T234}, // NVIDIA Tegra Orin RP0
{PCI_VENDOR_ID_NVIDIA, 0x229C, CS_NVIDIA_T234}, // NVIDIA Tegra Orin RP1
{PCI_VENDOR_ID_NVIDIA, 0x229E, CS_NVIDIA_T234}, // NVIDIA Tegra Orin RP2
{PCI_VENDOR_ID_APM, 0xe004, CS_APM_STORM}, // Applied Micro X-Gene "Storm"
{PCI_VENDOR_ID_MARVELL, 0xAF00, CS_MARVELL_THUNDERX2}, // Marvell ThunderX2
{PCI_VENDOR_ID_REDHAT, 0x0008, CS_REDHAT_QEMU}, // Redhat QEMU
{PCI_VENDOR_ID_AMPERE, 0xE005, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_AMPERE, 0xE006, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_AMPERE, 0xE007, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_AMPERE, 0xE008, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_AMPERE, 0xE009, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_AMPERE, 0xE00A, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_AMPERE, 0xE00B, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_AMPERE, 0xE00C, CS_AMPERE_EMAG}, // Ampere eMag
{PCI_VENDOR_ID_HUAWEI, 0xA120, CS_HUAWEI_KUNPENG920}, // Huawei Kunpeng 920
{PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD}, // Mellanox BlueField
{PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2
{PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2 Crypto disabled
{PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2}, // Amazon Gravitron2
{PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX}, // Fujitsu A64FX
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_FT2000}, // Phytium FT2000
{PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_FT2000}, // Phytium FT2000
{PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_FT2000}, // Phytium FT2000
{PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_FT2000}, // Phytium FT2000
{PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_FT2000}, // Phytium FT2000
{PCI_VENDOR_ID_CADENCE, 0xFC16, CS_PHYTIUM_FT2000}, // Phytium FT2000
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500}, // Phytium S2500
{PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_S2500}, // Phytium S2500
{PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_S2500}, // Phytium S2500
{PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_S2500}, // Phytium S2500
{PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_S2500}, // Phytium S2500
{PCI_VENDOR_ID_CADENCE, 0xFC16, CS_PHYTIUM_S2500}, // Phytium S2500
{PCI_VENDOR_ID_AMPERE, 0xE000, CS_AMPERE_ALTRA}, // Ampere Altra
{PCI_VENDOR_ID_AMPERE, 0xE00D, CS_AMPERE_ALTRA}, // Ampere Altra
{PCI_VENDOR_ID_AMPERE, 0xE00E, CS_AMPERE_ALTRA}, // Ampere Altra
{PCI_VENDOR_ID_AMPERE, 0xE010, CS_AMPERE_ALTRA}, // Ampere Altra
{PCI_VENDOR_ID_AMPERE, 0xE100, CS_AMPERE_ALTRA}, // Ampere Altra
{PCI_VENDOR_ID_AMPERE, 0xE110, CS_AMPERE_ALTRA}, // Ampere Altra
{PCI_VENDOR_ID_ARM, 0x0100, CS_ARM_NEOVERSEN1}, // Arm Neoverse N1
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN96XX}, // Marvell OCTEON CN96xx
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN98XX}, // Marvell OCTEON CN98xx
// last element must have chipset CS_UNKNOWN (zero)
{0, 0, CS_UNKNOWN}
};
#endif /* NVCST_H */


@@ -0,0 +1,693 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVDEVID_H
#define NVDEVID_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Private device ID defines - only defines! *
* *
\***************************************************************************/
///////////////////////////////////////////////////////////////////////////////////////////
//
// VENDOR/SUBVENDOR IDS
// XXX Cleanup to do: change PCI_VENDOR_* to NV_PCI_SUBID_VENDOR_*
//
///////////////////////////////////////////////////////////////////////////////////////////
#define NV_PCI_SUBID_VENDOR 15:0 /* RW--F */
#define NV_PCI_SUBID_VENDOR_AMD 0x1022
#define NV_PCI_SUBID_VENDOR_ALI 0x10B9
#define NV_PCI_SUBID_VENDOR_NVIDIA 0x10DE
#define NV_PCI_SUBID_VENDOR_INTEL 0x8086
#define NV_PCI_SUBID_VENDOR_VIA 0x1106
#define NV_PCI_SUBID_VENDOR_RCC 0x1166
#define NV_PCI_SUBID_VENDOR_MICRON_1 0x1042
#define NV_PCI_SUBID_VENDOR_MICRON_2 0x1344
#define NV_PCI_SUBID_VENDOR_APPLE 0x106B
#define NV_PCI_SUBID_VENDOR_SIS 0x1039
#define NV_PCI_SUBID_VENDOR_ATI 0x1002
#define NV_PCI_SUBID_VENDOR_TRANSMETA 0x1279
#define NV_PCI_SUBID_VENDOR_HP 0x103C
#define NV_PCI_SUBID_VENDOR_DELL 0x1028
#define NV_PCI_SUBID_VENDOR_FUJITSU 0x10cf
#define NV_PCI_SUBID_VENDOR_ASUS 0x1043
#define NV_PCI_SUBID_VENDOR_MSI 0x1462
#define NV_PCI_SUBID_VENDOR_FOXCONN 0x105B
#define NV_PCI_SUBID_VENDOR_ECS 0x1019
#define NV_PCI_SUBID_VENDOR_DFI_1 0x106E
#define NV_PCI_SUBID_VENDOR_TOSHIBA 0x1179
#define NV_PCI_SUBID_VENDOR_DFI_2 0x15BD
#define NV_PCI_SUBID_VENDOR_ACER 0x1025
#define NV_PCI_SUBID_VENDOR_GIGABYTE 0x1458
#define NV_PCI_SUBID_VENDOR_EVGA 0x3842
#define NV_PCI_SUBID_VENDOR_BROADCOM 0x1166
#define NV_PCI_SUBID_VENDOR_SUPERMICRO 0x15D9
#define NV_PCI_SUBID_VENDOR_BIOSTAR 0x1565
#define NV_PCI_SUBID_VENDOR_XFX 0x1682
#define NV_PCI_SUBID_VENDOR_PCPARTNER 0x19DA
#define NV_PCI_SUBID_VENDOR_LENOVO 0x17AA
#define NV_PCI_SUBID_VENDOR_FSC 0x1734
#define NV_PCI_SUBID_VENDOR_FTS 0x1734
#define NV_PCI_SUBID_VENDOR_COLORFUL 0x7377
#define NV_PCI_SUBID_VENDOR_ASROCK 0x1849
#define NV_PCI_SUBID_VENDOR_SHUTTLE 0x1297
#define NV_PCI_SUBID_VENDOR_CLEVO 0x1558
#define NV_PCI_SUBID_VENDOR_PEGATRON 0x1B0A
#define NV_PCI_SUBID_VENDOR_JETWAY 0x16F3
#define NV_PCI_SUBID_VENDOR_HIGHGRADE 0x1C6C
#define NV_PCI_SUBID_VENDOR_GALAXY 0x1B4C
#define NV_PCI_SUBID_VENDOR_ZOTAC 0x19DA
#define NV_PCI_SUBID_VENDOR_ARIMA 0x161F
#define NV_PCI_SUBID_VENDOR_BFG 0x19F1
#define NV_PCI_SUBID_VENDOR_SONY 0x104D
#define NV_PCI_SUBID_VENDOR_BITLAND 0x1642
#define NV_PCI_SUBID_VENDOR_PC_PARTNER 0x174B
#define NV_PCI_SUBID_VENDOR_NZXT 0x1D96
// XXX CLEANUP TO REMOVE IN FAVOR OF NV_PCI_SUBID_VENDOR_*
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_VENDOR_ID_ALI 0x10B9
#define PCI_VENDOR_ID_NVIDIA 0x10DE
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_VENDOR_ID_RCC 0x1166
#define PCI_VENDOR_ID_MICRON_1 0x1042
#define PCI_VENDOR_ID_MICRON_2 0x1344
#define PCI_VENDOR_ID_APPLE 0x106B
#define PCI_VENDOR_ID_SIS 0x1039
#define PCI_VENDOR_ID_ATI 0x1002
#define PCI_VENDOR_ID_TRANSMETA 0x1279
#define PCI_VENDOR_ID_HP 0x103C
#define PCI_VENDOR_ID_DELL 0x1028
#define PCI_VENDOR_ID_FUJITSU 0x10cf
#define PCI_VENDOR_ID_ASUS 0x1043
#define PCI_VENDOR_ID_MSI 0x1462
#define PCI_VENDOR_ID_FOXCONN 0x105B
#define PCI_VENDOR_ID_ECS 0x1019
#define PCI_VENDOR_ID_DFI_1 0x106E
#define PCI_VENDOR_ID_TOSHIBA 0x1179
#define PCI_VENDOR_ID_DFI_2 0x15BD
#define PCI_VENDOR_ID_ACER 0x1025
#define PCI_VENDOR_ID_GIGABYTE 0x1458
#define PCI_VENDOR_ID_EVGA 0x3842
#define PCI_VENDOR_ID_BROADCOM 0x1166
#define PCI_VENDOR_ID_SUPERMICRO 0x15D9
#define PCI_VENDOR_ID_BIOSTAR 0x1565
#define PCI_VENDOR_ID_XFX 0x1682
#define PCI_VENDOR_ID_PCPARTNER 0x19DA
#define PCI_VENDOR_ID_LENOVO 0x17AA
#define PCI_VENDOR_ID_FSC 0x1734
#define PCI_VENDOR_ID_FTS 0x1734
#define PCI_VENDOR_ID_COLORFUL 0x7377
#define PCI_VENDOR_ID_ASROCK 0x1849
#define PCI_VENDOR_ID_SHUTTLE 0x1297
#define PCI_VENDOR_ID_CLEVO 0x1558
#define PCI_VENDOR_ID_PEGATRON 0x1B0A
#define PCI_VENDOR_ID_JETWAY 0x16F3
#define PCI_VENDOR_ID_HIGHGRADE 0x1C6C
#define PCI_VENDOR_ID_GALAXY 0x1B4C
#define PCI_VENDOR_ID_ZOTAC 0x19DA
#define PCI_VENDOR_ID_ARIMA 0x161F
#define PCI_VENDOR_ID_PC_PARTNER 0x174B
#define PCI_VENDOR_ID_APM 0x10E8
#define PCI_VENDOR_ID_IBM 0x1014
#define PCI_VENDOR_ID_NZXT 0x1D96
#define PCI_VENDOR_ID_MARVELL 0x177D
#define PCI_VENDOR_ID_REDHAT 0x1B36
#define PCI_VENDOR_ID_AMPERE 0x1DEF
#define PCI_VENDOR_ID_HUAWEI 0x19E5
#define PCI_VENDOR_ID_MELLANOX 0x15B3
#define PCI_VENDOR_ID_AMAZON 0x1D0F
#define PCI_VENDOR_ID_CADENCE 0x17CD
#define PCI_VENDOR_ID_ARM 0x13B5
#define PCI_VENDOR_ID_HYGON 0x1D94
#define NV_PCI_DEVID_DEVICE 31:16 /* RW--F */
#define NV_PCI_SUBID_DEVICE 31:16 /* RW--F */
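//
// Illustrative sketch: the "15:0"/"31:16" values above are DRF-style
// bit-range fields, extracted with the DRF_* helpers (assumed available
// from nvmisc.h). Decoding a combined subsystem vendor/device dword might
// look like this (function name is hypothetical):
//
static inline void
devidSketchDecodeSubId(NvU32 subId, NvU16 *pVendor, NvU16 *pDevice)
{
    *pVendor = (NvU16)DRF_VAL(_PCI, _SUBID, _VENDOR, subId); // bits 15:0
    *pDevice = (NvU16)DRF_VAL(_PCI, _SUBID, _DEVICE, subId); // bits 31:16
}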
///////////////////////////////////////////////////////////////////////////////////////////
//
// GPU DEVICE IDS
//
///////////////////////////////////////////////////////////////////////////////////////////
#define NV_PCI_DEVID_DEVICE_PG171_SKU200_PG179_SKU220 0x25B6 /* NVIDIA A16 / NVIDIA A2 */
///////////////////////////////////////////////////////////////////////////////////////////
//
// SUBDEVICE IDs
//
///////////////////////////////////////////////////////////////////////////////////////////
// A16
#define NV_PCI_SUBID_DEVICE_PG171_SKU200 0x14A9
///////////////////////////////////////////////////////////////////////////////////////////
//
// CHIPSET IDs
//
///////////////////////////////////////////////////////////////////////////////////////////
// Desktop flavor of X58
#define X58_DESKTOP_DEVIDS 0x3400, 0x3405
// Mobile version of X58
#define X58_MOBILE_DEVID 0x3405
#define X58_MOBILE_CLEVO_7200_SSDEVID 0x7200
// Sandy Bridge CLEVO platform
#define SANDYBRIDGE_P180HM_SSDEVID 0x8000
#define SandyBridge_E_X79_P270WM_SSDEVID 0x270
#define IvyBridge_Z75_P370EM_SSDEVID 0x371
// Device IDs of devices present on Patsburg's PCIe bus.
#define PATSBURG_PCIE_DEVICE_MIN_DEVID 0x1D10
#define PATSBURG_PCIE_DEVICE_MAX_DEVID 0x1D1F
#define PATSBURG_PCIE_DEVICE_DEVID 0x244E
// Tylersburg configurations
#define TYLERSBURG_DEVID 0x3406
// Intel Grantsdale definitions
#define DEVICE_ID_INTEL_2580_HOST_BRIDGE 0x2580
#define DEVICE_ID_INTEL_2581_ROOT_PORT 0x2581
// Intel Alderwood definitions
#define DEVICE_ID_INTEL_2584_HOST_BRIDGE 0x2584
#define DEVICE_ID_INTEL_2585_ROOT_PORT 0x2585
// Intel Alviso definitions
#define DEVICE_ID_INTEL_2590_HOST_BRIDGE 0x2590
#define DEVICE_ID_INTEL_2591_ROOT_PORT 0x2591
// Intel Tumwater definitions
#define DEVICE_ID_INTEL_359E_HOST_BRIDGE 0x359E
#define DEVICE_ID_INTEL_3597_ROOT_PORT 0x3597
// Intel Stoakley definitions
#define INTEL_4000_SUBDEVICE_ID 0x021D
// Intel SkullTrail definitions
#define INTEL_4003_SUBDEVICE_ID 0x5358
// Intel Core I7 CPU
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I7 0x2C01
// Intel Core I5 CPU Lynnfield
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_L 0x2C81
#define INTEL_LYNNFIELD_ROOTPORT_CPU1 0xD138
#define INTEL_LYNNFIELD_ROOTPORT_CPU2 0xD13A
// Intel Core I5 CPU Auburndale
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_A 0x2D41
// Intel Core I5 CPU 650
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_6 0x2D01
// Intel Poulsbo definitions
#define DEVICE_ID_INTEL_8100_HOST_BRIDGE 0x8100
#define DEVICE_ID_INTEL_8110_ROOT_PORT 0x8110
#define DEVICE_ID_INTEL_8112_ROOT_PORT 0x8112
// Intel TunnelCreek definitions
#define DEVICE_ID_INTEL_8180_ROOT_PORT 0x8180
#define DEVICE_ID_INTEL_8181_ROOT_PORT 0x8181
#define DEVICE_ID_INTEL_8184_ROOT_PORT 0x8184
#define DEVICE_ID_INTEL_8185_ROOT_PORT 0x8185
// Intel I/O Hub definitions
#define DEVICE_ID_INTEL_3408_ROOT_PORT 0x3408
#define DEVICE_ID_INTEL_3411_ROOT_PORT 0x3411
#define DEVICE_ID_INTEL_3420_ROOT_PORT 0x3420
#define DEVICE_ID_INTEL_3421_ROOT_PORT 0x3421
// Intel SandyBridge IIO definitions
#define DEVICE_ID_INTEL_3C02_ROOT_PORT 0x3c02
#define DEVICE_ID_INTEL_3C03_ROOT_PORT 0x3c03
#define DEVICE_ID_INTEL_3C04_ROOT_PORT 0x3c04
#define DEVICE_ID_INTEL_3C05_ROOT_PORT 0x3c05
#define DEVICE_ID_INTEL_3C06_ROOT_PORT 0x3c06
#define DEVICE_ID_INTEL_3C07_ROOT_PORT 0x3c07
#define DEVICE_ID_INTEL_3C08_ROOT_PORT 0x3c08
#define DEVICE_ID_INTEL_3C09_ROOT_PORT 0x3c09
#define DEVICE_ID_INTEL_3C0A_ROOT_PORT 0x3c0a
#define DEVICE_ID_INTEL_3C0B_ROOT_PORT 0x3c0b
// Intel Haswell-E definitions
#define DEVICE_ID_INTEL_2F00_HOST_BRIDGE 0x2f00
#define DEVICE_ID_INTEL_2F01_ROOT_PORT 0x2f01
#define DEVICE_ID_INTEL_2F02_ROOT_PORT 0x2f02
#define DEVICE_ID_INTEL_2F03_ROOT_PORT 0x2f03
#define DEVICE_ID_INTEL_2F04_ROOT_PORT 0x2f04
#define DEVICE_ID_INTEL_2F05_ROOT_PORT 0x2f05
#define DEVICE_ID_INTEL_2F06_ROOT_PORT 0x2f06
#define DEVICE_ID_INTEL_2F07_ROOT_PORT 0x2f07
#define DEVICE_ID_INTEL_2F08_ROOT_PORT 0x2f08
#define DEVICE_ID_INTEL_2F09_ROOT_PORT 0x2f09
#define DEVICE_ID_INTEL_2F0A_ROOT_PORT 0x2f0a
#define DEVICE_ID_INTEL_2F0B_ROOT_PORT 0x2f0b
#define DEVICE_ID_INTEL_0C01_ROOT_PORT 0x0c01
// Intel IvyTown definitions
#define DEVICE_ID_INTEL_0E02_ROOT_PORT 0x0e02
#define DEVICE_ID_INTEL_0E03_ROOT_PORT 0x0e03
#define DEVICE_ID_INTEL_0E04_ROOT_PORT 0x0e04
#define DEVICE_ID_INTEL_0E05_ROOT_PORT 0x0e05
#define DEVICE_ID_INTEL_0E06_ROOT_PORT 0x0e06
#define DEVICE_ID_INTEL_0E07_ROOT_PORT 0x0e07
#define DEVICE_ID_INTEL_0E08_ROOT_PORT 0x0e08
#define DEVICE_ID_INTEL_0E09_ROOT_PORT 0x0e09
#define DEVICE_ID_INTEL_0E0A_ROOT_PORT 0x0e0a
#define DEVICE_ID_INTEL_0E0B_ROOT_PORT 0x0e0b
// Intel Ivy Bridge E definitions
#define DEVICE_ID_INTEL_0E00_HOST_BRIDGE 0x0E00
// Intel Haswell definitions
#define DEVICE_ID_INTEL_0C00_HASWELL_HOST_BRIDGE 0x0C00
#define DEVICE_ID_INTEL_0C04_HASWELL_HOST_BRIDGE 0x0C04
// Intel PCH definitions
#define DEVICE_ID_INTEL_9D10_PCH_BRIDGE 0x9d10
#define DEVICE_ID_INTEL_9D18_PCH_BRIDGE 0x9d18
#define DEVICE_ID_INTEL_A117_PCH_BRIDGE 0xa117
#define DEVICE_ID_INTEL_A118_PCH_BRIDGE 0xa118
#define DEVICE_ID_INTEL_9C98_PCH_BRIDGE 0x9c98
// Intel Broadwell definitions
#define DEVICE_ID_INTEL_6F00_HOST_BRIDGE 0x6f00
#define DEVICE_ID_INTEL_6F01_ROOT_PORT 0x6f01
#define DEVICE_ID_INTEL_6F02_ROOT_PORT 0x6f02
#define DEVICE_ID_INTEL_6F03_ROOT_PORT 0x6f03
#define DEVICE_ID_INTEL_6F04_ROOT_PORT 0x6f04
#define DEVICE_ID_INTEL_6F05_ROOT_PORT 0x6f05
#define DEVICE_ID_INTEL_6F06_ROOT_PORT 0x6f06
#define DEVICE_ID_INTEL_6F07_ROOT_PORT 0x6f07
#define DEVICE_ID_INTEL_6F08_ROOT_PORT 0x6f08
#define DEVICE_ID_INTEL_6F09_ROOT_PORT 0x6f09
#define DEVICE_ID_INTEL_6F0A_ROOT_PORT 0x6f0A
#define DEVICE_ID_INTEL_6F0B_ROOT_PORT 0x6f0B
#define DEVICE_ID_INTEL_1601_ROOT_PORT 0x1601
#define DEVICE_ID_INTEL_1605_ROOT_PORT 0x1605
#define DEVICE_ID_INTEL_1609_ROOT_PORT 0x1609
#define DEVICE_ID_INTEL_BROADWELL_U_HOST_BRIDGE 0x1604
#define DEVICE_ID_INTEL_BROADWELL_H_HOST_BRIDGE 0x1614
// Intel Skylake definitions
#define DEVICE_ID_INTEL_1901_ROOT_PORT 0x1901
#define DEVICE_ID_INTEL_1905_ROOT_PORT 0x1905
#define DEVICE_ID_INTEL_1909_ROOT_PORT 0x1909
#define DEVICE_ID_INTEL_SKYLAKE_U_HOST_BRIDGE 0x1904
#define DEVICE_ID_INTEL_SKYLAKE_S_HOST_BRIDGE 0x191F
#define DEVICE_ID_INTEL_SKYLAKE_H_HOST_BRIDGE 0x1910
// Intel Skylake-E definitions
#define DEVICE_ID_INTEL_2030_ROOT_PORT 0x2030
#define DEVICE_ID_INTEL_2033_ROOT_PORT 0x2033
// Intel Kabylake definitions
#define DEVICE_ID_INTEL_KABYLAKE_U_HOST_BRIDGE 0x5904
#define DEVICE_ID_INTEL_KABYLAKE_H_HOST_BRIDGE 0x5910
// AMD Matisse, Rome definitions
#define DEVICE_ID_AMD_1483_ROOT_PORT 0x1483
// AMD Castle Peak definition
#define DEVICE_ID_AMD_1480_ROOT_PORT 0x1480
// AMD Renoir-H definition
#define DEVICE_ID_AMD_1630_ROOT_PORT 0x1630
// Dell SkullTrail definitions
#define DELL_4003_SUBDEVICE_ID 0x021D
// Dell Quicksilver MLK definitions
#define DELL_0040_SUBDEVICE_ID 0x043a
// HP Tylersburg definitions
#define TYLERSBURG_Z800_SSDEVID 0x130B
// HP Romley definitions
#define ROMLEY_Z820_SSDEVID 0x158B
#define ROMLEY_Z620_SSDEVID 0x158A
#define ROMLEY_Z420_SSDEVID 0x1589
// HP Grantley definitions
#define GRANTLEY_Z840_SSDEVID 0x2129
#define GRANTLEY_Z640_SSDEVID 0x212A
#define GRANTLEY_Z440_SSDEVID 0x212B
// HP Purley definitions
#define HP_QUADRO_Z4GEN4_DEVID 0xA2D2
#define PURLEY_Z8GEN4_SSDEVID 0x81C7
#define PURLEY_Z6GEN4_SSDEVID 0x81C6
#define PURLEY_Z4GEN4_SSDEVID 0x81C5
// Lenovo Romley definitions
#define ROMLEY_C30_SSDEVID 0x1028
#define ROMLEY_D30_SSDEVID 0x1027
#define ROMLEY_S30_SSDEVID 0x1026
// Dell Romley definitions
#define ROMLEY_T7600_SSDEVID 0x0495
#define ROMLEY_T5600_SSDEVID 0x0496
#define ROMLEY_T3600_SSDEVID 0x0497
// Dell Romley + IVB-EP CPU Refresh
#define IVYTOWN_T7610_SSDEVID 0x05D4
#define IVYTOWN_T5610_SSDEVID 0x05D3
// Dell Romley (Ipanema)
#define ROMLEY_R7610_SSDEVID 0x05A1
// FTS Romley definitions
#define ROMLEY_R920_SSDEVID 0x11B6
// Lenovo Grantley (Messi, Pele, Ronaldo)
#define GRANTLEY_V40_SSDEVID 0x1031
#define GRANTLEY_D40_SSDEVID 0x1030
#define GRANTLEY_S40_SSDEVID 0x102F
// Dell Grantley (Avalon)
#define GRANTLEY_T7810_SSDEVID 0x0618
#define GRANTLEY_T7910_SSDEVID 0x0619
// Lenovo Purley (Nile, Volga)
#define PURLEY_P920_SSDEVID 0x1038
#define PURLEY_P720_SSDEVID 0x1037
#define PURLEY_P520_SSDEVID 0x1036
// Lenovo P520c
#define LENOVO_P520C_SSDEVID 0x103C
// Dell Purley (Matira)
#define PURLEY_MATIRA3X_DEVID 0xA2D2
#define PURLEY_MATIRA3X_SSDEVID 0x08B1
#define PURLEY_MATIRA3_SSDEVID 0x0738
#define PURLEY_MATIRA5_SSDEVID 0x0739
#define PURLEY_MATIRA7_SSDEVID 0x073A
// FTS Grantley
#define GRANTLEY_R940_SSDEVID 0x1201
// FTS Purley
#define PURLEY_R970_SSDEVID 0x1230
#define PURLEY_M770_SSDEVID 0x1231
// HP Arrandale, Clarksfield, X58 workstation definitions
#define ARRANDALE_Z200SFF_SSDEVID 0x304A
#define CLARKSFIELD_Z200_SSDEVID 0x170B
#define X58_Z400_SSDEVID 0x1309
// GIGABYTE Sniper 3 (Z77)
#define GIGABYTE_SNIPER_3_SSDEVID_1 0x5000
#define GIGABYTE_SNIPER_3_SSDEVID_2 0x5001
// Supermicro Quadro VCA definitions
#define SUPERMICRO_QUADRO_VCA_DEVID 0x8D44
#define SUPERMICRO_QUADRO_VCA_SSDEVID 0x7270
// Supermicro SYS-4027GR-TRT
#define SUPERMICRO_SYS_4027GR_TRT_DEVID 0x1D41
#define SUPERMICRO_SYS_4027GR_TRT_SSDEVID 0x0732
// Supermicro SYS-4029GP-TRT2
#define SUPERMICRO_SYS_4029GP_TRT2_DEVID 0xA1C2
#define SUPERMICRO_SYS_4029GP_TRT2_SSDEVID 0x7270
// Asus Quadro BOXX definitions
#define ASUS_QUADRO_BOXX_DEVID 0x8D44
#define ASUS_QUADRO_BOXX_SSDEVID 0x85F6
// APEXX8 Quadro BOXX definitions
#define APEXX8_QUADRO_BOXX_DEVID 0xA2D3
#define APEXX8_QUADRO_BOXX_SSDEVID 0x098e
// APEXX5 Quadro BOXX definitions
#define APEXX5_QUADRO_BOXX_DEVID 0xA2D3
#define APEXX5_QUADRO_BOXX_SSDEVID 0x1000
// ASUS X99-E-10G
#define ASUS_X99_E_10G_SSDEVID 0x8600
// VIA definitions
#define DEVICE_ID_VIA_VT8369B_HOST_BRIDGE 0x0308
// Foxconn Einstein 64 [8086:a1c1][105b:7270]
#define FOXCONN_EINSTEIN_64_DEVID 0xA1C1
#define FOXCONN_EINSTEIN_64_SSDEVID 0x7270
// Tyan Workstation
#define TYAN_B7100_DEVID 0xA1C1
#define TYAN_B7100_SSDEVID 0x7270
// ESC 4000 Series Workstation
#define ESC_4000_G4_DEVID 0xA1C1
#define ESC_4000_G4_SSDEVID 0x871E
// NVIDIA C51
#define NVIDIA_C51_DEVICE_ID_MIN 0x2F0
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_0 0x2F0
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_1 0x2F1
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_2 0x2F2
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_3 0x2F3
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_0 0x2F4
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_1 0x2F5
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_2 0x2F6
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_3 0x2F7
#define NVIDIA_C51_DEVICE_ID_MAX 0x2F7
// NVIDIA MCP55
#define NVIDIA_MCP55_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0369
// NVIDIA MCP61
#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x03EA
#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_PA 0x03E2
// NVIDIA C55
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_PRO 0x03A0
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLIX16 0x03A1
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLI 0x03A3
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_U 0x03A2
// NVIDIA MCP65
#define NVIDIA_MCP65_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0444
// NVIDIA MCP67/MCP68
#define NVIDIA_MCP67_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0547
// NVIDIA MCP73
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_PV 0x07C0
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_O 0x07C1
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_S 0x07C2
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_V 0x07C3
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_0 0x07C4
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_1 0x07C5
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_2 0x07C6
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_D 0x07C7
// NVIDIA C73
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI2 0x0800
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI_ALL 0x0801
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLIX8 0x0802
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_U 0x0803
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_0 0x0804
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_1 0x0805
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_2 0x0806
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_3 0x0807
// NVIDIA MCP77/78
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0754
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_1 0x0755
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_2 0x0756
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_3 0x0757
#define NVIDIA_MCP77_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0752
// NVIDIA MCP79/7A
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_DEFAULT 0x0A80
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLIX16 0x0A81
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLI 0x0A82
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_U 0x0A83
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GM 0x0A84
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GVM 0x0A85
#define NVIDIA_MCP79_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0AA2
// NVIDIA MCP89/P83
#define NVIDIA_MCP89_CPU_PCI_0_DEVICE_ID_DEFAULT 0x00000D60
///////////////////////////////////////////////////////////////////////////////////////////
//
// enumeration of chipset families
//
///////////////////////////////////////////////////////////////////////////////////////////
//
// When adding a variable to the following enum, please
// add it also to the following chipset_names[].
//
enum {
CS_UNKNOWN = 0x0000,
CS_UNKNOWN_PCIE = 0x1000
, CS_INTEL_2580
, CS_INTEL_2584
, CS_INTEL_2588
, CS_INTEL_2590
, CS_INTEL_25E0
, CS_INTEL_29X0
, CS_INTEL_29E0
, CS_INTEL_359E
, CS_INTEL_4000
, CS_INTEL_4003
, CS_INTEL_3400
, CS_INTEL_3B42
, CS_INTEL_2770
, CS_INTEL_2774
, CS_INTEL_277C
, CS_INTEL_2A40
, CS_INTEL_2E00
, CS_INTEL_0040
, CS_INTEL_1C10
, CS_INTEL_1C46
, CS_INTEL_1C49
, CS_INTEL_1D40
, CS_INTEL_8D47
, CS_INTEL_1E10
, CS_INTEL_8C4B
, CS_INTEL_8CC4
, CS_INTEL_A145
, CS_INTEL_A2C5
, CS_INTEL_A242
, CS_INTEL_A2D2
, CS_INTEL_A2C9
, CS_INTEL_A301
, CS_INTEL_0685
, CS_INTEL_4381
, CS_INTEL_7A82
, CS_NVIDIA_CK804
, CS_NVIDIA_C19
, CS_NVIDIA_C51
, CS_NVIDIA_MCP55
, CS_NVIDIA_MCP61
, CS_NVIDIA_C55
, CS_NVIDIA_MCP65
, CS_NVIDIA_MCP67
, CS_NVIDIA_MCP73
, CS_NVIDIA_C73
, CS_NVIDIA_MCP77
, CS_NVIDIA_MCP79
, CS_NVIDIA_MCP89
, CS_NVIDIA_TEGRA3
, CS_SIS_649
, CS_SIS_656
, CS_ATI_RS400
, CS_ATI_RS400_A21
, CS_ATI_RS480
, CS_ATI_RS480_A21
, CS_AMD_RS780
, CS_VIA_VT8369B
, CS_ATI_FX790
, CS_ATI_RD850
, CS_ATI_RD870
, CS_ATI_RD890
, CS_ATI_FX890
, CS_ATI_RX780
, CS_ATI_FX990
, CS_AMD_GX890
, CS_AMD_X370
, CS_VIA_VX900
, CS_APM_STORM
, CS_IBM_VENICE
, CS_NVIDIA_T124
, CS_NVIDIA_T210
, CS_NVIDIA_T186
, CS_NVIDIA_T194
, CS_NVIDIA_T234
, CS_MARVELL_THUNDERX2
, CS_REDHAT_QEMU
, CS_AMPERE_EMAG
, CS_HUAWEI_KUNPENG920
, CS_MELLANOX_BLUEFIELD
, CS_AMAZON_GRAVITRON2
, CS_FUJITSU_A64FX
, CS_PHYTIUM_FT2000
, CS_AMPERE_ALTRA
, CS_ARM_NEOVERSEN1
, CS_MARVELL_OCTEON_CN96XX
, CS_MARVELL_OCTEON_CN98XX
, CS_INTEL_C620
, CS_HYGON_C86
, CS_PHYTIUM_S2500
, CS_MELLANOX_BLUEFIELD2
, CS_MAX_PCIE
};
enum {
RP_UNKNOWN = 0
, RP_BROADCOM_HT2100
, RP_INTEL_2581
, RP_INTEL_2585
, RP_INTEL_2589
, RP_INTEL_2591
, RP_INTEL_3597
, RP_INTEL_2775
, RP_INTEL_2771
, RP_INTEL_8110
, RP_INTEL_8112
, RP_INTEL_8180
, RP_INTEL_8181
, RP_INTEL_8184
, RP_INTEL_8185
, RP_INTEL_3C02
, RP_INTEL_3C03
, RP_INTEL_3C04
, RP_INTEL_3C05
, RP_INTEL_3C06
, RP_INTEL_3C07
, RP_INTEL_3C08
, RP_INTEL_3C09
, RP_INTEL_3C0A
, RP_INTEL_3C0B
, RP_INTEL_2F04
, RP_INTEL_2F08
, RP_INTEL_0C01
, RP_INTEL_1601
, RP_INTEL_1605
, RP_INTEL_1609
, RP_INTEL_1901
, RP_INTEL_1905
, RP_INTEL_1909
, RP_INTEL_5904
, RP_NVIDIA_CK804
, RP_NVIDIA_C19
, RP_NVIDIA_C51
, RP_NVIDIA_MCP55
, RP_NVIDIA_MCP61
, RP_NVIDIA_C55
, RP_NVIDIA_MCP65
};
#endif //NVDEVID_H

View File

@@ -0,0 +1,292 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVPCIE_H
#define NVPCIE_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Private PCI Express related defines and structures. *
* *
\***************************************************************************/
#define PCI_VENDOR_ID 0x00
#ifndef PCI_DEVICE_ID
#define PCI_DEVICE_ID 0x02
#endif
#define PCI_BASE_ADDRESS_1 0x14 /* Aperture Base */
#define PCI_BASE_ADDRESS_2 0x18 /* Aperture Base */
#define PCI_CAPABILITY_LIST 0x34
#define PCI_DEVICE_SPECIFIC 0x40
#define NV_PCI_ID 0x0
#define NV_PCI_ID_VENDOR 15:0
#define NV_PCI_ID_VENDOR_NVIDIA 0x10DE
#define NV_PCI_ID_DEVICE 31:16
#define PCI_MAX_SLOTS 255
#define PCI_MAX_LANE_WIDTH 32
#define PCI_MAX_FUNCTION 8
#define PCI_INVALID_VENDORID 0xFFFF
#define PCI_INVALID_DEVICEID 0xFFFF
#define PCI_INVALID_SUBVENDORID 0xFFFF
#define PCI_INVALID_SUBDEVICEID 0xFFFF
#define PCI_CLASS_BRIDGE_DEV 0x06
#define PCI_SUBCLASS_BR_HOST 0x00
#define PCI_MULTIFUNCTION 0x80
// From PCI Local Bus Specification, Revision 3.0
#define CAP_ID_MASK 0xFF
#define CAP_ID_PMI 0x01
#define CAP_ID_AGP 0x02
#define CAP_ID_VPD 0x03
#define CAP_ID_SLOT_ID 0x04
#define CAP_ID_MSI 0x05
#define CAP_ID_HOT_SWAP 0x06
#define CAP_ID_PCI_X 0x07
#define CAP_ID_HYPER_TRANSPORT 0x08
#define CAP_ID_VENDOR_SPECIFIC 0x09
#define CAP_ID_DEBUG_PORT 0x0A
#define CAP_ID_CRC 0x0B
#define CAP_ID_HOT_PLUG 0x0C
#define CAP_ID_SUBSYSTEM_ID 0x0D
#define CAP_ID_AGP8X 0x0E
#define CAP_ID_SECURE 0x0F
#define CAP_ID_PCI_EXPRESS 0x10
#define CAP_ID_MSI_X 0x11
//
// Extended config space size is 4096 bytes.
//
#define PCI_EXTENDED_CONFIG_SPACE_LENGTH 4096
//
// From PCI Local Bus Specification, Revision 3.0
// HEADER TYPE0 Definitions - Byte offsets
//
#define PCI_HEADER_TYPE0_VENDOR_ID 0x00
#define PCI_HEADER_TYPE0_DEVICE_ID 0x02
#define PCI_HEADER_TYPE0_COMMAND 0x04
#define PCI_HEADER_TYPE0_STATUS 0x06
#define PCI_HEADER_TYPE0_REVISION_ID 0x08
#define PCI_HEADER_TYPE0_PROGIF 0x09
#define PCI_HEADER_TYPE0_SUBCLASS 0x0A
#define PCI_HEADER_TYPE0_BASECLASS 0x0B
#define PCI_HEADER_TYPE0_CACHE_LINE_SIZE 0x0C
#define PCI_HEADER_TYPE0_LATENCY_TIMER 0x0D
#define PCI_HEADER_TYPE0_HEADER_TYPE 0x0E
#define PCI_HEADER_TYPE0_BIST 0x0F
#define PCI_HEADER_TYPE0_BAR0 0x10
#define PCI_HEADER_TYPE0_BAR1 0x14
#define PCI_HEADER_TYPE0_BAR2 0x18
#define PCI_HEADER_TYPE0_BAR3 0x1C
#define PCI_HEADER_TYPE0_BAR4 0x20
#define PCI_HEADER_TYPE0_BAR5 0x24
#define PCI_HEADER_TYPE0_CBCIS_PTR 0x28
#define PCI_HEADER_TYPE0_SUBSYS_VEN_ID 0x2C
#define PCI_HEADER_TYPE0_SUBSYS_ID 0x2E
#define PCI_HEADER_TYPE0_ROMBAR 0x30
#define PCI_HEADER_TYPE0_CAP_PTR 0x34
#define PCI_HEADER_TYPE0_INT_LINE 0x3C
#define PCI_HEADER_TYPE0_INT_PIN 0x3D
#define PCI_HEADER_TYPE0_MIN_GNT 0x3E
#define PCI_HEADER_TYPE0_MAX_LAT 0x3F
//
// From PCI Express Base Specification Revision 2.0
// HEADER TYPE1 Definitions
#define PCI_HEADER_TYPE1_BRIDGE_CONTROL 0x3E
#define PCI_HEADER_TYPE1_BRIDGE_CONTROL_VGA_EN 0x08
#define PCIE_LINK_CAPABILITIES_2 0x000000A4 /* R--4R */
#define PCIE_LINK_CAPABILITIES_2_RSVD 0:0 /* C--VF */
#define PCIE_LINK_CAPABILITIES_2_RSVD_INIT 0x00000000 /* C---V */
#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED 7:1 /* R-EVF */
#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2_GEN3_GEN4_GEN5 0x0000001F /* R---V */
#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_HIDDEN 0x00000000 /* R---V */
#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1 0x00000001 /* R---V */
#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2 0x00000003 /* R---V */
#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2_GEN3 0x00000007 /* R-E-V */
#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2_GEN3_GEN4 0x0000000F /* R---V */
#define PCIE_LINK_CAPABILITIES_2_CROSS_LINK_SUPPORT 8:8 /* C--VF */
#define PCIE_LINK_CAPABILITIES_2_CROSS_LINK_SUPPORT_DISABLED 0x00000000 /* C---V */
#define PCIE_LINK_CAPABILITIES_2_RET_PRESENCE_DET_SUPP 23:23 /* R-EVF */
#define PCIE_LINK_CAPABILITIES_2_RET_PRESENCE_DET_SUPP_UNSET 0x00000000 /* R-E-V */
#define PCIE_LINK_CAPABILITIES_2_2RET_PRESENCE_DET_SUPP 24:24 /* R-EVF */
#define PCIE_LINK_CAPABILITIES_2_2RET_PRESENCE_DET_SUPP_UNSET 0x00000000 /* R-E-V */
#define PCIE_LINK_CAPABILITIES_2_RSVD1 31:25 /* C--VF */
#define PCIE_LINK_CAPABILITIES_2_RSVD1_INIT 0x00000000 /* C---V */
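//
// Illustrative sketch: decoding the SUPPORTED_LINK_SPEED vector above. Each
// set bit in field 7:1 advertises one PCIe generation, so the position of
// the highest set bit is the maximum supported speed. The function name is
// hypothetical.
//
static inline NvU32
pcieSketchMaxSupportedGen(NvU32 linkCaps2)
{
    // Extract bits 7:1 (PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED).
    NvU32 speeds = (linkCaps2 >> 1) & 0x7F;
    NvU32 gen = 0;

    while (speeds != 0)
    {
        gen++;
        speeds >>= 1;
    }
    return gen; // 0 = hidden/none, 1 = Gen1, ... 5 = Gen5
}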
//
// PCI Express Virtual Peer-to-Peer Approval Definition
//
// These offsets are unused in hardware on existing chips and are reserved on
// future chips. Software has defined a virtual PCI capability that may be
// emulated by hypervisors at these offsets, and this capability is not tied
// to any specific hardware.
//
//
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0 0x000000C8
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_ID 7:0
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_NEXT 15:8
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_LENGTH 23:16
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_SIG_LO 31:24
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1 0x000000CC
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_SIG_HI 15:0
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_VERSION 18:16
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_PEER_CLIQUE_ID 22:19
#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_RSVD 31:23
#define NV_PCI_VIRTUAL_P2P_APPROVAL_SIGNATURE 0x00503250
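//
// Illustrative sketch: validating the virtual P2P approval capability from
// its two dwords. The 24-bit signature is assumed here to be composed from
// SIG_HI (cap1 bits 15:0) as the upper bytes and SIG_LO (cap0 bits 31:24)
// as the low byte; the helper name is hypothetical.
//
static inline NvBool
p2pApprovalSketchIsValid(NvU32 cap0, NvU32 cap1)
{
    NvU32 sigLo = (cap0 >> 24) & 0xFF; // _CAP_0_SIG_LO, bits 31:24
    NvU32 sigHi = cap1 & 0xFFFF;       // _CAP_1_SIG_HI, bits 15:0

    return ((sigHi << 8) | sigLo) == NV_PCI_VIRTUAL_P2P_APPROVAL_SIGNATURE;
}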
// Chipset-specific definitions.
// Intel SantaRosa definitions
#define INTEL_2A00_CONFIG_SPACE_BASE 0x60
// Intel Montevina definitions
#define INTEL_2A40_CONFIG_SPACE_BASE 0x60
#define INTEL_2A40_ASLM_CAPABLE_REVID 0x05
// Intel EagleLake definitions
#define INTEL_2E00_CONFIG_SPACE_BASE 0x60
// Intel Bearlake definitions
#define INTEL_29XX_CONFIG_SPACE_BASE 0x60
// Intel BroadWater definitions
#define INTEL_29A0_CONFIG_SPACE_BASE 0x60
// Intel Grantsdale definitions
#define INTEL_25XX_CONFIG_SPACE_BASE 0x48
// Intel Tumwater definitions
#define INTEL_359E_CONFIG_SPACE_BASE 0xCC
// Intel Greencreek definitions
#define INTEL_25E0_CONFIG_SPACE_BASE_ADDRESS 0xE0000000
// Intel Stoakley definitions
#define INTEL_4000_CONFIG_SPACE_BASE_ADDRESS 0xE0000000
// Intel SkullTrail definitions
#define INTEL_4003_CONFIG_SPACE_BASE_ADDRESS_F 0xF0000000
#define INTEL_4003_CONFIG_SPACE_BASE_ADDRESS_E 0xE0000000
#define INTEL_4003_CONFIG_SPACE_BASE_ADDRESS INTEL_4003_CONFIG_SPACE_BASE_ADDRESS_F
#define INTEL_4003_CONFIG_SPACE_BASE 0x64
// SiS 656
#define SIS_656_CONFIG_SPACE_BASE 0xE0
#define SIS_656_CONFIG_SPACE_BASE_ADDRESS 3:0 // mapped to 31:28
// PCI/PCIE definitions
#define PCI_MAX_CAPS 20 // max caps to parse
#define PCI_MAX_DEVICES 32 // max devices on bus
#define PCI_MAX_FUNCTIONS 8 // max functions for a device
#define PCI_CAP_HEADER_ID 7:0 // PCI cap header id
#define PCI_CAP_HEADER_NEXT 15:8 // PCI cap header next
#define PCI_COMMON_CLASS_SUBCLASS 0x0a // PCI class/subclass (word)
#define PCI_COMMON_CLASS_SUBBASECLASS_HOST 0x0600 // Host bridge (connects PCI to CPU) [00] + Bridge Device [06]
#define PCI_COMMON_CLASS_SUBBASECLASS_P2P 0x0604 // PCI-to-PCI bridge (connects PCI buses) [04] + Bridge Device [06]
#define PCI_COMMON_CLASS_SUBBASECLASS_VGA 0x0300
#define PCI_COMMON_CLASS_SUBBASECLASS_3DCTRL 0x0302
#define PCI_COMMON_CAP_PTR 0x34 // PCI common cap ptr (byte)
#define PCI_TYPE_1_SECONDARY_BUS_NUMBER 0x19 // PCI type 1 sec bus (byte)
#define PCI_TYPE_1_SUBORDINATE_BUS_NUMBER 0x1a // PCI type 1 sub bus (byte)
#define PCIE_CAP_HEADER_ID 15:0 // PCIE cap header id
#define PCIE_CAP_HEADER_ID_INVALID 0xffff
#define PCIE_CAP_HEADER_NEXT 31:20 // PCIE cap header next
#define PCIE_BUS_SHIFT 20 // PCIE cfg space bus shift
#define PCIE_DEVICE_SHIFT 15 // PCIE cfg space dev shift
#define PCIE_FUNC_SHIFT 12 // PCIE cfg space func shift
#define PCIE_CAP_VERSION 19:16 // PCIE cap version
#define PCIE_CAP_VERSION_2P0 2 // PCIE 2.0 version
#define PCIE_LINK_CNTRL_STATUS_2_OFFSET 0x30 // PCIE Link Control/Status 2 offset
#define PCIE_LINK_STATUS_2 31:16 // PCIE Link Status 2 Register
#define PCIE_LINK_STATUS_2_DE_EMPHASIS 0:0 // PCIE De-Emphasis Level
#define PCI_COMMON_SUBSYSTEM_VENDOR_ID 0x2c // PCI subsystem Vendor Id
#define PCI_COMMON_SUBSYSTEM_ID 0x2e // PCI subsystem Id
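//
// Illustrative sketch: a standard PCI capability-list walk using the defines
// above. The 32-bit config-read callback is hypothetical; real code reads
// config space through the OS layer.
//
typedef NvU32 (*PciCfgRead32Sketch)(void *pCtx, NvU32 offset);

static inline NvU32
pciSketchFindCap(PciCfgRead32Sketch readFn, void *pCtx, NvU32 capId)
{
    NvU32 ptr = readFn(pCtx, PCI_COMMON_CAP_PTR) & 0xFF;
    NvU32 i;

    // Bound the walk so a corrupt capability list cannot loop forever.
    for (i = 0; (ptr != 0) && (i < PCI_MAX_CAPS); i++)
    {
        NvU32 header = readFn(pCtx, ptr);

        if ((header & CAP_ID_MASK) == capId)
            return ptr;               // offset of the matching capability
        ptr = (header >> 8) & 0xFF;   // PCI_CAP_HEADER_NEXT, bits 15:8
    }
    return 0; // not found
}
// e.g. pciSketchFindCap(readFn, pCtx, CAP_ID_PCI_EXPRESS) locates the
// PCI Express capability.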
// PCI Express Capability ID in the enhanced configuration space
#define PCIE_CAP_ID_ERROR 0x1 // PCIE Advanced Error Reporting
#define PCIE_CAP_ID_VC 0x2 // PCIE Virtual Channel (VC)
#define PCIE_CAP_ID_SERIAL 0x3 // PCIE Device Serial Number
#define PCIE_CAP_ID_POWER 0x4 // PCIE Power Budgeting
#define PCIE_CAP_ID_L1_PM_SUBSTATES 0x1E // PCIE L1 PM Substates
// Intel CPU family.
#define INTEL_CPU_FAMILY_06 0x06
#define INTEL_CPU_FAMILY_15 0x0f
#define INTEL_CPU_FAMILY_16 0x10
#define INTEL_CPU_FAMILY_21 0x15
// Intel CPU Model. Calculated as Model += (extModel << 4).
#define INTEL_CPU_MODEL_2A 0x2a
#define INTEL_CPU_MODEL_2D 0x2d
#define INTEL_CPU_MODEL_3A 0x3a
#define INTEL_CPU_MODEL_3F 0x3f
// Symbolic defines for each possible virtual channel
enum
{
RM_PCIE_VIRTUAL_CHANNEL_0 = 0,
RM_PCIE_VIRTUAL_CHANNEL_1,
RM_PCIE_VIRTUAL_CHANNEL_2,
RM_PCIE_VIRTUAL_CHANNEL_3,
RM_PCIE_VIRTUAL_CHANNEL_4,
RM_PCIE_VIRTUAL_CHANNEL_5,
RM_PCIE_VIRTUAL_CHANNEL_6,
RM_PCIE_VIRTUAL_CHANNEL_7,
RM_PCIE_VIRTUAL_CHANNEL_INVALID
};
struct OBJCL;
// root port setup functions
NV_STATUS Broadcom_HT2100_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_RP25XX_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_RP81XX_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_RP3C0X_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_RP2F0X_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_RP0C0X_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_Broadwell_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_Skylake_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_Skylake_U_Pch_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_Skylake_H_Pch_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Intel_Kabylake_Y_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Nvidia_RPC19_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Nvidia_RPC51_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS Nvidia_RPC55_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS AMD_RP1480_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS AMD_RP1630_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS AMD_RP1483_setupFunc(OBJGPU *, OBJCL*);
// Determines if the GPU is in a multi-GPU board based on devid checks
NvBool gpuIsMultiGpuBoard(OBJGPU *, NvBool *, NvBool *);
#endif // NVPCIE_H

View File

@@ -0,0 +1,162 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file nvrangetypes.h
* @brief Range types and operator macros
* @note #include a header to define NvUxx and NvSxx before sourcing this file.
*/
#ifndef _NVRANGETYPES_H_
#define _NVRANGETYPES_H_
//
// Define range types by convention
//
#define __NV_DEFINE_RANGE_TYPE(T) \
typedef struct NvRange ## T \
{ \
Nv ## T min; \
Nv ## T max; \
} NvRange ## T;
__NV_DEFINE_RANGE_TYPE(U64) // NvRangeU64
__NV_DEFINE_RANGE_TYPE(S64) // NvRangeS64
__NV_DEFINE_RANGE_TYPE(U32) // NvRangeU32
__NV_DEFINE_RANGE_TYPE(S32) // NvRangeS32
__NV_DEFINE_RANGE_TYPE(U16) // NvRangeU16
__NV_DEFINE_RANGE_TYPE(S16) // NvRangeS16
__NV_DEFINE_RANGE_TYPE(U8) // NvRangeU8
__NV_DEFINE_RANGE_TYPE(S8) // NvRangeS8
//
// Operator macros
//
// Macros are named xxx_RANGE (rather than xxx_RANGEU32, etc.) since they work
// properly on ranges with any number of bits, signed or unsigned.
//
#define NV_EQUAL_RANGE(r1, r2) ((r1).min == (r2).min && (r1).max == (r2).max)
#define NV_EMPTY_INCLUSIVE_RANGE(r) ((r).min > (r).max)
#define NV_EMPTY_EXCLUSIVE_RANGE(r) ((r).min + 1 > (r).max - 1)
#define NV_WITHIN_INCLUSIVE_RANGE(r, x) ((r).min <= (x) && (x) <= (r).max)
#define NV_WITHIN_EXCLUSIVE_RANGE(r, x) ((r).min < (x) && (x) < (r).max)
#define NV_IS_SUBSET_RANGE(r1, r2) ((r1).min >= (r2).min && (r2).max >= (r1).max)
#define NV_IS_SUPERSET_RANGE(r1, r2) ((r1).min <= (r2).min && (r2).max <= (r1).max)
#define NV_CENTER_OF_RANGE(r) ((r).min / 2 + ((r).max + 1) / 2) // Avoid overflow and rounding anomalies.
#define NV_IS_OVERLAPPING_RANGE(r1, r2) \
(NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).min) || \
NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).max) || \
NV_IS_SUBSET_RANGE((r1), (r2))) // also covers r2 fully containing r1
#define NV_DISTANCE_FROM_RANGE(r, x) ((x) < (r).min? (r).min - (x): ((x) > (r).max? (x) - (r).max: 0))
#define NV_VALUE_WITHIN_INCLUSIVE_RANGE(r, x) ((x) < (r).min? (r).min : ((x) > (r).max? (r).max : (x)))
#define NV_VALUE_WITHIN_EXCLUSIVE_RANGE(r, x) ((x) <= (r).min? (r).min + 1 : ((x) >= (r).max? (r).max - 1 : (x)))
#define NV_INIT_RANGE(r, x, y) \
do \
{ \
(r).min = (x); \
(r).max = (y); \
} while(0)
#define NV_ASSIGN_DELTA_RANGE(r, x, d) \
do \
{ \
(r).min = (x) - (d); \
(r).max = (x) + (d); \
} while(0)
#define NV_ASSIGN_INTERSECTION_RANGE(r1, r2) \
do \
{ \
if ((r1).min < (r2).min) \
(r1).min = (r2).min; \
if ((r1).max > (r2).max) \
(r1).max = (r2).max; \
} while(0)
#define NV_ASSIGN_UNION_RANGE(r1, r2) \
do \
{ \
if ((r1).min > (r2).min) \
(r1).min = (r2).min; \
if ((r1).max < (r2).max) \
(r1).max = (r2).max; \
} while(0)
#define NV_MULTIPLY_RANGE(r, x) \
do \
{ \
(r).min *= (x); \
(r).max *= (x); \
} while(0)
#define NV_DIVIDE_FLOOR_RANGE(r, x) \
do \
{ \
(r).min /= (x); \
(r).max /= (x); \
} while(0)
#define NV_DIVIDE_CEILING_RANGE(r, x) \
do \
{ \
(r).min = ((r).min + (x) - 1) / (x); \
(r).max = ((r).max + (x) - 1) / (x); \
} while(0)
#define NV_DIVIDE_ROUND_RANGE(r, x) \
do \
{ \
(r).min = ((r).min + (x) / 2) / (x); \
(r).max = ((r).max + (x) / 2) / (x); \
} while(0)
#define NV_DIVIDE_WIDE_RANGE(r, x) \
do \
{ \
(r).min /= (x); \
(r).max = ((r).max + (x) - 1) / (x); \
} while(0)
#define NV_DIVIDE_NARROW_RANGE(r, x) \
do \
{ \
(r).min = ((r).min + (x) - 1) / (x); \
(r).max /= (x); \
} while(0)
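//
// Illustrative usage sketch: clamp a requested value into the intersection
// of two ranges, using only the macros defined above. Names are local to
// the example.
//
static inline NvU32
nvRangeSketchClamp(NvRangeU32 supported, NvRangeU32 requested, NvU32 value)
{
    // Shrink 'requested' to the part that 'supported' also covers.
    NV_ASSIGN_INTERSECTION_RANGE(requested, supported);

    // If they do not intersect, fall back to clamping against 'supported'.
    if (NV_EMPTY_INCLUSIVE_RANGE(requested))
        return NV_VALUE_WITHIN_INCLUSIVE_RANGE(supported, value);

    return NV_VALUE_WITHIN_INCLUSIVE_RANGE(requested, value);
}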
#endif // _NVRANGETYPES_H_

File diff suppressed because it is too large

View File

@@ -0,0 +1,45 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SMBPBI_IMPL_H
#define _SMBPBI_IMPL_H
/*!
* This header file stores implementation dependent parameters of the SMBPBI server.
*/
/*!
* Maximum number of individual requests in a bundle
*/
#define NV_MSGBOX_PARAM_MAX_BUNDLE_SIZE 4
/*!
* Maximum number of Result Disposition Rules
*/
#define NV_MSGBOX_PARAM_MAX_DISP_RULES 10
/*!
* Maximum length of the Driver Event Message text string is 80, including
* the terminating NUL character.
*/
#define NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE 80
#endif // _SMBPBI_IMPL_H

View File

@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2010-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _SMBPBI_PRIV_H_
#define _SMBPBI_PRIV_H_
/*!
* This file contains NVIDIA private defines for the SMBPBI
* interface.
*/
#include "oob/smbpbi.h"
// MSGBOX command word structure
//
// Reserving opcodes above 0xC0 for internal/private functionality.
//
// These opcodes should not be included in any documentation we release outside
// of NVIDIA!
//
//
// Only for internal use (should not be written to the command register). Used
// for internal tracking when commands are redirected to the RM from the PMU.
//
#define NV_MSGBOX_CMD_ERR_MORE_PROCESSING_REQUIRED 0x000000F0
//
// Alternative encodings of the command word
// These are distinguished by a non-zero value in the 29:29 bit,
// previously known as _RSVD.
//
#define NV_MSGBOX_CMD_ENCODING 29:29
#define NV_MSGBOX_CMD_ENCODING_STANDARD 0x00000000
#define NV_MSGBOX_CMD_ENCODING_DEBUG 0x00000001
// Debug command structure
#define NV_MSGBOX_DEBUG_CMD_OPCODE 1:0
#define NV_MSGBOX_DEBUG_CMD_OPCODE_READ_PRIV 0x00000000
#define NV_MSGBOX_DEBUG_CMD_ARG 23:2
/* Utility command constructor macros */
#define NV_MSGBOX_DEBUG_CMD(opcode, arg) \
( \
DRF_DEF(_MSGBOX, _DEBUG_CMD, _OPCODE, opcode) | \
DRF_NUM(_MSGBOX, _DEBUG_CMD, _ARG, (arg)) | \
DRF_DEF(_MSGBOX, _CMD, _STATUS, _NULL) | \
DRF_DEF(_MSGBOX, _CMD, _ENCODING, _DEBUG) | \
DRF_DEF(_MSGBOX, _CMD, _INTR, _PENDING) \
)
#define NV_MSGBOX_DEBUG_CMD_READ_PRIV(offset) \
NV_MSGBOX_DEBUG_CMD(_READ_PRIV, (offset) >> 2)
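//
// Usage sketch (the offset value is illustrative): build a debug priv-read
// command word. The priv offset is carried in _ARG as a dword index, which
// is why the constructor above shifts it right by 2.
//
// NvU32 cmd = NV_MSGBOX_DEBUG_CMD_READ_PRIV(0x00001000);
//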
#endif // _SMBPBI_PRIV_H_

View File

@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* << DO NOT EDIT >>
*
* This file describes the format of the generated ucode binary. Please do not change the
* content unless the same change is applied to the target ucode builds.
*/
#ifndef RM_RISCV_UCODE_H
#define RM_RISCV_UCODE_H
#include "nvtypes.h"
typedef struct {
//
// Version 1
// Version 2
// Version 3 = for Partition boot
// Version 4 = for eb riscv boot
//
NvU32 version; // structure version
NvU32 bootloaderOffset;
NvU32 bootloaderSize;
NvU32 bootloaderParamOffset;
NvU32 bootloaderParamSize;
NvU32 riscvElfOffset;
NvU32 riscvElfSize;
NvU32 appVersion; // Changelist number associated with the image
//
// Manifest contains information about Monitor and it is
// input to BR
//
NvU32 manifestOffset;
NvU32 manifestSize;
//
// Monitor Data offset within RISCV image and size
//
NvU32 monitorDataOffset;
NvU32 monitorDataSize;
//
// Monitor Code offset within RISCV image and size
//
NvU32 monitorCodeOffset;
NvU32 monitorCodeSize;
NvU32 bIsMonitorEnabled;
//
// Swbrom Code offset within RISCV image and size
//
NvU32 swbromCodeOffset;
NvU32 swbromCodeSize;
//
// Swbrom Data offset within RISCV image and size
//
NvU32 swbromDataOffset;
NvU32 swbromDataSize;
} RM_RISCV_UCODE_DESC;
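//
// Illustrative sketch: resolving one section of the ucode binary from the
// descriptor above. Offsets are assumed to be relative to the start of the
// image blob; the function name is hypothetical.
//
static inline const NvU8 *
riscvUcodeSketchSection(const NvU8 *pImage, NvU32 imageSize,
                        NvU32 offset, NvU32 size)
{
    // Reject sections that are absent or would overrun the image.
    if ((size == 0) || (offset > imageSize) || (size > imageSize - offset))
        return (const NvU8 *)0;
    return pImage + offset;
}
// e.g. the bootloader blob would be
// riscvUcodeSketchSection(pImage, imageSize, desc.bootloaderOffset,
//                         desc.bootloaderSize).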
#endif // RM_RISCV_UCODE_H

View File

@@ -0,0 +1,147 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* Data structures and interfaces used for generic falcon boot-loader.
*
* This generic boot-loader is designed to load both non-secure and
* secure code, taking care of the signature as well. This bootloader
* should be loaded at the end of IMEM so that it doesn't overwrite
* itself when it loads the code into IMEM starting at blk 0.
* The data will be loaded into DMEM offset 0.
*/
#ifndef RMFLCNBL_H
#define RMFLCNBL_H
#include "flcnifcmn.h"
/*!
* @brief Structure used by the boot-loader to load the rest of the code.
*
* This has to be filled by the GPU driver and copied into DMEM at offset
* provided in the @ref _def_rm_flcn_bl_desc.blDmemDescLoadOff.
*/
typedef struct _def_rm_flcn_bl_dmem_desc
{
/*!
* reserved should always be the first element
*/
NvU32 reserved[4];
/*!
* signature must immediately follow reserved: a 16B signature for secure
* code, all zeros if there is no secure code.
*/
NvU32 signature[4];
/*!
* ctxDma is used by the bootloader while loading code/data.
*/
NvU32 ctxDma;
/*!
* 256B aligned physical FB address where code is located.
*/
RM_FLCN_U64 codeDmaBase;
/*!
* Offset from codeDmaBase where the nonSecure code is located.
* The offset must be multiple of 256 to help performance.
*/
NvU32 nonSecureCodeOff;
/*!
* The size of the nonSecure code part.
*/
NvU32 nonSecureCodeSize;
/*!
* Offset from codeDmaBase where the secure code is located.
* The offset must be a multiple of 256 to help performance.
*/
NvU32 secureCodeOff;
/*!
* The size of the secure code part.
*/
NvU32 secureCodeSize;
/*!
* Code entry point which will be invoked by BL after code is loaded.
*/
NvU32 codeEntryPoint;
/*!
* 256B aligned Physical FB Address where data is located.
*/
RM_FLCN_U64 dataDmaBase;
/*!
* Size of data block. Should be a multiple of 256B.
*/
NvU32 dataSize;
/*!
* Number of arguments to be passed to the target firmware being loaded.
*/
NvU32 argc;
/*!
* Arguments to be passed to the target firmware being loaded.
*/
NvU32 argv;
} RM_FLCN_BL_DMEM_DESC, *PRM_FLCN_BL_DMEM_DESC;
/*!
* @brief The header used by the GPU driver to figure out code and data
* sections of bootloader.
*/
typedef struct _def_rm_flcn_bl_img_header
{
/*!
* Offset of code section in the image.
*/
NvU32 blCodeOffset;
/*!
* Size of code section in the image.
*/
NvU32 blCodeSize;
/*!
* Offset of data section in the image.
*/
NvU32 blDataOffset;
/*!
* Size of data section in the image.
*/
NvU32 blDataSize;
} RM_FLCN_BL_IMG_HEADER, *PRM_FLCN_BL_IMG_HEADER;
/*!
* @brief The descriptor used by RM to figure out the requirements of boot loader.
*/
typedef struct _def_rm_flcn_bl_desc
{
/*!
* Starting tag of bootloader
*/
NvU32 blStartTag;
/*!
* Dmem offset where _def_rm_flcn_bl_dmem_desc to be loaded
*/
NvU32 blDmemDescLoadOff;
/*!
* Description of the image
*/
RM_FLCN_BL_IMG_HEADER blImgHeader;
} RM_FLCN_BL_DESC, *PRM_FLCN_BL_DESC;
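/*
 * Illustrative sketch: carving the boot-loader code and data sections out of
 * the boot-loader image using the descriptor above. Offsets are assumed to
 * be relative to the start of the image; names are local to the example.
 */
static inline void
flcnBlSketchSections(const RM_FLCN_BL_DESC *pDesc, const NvU8 *pBlImage,
                     const NvU8 **ppCode, const NvU8 **ppData)
{
    *ppCode = pBlImage + pDesc->blImgHeader.blCodeOffset;
    *ppData = pBlImage + pDesc->blImgHeader.blDataOffset;
    // blCodeSize / blDataSize bytes from these pointers are then loaded
    // into IMEM and DMEM respectively.
}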
#endif // RMFLCNBL_H

View File

@@ -0,0 +1,202 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* Top-level header-file that defines the GSP sequencer structures
*/
#ifndef _RMGSPSEQ_H_
#define _RMGSPSEQ_H_
/*!
* GSP sequencer structures and defines
*
* Here are the supported opcodes.
* The sequencer buffer is a stream of commands composed of
* an op-code directly followed by the exact number of arguments it needs.
*
* The supported op-codes are:
* GSP_SEQUENCER_BUFFER_OPCODE_REG_WRITE:
* arg[0]: the register offset
* arg[1]: the register value
*
* GSP_SEQUENCER_BUFFER_OPCODE_REG_MODIFY:
* arg[0]: the register offset
* arg[1]: the mask selecting the bits to modify
* arg[2]: the value to apply; it must already be shifted to fit inside the mask.
*
* GSP_SEQUENCER_BUFFER_OPCODE_REG_POLL:
* arg[0]: the register offset
* arg[1]: the mask selecting the bits to compare
* arg[2]: the value to poll for; it must already be shifted to fit inside the mask.
* arg[3]: the timeout in milliseconds
* arg[4]: a unique error code from GSP_SEQUENCER_BUFFER_ERR. Helps map to the failing GSP code.
*
* GSP_SEQUENCER_BUFFER_OPCODE_DELAY_US
* arg[0]: the delay in microseconds.
*
* GSP_SEQUENCER_REG_STORE
* This operation stores the specified register at the specified index in
* the sequencer buffer register storage area.
* arg[0]: the reg offset to store in the save area
* arg[1]: index in save area to store value of reg offset
*
* GSP_SEQUENCER_CORE_RESET
* This operation resets the core. This operation takes no arguments.
*
* GSP_SEQUENCER_CORE_START
* This operation starts the core. This operation takes no arguments.
*
* GSP_SEQUENCER_CORE_WAIT_FOR_HALT
* This operation waits for the core to halt after completing execution.
* This operation takes no arguments.
*
* GSP_SEQUENCER_CORE_RESUME
* This operation resumes the core in preparation for switching back to
* the GSP-RM.
*/
typedef enum GSP_SEQ_BUF_OPCODE
{
GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
GSP_SEQ_BUF_OPCODE_REG_MODIFY,
GSP_SEQ_BUF_OPCODE_REG_POLL,
GSP_SEQ_BUF_OPCODE_DELAY_US,
GSP_SEQ_BUF_OPCODE_REG_STORE,
GSP_SEQ_BUF_OPCODE_CORE_RESET,
GSP_SEQ_BUF_OPCODE_CORE_START,
GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
GSP_SEQ_BUF_OPCODE_CORE_RESUME,
} GSP_SEQ_BUF_OPCODE;
#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
/* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
/* GSP_SEQ_BUF_OPCODE_CORE_START */ \
/* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
/* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
0)
// The size of the structure must be DWord aligned!
typedef struct
{
NvU32 addr;
NvU32 val;
} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
// The size of the structure must be DWord aligned!
typedef struct
{
NvU32 addr;
NvU32 mask;
NvU32 val;
} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
// The size of the structure must be DWord aligned!
typedef struct
{
NvU32 addr;
NvU32 mask;
NvU32 val;
NvU32 timeout;
NvU32 error;
} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
// The size of the structure must be DWord aligned!
typedef struct
{
NvU32 val;
} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
// The size of the structure must be DWord aligned!
typedef struct
{
NvU32 addr;
NvU32 index;
} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
typedef struct GSP_SEQUENCER_BUFFER_CMD
{
GSP_SEQ_BUF_OPCODE opCode;
union
{
GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
} payload;
} GSP_SEQUENCER_BUFFER_CMD;
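//
// Illustrative sketch: appending one command to the sequencer dword stream
// (opcode dword followed by its payload dwords, as described above). Buffer
// bounds management is omitted; names are local to the example.
//
static inline NvU32
gspSeqSketchAppend(NvU32 *pStream, NvU32 cursor,
                   const GSP_SEQUENCER_BUFFER_CMD *pCmd)
{
    NvU32 payloadDwords = GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(pCmd->opCode);
    const NvU32 *pPayload = (const NvU32 *)&pCmd->payload;
    NvU32 i;

    pStream[cursor++] = (NvU32)pCmd->opCode;
    for (i = 0; i < payloadDwords; i++)
        pStream[cursor++] = pPayload[i];

    return cursor; // new write position, in dwords
}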
//
// These error codes, printed by the client, let us map a reported
// failure back to the corresponding GSP ucode line.
//
typedef enum GSP_SEQUENCER_BUFFER_ERR
{
GSP_SEQUENCER_BUFFER_ERR_OK = 0,
GSP_SEQUENCER_BUFFER_ERR_TIMEOUT1,
GSP_SEQUENCER_BUFFER_ERR_TIMEOUT2,
GSP_SEQUENCER_BUFFER_ERR_TIMEOUT3
} GSP_SEQUENCER_BUFFER_ERR;
// Sequencer implementation of FLD_WR_DRF_DEF()
#define GSP_SEQ_FLD_WR_DRF_DEF(gpu, gsp, d, r, f, c) \
{ \
GSP_SEQUENCER_BUFFER_CMD cmd; \
cmd.opCode = GSP_SEQ_BUF_OPCODE_REG_MODIFY; \
cmd.payload.regModify.addr = NV##d##r; \
cmd.payload.regModify.mask = DRF_MASK(NV##d##r##f) << DRF_SHIFT(NV##d##r##f); \
cmd.payload.regModify.val = DRF_DEF(d, r, f, c); \
(void)gspAppendToSequencerBuffer(gpu, gsp, &cmd); \
}
/*!
* Forward references
*/
struct rpc_run_cpu_sequencer_v17_00;
/*!
* Structure tracking all information relevant to the GSP sequencer buffer for GSP-RM
*/
typedef struct
{
/*! Pointer to RM-GSP CPU sequencer parameter block */
struct rpc_run_cpu_sequencer_v17_00 *pRunCpuSeqParam;
} GSP_SEQUENCER_BUFFER;
/*!
* RM-GSP sequencer buffer register macros.
* GSP_SEQ_BUF_REG_SAVE_SIZE : specifies size of save area in reg values
* GSP_SEQ_BUF_REG_SAVE_MAILBOX0 : index for saving of mailbox0 reg
* GSP_SEQ_BUF_REG_SAVE_MAILBOX1 : index for saving of mailbox1 reg
*/
#define GSP_SEQ_BUF_REG_SAVE_SIZE (8)
#define GSP_SEQ_BUF_REG_SAVE_MAILBOX0 (0)
#define GSP_SEQ_BUF_REG_SAVE_MAILBOX1 (1)
#endif // _RMGSPSEQ_H_

View File

@@ -0,0 +1,124 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMIFRIF_H_
#define _RMIFRIF_H_
/*!
* @file rmifrif.h
* @brief Defines structures and interfaces common between RM and
* Init-From-Rom (IFR).
*
* For systems supporting GC6 that have on-board VBIOS ROMs, IFR is used
* to expedite several parts of GC6 exit in parallel with PEX init.
*
* After running devinit using a PMU ucode image loaded from the ROM itself,
* parts of RM stateLoad can be done using RM's ucode image. This is
* achieved by loading RM PMU ucode directly from FB. The primary difficulties
* are how to find RM's PMU ucode and how to bootstrap it.
*
* We use the simple approach of allocating a fixed buffer near the
* top of FB that contains the information required to bootstrap RM's PMU
* image. This buffer is called the RM_IFR_GC6_CTX.
*
* The buffer is allocated within RM's reserved memory space, directly before
* the VBIOS workspace (if any is present). Since the VBIOS workspace is
* always a multiple of 64K, RM enforces that the offset between top of memory
 * and the end of the buffer is 64K-aligned. This way the IFR code can start
* from the top of memory and search downwards in 64K decrements.
*
* A small header is placed at the end of the buffer which contains a
* string signature identifying the buffer and other data needed to find the
* remaining context data.
*
 * Top_Of_FB /---------------------\ <-
* | | \
* | (VBIOS_Workspace) | | END_OFFSET
* | | /
* |---------------------| <-
 * | | \
* | GSP FW (if present) | | pFbHalPvtInfo->gspFwSizeBytes
* | | /
* |---------------------| <-
* | RM_IFR_GC6_CTX_HDR | \
* |---------------------| |
* | (Padding) | | RM_IFR_GC6_CTX_HDR.bufferSize
* |---------------------| |
* | Sequence Data | /
* |---------------------| <-
* | |
* | |
* | |
* | |
* 0x00000000 \---------------------/
*
 * To simplify the RM PMU bootstrap process and decrease IFR maintenance
* cost, the bootstrap process is encoded as a sequence script, leveraging
* a small subset of RM's PMU_SEQ_INST interface (see pmuseqinst.h).
* Register writes are captured during the initial (CPU-driven) RM PMU bootstrap
* and saved into a sequence for replay during GC6 exit.
*
* Only the following opcodes are supported currently:
* NV_PMU_SEQ_WRITE_REG_OPC - (multi-)register write
* NV_PMU_SEQ_EXIT_OPC - sequence done
*
*/
/*!
* Header structure which identifies the GC6 context buffer.
*/
typedef struct
{
NvU8 signature[12]; // RM_IFR_GC6_CTX_SIGNATURE
NvU32 bufferSize; // Size of the entire context buffer in bytes
NvU32 seqSizeWords; // Number of 32-bit words of sequence data
NvU32 checksum; // 32-bit chunk checksum of the sequence data
} RM_IFR_GC6_CTX_HDR, *PRM_IFR_GC6_CTX_HDR;
/*!
* String signature that IFR searches for to find the GC6 context buffer.
*/
#define RM_IFR_GC6_CTX_SIGNATURE "GC6_CTX_HDR" // 12 bytes
/*!
* Alignment of the offset between top of memory and the end of the
* GC6 context buffer (which is also the end of the header).
*/
#define RM_IFR_GC6_CTX_END_OFFSET_ALIGNMENT 0x10000 // 64KB
/*!
* Maximum offset between top of memory and the end of the
* GC6 context buffer. This is meant to be a loose upper bound preventing
* scanning of the whole of memory (e.g. when something goes wrong).
*/
#define RM_IFR_GC6_CTX_END_OFFSET_MAX 0x1000000 // 16MB
#define RM_IFR_GC6_CTX_END_OFFSET_MAX_WITH_GSP 0x10000000 // 256MB
/*!
* Maximum size of the context data in bytes.
* This is limited by FECS falcon DMEM size (4K on Kepler).
* The buffer must fit within DMEM together with stack and other global data.
*/
#define RM_IFR_GC6_CTX_DATA_MAX_SIZE 2048 // 2KB
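/*
 * Illustrative sketch of the scan described above (not the actual IFR ucode):
 * starting from the top of FB, step downwards in 64K decrements and compare
 * the signature at the end of each candidate header location. fbRead() is a
 * hypothetical FB accessor assumed for the example; when GSP FW is present,
 * RM_IFR_GC6_CTX_END_OFFSET_MAX_WITH_GSP would bound the scan instead.
 */
extern void fbRead(NvU64 fbOffset, void *pDst, NvU32 size); // assumed helper

static NvBool
findGc6CtxHdr(NvU64 fbTopOffset, RM_IFR_GC6_CTX_HDR *pHdr)
{
    NvU64 endOffset;

    for (endOffset = 0;
         endOffset <= RM_IFR_GC6_CTX_END_OFFSET_MAX;
         endOffset += RM_IFR_GC6_CTX_END_OFFSET_ALIGNMENT)
    {
        NvU32 i;

        // The header sits at the very end of the buffer, i.e. directly
        // below the candidate end offset.
        fbRead(fbTopOffset - endOffset - sizeof(*pHdr), pHdr, sizeof(*pHdr));

        for (i = 0; i < sizeof(pHdr->signature); i++)
        {
            if (pHdr->signature[i] != RM_IFR_GC6_CTX_SIGNATURE[i])
                break;
        }
        if (i == sizeof(pHdr->signature))
            return NV_TRUE; // signature matched
    }
    return NV_FALSE;
}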
#endif // _RMIFRIF_H_

View File

@@ -0,0 +1,515 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef RMLSFM_H
#define RMLSFM_H
/*****************************************************************************/
/* This file is shared between ACR, SEC2 Binaries */
/* Do not update this file without updating ACR/SEC2 */
/*****************************************************************************/
/*!
* @file rmlsfm.h
 * @brief Top-level header file that defines Light Secure Falcon Management
SW shared interfaces.
*/
/*!
* READ/WRITE masks for WPR region
*/
#define LSF_WPR_REGION_RMASK (0xCU) // Readable only by level 2 and 3 clients
#define LSF_WPR_REGION_WMASK (0xCU) // Writable only by level 2 and 3 clients
#define LSF_WPR_REGION_RMASK_SUB_WPR_ENABLED (0x8) // Readable only by level 3 clients
#define LSF_WPR_REGION_WMASK_SUB_WPR_ENABLED (0x8) // Writable only by level 3 clients
#define LSF_WPR_REGION_ALLOW_READ_MISMATCH_NO (0x0) // Disallow read mis-match for all clients
#define LSF_WPR_REGION_ALLOW_WRITE_MISMATCH_NO (0x0) // Disallow write mis-match for all clients
/*!
* READ mask for WPR region on Tegra
* This is required until we update tegra binaries, Bug 200281517
* TODO: dgoyal - Remove this once tegra binaries are updated
*/
#define LSF_WPR_REGION_RMASK_FOR_TEGRA (0xFU)
/*!
* Expected REGION ID to be used for the unprotected FB region (region that
* does not have read or write protections)
*/
#define LSF_UNPROTECTED_REGION_ID (0x0U)
/*!
* Expected REGION ID to be used for the WPR region for the falcon microcode (includes data).
 * ACR assigns regions to client requests based on read/write masks and is
 * expected to give first priority to requests from LSFM. Giving LSFM first
 * priority naturally assigns it region ID 1, and this define gives the
 * different parties a way to sanity-check that fact. Other falcons
 * (FECS/video falcons) also depend on this define, so be careful when
 * modifying it.
*/
#define LSF_WPR_EXPECTED_REGION_ID (0x1U)
/*!
* Expected REGION ID to be used for the unused WPR region.
*/
#define LSF_WPR_UNUSED_REGION_ID (0x2U)
/*!
* Invalid LS falcon subWpr ID
*/
#define LSF_SUB_WPR_ID_INVALID (0xFFFFFFFFU)
/*!
* Expected REGION ID to be used for the VPR region.
*/
#define LSF_VPR_REGION_ID (0x3U)
/*!
* Size of the separate bootloader data that could be present in WPR region.
*/
#define LSF_LS_BLDATA_EXPECTED_SIZE (0x100U)
/*!
 * Since we don't check signatures on GC6 exit, we need to hardcode the WPR offset.
*/
#define LSF_WPR_EXPECTED_OFFSET (0x0U)
/*!
* CTXDMA to be used while loading code/data in target falcons
*/
#define LSF_BOOTSTRAP_CTX_DMA_FECS (0x0)
/*!
* Context DMA ID 6 is reserved for Video UCODE
*/
#define LSF_BOOTSTRAP_CTX_DMA_VIDEO (0x6)
#define LSF_BOOTSTRAP_CTX_DMA_BOOTSTRAP_OWNER (0x0)
#define LSF_BOOTSTRAP_CTX_DMA_FBFLCN (0x0)
/*!
* Falcon Id Defines
* Defines a common Light Secure Falcon identifier.
 * The code-signing infrastructure assumes the LSF_FALCON_ID_ prefix for
 * units; changes to these defines need to be reflected in path [1].
 * When adding a new falcon ID, append it to the end;
 * don't insert the new falcon ID in the middle.
*/
#define LSF_FALCON_ID_PMU (0U)
#define LSF_FALCON_ID_DPU (1U)
#define LSF_FALCON_ID_GSPLITE LSF_FALCON_ID_DPU
#define LSF_FALCON_ID_FECS (2U)
#define LSF_FALCON_ID_GPCCS (3U)
#define LSF_FALCON_ID_NVDEC (4U)
#define LSF_FALCON_ID_NVENC (5U)
#define LSF_FALCON_ID_NVENC0 (5U)
#define LSF_FALCON_ID_NVENC1 (6U)
#define LSF_FALCON_ID_SEC2 (7U)
#define LSF_FALCON_ID_NVENC2 (8U)
#define LSF_FALCON_ID_MINION (9U)
#define LSF_FALCON_ID_FBFALCON (10U)
#define LSF_FALCON_ID_XUSB (11U)
#define LSF_FALCON_ID_GSP_RISCV (12U)
#define LSF_FALCON_ID_PMU_RISCV (13U)
#define LSF_FALCON_ID_SOE (14U)
#define LSF_FALCON_ID_NVDEC1 (15U)
#define LSF_FALCON_ID_OFA (16U)
#define LSF_FALCON_ID_SEC2_RISCV (17U)
#define LSF_FALCON_ID_NVDEC_RISCV (18U)
#define LSF_FALCON_ID_NVDEC_RISCV_EB (19U)
#define LSF_FALCON_ID_NVJPG (20U)
#define LSF_FALCON_ID_END (21U)
#define LSF_FALCON_ID_INVALID (0xFFFFFFFFU)
//
// ************************ NOTIFICATION *********************************
// Anyone adding a new LSF falconId must recalculate the WPR header size per
// LSF_FALCON_ID_END. RM needs to call lsfmGetWprHeaderSizeMax_HAL to stay
// aligned with acrReadSubWprHeader_HAL in ACR; otherwise ACR cannot compute
// the correct address from which to read the sub-WPR headers.
// We have observed that LSF_FALCON_ID_END > 32 causes a SEC2 IMEM tag-missing
// error, but the root cause has not been found yet.
//
#define LSF_FALCON_ID_END_15 (15U)
#define LSF_FALCON_ID_END_17 (17U)
#define LSF_FALCON_ID_END_18 (18U)
#define LSF_FALCON_ID_END_21 (21U)
#define LSF_FALCON_INSTANCE_DEFAULT_0 (0x0)
#define LSF_FALCON_INSTANCE_COUNT_DEFAULT_1 (0x1)
// Currently max supported instance is 8 for FECS/GPCCS SMC
#define LSF_FALCON_INSTANCE_FECS_GPCCS_MAX (0x8)
#define LSF_FALCON_INSTANCE_INVALID (0xFFFFFFFFU)
#define LSF_FALCON_INDEX_MASK_DEFAULT_0 (0x0)
/*!
* Size in entries of the ucode descriptor's dependency map.
 * This used to be LSF_FALCON_ID_END, but since that had to grow and we did
 * not want to break any existing binaries, the two values had to be split.
*
* Increasing this number should be done with care.
*/
#define LSF_FALCON_DEPMAP_SIZE (11)
/*!
* Falcon Binaries version defines
*/
#define LSF_FALCON_BIN_VERSION_INVALID (0xFFFFFFFFU)
/*!
* Light Secure Falcon Ucode Description Defines
 * This structure is preliminary and may change as the ucode signing flow evolves.
*/
typedef struct
{
NvU8 prdKeys[2][16];
NvU8 dbgKeys[2][16];
NvU32 bPrdPresent;
NvU32 bDbgPresent;
NvU32 falconId;
NvU32 bSupportsVersioning;
NvU32 version;
NvU32 depMapCount;
NvU8 depMap[LSF_FALCON_DEPMAP_SIZE * 2 * 4];
NvU8 kdf[16];
} LSF_UCODE_DESC, *PLSF_UCODE_DESC;
/*!
* Light Secure WPR Header
* Defines state allowing Light Secure Falcon bootstrapping.
*
* falconId - LS falcon ID
* lsbOffset - Offset into WPR region holding LSB header
* bootstrapOwner - Bootstrap OWNER (either PMU or SEC2)
 * bLazyBootstrap - Skip bootstrapping by ACR
 * binVersion - LS ucode binary version
 * status - Bootstrapping status
*/
typedef struct
{
NvU32 falconId;
NvU32 lsbOffset;
NvU32 bootstrapOwner;
NvU32 bLazyBootstrap;
NvU32 binVersion;
NvU32 status;
} LSF_WPR_HEADER, *PLSF_WPR_HEADER;
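/*
 * Illustrative sketch (assumptions, not ACR's actual implementation): given
 * the WPR header array at the start of the WPR region, a bootstrap owner can
 * look up the LSB header offset for a given falcon like so. The
 * LSF_FALCON_ID_INVALID terminator convention is assumed for the example.
 */
static NvU32
lsfLookupLsbOffset(const LSF_WPR_HEADER *pWprHeaders, NvU32 falconId)
{
    NvU32 i;

    for (i = 0; i < LSF_FALCON_ID_END; i++)
    {
        if (pWprHeaders[i].falconId == LSF_FALCON_ID_INVALID)
            break; // end of populated entries (assumed convention)
        if (pWprHeaders[i].falconId == falconId)
            return pWprHeaders[i].lsbOffset;
    }
    return 0xFFFFFFFFU; // not found
}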
/*!
* LSF shared SubWpr Header
*
 * useCaseId - Shared SubWpr use case ID (updated by RM)
* startAddr - start address of subWpr (updated by RM)
* size4K - size of subWpr in 4K (updated by RM)
*/
typedef struct
{
NvU32 useCaseId;
NvU32 startAddr;
NvU32 size4K;
} LSF_SHARED_SUB_WPR_HEADER, *PLSF_SHARED_SUB_WPR_HEADER;
// Shared SubWpr use case IDs
typedef enum
{
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_FRTS_VBIOS_TABLES = 1,
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA = 2,
LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_APM_RTS = 3
} LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_ENUM;
#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_APM_RTS
#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_INVALID (0xFFFFFFFFU)
#define MAX_SUPPORTED_SHARED_SUB_WPR_USE_CASES LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX
//
// Static sizes of shared subWPRs
// Minimum granularity supported is 4K
//
#define LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K (0x100) // 1MB in 4K
#define LSF_SHARED_DATA_SUB_WPR_PLAYREADY_SHARED_DATA_SIZE_IN_4K (0x1) // 4K
#define LSF_SHARED_DATA_SUB_WPR_APM_RTS_SIZE_IN_4K (0x1) // 4K
/*!
* Bootstrap Owner Defines
*/
#define LSF_BOOTSTRAP_OWNER_PMU (LSF_FALCON_ID_PMU)
#define LSF_BOOTSTRAP_OWNER_SEC2 (LSF_FALCON_ID_SEC2)
#define LSF_BOOTSTRAP_OWNER_GSPLITE (LSF_FALCON_ID_GSPLITE)
#define LSF_BOOTSTRAP_OWNER_DEFAULT LSF_BOOTSTRAP_OWNER_PMU
/*!
* Image Status Defines
*/
#define LSF_IMAGE_STATUS_NONE (0U)
#define LSF_IMAGE_STATUS_COPY (1U)
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED (2U)
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED (3U)
#define LSF_IMAGE_STATUS_VALIDATION_DONE (4U)
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED (5U)
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY (6U)
#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED (7U)
/*!
* Light Secure Bootstrap Header
* Defines state allowing Light Secure Falcon bootstrapping.
*
* signature - Code/data signature details for this LS falcon
* ucodeOffset - Offset into WPR region where UCODE is located
* ucodeSize - Size of ucode
* dataSize - Size of ucode data
* blCodeSize - Size of bootloader that needs to be loaded by bootstrap owner
 * blImemOffset - BL starting virtual address. Needed for tagging.
* blDataOffset - Offset into WPR region holding the BL data
* blDataSize - Size of BL data
* appCodeOffset - Offset into WPR region where Application UCODE is located
* appCodeSize - Size of Application UCODE
* appDataOffset - Offset into WPR region where Application DATA is located
* appDataSize - Size of Application DATA
* blLoadCodeAt0 - Load BL at 0th IMEM offset
* bSetVACtx - Make sure to set the code/data loading CTX DMA to be virtual before exiting
* bDmaReqCtx - This falcon requires a ctx before issuing DMAs
* bForcePrivLoad- Use priv loading method instead of bootloader/DMAs
*/
#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0 0:0
#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_FALSE 0
#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_TRUE 1
#define NV_FLCN_ACR_LSF_FLAG_SET_VA_CTX 1:1
#define NV_FLCN_ACR_LSF_FLAG_SET_VA_CTX_FALSE 0
#define NV_FLCN_ACR_LSF_FLAG_SET_VA_CTX_TRUE 1
#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX 2:2
#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_FALSE 0
#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE 1
#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD 3:3
#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE 0
#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE 1
typedef struct
{
LSF_UCODE_DESC signature;
NvU32 ucodeOffset;
NvU32 ucodeSize;
NvU32 dataSize;
NvU32 blCodeSize;
NvU32 blImemOffset;
NvU32 blDataOffset;
NvU32 blDataSize;
NvU32 appCodeOffset;
NvU32 appCodeSize;
NvU32 appDataOffset;
NvU32 appDataSize;
NvU32 flags;
/*
     * TODO: Uncomment these once Sanket's HALification changes are done
NvU32 monitorCodeOffset;
NvU32 monitorDataOffset;
NvU32 manifestOffset;
*/
} LSF_LSB_HEADER, *PLSF_LSB_HEADER;
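/*
 * Illustrative sketch: the 'flags' field above is read with the standard DRF
 * helpers, e.g. to check whether a falcon requires the priv-load path
 * (FLD_TEST_DRF from nvmisc.h is assumed to be available):
 *
 *     if (FLD_TEST_DRF(_FLCN, _ACR_LSF_FLAG, _FORCE_PRIV_LOAD, _TRUE,
 *                      pLsbHdr->flags))
 *     {
 *         // load IMEM/DMEM through the priv interface instead of the
 *         // bootloader/DMA path
 *     }
 */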
/*!
* Light Secure WPR Content Alignments
*/
#define LSF_WPR_HEADER_ALIGNMENT (256U)
#define LSF_SUB_WPR_HEADER_ALIGNMENT (256U)
#define LSF_LSB_HEADER_ALIGNMENT (256U)
#define LSF_BL_DATA_ALIGNMENT (256U)
#define LSF_BL_DATA_SIZE_ALIGNMENT (256U)
#define LSF_BL_CODE_SIZE_ALIGNMENT (256U)
#define LSF_DATA_SIZE_ALIGNMENT (256U)
#define LSF_CODE_SIZE_ALIGNMENT (256U)
// MMU expects subWpr sizes in units of 4K
#define SUB_WPR_SIZE_ALIGNMENT (4096U)
/*!
* Maximum WPR Header size
*/
#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX (NV_ALIGN_UP((sizeof(LSF_WPR_HEADER) * LSF_FALCON_ID_END), LSF_WPR_HEADER_ALIGNMENT))
#define LSF_LSB_HEADER_TOTAL_SIZE_MAX (NV_ALIGN_UP(sizeof(LSF_LSB_HEADER), LSF_LSB_HEADER_ALIGNMENT))
//
// PMU OBJACR_ALIGNED_256 size will vary with LSF_FALCON_ID_END.
// PMU could run out of DMEM if we keep increasing LSF_FALCON_ID_END.
// The PMU supports the ACR task on GM20X-through-VOLTA profiles only.
// To prevent LSF_FALCON_ID_END changes from affecting older / shipped PMU
// ucodes (by increasing their DMEM footprint), we add a PMU-specific ***_END
// define whose value covers all PMU profiles that ship with the ACR task.
//
#define LSF_FALCON_ID_END_PMU (LSF_FALCON_ID_FBFALCON + 1)
#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX_PMU (NV_ALIGN_UP((sizeof(LSF_WPR_HEADER) * LSF_FALCON_ID_END_PMU), LSF_WPR_HEADER_ALIGNMENT))
// Maximum SUB WPR header size
#define LSF_SUB_WPR_HEADERS_TOTAL_SIZE_MAX (NV_ALIGN_UP((sizeof(LSF_SHARED_SUB_WPR_HEADER) * LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX), LSF_SUB_WPR_HEADER_ALIGNMENT))
/*!
 * For the ucode surface alignment, we align to RM_PAGE_SIZE because of
 * busMapRmAperture issues, not because of falcon ucode alignment requirements,
 * which currently only require at least 256-byte alignment.
*/
#define LSF_UCODE_DATA_ALIGNMENT RM_PAGE_SIZE
/*!
* ACR Descriptors used by ACR UC
*/
/*!
* Supporting maximum of 2 regions.
* This is needed to pre-allocate space in DMEM
*/
#define RM_FLCN_ACR_MAX_REGIONS (2)
#define LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE (0x200)
/*!
* startAddress - Starting address of region
* endAddress - Ending address of region
* regionID - Region ID
* readMask - Read Mask
* writeMask - WriteMask
* clientMask - Bit map of all clients currently using this region
 * shadowMemStartAddress - FB location from where contents need to be copied to startAddress
*/
typedef struct _def_acr_dmem_region_prop
{
NvU32 startAddress;
NvU32 endAddress;
NvU32 regionID;
NvU32 readMask;
NvU32 writeMask;
NvU32 clientMask;
NvU32 shadowMemStartAddress;
} RM_FLCN_ACR_REGION_PROP, *PRM_FLCN_ACR_REGION_PROP;
/*!
* noOfRegions - Number of regions used by RM.
* regionProps - Region properties
*/
typedef struct _def_acr_regions
{
NvU32 noOfRegions;
RM_FLCN_ACR_REGION_PROP regionProps[RM_FLCN_ACR_MAX_REGIONS];
} RM_FLCN_ACR_REGIONS, *PRM_FLCN_ACR_REGIONS;
/*!
 * bVprEnabled : When set, ACR_LOCKDOWN phase programs VPR range. Needs to
 *               be NvU32 because of alignment.
* vprStartAddress : Start address of VPR region. SEC2 binary updates this value
* vprEndAddress : End address of VPR region. SEC2 binary updates this value
* hdcpPolicies : VPR display policies. SEC2 binary updates this value
*/
typedef struct _def_acr_vpr_dmem_desc
{
NvU32 bVprEnabled;
NvU32 vprStartAddress;
NvU32 vprEndAddress;
NvU32 hdcpPolicies;
} ACR_BSI_VPR_DESC, *PACR_BSI_VPR_DESC;
/*!
 * reservedDmem - When the bootstrap owner has finished bootstrapping the other
 * falcons and needs to switch into LS mode, it must have its own actual
 * DMEM image copied into DMEM as part of LS setup. If the ACR desc is at
 * location 0, it will definitely get overwritten, causing data corruption.
* Hence we are reserving 0x200 bytes to give room for any loading data.
* NOTE: This has to be the first member always
* signature - Signature of ACR ucode.
* wprRegionID - Region ID holding the WPR header and its details
* wprOffset - Offset from the WPR region holding the wpr header
* regions - Region descriptors
* ucodeBlobBase- Used for Tegra, stores non-WPR start address where kernel stores ucode blob
* ucodeBlobSize- Used for Tegra, stores the size of the ucode blob
*/
typedef struct _def_acr_dmem_desc
{
NvU32 signatures[4];
NvU32 wprRegionID;
NvU32 wprOffset;
NvU32 mmuMemoryRange;
RM_FLCN_ACR_REGIONS regions;
NvU32 ucodeBlobSize;
// uCodeBlobBase is moved after ucodeBlobSize to inherently align to qword (8 bytes)
NvU64 NV_DECLARE_ALIGNED(ucodeBlobBase, 8);
/*!
 * Do not change the offset of this descriptor, as it is shared between the
 * ACR_REGION_LOCKDOWN HS binary and SEC2. Any change in this structure
 * requires recompilation of SEC2 and the ACR_LOCKDOWN HS binary.
*/
ACR_BSI_VPR_DESC vprDesc;
} RM_FLCN_ACR_DESC, *PRM_FLCN_ACR_DESC;
/*!
* Hub keys/nonce Structure in BSI
*/
#define MAX_SFBHUB_ENCRYPTION_REGION_KEY_SIZE 4
typedef struct _def_acr_hub_scratch_data
{
NvU32 key[MAX_SFBHUB_ENCRYPTION_REGION_KEY_SIZE];
NvU32 nonce[MAX_SFBHUB_ENCRYPTION_REGION_KEY_SIZE];
} ACR_BSI_HUB_DESC, *PACR_BSI_HUB_DESC;
#define MAX_HUB_ENCRYPTION_REGION_COUNT 3
typedef struct _def_acr_hub_scratch_array
{
ACR_BSI_HUB_DESC entries[MAX_HUB_ENCRYPTION_REGION_COUNT];
} ACR_BSI_HUB_DESC_ARRAY, *PACR_BSI_HUB_DESC_ARRAY;
typedef struct _def_acr_reserved_dmem
{
NvU32 reservedDmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/4)]; // Always first..
} ACR_RESERVED_DMEM, *PACR_RESERVED_DMEM;
#define NV_FLCN_ACR_DESC_FLAGS_SIG_VERIF 0:0
#define NV_FLCN_ACR_DESC_FLAGS_SIG_VERIF_DISABLE 0
#define NV_FLCN_ACR_DESC_FLAGS_SIG_VERIF_ENABLE 1
/*!
* Size of ACR phase in dword
*/
#define ACR_PHASE_SIZE_DWORD (sizeof(RM_FLCN_ACR_DESC)/sizeof(NvU32))
/*!
* Falcon Mode Tokens
* This is the value logged to a mailbox register to indicate that the
* falcon isn't booted in secure mode.
*/
#define LSF_FALCON_MODE_TOKEN_FLCN_INSECURE (0xDEADDEADU)
// LS encryption context, to store data to decrypt LS images.
#define LS_ENCRYPTION_AES128_CBC_IV_SIZE_BYTE (16)
typedef struct
{
NvU8 bLsEncrypted;
NvU8 rsvd[3];
NvU8 lsEncIV[LS_ENCRYPTION_AES128_CBC_IV_SIZE_BYTE];
} LSF_ENCRYPTION_CONTEXT, *PLSF_ENCRYPTION_CONTEXT;
#endif // RMLSFM_H

View File

@@ -0,0 +1,140 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* Shared postbox interface defines for RM and PMU.
*/
#ifndef RMPBICMDIF_H
#define RMPBICMDIF_H
/*
* Define the status of postbox interface at different instances.
*
 * The values should be in accordance with the spec and must not be changed.
* A new PBI command must be submitted with the status NV_PBI_COMMAND_STATUS_UNDEFINED.
*/
#define NV_PBI_COMMAND_STATUS 7:0
#define NV_PBI_COMMAND_STATUS_UNDEFINED 0x00 // command submitted to PMU
#define NV_PBI_COMMAND_STATUS_SUCCESS 0x01 // command successfully completed by PMU
#define NV_PBI_COMMAND_STATUS_PENDING 0x02 // command accepted by PMU
#define NV_PBI_COMMAND_STATUS_BUSY 0x03 // command processing in PMU
#define NV_PBI_COMMAND_STATUS_UNSPECIFIED_FAILURE 0x04 // unknown failure or hang
#define NV_PBI_COMMAND_STATUS_INVALID_ADDRESS 0x05 // invalid address submitted to PMU
#define NV_PBI_COMMAND_STATUS_MORE_DATA 0x06 // user needs to send more data to PMU
#define NV_PBI_COMMAND_STATUS_INVALID_COMMAND 0x07 // invalid command submitted
/*
* This corresponds to reserved bits of command register
*/
#define NV_PBI_COMMAND_RSVD_0 15:8
#define NV_PBI_COMMAND_RSVD_0_VAL 0x00
/*
 * This specifies the dword index when the client is sending multiple dwords;
 * for a single dword, this should be zero
*/
#define NV_PBI_COMMAND_BUFFER_INDEX 19:16
/*
 * This specifies the total number of dwords passed by the client
*/
#define NV_PBI_COMMAND_BUFFER_SIZE 23:20
/*
 * These correspond to the different function IDs supported by PBI
*/
#define NV_PBI_COMMAND_FUNC_ID 27:24
#define NV_PBI_COMMAND_FUNC_ID_GET_CAPABILITIES (0)
#define NV_PBI_COMMAND_FUNC_ID_EXECUTE_ROUTINE (11)
//
// Definitions for common 'Execute Routine' calls that are
// shared across all GPUs.
//
// Common routine IDs must be in the range 0x80 to 0xff.
//
// Get GID:
#define NV_PBI_EXECUTE_ROUTINE_GET_GID 0x80
// Get Feature:
#define NV_PBI_EXECUTE_ROUTINE_GET_FEATURE 0x81
#define NV_PBI_EXECUTE_ROUTINE_GET_FEATURE_EXCLUSION 5:0
#define NV_PBI_EXECUTE_ROUTINE_GET_FEATURE_EXCLUSION_ALLOWED 0x2
/*
* This corresponds to reserved field of command register
*/
#define NV_PBI_COMMAND_RSVD_1 28:28
#define NV_PBI_COMMAND_RSVD_1_VAL 0x00
/*
 * If this bit is set, the system will be notified on command completion
*/
#define NV_PBI_COMMAND_SYS_NOTIFY 29:29
#define NV_PBI_COMMAND_SYS_NOTIFY_TRUE 0x01
#define NV_PBI_COMMAND_SYS_NOTIFY_FALSE 0x00
/*
 * If this bit is set, the driver will be notified of the command completion status
*/
#define NV_PBI_COMMAND_DRV_NOTIFY 30:30
#define NV_PBI_COMMAND_DRV_NOTIFY_TRUE 0x01
#define NV_PBI_COMMAND_DRV_NOTIFY_FALSE 0x00
/*
* Defines the interrupt state of the PBI command
*/
#define NV_PBI_COMMAND_INTERRUPT 31:31
#define NV_PBI_COMMAND_INTERRUPT_TRUE 0x01
#define NV_PBI_COMMAND_INTERRUPT_FALSE 0x00
/*
 * This sets the different fields of the command register
*/
#define PBI_SET_COMMAND_PARAMS(status, r0, index, sz, cmd, r1, sys, \
drv, intr, val) \
{ \
val = DRF_NUM(_PBI, _COMMAND, _STATUS, status) | \
DRF_NUM(_PBI, _COMMAND, _RSVD_0, r0) | \
DRF_NUM(_PBI, _COMMAND, _BUFFER_INDEX, index) | \
DRF_NUM(_PBI, _COMMAND, _BUFFER_SIZE, sz) | \
DRF_NUM(_PBI, _COMMAND, _FUNC_ID, cmd) | \
DRF_NUM(_PBI, _COMMAND, _RSVD_1, r1) | \
DRF_NUM(_PBI, _COMMAND, _SYS_NOTIFY, sys) | \
DRF_NUM(_PBI, _COMMAND, _DRV_NOTIFY, drv) | \
DRF_NUM(_PBI, _COMMAND, _INTERRUPT, intr); \
}
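/*
 * Illustrative sketch: composing a command word for the common 'Get GID'
 * execute routine, with driver notification enabled. Programming the routine
 * ID and data into the PBI data register is outside this file's scope; this
 * only shows how the fields defined above are combined.
 */
static NvU32
pbiBuildGetGidCommand(void)
{
    NvU32 cmdWord = 0;

    PBI_SET_COMMAND_PARAMS(NV_PBI_COMMAND_STATUS_UNDEFINED,        // new command
                           NV_PBI_COMMAND_RSVD_0_VAL,
                           0,                                      // buffer index 0
                           1,                                      // one dword of data
                           NV_PBI_COMMAND_FUNC_ID_EXECUTE_ROUTINE,
                           NV_PBI_COMMAND_RSVD_1_VAL,
                           NV_PBI_COMMAND_SYS_NOTIFY_FALSE,
                           NV_PBI_COMMAND_DRV_NOTIFY_TRUE,
                           NV_PBI_COMMAND_INTERRUPT_FALSE,
                           cmdWord);
    return cmdWord;
}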
/*
* postbox_clients_mutex_id
*
* Define the Mutex ID for different PBI clients
*/
enum postbox_clients_mutex_id
{
PBI_CLIENT_NONE = 0x00,
PBI_CLIENT_DRIVER_PCIPBI_SHIM = 0x15
};
#endif /* RMPBICMDIF_H */

View File

@@ -0,0 +1,65 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef EFI_CONSOLE_H
#define EFI_CONSOLE_H
#include "gpu/disp/kern_disp_max.h"
struct OBJGPU;
typedef struct
{
NvBool isDispStateSave;
NvU32 activeDisplayId[OBJ_MAX_HEADS];
struct
{
NvU32 displayId;
struct {
NvU32 index;
NvU32 subLinkMask;
} sorXBar;
struct {
NvU32 linkBw;
NvU32 laneCount;
NvU32 linkCtl;
} displayPort;
} activeDfpState[OBJ_MAX_DFPS];
NvU32 numDfps;
struct
{
NvU32 coreChannelClass;
NvU32 cacheSize;
NvU32 *pCache;
} display;
} nv_efi_t;
void RmSaveEFIDisplayState (OBJGPU *pGpu);
void RmRestoreEFIDisplayState (OBJGPU *pGpu);
#endif // EFI_CONSOLE_H

View File

@@ -0,0 +1,94 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CAPS_H_
#define _NV_CAPS_H_
#include <nv-kernel-interface-api.h>
/*
 * Opaque OS-specific struct; on Linux, this has a
 * 'struct proc_dir_entry' member.
*/
typedef struct nv_cap nv_cap_t;
/*
* Creates directory named "capabilities" under the provided path.
*
* @param[in] path Absolute path
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_init(const char *path);
/*
* Creates capability directory entry
*
* @param[in] parent_cap Parent capability directory
* @param[in] name Capability directory's name
* @param[in] mode Capability directory's access mode
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
* Creates capability file entry
*
* @param[in] parent_cap Parent capability directory
* @param[in] name Capability file's name
* @param[in] mode Capability file's access mode
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
* Destroys capability entry
*
* @param[in] cap Capability entry
*/
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap);
/*
* Validates and duplicates the provided file descriptor
*
* @param[in] cap Capability entry
* @param[in] fd File descriptor to be validated
*
* Returns duplicate fd upon success. Otherwise, returns -1.
*/
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd);
/*
* Closes file descriptor
*
* This function should be used to close duplicate file descriptors
* returned by nv_cap_validate_and_dup_fd.
*
 * @param[in] fd File descriptor to be closed
*
*/
void NV_API_CALL nv_cap_close_fd(int fd);
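/*
 * Illustrative sketch of the intended call sequence (the "/driver/nvidia"
 * path and entry names below are examples, not mandated locations; error
 * handling omitted):
 *
 *     nv_cap_t *root = nv_cap_init("/driver/nvidia");
 *     nv_cap_t *dir  = nv_cap_create_dir_entry(root, "mig", 0555);
 *     nv_cap_t *file = nv_cap_create_file_entry(dir, "config", 0444);
 *
 *     int dup_fd = nv_cap_validate_and_dup_fd(file, client_fd);
 *     ...
 *     nv_cap_close_fd(dup_fd);
 *     nv_cap_destroy_entry(file);
 *     nv_cap_destroy_entry(dir);
 */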
#endif /* _NV_CAPS_H_ */

View File

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_GPU_INFO_H_
#define _NV_GPU_INFO_H_
typedef struct {
NvU32 gpu_id;
struct {
NvU32 domain;
NvU8 bus, slot, function;
} pci_info;
/*
* opaque OS-specific pointer; on Linux, this is a pointer to the
* 'struct device' for the GPU.
*/
void *os_device_ptr;
} nv_gpu_info_t;
#define NV_MAX_GPUS 32
#endif /* _NV_GPU_INFO_H_ */

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_NUMBERS_H
#define NV_IOCTL_NUMBERS_H
/* NOTE: using an ioctl() number > 55 will overflow! */
#define NV_IOCTL_MAGIC 'F'
#define NV_IOCTL_BASE 200
#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0)
#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1)
#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6)
#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7)
#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9)
#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10)
#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11)
#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12)
#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
#endif

View File

@@ -0,0 +1,145 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_H
#define NV_IOCTL_H
#include <nv-ioctl-numbers.h>
#include <nvtypes.h>
typedef struct {
NvU32 domain; /* PCI domain number */
NvU8 bus; /* PCI bus number */
NvU8 slot; /* PCI slot number */
NvU8 function; /* PCI function number */
NvU16 vendor_id; /* PCI vendor ID */
NvU16 device_id; /* PCI device ID */
} nv_pci_info_t;
/*
* ioctl()'s with parameter structures too large for the
* _IOC cmd layout use the nv_ioctl_xfer_t structure
* and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual
* size and user argument pointer into the RM, which
* will then copy it to/from kernel space in separate steps.
*/
typedef struct nv_ioctl_xfer
{
NvU32 cmd;
NvU32 size;
NvP64 ptr NV_ALIGN_BYTES(8);
} nv_ioctl_xfer_t;
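/*
 * Illustrative user-space sketch (assumptions: Linux, an open control-device
 * fd, and NvP64 being a 64-bit integer type; not the canonical client code):
 * wrapping an oversized parameter structure in nv_ioctl_xfer_t and issuing it
 * through NV_ESC_IOCTL_XFER_CMD.
 */
#if 0 // example only; not compiled into the kernel interface layer
#include <sys/ioctl.h>

static int nv_xfer_ioctl(int fd, NvU32 cmd, void *pParams, NvU32 size)
{
    nv_ioctl_xfer_t xfer;

    xfer.cmd  = cmd;                      // the real escape code
    xfer.size = size;                     // actual parameter size
    xfer.ptr  = (NvP64)(NvUPtr)pParams;   // user pointer, widened to 64 bits

    return ioctl(fd, _IOWR(NV_IOCTL_MAGIC, NV_ESC_IOCTL_XFER_CMD,
                           nv_ioctl_xfer_t), &xfer);
}
#endif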
typedef struct nv_ioctl_card_info
{
NvBool valid;
nv_pci_info_t pci_info; /* PCI config information */
NvU32 gpu_id;
NvU16 interrupt_line;
NvU64 reg_address NV_ALIGN_BYTES(8);
NvU64 reg_size NV_ALIGN_BYTES(8);
NvU64 fb_address NV_ALIGN_BYTES(8);
NvU64 fb_size NV_ALIGN_BYTES(8);
NvU32 minor_number;
NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */
} nv_ioctl_card_info_t;
/* alloc event */
typedef struct nv_ioctl_alloc_os_event
{
NvHandle hClient;
NvHandle hDevice;
NvU32 fd;
NvU32 Status;
} nv_ioctl_alloc_os_event_t;
/* free event */
typedef struct nv_ioctl_free_os_event
{
NvHandle hClient;
NvHandle hDevice;
NvU32 fd;
NvU32 Status;
} nv_ioctl_free_os_event_t;
/* status code */
typedef struct nv_ioctl_status_code
{
NvU32 domain;
NvU8 bus;
NvU8 slot;
NvU32 status;
} nv_ioctl_status_code_t;
/* check version string */
#define NV_RM_API_VERSION_STRING_LENGTH 64
typedef struct nv_ioctl_rm_api_version
{
NvU32 cmd;
NvU32 reply;
char versionString[NV_RM_API_VERSION_STRING_LENGTH];
} nv_ioctl_rm_api_version_t;
#define NV_RM_API_VERSION_CMD_STRICT 0
#define NV_RM_API_VERSION_CMD_RELAXED '1'
#define NV_RM_API_VERSION_CMD_OVERRIDE '2'
#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0
#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1
typedef struct nv_ioctl_query_device_intr
{
NvU32 intrStatus NV_ALIGN_BYTES(4);
NvU32 status;
} nv_ioctl_query_device_intr;
/* system parameters that the kernel driver may use for configuration */
typedef struct nv_ioctl_sys_params
{
NvU64 memblock_size NV_ALIGN_BYTES(8);
} nv_ioctl_sys_params_t;
typedef struct nv_ioctl_register_fd
{
int ctl_fd;
} nv_ioctl_register_fd_t;
#define NV_DMABUF_EXPORT_MAX_HANDLES 128
typedef struct nv_ioctl_export_to_dma_buf_fd
{
int fd;
NvHandle hClient;
NvU32 totalObjects;
NvU32 numObjects;
NvU32 index;
NvU64 totalSize NV_ALIGN_BYTES(8);
NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU32 status;
} nv_ioctl_export_to_dma_buf_fd_t;
#endif

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_KERNEL_RMAPI_OPS_H_
#define _NV_KERNEL_RMAPI_OPS_H_
/*
* Define the RMAPI provided to kernel-level RM clients.
*
* Kernel-level RM clients should populate nvidia_kernel_rmapi_ops_t
* by assigning nvidia_kernel_rmapi_ops_t::op and the corresponding
* parameter structure in nvidia_kernel_rmapi_ops_t's params union.
* Then, pass a pointer to the nvidia_kernel_rmapi_ops_t to
* rm_kernel_rmapi_op().
*/
#include "nvtypes.h"
#include "nvos.h"
typedef struct {
NvU32 op; /* One of the NV0[14]_XXXX operations listed below. */
union {
NVOS00_PARAMETERS free; /* NV01_FREE */
NVOS02_PARAMETERS allocMemory64; /* NV01_ALLOC_MEMORY */
NVOS21_PARAMETERS alloc; /* NV04_ALLOC */
NVOS32_PARAMETERS *pVidHeapControl; /* NV04_VID_HEAP_CONTROL */
NVOS33_PARAMETERS mapMemory; /* NV04_MAP_MEMORY */
NVOS34_PARAMETERS unmapMemory; /* NV04_UNMAP_MEMORY */
NVOS39_PARAMETERS allocContextDma2; /* NV04_ALLOC_CONTEXT_DMA */
NVOS46_PARAMETERS mapMemoryDma; /* NV04_MAP_MEMORY_DMA */
NVOS47_PARAMETERS unmapMemoryDma; /* NV04_UNMAP_MEMORY_DMA */
NVOS49_PARAMETERS bindContextDma; /* NV04_BIND_CONTEXT_DMA */
NVOS54_PARAMETERS control; /* NV04_CONTROL*/
NVOS55_PARAMETERS dupObject; /* NV04_DUP_OBJECT */
NVOS57_PARAMETERS share; /* NV04_SHARE */
NVOS61_PARAMETERS addVblankCallback; /* NV04_ADD_VBLANK_CALLBACK */
} params;
} nvidia_kernel_rmapi_ops_t;
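/*
 * Illustrative sketch (comment only; rm_kernel_rmapi_op()'s exact signature
 * and its stack argument are assumed here): freeing an object through this
 * interface.
 *
 *     nvidia_kernel_rmapi_ops_t ops;
 *
 *     memset(&ops, 0, sizeof(ops));
 *     ops.op = NV01_FREE;
 *     ops.params.free.hRoot = hClient;
 *     ops.params.free.hObjectParent = hClient;
 *     ops.params.free.hObjectOld = hObject;
 *
 *     rm_kernel_rmapi_op(sp, &ops); // 'sp' assumed to be the caller's stack handle
 */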
#endif /* _NV_KERNEL_RMAPI_OPS_H_ */

View File

@@ -0,0 +1,63 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2007-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_NB_REGS_H_
#define _NV_NB_REGS_H_
typedef struct
{
NvU32 subsystem_vendor_id;
NvU32 subsystem_device_id;
NvU16 gpu_device_id;
} nv_nb_id_t;
typedef struct
{
NvU32 vendor_id;
const char *name;
NvU32 data;
} nv_nb_reg_t;
/*
* nb_id_table contains the OEM vendor ID, the subsystem ID and the
* GPU device ID of the notebooks for which we need to enable
* vendor specific registry keys. nb_reg_table contains the vendor
* specific registry key values. The initVendorSpecificRegistry()
 * function compares the present notebook's OEM subsystem ID and the
 * GPU device ID with the values present in nb_id_table. If a match
* is found, initVendorSpecificRegistry() extracts the vendor
* ID and sets any associated registry key listed in nb_reg_table.
*/
static nv_nb_id_t nb_id_table[] = {
{ PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x1284 }, // Acer GT 630
{ PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x124b }, // Acer GT 640
{ 0, 0, 0 }
};
static nv_nb_reg_t nb_reg_table[] = {
{ PCI_VENDOR_ID_PC_PARTNER, "RmOverrideSupportChipsetAspm", 2 },
{ 0, NULL, 0 }
};
#endif //_NV_NB_REGS_H_

View File

@@ -0,0 +1,373 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PRIV_H_
#define _NV_PRIV_H_
#include <nv.h>
#include <os/os.h>
#include <ctrl/ctrl402c.h>
#include <gpu/disp/kern_disp_max.h>
#include <efi-console.h>
#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d))
#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d))
#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d))
#define NV_PRIV_REG_RD08(b,o) ((b)->Reg008[(o)/1])
#define NV_PRIV_REG_RD16(b,o) ((b)->Reg016[(o)/2])
#define NV_PRIV_REG_RD32(b,o) ((b)->Reg032[(o)/4])
#define NV_NUM_CR_REGS 0x99
struct OBJGPU;
#define NV_BIT_PLANE_SIZE (64 * 1024)
#define NV_NUM_VGA_BIT_PLANES 4
/*
* device state during Power Management
*/
typedef struct nv_pm_state_s
{
NvU32 IntrEn;
NvBool InHibernate;
} nv_pm_state_t;
/*
* data structure for the UNIX workqueues
*/
typedef struct nv_work_item_s
{
NvU32 flags;
NvU32 gpuInstance;
union
{
OSWorkItemFunction *pGpuFunction;
OSSystemWorkItemFunction *pSystemFunction;
} func;
void *pData;
} nv_work_item_t;
#define NV_WORK_ITEM_FLAGS_NONE 0x0
#define NV_WORK_ITEM_FLAGS_REQUIRES_GPU 0x1
#define NV_WORK_ITEM_FLAGS_DONT_FREE_DATA 0x2
/*
* pseudo-registry data structure
*/
typedef enum
{
NV_REGISTRY_ENTRY_TYPE_UNKNOWN = 0,
NV_REGISTRY_ENTRY_TYPE_DWORD,
NV_REGISTRY_ENTRY_TYPE_BINARY,
NV_REGISTRY_ENTRY_TYPE_STRING
} nv_reg_type_t;
typedef struct nv_reg_entry_s
{
char *regParmStr;
NvU32 type;
NvU32 data; // used when type == NV_REGISTRY_ENTRY_TYPE_DWORD
NvU8 *pdata; // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
NvU32 len; // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
struct nv_reg_entry_s *next;
} nv_reg_entry_t;
#define INVALID_DISP_ID 0xFFFFFFFF
#define MAX_DISP_ID_PER_ADAPTER 0x2
typedef struct nv_i2c_adapter_entry_s
{
void *pOsAdapter;
NvU32 port;
NvU32 displayId[MAX_DISP_ID_PER_ADAPTER];
} nv_i2c_adapter_entry_t;
#define NV_INIT_FLAG_HAL 0x0001
#define NV_INIT_FLAG_HAL_COMPONENTS 0x0002
#define NV_INIT_FLAG_GPU_STATE 0x0004
#define NV_INIT_FLAG_GPU_STATE_LOAD 0x0008
#define NV_INIT_FLAG_FIFO_WATCHDOG 0x0010
#define NV_INIT_FLAG_CORE_LOGIC 0x0020
#define NV_INIT_FLAG_HIRES 0x0040
#define NV_INIT_FLAG_DISP_STATE_SAVED 0x0080
#define NV_INIT_FLAG_GPUMGR_ATTACH 0x0100
#define NV_INIT_FLAG_PUBLIC_I2C 0x0400
#define NV_INIT_FLAG_SCALABILITY 0x0800
#define NV_INIT_FLAG_DMA 0x1000
#define MAX_I2C_ADAPTERS NV402C_CTRL_NUM_I2C_PORTS
/*
* GPU dynamic power state machine.
*
 * The GPU is in exactly one of these states at a time. Only certain state
* transitions are valid, as documented by the DAGs below.
*
* When in "instant idle" or COARSE mode:
*
* +----------------------+
* v |
* +---------+ +----------------+ +--------+
* | UNKNOWN | --> | IDLE_INDICATED | --> | IN_USE |
* +---------+ +----------------+ +--------+
*
* The transition from UNKNOWN to IDLE_INDICATED happens in
* rm_init_dynamic_power_management().
*
* Thereafter, transitions from IDLE_INDICATED to IN_USE happen when
* os_ref_dynamic_power() is called and the refcount transitions from 0 to 1;
* transitions from IN_USE to IDLE_INDICATED happen when
* os_unref_dynamic_power() is called and the refcount transitions from 1 to 0.
* Note that only calls to os_(un)ref_dynamic_power() with the mode == COARSE
* are considered in this mode; calls with mode == FINE are ignored. Since
* COARSE calls are placed only in rm_init_adapter/rm_shutdown_adapter, the GPU
* effectively stays in the IN_USE state any time any client has initialized
* it.
*
*
* When in "deferred idle" or FINE mode:
*
* +----------------------------------------------------------------+
* | |
* | |
* | +-------------------------------------------+----------------------+
* | | | v
* | +---------+ +----------------+ +--------------+ +----------------+ +--------+
* | | UNKNOWN | --> | IDLE_INDICATED | --> | | --> | IDLE_SUSTAINED | --> | IN_USE | -+
* | +---------+ +----------------+ | | +----------------+ +--------+ |
* | ^ | | | ^ |
* +--------------------+ | IDLE_INSTANT | ------+----------------------+ |
* | | | |
* | | | |
* | | <-----+ |
* +--------------+ |
* ^ |
* +-----------------------------------------------------+
*
* As before, the transition from UNKNOWN to IDLE_INDICATED happens in
* rm_init_dynamic_power_management(). This is not ideal: it means the GPU may
* be powered down immediately upon loading the RM module, even if
* rm_init_adapter() is going to be called soon thereafter. However, we can't
* rely on deferred idle callbacks yet, since those currently rely on core RM
* being initialized.
*
* At the beginning of rm_init_adapter(), the GPU transitions to the IN_USE
* state; during the rm_init_adapter() sequence,
* RmInitDeferredDynamicPowerManagement() will be called which will schedule
* timer callbacks and set the "deferred_idle_enabled" boolean.
*
* While in "deferred idle" mode, one of the callbacks
* timerCallbackForIdlePreConditions(), timerCallbackToIndicateIdle(), or
* RmIndicateIdle() should be scheduled when in the states:
* - IN_USE
* - IDLE_INSTANT
* - IDLE_SUSTAINED
* Note that since we may transition from IN_USE to IDLE_INSTANT rapidly (e.g.,
* for a series of RM calls), we don't attempt to schedule the callbacks and
* cancel them on each of these transitions. The
* timerCallbackForIdlePreConditions() callback will simply exit early if in
* the IN_USE state.
*
* As before, the GPU will remain in the IN_USE state until
* os_unref_dynamic_power() is called and the count transitions from 1 to 0
 * (calls with mode == FINE are honored in this mode, and these transitions
* can happen frequently). When the refcount reaches 0, rather than going
* directly to the IDLE_INDICATED state, it transitions to the IDLE_INSTANT
* state.
*
* Then, when the next timerCallbackForIdlePreConditions() callback executes,
* if all preconditions are met, the state will transition to IDLE_SUSTAINED.
*
* If, when in the IDLE_SUSTAINED state, os_ref_dynamic_power() is called, the
 * GPU will transition back to the IN_USE state and, on the next unref, return
 * to the IDLE_INSTANT state rather than IDLE_SUSTAINED. This ensures that
 * there is a suitable delay between any activity
* that requires bumping the refcount and indicating idleness.
*
* If the timerCallbackForIdlePreConditions() callback executes again and the
* GPU is still in the IDLE_SUSTAINED state, userspace mappings will be revoked
* and the timerCallbackToIndicateIdle() callback will be scheduled.
*
* If, before the timerCallbackToIndicateIdle() callback executes, either
* os_ref_dynamic_power() is called or a mapping which has been revoked is
* accessed (which triggers the RmForceGpuNotIdle() callback), the GPU will
* transition back to the IN_USE or IDLE_INSTANT state, respectively.
*
* Then, when the timerCallbackToIndicateIdle() callback executes, if all
* mappings are still revoked, and the GPU is still in the IDLE_SUSTAINED
* state, and all GPU idleness preconditions remain satisfied, the
* RmIndicateIdle() work item will be enqueued. (Else, the GPU will transition
* back to the IDLE_INSTANT state and the callback for preconditions is
* scheduled again.)
*
* Finally, once the RmIndicateIdle() work item is called, if all of the same
* conditions still hold, the state will transition to IDLE_INDICATED. No
* callbacks will be scheduled from here; the callbacks for preconditions
* should be re-scheduled when transitioning out of the IDLE_INDICATED state.
*
* Once in the IDLE_INDICATED state, the kernel is free to call the RM to
* perform the GC6 entry sequence then turn off power to the GPU (although it
* may not, if the audio function is being used for example).
*
* There are two paths to exit the IDLE_INDICATED state:
* (a) If os_ref_dynamic_power() is called, in which case it transitions
* directly to the IN_USE state;
* (b) If RmForceGpuNotIdle() is called, in which case it transitions back to
* the IDLE_INSTANT state.
*/
typedef enum
{
NV_DYNAMIC_POWER_STATE_UNKNOWN = 0,
NV_DYNAMIC_POWER_STATE_IN_USE,
NV_DYNAMIC_POWER_STATE_IDLE_INSTANT,
NV_DYNAMIC_POWER_STATE_IDLE_SUSTAINED,
NV_DYNAMIC_POWER_STATE_IDLE_INDICATED,
} nv_dynamic_power_state_t;
typedef struct nv_dynamic_power_s
{
/*
* mode is read without the mutex -- should be read-only outside of
* rm_init_dynamic_power_management, called during probe only.
*/
nv_dynamic_power_mode_t mode;
/*
* Whether to indicate idle immediately when the refcount reaches 0, or
* only go to the IDLE_INSTANT state, and expect timer callbacks to
* transition through IDLE_SUSTAINED -> IDLE_INDICATED.
*/
NvBool deferred_idle_enabled;
nv_dynamic_power_state_t state;
NvS32 refcount;
/*
* A word on lock ordering. These locks must be taken in the order:
*
* RM API lock > this dynamic_power mutex > RM GPUs lock
*
* Skipping any of those locks is fine (if they aren't required to protect
* whatever state is being accessed or modified), so long as the order is
* not violated.
*/
PORT_MUTEX *mutex;
/*
* callback handles for deferred dynamic power management.
*/
NvP64 idle_precondition_check_event;
NvP64 indicate_idle_event;
NvBool idle_precondition_check_callback_scheduled;
/*
     * Callback handles for kernel-initiated GC6 entry/exit.
     * These are protected by the GPU lock.
*/
NvP64 remove_idle_holdoff;
NvBool b_idle_holdoff;
/*
* flag set if the platform does not support fine grain dynamic power
* management.
*/
NvBool b_fine_not_supported;
/*
* Counter to track clients disallowing GCOFF.
*/
NvU32 clients_gcoff_disallow_refcount;
/*
* Maximum FB allocation size which can be saved in system memory
* while doing GCOFF based dynamic PM.
*/
NvU64 gcoff_max_fb_size;
/*
* NVreg_DynamicPowerManagement regkey value set by the user
*/
NvU32 dynamic_power_regkey;
} nv_dynamic_power_t;
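/*
 * Illustrative sketch (not the driver's actual code) of the refcount-driven
 * transitions described above; locking, callback scheduling, and the
 * COARSE/FINE mode filtering are omitted for brevity.
 */
static inline void nv_dynamic_power_ref_sketch(nv_dynamic_power_t *pDp)
{
    if (++pDp->refcount == 1)
    {
        // Any of the idle states transitions straight to IN_USE.
        pDp->state = NV_DYNAMIC_POWER_STATE_IN_USE;
    }
}

static inline void nv_dynamic_power_unref_sketch(nv_dynamic_power_t *pDp)
{
    if (--pDp->refcount == 0)
    {
        // With deferred idle, do not indicate idleness immediately; the
        // precondition callback later promotes IDLE_INSTANT to
        // IDLE_SUSTAINED, and eventually to IDLE_INDICATED.
        pDp->state = pDp->deferred_idle_enabled
                         ? NV_DYNAMIC_POWER_STATE_IDLE_INSTANT
                         : NV_DYNAMIC_POWER_STATE_IDLE_INDICATED;
    }
}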
typedef struct
{
OBJGPU *pGpu;
NvU32 pmc_boot_0;
nv_efi_t efi;
NvU8 scr_vga_active[OBJ_MAX_HEADS];
NvU8 scr_dcb_index_lo[OBJ_MAX_HEADS];
NvU8 scr_dcb_index_hi[OBJ_MAX_HEADS];
NvU8 font_bitplanes[NV_NUM_VGA_BIT_PLANES][NV_BIT_PLANE_SIZE];
NvU32 flags;
NvU32 status;
nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
void *pVbiosCopy;
NvU32 vbiosSize;
nv_pm_state_t pm_state;
nv_reg_entry_t *pRegistry;
nv_dynamic_power_t dynamic_power;
/* Flag to check if the GPU needs 4K page isolation. */
NvBool b_4k_page_isolation_required;
/* Flag to check if GPU mobile config is enabled */
NvBool b_mobile_config_enabled;
/* Flag to check if S0ix-based power management is enabled. */
NvBool s0ix_pm_enabled;
/*
* Maximum FB allocation size which can be saved in system memory
 * during system suspend with S0ix-based power management.
*/
NvU64 s0ix_gcoff_max_fb_size;
NvU32 pmc_boot_42;
} nv_priv_t;
#define NV_SET_NV_PRIV(nv,p) ((nv)->priv = (p))
#define NV_GET_NV_PRIV(nv) ((nv) ? (nv)->priv : NULL)
/*
* Make sure that your stack has taken API Lock before using this macro.
*/
#define NV_GET_NV_PRIV_PGPU(nv) \
(NV_GET_NV_PRIV(nv) ? ((nv_priv_t *)NV_GET_NV_PRIV(nv))->pGpu : NULL)
#endif // _NV_PRIV_H_

View File

@@ -0,0 +1,920 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RM_REG_H_
#define _RM_REG_H_
#include "nvtypes.h"
/*
* use NV_REG_STRING to stringify a registry key when using that registry key
*/
#define __NV_REG_STRING(regkey) #regkey
#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
/*
* use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition
* of registry keys in the kernel module source code.
*/
#define __NV_REG_VAR(regkey) NVreg_##regkey
#if defined(NV_MODULE_PARAMETER)
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value)
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value)
#endif
#if defined(NV_MODULE_STRING_PARAMETER)
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value)
#endif
#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
/*
* Like NV_DEFINE_PARMS_TABLE_ENTRY, but allows a mismatch between the name of
* the regkey and the name of the module parameter. When using this macro, the
* name of the parameter is passed to the extra "parameter" argument, and it is
* this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
*/
#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)}
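/*
 * Illustrative sketch, assuming a hypothetical regkey named "FooBar" with a
 * default value of 0 (the key name is an example only). The macros above
 * pair up as:
 *
 *     NV_DEFINE_REG_ENTRY(FooBar, 0);        // static NvU32 NVreg_FooBar = 0;
 *     ...
 *     NV_DEFINE_PARAMS_TABLE_ENTRY(FooBar),  // { "FooBar", &NVreg_FooBar }
 *
 * i.e. the module parameter variable and its name string stay in sync
 * because both are generated from the same token.
 */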
/*
*----------------- registry key definitions--------------------------
*/
/*
* Option: ModifyDeviceFiles
*
* Description:
*
* When this option is enabled, the NVIDIA driver will verify the validity
* of the NVIDIA device files in /dev and attempt to dynamically modify
* and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
* driver to touch the device files, you can use this registry key.
*
* This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
* capability driver. Furthermore, the NVIDIA capability driver provides a
* modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
* this module parameter per device file.
*
* Possible Values:
* 0 = disable dynamic device file management
* 1 = enable dynamic device file management (default)
*/
#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
/*
* Option: DeviceFileUID
*
* Description:
*
* This registry key specifies the UID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default UID is 0 ('root').
*/
#define __NV_DEVICE_FILE_UID DeviceFileUID
#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
/*
* Option: DeviceFileGID
*
* Description:
*
* This registry key specifies the GID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default GID is 0 ('root').
*/
#define __NV_DEVICE_FILE_GID DeviceFileGID
#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
/*
* Option: DeviceFileMode
*
* Description:
*
* This registry key specifies the device file mode assigned to the NVIDIA
* device files created and/or modified by the NVIDIA driver when dynamic
* device file management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default mode is 0666 (octal, rw-rw-rw-).
*/
#define __NV_DEVICE_FILE_MODE DeviceFileMode
#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
/*
* Option: ResmanDebugLevel
*
* Default value: ~0
*/
#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
/*
* Option: RmLogonRC
*
* Default value: 1
*/
#define __NV_RM_LOGON_RC RmLogonRC
#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
/*
* Option: InitializeSystemMemoryAllocations
*
* Description:
*
* The NVIDIA Linux driver normally clears system memory it allocates
* for use with GPUs or within the driver stack. This is to ensure
* that potentially sensitive data is not rendered accessible by
* arbitrary user applications.
*
* Owners of single-user systems or similar trusted configurations may
* choose to disable the aforementioned clears using this option and
* potentially improve performance.
*
* Possible values:
*
* 1 = zero out system memory allocations (default)
* 0 = do not perform memory clears
*/
#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
InitializeSystemMemoryAllocations
#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
/*
* Option: RegistryDwords
*
* Description:
*
* This option accepts a semicolon-separated list of key=value pairs. Each
* key name is checked against the table of static options; if a match is
* found, the static option value is overridden, but invalid options remain
* invalid. Pairs that do not match an entry in the static option table
* are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwords="<key=value>;<key=value>;..."
*/
#define __NV_REGISTRY_DWORDS RegistryDwords
#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
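/*
 * Hypothetical example (key names and values are for illustration only):
 *
 *     NVreg_RegistryDwords="ResmanDebugLevel=0;RmLogonRC=0"
 *
 * Both keys match static options defined in this file, so their static
 * values would be overridden at module load time.
 */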
/*
* Option: RegistryDwordsPerDevice
*
* Description:
*
* This option allows registry keys to be specified per GPU device, giving
* registry control at GPU-level granularity. It accepts a semicolon-
* separated list of key=value pairs. The first key=value pair MUST be
* "pci=DDDD:BB:DD.F;", where DDDD is the Domain, BB is the Bus, DD is the
* Device slot number and F is the Function. This PCI BDF is used to identify
* the GPU to which the registry keys that follow are assigned.
* If no GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is
* found, all the registry keys that follow are skipped, until the next valid
* PCI identifier "pci=DDDD:BB:DD.F;" is found. The following are the valid
* formats for the value of the "pci" string:
* 1) bus:slot : Domain and function default to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI dev id string.
*
* For each of the registry keys that follows, key name is checked against the
* table of static options; if a match is found, the static option value is
* overridden, but invalid options remain invalid. Pairs that do not match an
* entry in the static option table are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
* pci=DDDD:BB:DD.F;<key=value>;..;"
*/
#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
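/*
 * Hypothetical example (BDF and key are for illustration only):
 *
 *     NVreg_RegistryDwordsPerDevice="pci=0000:03:00.0;RmLogonRC=0"
 *
 * The key=value pairs following the "pci=" identifier apply only to the GPU
 * at PCI BDF 0000:03:00.0; if no such GPU is found, they are skipped.
 */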
#define __NV_RM_MSG RmMsg
#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
/*
* Option: UsePageAttributeTable
*
* Description:
*
* Enable/disable use of the page attribute table (PAT) available in
* modern x86/x86-64 processors to set the effective memory type of memory
* mappings to write-combining (WC).
*
* If this option is enabled, an x86 processor with PAT support is present,
* and the host system's Linux kernel did not configure one of the PAT
* entries to indicate the WC memory type, the driver will change the second
* entry in the PAT from its default (write-through (WT)) to WC at module
* load time. If the kernel did update one of the PAT entries, the driver
* will not modify the PAT.
*
* In both cases, the driver will honor attempts to map memory with the WC
* memory type by selecting the appropriate PAT entry using the correct
* set of PTE flags.
*
* Possible values:
*
* ~0 = use the NVIDIA driver's default logic (default)
* 1 = enable use of the PAT for WC mappings.
* 0 = disable use of the PAT for WC mappings.
*/
#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
/*
* Option: EnableMSI
*
* Description:
*
* When this option is enabled and the host kernel supports the MSI feature,
* the NVIDIA driver will enable the PCIe MSI capability on GPUs that support
* this feature instead of using PCIe wired interrupts.
*
* Possible Values:
*
* 0 = disable MSI interrupt
* 1 = enable MSI interrupt (default)
*
*/
#define __NV_ENABLE_MSI EnableMSI
#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
/*
* Option: RegisterForACPIEvents
*
* Description:
*
* When this option is enabled, the NVIDIA driver will register with the
* ACPI subsystem to receive notification of ACPI events.
*
* Possible values:
*
* 1 - register for ACPI events (default)
* 0 - do not register for ACPI events
*/
#define __NV_REGISTER_FOR_ACPI_EVENTS RegisterForACPIEvents
#define NV_REG_REGISTER_FOR_ACPI_EVENTS NV_REG_STRING(__NV_REGISTER_FOR_ACPI_EVENTS)
/*
* Option: EnablePCIeGen3
*
* Description:
*
* Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
* when configured on SandyBridge E desktop platforms, NVIDIA feels that
* delivering a reliable, high-quality experience is not currently possible in
* PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
* NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
* option to enable PCIe Gen3 support.
*
* This is completely unsupported!
*
* Possible Values:
*
* 0: disable PCIe Gen3 support (default)
* 1: enable PCIe Gen3 support
*/
#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
/*
* Option: MemoryPoolSize
*
* Description:
*
* When set to a non-zero value, this option specifies the size of the
* memory pool, given as a multiple of 1 GB, created on VMware ESXi to
* satisfy any system memory allocations requested by the NVIDIA kernel
* module.
*/
#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
/*
* Option: KMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size, as
* a multiple of 1 MB, of the heap memory space reserved for kmalloc
* operations on VMware ESXi, used to satisfy any system memory allocations
* requested by the NVIDIA kernel module.
*/
#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
/*
* Option: VMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size, as
* a multiple of 1 MB, of the heap memory space reserved for vmalloc
* operations on VMware ESXi, used to satisfy any system memory allocations
* requested by the NVIDIA kernel module.
*/
#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
/*
* Option: IgnoreMMIOCheck
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will ignore the
* MMIO limit check during device probe on the VMware ESXi kernel. This is
* typically necessary when the VMware ESXi MMIO limit differs between a
* base version and its updates. Customers using updates can set this regkey
* to avoid probe failures.
*/
#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
/*
* Option: TCEBypassMode
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will attempt to setup
* all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
* the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
* necessary for CUDA applications in which large system memory mappings may
* exceed the default TCE remapping capacity when operated in non-bypass mode.
*
* This option has no effect on non-POWER platforms.
*
* Possible Values:
*
* 0: system default TCE mode on all GPUs
* 1: enable TCE bypass mode on all GPUs
* 2: disable TCE bypass mode on all GPUs
*/
#define __NV_TCE_BYPASS_MODE TCEBypassMode
#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
#define NV_TCE_BYPASS_MODE_DEFAULT 0
#define NV_TCE_BYPASS_MODE_ENABLE 1
#define NV_TCE_BYPASS_MODE_DISABLE 2
/*
* Option: pci
*
* Description:
*
* On Unix platforms, per-GPU registry keys can be specified as:
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,<per-gpu registry keys>",
* where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
* The key "pci" identifies what follows next as a PCI BDF identifier, for
* which the registry keys are to be applied.
*
* This define is not used on non-UNIX platforms.
*
* Possible Formats for value:
*
* 1) bus:slot : Domain and function default to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI BDF identifier string.
*/
#define __NV_PCI_DEVICE_BDF pci
#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
/*
* Option: EnableStreamMemOPs
*
* Description:
*
* When this option is enabled, the CUDA driver will enable support for
* CUDA Stream Memory Operations in user-mode applications; these are
* currently disabled by default due to limited support in developer tools.
*
* Note: this is treated as a hint. MemOPs may still be left disabled by the
* CUDA driver for other reasons.
*
* Possible Values:
*
* 0 = disable feature (default)
* 1 = enable feature
*/
#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
/*
* Option: EnableUserNUMAManagement
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will require the
* user-mode NVIDIA Persistence daemon to manage the onlining and offlining
* of its NUMA device memory.
*
* This option has no effect on platforms that do not support onlining
* device memory to a NUMA node (this feature is only supported on certain
* POWER9 systems).
*
* Possible Values:
*
* 0: disable user-mode NUMA management
* 1: enable user-mode NUMA management (default)
*/
#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
/*
* Option: GpuBlacklist
*
* Description:
*
* This option accepts a list of blacklisted GPUs, separated by commas, that
* cannot be attached or used. Each blacklisted GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs. This regkey is deprecated and will be removed in the future. Use
* NV_REG_EXCLUDED_GPUS instead.
*/
#define __NV_GPU_BLACKLIST GpuBlacklist
#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
/*
* Option: ExcludedGpus
*
* Description:
*
* This option accepts a list of excluded GPUs, separated by commas, that
* cannot be attached or used. Each excluded GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs.
*/
#define __NV_EXCLUDED_GPUS ExcludedGpus
#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
/*
* Option: NvLinkDisable
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will not attempt to
* initialize or train NVLink connections for any GPUs. A system reboot is
* required for changes to take effect.
*
* This option has no effect if no GPUs support NVLink.
*
* Possible Values:
*
* 0: Do not disable NVLink (default)
* 1: Disable NVLink
*/
#define __NV_NVLINK_DISABLE NvLinkDisable
#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
/*
* Option: RestrictProfilingToAdminUsers
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will prevent users
* without administrative access (i.e., the CAP_SYS_ADMIN capability) from
* using GPU performance counters.
*
* Possible Values:
*
* 0: Do not restrict GPU counters (default)
* 1: Restrict GPU counters to system administrators only
*/
#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
/*
* Option: TemporaryFilePath
*
* Description:
*
* When specified, this option changes the location in which the
* NVIDIA kernel module will create unnamed temporary files (e.g. to
* save the contents of video memory in). The indicated path must
* be a directory. By default, temporary files are created in /tmp.
*/
#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
/*
* Option: PreserveVideoMemoryAllocations
*
* If enabled, this option prompts the NVIDIA kernel module to save and
* restore all video memory allocations across system power management
* cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
* only select allocations are preserved.
*
* Possible Values:
*
* 0: Preserve only select video memory allocations (default)
* 1: Preserve all video memory allocations
*/
#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
/*
* Option: EnableS0ixPowerManagement
*
* When this option is enabled, the NVIDIA driver will use S0ix-based
* power management for system suspend/resume, if both the platform and
* the GPU support S0ix.
*
* During system suspend, if S0ix is enabled and
* video memory usage is above the threshold configured by
* 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
* in self-refresh mode while the rest of the GPU is powered down.
*
* Otherwise, the driver will copy video memory contents to system memory
* and power off the video memory along with the GPU.
*
* Possible Values:
*
* 0: Disable S0ix based power management (default)
* 1: Enable S0ix based power management
*/
#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
/*
* Option: S0ixPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use during
* S0ix-based system power management.
*
* When S0ix is enabled and the system is suspended, the driver will
* compare the amount of video memory in use with this threshold,
* to decide whether to keep video memory in self-refresh or copy video
* memory content to system memory.
*
* See the 'EnableS0ixPowerManagement' option.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* The default value for this option is 256 MB.
*
*/
#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
S0ixPowerManagementVideoMemoryThreshold
#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: DynamicPowerManagement
*
* This option controls how aggressively the NVIDIA kernel module will manage
* GPU power through kernel interfaces.
*
* Possible Values:
*
* 0: Never allow the GPU to be powered down.
* 1: Power down the GPU when it is not initialized.
* 2: Power down the GPU after it has been inactive for some time.
* 3: (Default) Power down the GPU after a period of inactivity (i.e.,
* mode 2) on Ampere or later notebooks. Otherwise, do not power down
* the GPU.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
/*
* Option: DynamicPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use
* when selecting the dynamic power management scheme.
*
* When the driver detects that the GPU is idle, it will compare the amount
* of video memory in use with this threshold.
*
* If the current video memory usage is less than the threshold, the
* driver may preserve video memory contents in system memory and power off
* the video memory along with the GPU itself, if supported. Otherwise,
* the video memory will be kept in self-refresh mode while powering down
* the rest of the GPU, if supported.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* If the requested value is greater than 200MB (the default), then it
* will be capped to 200MB.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
DynamicPowerManagementVideoMemoryThreshold
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: RegisterPCIDriver
*
* Description:
*
* When this option is enabled, the NVIDIA driver will register with
* PCI subsystem.
*
* Possible values:
*
* 1 - register as PCI driver (default)
* 0 - do not register as PCI driver
*/
#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
/*
* Option: EnablePCIERelaxedOrderingMode
*
* Description:
*
* When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
* be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
* every device to set the relaxed ordering bit to 1 in all outbound MWr
* transaction-layer packets. This is equivalent to setting the regkey to
* FORCE_ENABLE as a non-per-device registry key.
*
* Possible values:
* 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
* 1 - Enable PCIe TLP relaxed ordering bit-setting
*/
#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableGpuFirmware
*
* Description:
*
* When this option is enabled, the NVIDIA driver will enable use of GPU
* firmware.
*
* Possible mode values:
* 0 - Do not enable GPU firmware
* 1 - Enable GPU firmware
* 2 - (Default) Use the default enablement policy for GPU firmware
*
* Setting this to anything other than 2 will alter driver firmware-
* enablement policies, possibly disabling GPU firmware where it would
* have otherwise been enabled by default.
*
* If this key is set globally to the system, the driver may still attempt
* to apply some policies to maintain uniform firmware modes across all
* GPUs. This may result in the driver failing initialization on some GPUs
* to maintain such a policy.
*
* If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
* will attempt to honor whatever configuration is specified without applying
* additional policies. This may also result in failed GPU initializations if
* the configuration is not possible (for example if the firmware is missing
* from the filesystem, or the GPU is not capable).
*
* Policy bits:
*
* POLICY_ALLOW_FALLBACK:
* The normal behavior is to fail GPU initialization if this registry
* entry is set in such a way that results in an invalid configuration. If
* instead the user would like the driver to automatically fall back to
* initializing the failing GPU with firmware disabled, this bit can be
* set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
* Note that this can result in a mixed mode configuration (ex: GPU0 has
* firmware enabled, but GPU1 does not).
*
*/
#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
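/*
 * Illustrative decode sketch (the helper names are hypothetical and not part
 * of the original interface; only the mode/policy masks above are assumed):
 */
static inline NvU32 nv_reg_gpu_firmware_mode(NvU32 regval)
{
    return (regval & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK);
}

static inline NvBool nv_reg_gpu_firmware_allows_fallback(NvU32 regval)
{
    return ((regval & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK) != 0);
}
/*
 * The default value 0x12 thus decodes as mode 2 (default enablement policy)
 * with POLICY_ALLOW_FALLBACK set.
 */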
/*
* Option: EnableGpuFirmwareLogs
*
* When this option is enabled, the NVIDIA driver will send GPU firmware logs
* to the system log, when possible.
*
* Possible values:
* 0 - Do not send GPU firmware logs to the system log
* 1 - Enable sending of GPU firmware logs to the system log
* 2 - (Default) Enable sending of GPU firmware logs to the system log for
* the debug kernel driver build only
*/
#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
/*
* Option: EnableDbgBreakpoint
*
* When this option is set to a non-zero value, and the kernel is configured
* appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
* INT3 on x86_64), assumed to be caught by an attached debugger.
*
* When this option is set to the value zero (the default), assertions within
* resman will print to the system log, but no CPU breakpoint will be triggered.
*/
#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
/*
* Option: OpenRmEnableUnsupportedGpus
*
* Open nvidia.ko support for features beyond what is used on Data Center
* GPUs is still fairly immature, so for now users must opt in to the use
* of open nvidia.ko with a special registry key if not on a Data Center GPU.
*/
#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS)
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
*---------registry key parameter declarations--------------
*/
NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
NV_DEFINE_REG_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS, 1);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
/*
*----------------registry database definition----------------------
*/
/*
* You can enable any of the registry options disabled by default by
* editing their respective entries in the table below. The last field
* determines if the option is considered valid - in order for the
* changes to take effect, you need to recompile and reload the NVIDIA
* kernel module.
*/
nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY,
__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS),
{NULL, NULL}
};
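/*
 * Illustrative lookup sketch (hypothetical helper; the nv_parm_t member
 * names "name" and "data" are assumptions, and the loop relies on the
 * {NULL, NULL} sentinel terminating the table above):
 *
 *     static NvU32 *nv_find_parm(const char *name)
 *     {
 *         nv_parm_t *entry;
 *         for (entry = nv_parms; entry->name != NULL; entry++)
 *         {
 *             if (strcmp(entry->name, name) == 0)
 *                 return entry->data;
 *         }
 *         return NULL;
 *     }
 */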
#elif defined(NVRM)
extern nv_parm_t nv_parms[];
#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */
#endif /* _RM_REG_H_ */

View File

@@ -0,0 +1,49 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
#define _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
#include <nvos.h>
/*
* This is a wrapper for NVOS02_PARAMETERS with file descriptor
*/
typedef struct
{
NVOS02_PARAMETERS params;
int fd;
} nv_ioctl_nvos02_parameters_with_fd;
/*
* This is a wrapper for NVOS33_PARAMETERS with file descriptor
*/
typedef struct
{
NVOS33_PARAMETERS params;
int fd;
} nv_ioctl_nvos33_parameters_with_fd;
#endif // _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_

File diff suppressed because it is too large

View File

@@ -0,0 +1,54 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_ESCAPE_H_INCLUDED
#define NV_ESCAPE_H_INCLUDED
#define NV_ESC_RM_ALLOC_MEMORY 0x27
#define NV_ESC_RM_ALLOC_OBJECT 0x28
#define NV_ESC_RM_FREE 0x29
#define NV_ESC_RM_CONTROL 0x2A
#define NV_ESC_RM_ALLOC 0x2B
#define NV_ESC_RM_CONFIG_GET 0x32
#define NV_ESC_RM_CONFIG_SET 0x33
#define NV_ESC_RM_DUP_OBJECT 0x34
#define NV_ESC_RM_SHARE 0x35
#define NV_ESC_RM_CONFIG_GET_EX 0x37
#define NV_ESC_RM_CONFIG_SET_EX 0x38
#define NV_ESC_RM_I2C_ACCESS 0x39
#define NV_ESC_RM_IDLE_CHANNELS 0x41
#define NV_ESC_RM_VID_HEAP_CONTROL 0x4A
#define NV_ESC_RM_ACCESS_REGISTRY 0x4D
#define NV_ESC_RM_MAP_MEMORY 0x4E
#define NV_ESC_RM_UNMAP_MEMORY 0x4F
#define NV_ESC_RM_GET_EVENT_DATA 0x52
#define NV_ESC_RM_ALLOC_CONTEXT_DMA2 0x54
#define NV_ESC_RM_ADD_VBLANK_CALLBACK 0x56
#define NV_ESC_RM_MAP_MEMORY_DMA 0x57
#define NV_ESC_RM_UNMAP_MEMORY_DMA 0x58
#define NV_ESC_RM_BIND_CONTEXT_DMA 0x59
#define NV_ESC_RM_EXPORT_OBJECT_TO_FD 0x5C
#define NV_ESC_RM_IMPORT_OBJECT_FROM_FD 0x5D
#define NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 0x5E
#endif // NV_ESCAPE_H_INCLUDED

View File

@@ -0,0 +1,234 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* OS interface definitions needed by os-interface.c
*/
#ifndef OS_INTERFACE_H
#define OS_INTERFACE_H
/******************* Operating System Interface Routines *******************\
* *
* Operating system wrapper functions used to abstract the OS. *
* *
\***************************************************************************/
#include <nvtypes.h>
#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <nv-caps.h>
typedef struct
{
NvU32 os_major_version;
NvU32 os_minor_version;
NvU32 os_build_number;
const char * os_build_version_str;
const char * os_build_date_plus_str;
} os_version_info;
/* Each OS defines its own version of this opaque type */
struct os_work_queue;
/* Each OS defines its own version of this opaque type */
typedef struct os_wait_queue os_wait_queue;
/*
* ---------------------------------------------------------------------------
*
* Function prototypes for OS interface.
*
* ---------------------------------------------------------------------------
*/
NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_current_tick (void);
NvU64 NV_API_CALL os_get_current_tick_hr (void);
NvU64 NV_API_CALL os_get_tick_resolution (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_allow_priority_override (void);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvU32 os_sev_status;
extern NvBool os_sev_enabled;
extern NvBool os_dma_buf_enabled;
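/*
 * Illustrative usage sketch for the spinlock interface above (assumption:
 * the NvU64 returned by os_acquire_spinlock carries state that must be
 * passed back to os_release_spinlock):
 *
 *     void *lock = NULL;
 *     if (os_alloc_spinlock(&lock) == NV_OK)
 *     {
 *         NvU64 flags = os_acquire_spinlock(lock);
 *         // ... critical section ...
 *         os_release_spinlock(lock, flags);
 *         os_free_spinlock(lock);
 *     }
 */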
/*
* ---------------------------------------------------------------------------
*
* Debug macros.
*
* ---------------------------------------------------------------------------
*/
#define NV_DBG_INFO 0x0
#define NV_DBG_SETUP 0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_WARNINGS 0x3
#define NV_DBG_ERRORS 0x4
void NV_API_CALL out_string(const char *str);
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__)
#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status)
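/*
 * Hypothetical usage example (the message text is illustrative;
 * NV_PCI_DEV_FMT and NV_PCI_DEV_FMT_ARGS are defined elsewhere in this
 * driver):
 *
 *     NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "adapter initialization failed\n");
 *     NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, status, "RmInitAdapter failed");
 */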
/*
* Fields for os_lock_user_pages flags parameter
*/
#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001
#endif /* OS_INTERFACE_H */

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OS_CUSTOM_H_
#define _OS_CUSTOM_H_
/*!
* @file os_custom.h
* @brief OS module specific definitions for this OS
*/
#include <os-interface.h>
#include <osfuncs.h>
// File modes, added for NVIDIA capabilities.
#define OS_RUSR 00400 // read permission, owner
#define OS_WUSR 00200 // write permission, owner
#define OS_XUSR 00100 // execute/search permission, owner
#define OS_RWXU (OS_RUSR | OS_WUSR | OS_XUSR) // read, write, execute/search, owner
#define OS_RGRP 00040 // read permission, group
#define OS_WGRP 00020 // write permission, group
#define OS_XGRP 00010 // execute/search permission, group
#define OS_RWXG (OS_RGRP | OS_WGRP | OS_XGRP) // read, write, execute/search, group
#define OS_ROTH 00004 // read permission, other
#define OS_WOTH 00002 // write permission, other
#define OS_XOTH 00001 // execute/search permission, other
#define OS_RWXO (OS_ROTH | OS_WOTH | OS_XOTH) // read, write, execute/search, other
#define OS_RUGO (OS_RUSR | OS_RGRP | OS_ROTH)
#define OS_WUGO (OS_WUSR | OS_WGRP | OS_WOTH)
#define OS_XUGO (OS_XUSR | OS_XGRP | OS_XOTH)
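/*
 * Example: a typical "rw-r--r--" (0644) mode composes from these bits as
 *
 *     OS_RUSR | OS_WUSR | OS_RGRP | OS_ROTH   ==  00644
 *
 * or, equivalently, OS_RUGO | OS_WUSR.
 */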
// Trigger for collecting GPU state for later extraction.
NV_STATUS RmLogGpuCrash(OBJGPU *);
// This is a callback function in the miniport.
// The argument is a device extension and must be cast as such to be useful.
typedef void (*MINIPORT_CALLBACK)(void*);
NV_STATUS osPackageRegistry(OBJGPU *pGpu, PACKED_REGISTRY_TABLE *, NvU32 *);
#endif // _OS_CUSTOM_H_

View File

@@ -0,0 +1,192 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OSAPI_H_
#define _OSAPI_H_
#include "core/system.h"
#include "gpu/gpu.h"
#include <os-interface.h> // NV_DBG_ERRORS
#include <rmapi/rmapi.h>
#include <core/thread_state.h>
#if defined(__use_altstack__)
#if defined(QA_BUILD)
//---------------------------------------------------------------------------
//
// 32 bit debug marker values.
//
//---------------------------------------------------------------------------
#define NV_MARKER1 (NvU32)(('M' << 24) | ('R' << 16) | ('V' << 8) | 'N')
#define NV_MARKER2 (NvU32)(('N' << 24) | ('V' << 16) | ('R' << 8) | 'M')
//
// The two macros below implement a simple alternate stack usage sanity
// check for QA_BUILD RM builds. NV_ALTSTACK_WRITE_MARKERS() fills
// altstacks with NV_MARKER1, which enables NV_ALTSTACK_CHECK_MARKERS()
// to determine the stack usage fairly reliably by looking for the
// first clobbered marker. If more than 7/8 of the alternate stack were
// used, NV_ALTSTACK_CHECK_MARKERS() prints an error and asserts.
//
#define NV_ALTSTACK_WRITE_MARKERS(sp) \
{ \
NvU32 i, *stack = (void *)(sp)->stack; \
for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \
stack[i] = NV_MARKER1; \
}
#define NV_ALTSTACK_CHECK_MARKERS(sp) \
{ \
NvU32 i, *stack = (void *)(sp)->stack; \
for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \
{ \
if (stack[i] != NV_MARKER1) \
break; \
} \
if ((i * sizeof(NvU32)) < ((sp)->size / 8)) \
{ \
nv_printf(NV_DBG_ERRORS, "NVRM: altstack: used %d of %d bytes!\n", \
((sp)->size - (i * sizeof(NvU32))), (sp)->size); \
NV_ASSERT_PRECOMP((i * sizeof(NvU32)) >= ((sp)->size / 8)); \
} \
}
#else
#define NV_ALTSTACK_WRITE_MARKERS(sp)
#define NV_ALTSTACK_CHECK_MARKERS(sp)
#endif
#if defined(NVCPU_X86_64)
#define NV_ENTER_RM_RUNTIME(sp,fp) \
{ \
NV_ALTSTACK_WRITE_MARKERS(sp); \
__asm__ __volatile__ ("movq %%rbp,%0" : "=r" (fp)); /* save %rbp */ \
__asm__ __volatile__ ("movq %0,%%rbp" :: "r" ((sp)->top)); \
}
#define NV_EXIT_RM_RUNTIME(sp,fp) \
{ \
register void *__rbp __asm__ ("rbp"); \
if (__rbp != (sp)->top) \
{ \
nv_printf(NV_DBG_ERRORS, "NVRM: detected corrupted runtime stack!\n"); \
NV_ASSERT_PRECOMP(__rbp == (sp)->top); \
} \
NV_ALTSTACK_CHECK_MARKERS(sp); \
__asm__ __volatile__ ("movq %0,%%rbp" :: "r" (fp)); /* restore %rbp */ \
}
#else
#error "gcc \"altstacks\" support is not implemented on this platform!"
#endif
#else
#define NV_ENTER_RM_RUNTIME(sp,fp) { (void)sp; (void)fp; }
#define NV_EXIT_RM_RUNTIME(sp,fp)
#endif
void RmShutdownRm (void);
NvBool RmInitPrivateState (nv_state_t *);
void RmFreePrivateState (nv_state_t *);
NvBool RmInitAdapter (nv_state_t *);
NvBool RmPartiallyInitAdapter (nv_state_t *);
void RmShutdownAdapter (nv_state_t *);
void RmDisableAdapter (nv_state_t *);
void RmPartiallyDisableAdapter(nv_state_t *);
NV_STATUS RmGetAdapterStatus (nv_state_t *, NvU32 *);
NV_STATUS RmExcludeAdapter (nv_state_t *);
NvBool RmGpuHasIOSpaceEnabled (nv_state_t *);
void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *);
NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
NV_STATUS RmAllocOsEvent (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS RmFreeOsEvent (NvHandle, NvU32);
void RmI2cAddGpuPorts(nv_state_t *);
NV_STATUS RmInitX86EmuState(OBJGPU *);
void RmFreeX86EmuState(OBJGPU *);
NV_STATUS RmSystemEvent(nv_state_t *, NvU32, NvU32);
const NvU8 *RmGetGpuUuidRaw(nv_state_t *);
NV_STATUS nv_vbios_call(OBJGPU *, NvU32 *, NvU32 *);
int amd_adv_spec_cache_feature(OBJOS *);
int amd_msr_c0011022_incompatible(OBJOS *);
NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *);
NV_STATUS rm_alloc_os_event (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS rm_free_os_event (NvHandle, NvU32);
NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *);
void rm_client_free_os_events (NvHandle);
NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32);
NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *);
NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *);
// registry management
NV_STATUS RmInitRegistry (void);
NV_STATUS RmDestroyRegistry (nv_state_t *);
NV_STATUS RmWriteRegistryDword (nv_state_t *, const char *, NvU32 );
NV_STATUS RmReadRegistryDword (nv_state_t *, const char *, NvU32 *);
NV_STATUS RmWriteRegistryString (nv_state_t *, const char *, const char *, NvU32);
NV_STATUS RmReadRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32 *);
NV_STATUS RmWriteRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32);
NV_STATUS RmReadRegistryString (nv_state_t *, const char *, NvU8 *, NvU32 *);
NV_STATUS RmPackageRegistry (nv_state_t *, PACKED_REGISTRY_TABLE *, NvU32 *);
NvBool RmIsNvifFunctionSupported(NvU32, NvU32);
void RmInitAcpiMethods (OBJOS *, OBJSYS *, OBJGPU *);
void RmUnInitAcpiMethods (OBJSYS *);
void RmInflateOsToRmPageArray (RmPhysAddr *, NvU64);
void RmDeflateRmToOsPageArray (RmPhysAddr *, NvU64);
void RmInitS0ixPowerManagement (nv_state_t *);
void RmInitDeferredDynamicPowerManagement (nv_state_t *);
void RmDestroyDeferredDynamicPowerManagement(nv_state_t *);
NV_STATUS os_ref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);
void os_unref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);
void RmHandleDisplayChange (nvidia_stack_t *, nv_state_t *);
void RmUpdateGc6ConsoleRefCount (nv_state_t *, NvBool);
NvBool rm_get_uefi_console_status (nv_state_t *);
NvU64 rm_get_uefi_console_size (nv_state_t *, NvU64 *);
RM_API *RmUnixRmApiPrologue (nv_state_t *, THREAD_STATE_NODE *, NvU32 module);
void RmUnixRmApiEpilogue (nv_state_t *, THREAD_STATE_NODE *);
static inline NvBool rm_is_system_notebook(void)
{
return (nv_is_chassis_notebook() || nv_acpi_is_battery_present());
}
#endif // _OSAPI_H_

View File

@@ -0,0 +1,55 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef OSFUNCS_H
#define OSFUNCS_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Declarations for the Operating System Specific Functions. *
* *
\***************************************************************************/
#include <os/os.h>
OSQueueWorkItem osQueueWorkItem;
OSQueueWorkItemWithFlags osQueueWorkItemWithFlags;
OSQueueSystemWorkItem osQueueSystemWorkItem;
OSDbgBreakpointEnabled osDbgBreakpointEnabled;
void* osGetStereoDongleInterface(void);
OSCallACPI_DSM osCallACPI_DSM;
OSCallACPI_DDC osCallACPI_DDC;
OSCallACPI_NVHG_ROM osCallACPI_NVHG_ROM;
OSCallACPI_DOD osCallACPI_DOD;
OSCallACPI_MXDS osCallACPI_MXDS;
OSCallACPI_MXDM osCallACPI_MXDM;
#if defined(NVCPU_X86_64)
OSnv_rdcr4 nv_rdcr4;
NvU64 nv_rdcr3(OBJOS *);
OSnv_cpuid nv_cpuid;
#endif
#endif // OSFUNCS_H

View File

@@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMOBJEXPORTIMPORT_H_
#define _RMOBJEXPORTIMPORT_H_
#include "nvstatus.h"
typedef NvHandle RmObjExportHandle;
NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance);
void RmFreeObjExportHandle(RmObjExportHandle hObject);
NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
NvHandle *phDstObject, RmObjExportHandle hSrcObject,
NvU8 *pObjectType);
NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance);
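/*
 * Illustrative sketch (not part of the API above): a hypothetical export/
 * import round trip between two clients. The handle names are made up, and
 * the assumption that the export handle is released once the import has been
 * attempted may not match every caller's lifetime rules.
 */
static inline NV_STATUS rmObjExportImportExample(
NvHandle hSrcClient, NvHandle hSrcObject,
NvHandle hDstClient, NvHandle hDstParent)
{
RmObjExportHandle hExport;
NvU32 deviceInstance;
NvHandle hDup = 0;
NvU8 objectType;
NV_STATUS status;
// Publish the object under a client-independent export handle.
status = RmExportObject(hSrcClient, hSrcObject, &hExport, &deviceInstance);
if (status != NV_OK)
return status;
// Duplicate it into the destination client under a new handle.
status = RmImportObject(hDstClient, hDstParent, &hDup, hExport, &objectType);
// Release the export handle now that the import has been attempted.
RmFreeObjExportHandle(hExport);
return status;
}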
#endif // _RMOBJEXPORTIMPORT_H_

View File

@@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <os/os.h>
/* inline assembler routines for UNIX platforms */
#if defined(NVCPU_X86_64)
NvS32 nv_cpuid(
OBJOS *pOS,
NvS32 op,
NvS32 subop,
NvU32 *eax,
NvU32 *ebx,
NvU32 *ecx,
NvU32 *edx
)
{
asm volatile (" cpuid \n"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "a" (op),
"c" (subop)
: "cc");
return 1;
}
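/*
 * Illustrative sketch (assumes portMemCopy from the RM port layer and
 * NV_PRINTF are visible here): read the CPU vendor string via CPUID leaf 0.
 * The vendor bytes are returned in EBX, EDX, ECX order.
 */
static void nv_cpuid_vendor_example(OBJOS *pOS)
{
NvU32 eax, ebx, ecx, edx;
char vendor[13];
nv_cpuid(pOS, 0, 0, &eax, &ebx, &ecx, &edx);
portMemCopy(&vendor[0], 4, &ebx, 4);
portMemCopy(&vendor[4], 4, &edx, 4);
portMemCopy(&vendor[8], 4, &ecx, 4);
vendor[12] = '\0';
NV_PRINTF(LEVEL_INFO, "CPU vendor: %s\n", vendor);
}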
#endif

View File

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <os/os.h>
/* inline assembler routines for UNIX platforms */
#if defined(NVCPU_X86_64)
NvU32 nv_rdcr4(OBJOS *pOS)
{
NvU64 val;
asm volatile ("movq %%cr4,%0" : "=r" (val));
return (NvU32)val;
}
NvU64 nv_rdcr3(OBJOS *pOS)
{
NvU64 val;
asm volatile ("movq %%cr3,%0" : "=r" (val));
return val;
}
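/*
 * Illustrative sketch (architectural bit position, not a driver API): CR4
 * bit 18 is OSXSAVE on x86-64, so OS-enabled XSAVE support can be tested
 * directly from the value nv_rdcr4() returns.
 */
static NvBool nv_osxsave_enabled_example(OBJOS *pOS)
{
return ((nv_rdcr4(pOS) >> 18) & 0x1) ? NV_TRUE : NV_FALSE;
}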
#endif

View File

@@ -0,0 +1,820 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//***************************** Module Header **********************************
//
// This code is linked into the resource manager proper. It receives each
// ioctl from the resource manager's clients, unbundles the arguments, and
// calls the appropriate resman routines.
//
//******************************************************************************
#include <core/prelude.h>
#include <core/locks.h>
#include <nv.h>
#include <nv_escape.h>
#include <osapi.h>
#include <rmapi/exports.h>
#include <nv-unix-nvos-params-wrappers.h>
#include <nvos.h>
#include <class/cl0000.h> // NV01_ROOT
#include <class/cl0001.h> // NV01_ROOT_NON_PRIV
#include <class/cl0005.h> // NV01_EVENT
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
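//
// Guard macros: the first restricts an escape to the control device, the
// second to an actual GPU device; both fail the escape with
// NV_ERR_INVALID_ARGUMENT otherwise.
//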
#define NV_CTL_DEVICE_ONLY(nv) \
{ \
if (((nv)->flags & NV_FLAG_CONTROL) == 0) \
{ \
rmStatus = NV_ERR_INVALID_ARGUMENT; \
goto done; \
} \
}
#define NV_ACTUAL_DEVICE_ONLY(nv) \
{ \
if (((nv)->flags & NV_FLAG_CONTROL) != 0) \
{ \
rmStatus = NV_ERR_INVALID_ARGUMENT; \
goto done; \
} \
}
// only return errors through pApi->status
static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
{
NV_STATUS rmStatus;
NvBool writable;
NvU32 flags = 0;
NvU64 allocSize, pageCount, *pPteArray = NULL;
void *pDescriptor, *pPageArray = NULL;
pDescriptor = NvP64_VALUE(pApi->data.AllocOsDesc.descriptor);
if (((NvUPtr)pDescriptor & ~os_page_mask) != 0)
{
rmStatus = NV_ERR_NOT_SUPPORTED;
goto done;
}
// Check to prevent an NvU64 overflow
if ((pApi->data.AllocOsDesc.limit + 1) == 0)
{
rmStatus = NV_ERR_INVALID_LIMIT;
goto done;
}
allocSize = (pApi->data.AllocOsDesc.limit + 1);
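// pageCount is ceil(allocSize / os_page_size): the number of OS pages spanned.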
pageCount = (1 + ((allocSize - 1) / os_page_size));
writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2);
flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags);
rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags);
if (rmStatus == NV_OK)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
}
else if (rmStatus == NV_ERR_INVALID_ADDRESS)
{
rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount,
&pPteArray, &pPageArray);
if (rmStatus == NV_OK)
{
if (pPageArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
}
else if (pPteArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY;
}
else
{
NV_ASSERT_FAILED("unknown memory import type");
rmStatus = NV_ERR_NOT_SUPPORTED;
}
}
}
if (rmStatus != NV_OK)
goto done;
Nv04VidHeapControlWithSecInfo(pApi, secInfo);
if (pApi->status != NV_OK)
{
switch (pApi->data.AllocOsDesc.descriptorType)
{
default:
break;
case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY:
os_unlock_user_pages(pageCount, pPageArray);
break;
}
}
done:
if (rmStatus != NV_OK)
pApi->status = rmStatus;
}
// only return errors through pApi->status
static void RmAllocOsDescriptor(NVOS02_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
{
NV_STATUS rmStatus = NV_OK;
NvU32 flags, attr, attr2;
NVOS32_PARAMETERS *pVidHeapParams;
if (!FLD_TEST_DRF(OS02, _FLAGS, _LOCATION, _PCI, pApi->flags) ||
!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, pApi->flags))
{
rmStatus = NV_ERR_INVALID_FLAGS;
goto done;
}
attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI);
if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, pApi->flags) ||
FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, pApi->flags))
{
attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr);
}
else if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, pApi->flags))
attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr);
else {
rmStatus = NV_ERR_INVALID_FLAGS;
goto done;
}
if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pApi->flags))
attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr);
else
attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr);
if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, pApi->flags))
attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES);
else
attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO);
pVidHeapParams = portMemAllocNonPaged(sizeof(NVOS32_PARAMETERS));
if (pVidHeapParams == NULL)
{
rmStatus = NV_ERR_NO_MEMORY;
goto done;
}
portMemSet(pVidHeapParams, 0, sizeof(NVOS32_PARAMETERS));
pVidHeapParams->hRoot = pApi->hRoot;
pVidHeapParams->hObjectParent = pApi->hObjectParent;
pVidHeapParams->function = NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR;
flags = (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED |
NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED);
if (DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags))
attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2);
// currently CPU-RO memory implies GPU-RO as well
if (DRF_VAL(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, pApi->flags) ||
DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags))
attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2);
pVidHeapParams->data.AllocOsDesc.hMemory = pApi->hObjectNew;
pVidHeapParams->data.AllocOsDesc.flags = flags;
pVidHeapParams->data.AllocOsDesc.attr = attr;
pVidHeapParams->data.AllocOsDesc.attr2 = attr2;
pVidHeapParams->data.AllocOsDesc.descriptor = pApi->pMemory;
pVidHeapParams->data.AllocOsDesc.limit = pApi->limit;
pVidHeapParams->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS;
RmCreateOsDescriptor(pVidHeapParams, secInfo);
pApi->status = pVidHeapParams->status;
portMemFree(pVidHeapParams);
done:
if (rmStatus != NV_OK)
pApi->status = rmStatus;
}
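//
// NVOS64_PARAMETERS must lay out its leading fields identically to
// NVOS21_PARAMETERS so that NV_ESC_RM_ALLOC can inspect fields such as
// hClass before it knows which of the two variants userspace passed in.
//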
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hRoot) == NV_OFFSETOF(NVOS64_PARAMETERS, hRoot));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectParent) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectParent));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectNew) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectNew));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hClass) == NV_OFFSETOF(NVOS64_PARAMETERS, hClass));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, pAllocParms) == NV_OFFSETOF(NVOS64_PARAMETERS, pAllocParms));
NV_STATUS RmIoctl(
nv_state_t *nv,
nv_file_private_t *nvfp,
NvU32 cmd,
void *data,
NvU32 dataSize
)
{
NV_STATUS rmStatus = NV_ERR_GENERIC;
API_SECURITY_INFO secInfo = { };
secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER;
secInfo.paramLocation = PARAM_LOCATION_USER;
secInfo.pProcessToken = NULL;
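//
// If a control fd was registered against this fd (see NV_ESC_REGISTER_FD
// below), use its private data as the client identity; otherwise this fd
// identifies the client itself.
//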
secInfo.clientOSInfo = nvfp->ctl_nvfp;
if (secInfo.clientOSInfo == NULL)
secInfo.clientOSInfo = nvfp;
switch (cmd)
{
case NV_ESC_RM_ALLOC_MEMORY:
{
nv_ioctl_nvos02_parameters_with_fd *pApi;
NVOS02_PARAMETERS *pParms;
pApi = data;
pParms = &pApi->params;
NV_ACTUAL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_nvos02_parameters_with_fd))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (pParms->hClass == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR)
RmAllocOsDescriptor(pParms, secInfo);
else
{
NvU32 flags = pParms->flags;
Nv01AllocMemoryWithSecInfo(pParms, secInfo);
//
// If the system memory is going to be mapped immediately,
// create the mmap context for it now.
//
if ((pParms->hClass == NV01_MEMORY_SYSTEM) &&
(!FLD_TEST_DRF(OS02, _FLAGS, _ALLOC, _NONE, flags)) &&
(!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) &&
(pParms->status == NV_OK))
{
if (rm_create_mmap_context(nv, pParms->hRoot,
pParms->hObjectParent, pParms->hObjectNew,
pParms->pMemory, pParms->limit + 1, 0,
pApi->fd) != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
"could not create mmap context for %p\n",
NvP64_VALUE(pParms->pMemory));
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
}
}
break;
}
case NV_ESC_RM_ALLOC_OBJECT:
{
NVOS05_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS05_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv01AllocObjectWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_ALLOC:
{
NVOS21_PARAMETERS *pApi = data;
NVOS64_PARAMETERS *pApiAccess = data;
NvBool bAccessApi = (dataSize == sizeof(NVOS64_PARAMETERS));
if ((dataSize != sizeof(NVOS21_PARAMETERS)) &&
(dataSize != sizeof(NVOS64_PARAMETERS)))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
switch (pApi->hClass)
{
case NV01_ROOT:
case NV01_ROOT_CLIENT:
case NV01_ROOT_NON_PRIV:
{
NV_CTL_DEVICE_ONLY(nv);
// Force userspace client allocations to be the _CLIENT class.
pApi->hClass = NV01_ROOT_CLIENT;
break;
}
case NV01_EVENT:
case NV01_EVENT_OS_EVENT:
case NV01_EVENT_KERNEL_CALLBACK:
case NV01_EVENT_KERNEL_CALLBACK_EX:
{
break;
}
default:
{
NV_CTL_DEVICE_ONLY(nv);
break;
}
}
if (!bAccessApi)
{
Nv04AllocWithSecInfo(pApi, secInfo);
}
else
{
Nv04AllocWithAccessSecInfo(pApiAccess, secInfo);
}
break;
}
case NV_ESC_RM_FREE:
{
NVOS00_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS00_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv01FreeWithSecInfo(pApi, secInfo);
if (pApi->status == NV_OK &&
pApi->hObjectOld == pApi->hRoot)
{
rm_client_free_os_events(pApi->hRoot);
}
break;
}
case NV_ESC_RM_VID_HEAP_CONTROL:
{
NVOS32_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS32_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (pApi->function == NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR)
RmCreateOsDescriptor(pApi, secInfo);
else
Nv04VidHeapControlWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_I2C_ACCESS:
{
NVOS_I2C_ACCESS_PARAMS *pApi = data;
NV_ACTUAL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS_I2C_ACCESS_PARAMS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04I2CAccessWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_IDLE_CHANNELS:
{
NVOS30_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS30_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04IdleChannelsWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_MAP_MEMORY:
{
nv_ioctl_nvos33_parameters_with_fd *pApi;
NVOS33_PARAMETERS *pParms;
pApi = data;
pParms = &pApi->params;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_nvos33_parameters_with_fd))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04MapMemoryWithSecInfo(pParms, secInfo);
if (pParms->status == NV_OK)
{
pParms->status = rm_create_mmap_context(nv, pParms->hClient,
pParms->hDevice, pParms->hMemory,
pParms->pLinearAddress, pParms->length,
pParms->offset, pApi->fd);
if (pParms->status != NV_OK)
{
NVOS34_PARAMETERS params;
portMemSet(&params, 0, sizeof(NVOS34_PARAMETERS));
params.hClient = pParms->hClient;
params.hDevice = pParms->hDevice;
params.hMemory = pParms->hMemory;
params.pLinearAddress = pParms->pLinearAddress;
params.flags = pParms->flags;
Nv04UnmapMemoryWithSecInfo(&params, secInfo);
}
}
break;
}
case NV_ESC_RM_UNMAP_MEMORY:
{
NVOS34_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS34_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04UnmapMemoryWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_ACCESS_REGISTRY:
{
NVOS38_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS38_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->status = rm_access_registry(pApi->hClient,
pApi->hObject,
pApi->AccessType,
pApi->pDevNode,
pApi->DevNodeLength,
pApi->pParmStr,
pApi->ParmStrLength,
pApi->pBinaryData,
&pApi->BinaryDataLength,
&pApi->Data,
&pApi->Entry);
break;
}
case NV_ESC_RM_ALLOC_CONTEXT_DMA2:
{
NVOS39_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS39_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04AllocContextDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_BIND_CONTEXT_DMA:
{
NVOS49_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS49_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04BindContextDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_MAP_MEMORY_DMA:
{
NVOS46_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS46_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04MapMemoryDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_UNMAP_MEMORY_DMA:
{
NVOS47_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS47_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04UnmapMemoryDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_DUP_OBJECT:
{
NVOS55_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS55_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04DupObjectWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_SHARE:
{
NVOS57_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS57_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04ShareWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_ALLOC_OS_EVENT:
{
nv_ioctl_alloc_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_alloc_os_event(pApi->hClient,
nvfp,
pApi->fd);
break;
}
case NV_ESC_FREE_OS_EVENT:
{
nv_ioctl_free_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_free_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd);
break;
}
case NV_ESC_RM_GET_EVENT_DATA:
{
NVOS41_PARAMETERS *pApi = data;
if (dataSize != sizeof(NVOS41_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->status = rm_get_event_data(nvfp,
pApi->pEvent,
&pApi->MoreEvents);
break;
}
case NV_ESC_STATUS_CODE:
{
nv_state_t *pNv;
nv_ioctl_status_code_t *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_status_code_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pNv = nv_get_adapter_state(pApi->domain, pApi->bus, pApi->slot);
if (pNv == NULL)
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
rmStatus = rm_get_adapter_status(pNv, &pApi->status);
if (rmStatus != NV_OK)
goto done;
break;
}
case NV_ESC_RM_CONTROL:
{
NVOS54_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS54_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04ControlWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO:
{
NVOS56_PARAMETERS *pApi = data;
void *pOldCpuAddress;
void *pNewCpuAddress;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS56_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pOldCpuAddress = NvP64_VALUE(pApi->pOldCpuAddress);
pNewCpuAddress = NvP64_VALUE(pApi->pNewCpuAddress);
pApi->status = rm_update_device_mapping_info(pApi->hClient,
pApi->hDevice,
pApi->hMemory,
pOldCpuAddress,
pNewCpuAddress);
break;
}
case NV_ESC_REGISTER_FD:
{
nv_ioctl_register_fd_t *params = data;
void *priv = NULL;
nv_file_private_t *ctl_nvfp;
if (dataSize != sizeof(nv_ioctl_register_fd_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
if (rmStatus != NV_OK)
goto done;
// If there is already a ctl fd registered on this nvfp, fail.
if (nvfp->ctl_nvfp != NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_STATE;
goto done;
}
//
// Note that this call is valid for both "actual" devices and ctrl
// devices. In particular, NV_ESC_ALLOC_OS_EVENT can be used with
// both types of devices.
// But, the ctl_fd passed in should always correspond to a control FD.
//
ctl_nvfp = nv_get_file_private(params->ctl_fd,
NV_TRUE, /* require ctl fd */
&priv);
if (ctl_nvfp == NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// Disallow self-referential links, and disallow links to FDs that
// themselves have a link.
if ((ctl_nvfp == nvfp) || (ctl_nvfp->ctl_nvfp != NULL))
{
nv_put_file_private(priv);
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
//
// nvfp->ctl_nvfp is read outside the lock, so set it atomically.
// Note that once set, this can never be removed until the fd
// associated with nvfp is closed. We hold on to 'priv' until the
// fd is closed, too, to ensure that the fd associated with
// ctl_nvfp remains valid.
//
portAtomicSetSize(&nvfp->ctl_nvfp, ctl_nvfp);
nvfp->ctl_nvfp_priv = priv;
// UNLOCK: release API lock
rmApiLockRelease();
// NOTE: nv_put_file_private(priv) is not called here. It MUST be
// called during cleanup of this nvfp.
rmStatus = NV_OK;
break;
}
default:
{
NV_PRINTF(LEVEL_ERROR, "unknown NVRM ioctl command: 0x%x\n", cmd);
goto done;
}
}
rmStatus = NV_OK;
done:
return rmStatus;
}

View File

@@ -0,0 +1,256 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <rmconfig.h>
#include <gpu/subdevice/subdevice.h>
#include <ctrl/ctrl0080/ctrl0080unix.h>
#include <ctrl/ctrl2080/ctrl2080unix.h>
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
return NV_OK;
}
void NV_API_CALL rm_init_dynamic_power_management(
nvidia_stack_t *sp,
nv_state_t *nv,
NvBool bPr3AcpiMethodPresent
)
{
}
void NV_API_CALL rm_cleanup_dynamic_power_management(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
}
NV_STATUS NV_API_CALL rm_ref_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
return NV_OK;
}
void NV_API_CALL rm_unref_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
}
NV_STATUS NV_API_CALL rm_transition_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
NvBool bEnter
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_power_management(
nvidia_stack_t *sp,
nv_state_t *pNv,
nv_pm_action_t pmAction
)
{
return NV_OK;
}
const char* NV_API_CALL rm_get_vidmem_power_status(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return "?";
}
const char* NV_API_CALL rm_get_dynamic_power_management_status(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return "?";
}
const char* NV_API_CALL rm_get_gpu_gcx_support(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvBool bGcxTypeGC6
)
{
return "?";
}
NV_STATUS
subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS
subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS
subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams
)
{
return NV_OK;
}
void
RmUpdateGc6ConsoleRefCount
(
nv_state_t *nv,
NvBool bIncrease
)
{
}
void
RmInitS0ixPowerManagement
(
nv_state_t *nv
)
{
}
void
RmInitDeferredDynamicPowerManagement
(
nv_state_t *nv
)
{
}
void
RmDestroyDeferredDynamicPowerManagement
(
nv_state_t *nv
)
{
}
void RmHandleDisplayChange
(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
}
NV_STATUS
os_ref_dynamic_power
(
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
return NV_OK;
}
void
os_unref_dynamic_power
(
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
}
NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *limitRated,
NvU32 *limitCurr
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS
deviceCtrlCmdOsUnixVTSwitch_IMPL
(
Device *pDevice,
NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_save_low_res_mode(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
return NV_OK;
}
NV_STATUS RmInitX86EmuState(OBJGPU *pGpu)
{
return NV_OK;
}
void RmFreeX86EmuState(OBJGPU *pGpu)
{
}

View File

@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvtypes.h>
#include <os-interface.h>
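/*
 * Minimal libc-style shims for code linked into the RM runtime. Note that
 * the underlying os_mem_* helpers take NvU32 lengths, so sizes are assumed
 * to fit in 32 bits here.
 */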
void* memset(void* s, int c, NvUPtr n)
{
return os_mem_set(s, (NvU8)c, (NvU32)n);
}
void* memcpy(void* dest, const void* src, NvUPtr n)
{
return os_mem_copy(dest, src, (NvU32)n);
}

View File

@@ -0,0 +1,150 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvstatus.h"
#include "os/os.h"
#include "nv.h"
#include "nv-hypervisor.h"
HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
{
return OS_HYPERVISOR_UNKNOWN;
}
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU32 *numVgpuTypes,
NvU32 **vgpuTypeIds,
NvBool isVirtfn
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU8 cmd,
NvU32 domain,
NvU8 bus,
NvU8 slot,
NvU8 function,
NvBool isMdevAttached,
void *vf_pci_info
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU32 vgpuTypeId,
char *buffer,
int type_info,
NvU8 devfn
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_create_request(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU32 vgpuTypeId,
NvU16 *vgpuId,
NvU32 gpuPciBdf
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_update_request(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
VGPU_DEVICE_STATE deviceState,
NvU64 *offsets,
NvU64 *sizes,
const char *configParams
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 **offsets,
NvU64 **sizes,
NvU32 *numAreas
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_gpu_bind_event(
nvidia_stack_t *sp
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_start(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
void *waitQueue,
NvS32 *returnStatus,
NvU8 *vmName,
NvU32 qemuPid
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_delete(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
NvU16 vgpuId
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 *size,
NvU32 regionIndex,
void *pVgpuVfioRef
)
{
return NV_ERR_NOT_SUPPORTED;
}
void initVGXSpecificRegistry(OBJGPU *pGpu)
{}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,676 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include "kernel/gpu/nvlink/kernel_nvlink.h"
#include "osapi.h"
#if defined(INCLUDE_NVLINK_LIB)
#include "nvlink.h"
//
// The functions in this file are a workaround for a significant design flaw
// where RM callbacks for the nvlink library are built with the altstack
// enabled, but the nvlink library is not built with altstack support. Whenever
// the library calls a callback, the stack switching needs to be accounted for
// or else we will observe corruption of data structures in the nvlink library
// as data is pushed onto what the callback thinks is the stack. See bug
// 1710300.
//
// This bug has also exposed other problems, such as the complete lack of
// locking awareness by these callbacks (e.g., assumption that the RMAPI and
// GPU locks are always held on entry, which is not a legitimate assumption).
// For now, we ignore that just to unblock testing.
//
extern NvlStatus knvlinkCoreAddLinkCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreRemoveLinkCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreLockLinkCallback(struct nvlink_link *);
extern void knvlinkCoreUnlockLinkCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreQueueLinkChangeCallback(struct nvlink_link_change *);
extern NvlStatus knvlinkCoreSetDlLinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreGetDlLinkModeCallback(struct nvlink_link *, NvU64 *);
extern NvlStatus knvlinkCoreSetTlLinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreGetTlLinkModeCallback(struct nvlink_link *, NvU64 *);
extern NvlStatus knvlinkCoreGetTxSublinkModeCallback(struct nvlink_link *, NvU64 *, NvU32 *);
extern NvlStatus knvlinkCoreSetTxSublinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreGetRxSublinkModeCallback(struct nvlink_link *, NvU64 *, NvU32 *);
extern NvlStatus knvlinkCoreSetRxSublinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreSetRxSublinkDetectCallback(struct nvlink_link *, NvU32);
extern NvlStatus knvlinkCoreGetRxSublinkDetectCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreReadDiscoveryTokenCallback(struct nvlink_link *, NvU64 *);
extern NvlStatus knvlinkCoreWriteDiscoveryTokenCallback(struct nvlink_link *, NvU64);
extern void knvlinkCoreTrainingCompleteCallback(struct nvlink_link *);
extern void knvlinkCoreGetUphyLoadCallback(struct nvlink_link *, NvBool*);
/*!
* @brief Helper to allocate an alternate stack from within core RM.
*
* This needs to be an NV_API_CALL (built to use the original stack instead
* of the altstack) since it is called before we switch to using the altstack.
*/
static NV_STATUS NV_API_CALL osNvlinkAllocAltStack(nvidia_stack_t **pSp)
{
NV_STATUS status = NV_OK;
nvidia_stack_t *sp = NULL;
#if defined(NVCPU_X86_64) && defined(__use_altstack__)
status = os_alloc_mem((void **)&sp, sizeof(nvidia_stack_t));
if (status == NV_OK)
{
sp->size = sizeof(sp->stack);
sp->top = sp->stack + sp->size;
}
#endif
*pSp = sp;
return status;
}
/*!
* @brief Helper to free an alternate stack from within core RM.
*
* This needs to be an NV_API_CALL (built to use the original stack instead
* of the altstack) since it is called after we've switched back to using the
* original stack.
*/
static void NV_API_CALL osNvlinkFreeAltStack(nvidia_stack_t *sp)
{
#if defined(NVCPU_X86_64) && defined(__use_altstack__)
os_free_mem(sp);
#endif
}
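//
// Each callback wrapper below follows the same pattern: switch to the
// per-link altstack with NV_ENTER_RM_RUNTIME, bracket the core RM callback
// with a THREAD_STATE_NODE where one is required, then switch back with
// NV_EXIT_RM_RUNTIME before returning to the nvlink library.
//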
static NvlStatus NV_API_CALL rm_nvlink_ops_add_link
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp;
if (NV_OK != osNvlinkAllocAltStack(&sp))
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
status = knvlinkCoreAddLinkCallback(link);
NV_EXIT_RM_RUNTIME(sp, fp);
if (status == NVL_SUCCESS)
{
pLink->pOsInfo = sp;
}
else
{
osNvlinkFreeAltStack(sp);
}
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_remove_link
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
pLink->pOsInfo = NULL;
NV_ENTER_RM_RUNTIME(sp, fp);
status = knvlinkCoreRemoveLinkCallback(link);
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkFreeAltStack(sp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_lock_link
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreLockLinkCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static void NV_API_CALL rm_nvlink_ops_unlock_link
(
struct nvlink_link *link
)
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
knvlinkCoreUnlockLinkCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_queue_link_change
(
struct nvlink_link_change *link_change
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link_change->master->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreQueueLinkChangeCallback(link_change);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_dl_link_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetDlLinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_dl_link_mode
(
struct nvlink_link *link,
NvU64 *mode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetDlLinkModeCallback(link, mode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_tl_link_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetTlLinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_tl_link_mode
(
struct nvlink_link *link,
NvU64 *mode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetTlLinkModeCallback(link, mode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_tx_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetTxSublinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_tx_mode
(
struct nvlink_link *link,
NvU64 *mode,
NvU32 *subMode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetTxSublinkModeCallback(link, mode, subMode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetRxSublinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_mode
(
struct nvlink_link *link,
NvU64 *mode,
NvU32 *subMode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetRxSublinkModeCallback(link, mode, subMode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_detect
(
struct nvlink_link *link,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetRxSublinkDetectCallback(link, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_detect
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetRxSublinkDetectCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static void NV_API_CALL rm_nvlink_get_uphy_load
(
struct nvlink_link *link,
NvBool *bUnlocked
)
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
knvlinkCoreGetUphyLoadCallback(link, bUnlocked);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_read_link_discovery_token
(
struct nvlink_link *link,
NvU64 *token
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreReadDiscoveryTokenCallback(link, token);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_write_link_discovery_token
(
struct nvlink_link *link,
NvU64 token
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreWriteDiscoveryTokenCallback(link, token);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static void NV_API_CALL rm_nvlink_ops_training_complete
(
struct nvlink_link *link
)
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
knvlinkCoreTrainingCompleteCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
}
#endif /* defined(INCLUDE_NVLINK_LIB) */
const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void)
{
#if defined(INCLUDE_NVLINK_LIB)
static const struct nvlink_link_handlers rm_nvlink_link_ops =
{
.add = rm_nvlink_ops_add_link,
.remove = rm_nvlink_ops_remove_link,
.lock = rm_nvlink_ops_lock_link,
.unlock = rm_nvlink_ops_unlock_link,
.queue_link_change = rm_nvlink_ops_queue_link_change,
.set_dl_link_mode = rm_nvlink_ops_set_dl_link_mode,
.get_dl_link_mode = rm_nvlink_ops_get_dl_link_mode,
.set_tl_link_mode = rm_nvlink_ops_set_tl_link_mode,
.get_tl_link_mode = rm_nvlink_ops_get_tl_link_mode,
.set_tx_mode = rm_nvlink_ops_set_link_tx_mode,
.get_tx_mode = rm_nvlink_ops_get_link_tx_mode,
.set_rx_mode = rm_nvlink_ops_set_link_rx_mode,
.get_rx_mode = rm_nvlink_ops_get_link_rx_mode,
.set_rx_detect = rm_nvlink_ops_set_link_rx_detect,
.get_rx_detect = rm_nvlink_ops_get_link_rx_detect,
.write_discovery_token = rm_nvlink_ops_write_link_discovery_token,
.read_discovery_token = rm_nvlink_ops_read_link_discovery_token,
.training_complete = rm_nvlink_ops_training_complete,
.get_uphy_load = rm_nvlink_get_uphy_load,
};
return &rm_nvlink_link_ops;
#else
return NULL;
#endif
}
/*
* @brief Verification-only function to get the chiplib overrides for the
* link connection state of all NVLinks.
*
* If chiplib overrides exist, each link can either be enabled (1) or disabled (0).
*
* @param[in] pGpu GPU object pointer
* @param[in] maxLinks Size of the pLinkConnection array
* @param[out] pLinkConnection Array of link connection values populated by MODS
*
* @return NV_OK or NV_ERR_NOT_SUPPORTED (no overrides available)
*/
NV_STATUS
osGetForcedNVLinkConnection
(
OBJGPU *pGpu,
NvU32 maxLinks,
NvU32 *pLinkConnection
)
{
int i, ret;
NV_STATUS status;
char path[64];
OBJSYS *pSys;
OBJOS *pOS;
NV_ASSERT_OR_RETURN((pLinkConnection != NULL), NV_ERR_INVALID_POINTER);
NV_ASSERT_OR_RETURN((maxLinks > 0), NV_ERR_NOT_SUPPORTED);
NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT);
pSys = SYS_GET_INSTANCE();
pOS = SYS_GET_OS(pSys);
if (pOS == NULL || pOS->osSimEscapeRead == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s: escape reads not supported on platform\n",
__FUNCTION__);
return NV_ERR_NOT_SUPPORTED;
}
for (i = 0; i < maxLinks; i++)
{
ret = os_snprintf(path, sizeof(path), "CPU_MODEL|CM_ATS_ADDRESS|NVLink%u", i);
NV_ASSERT((ret > 0) && (ret < (sizeof(path) - 1)));
status = pOS->osSimEscapeRead(pGpu, path, 0, 4, &pLinkConnection[i]);
if (status == NV_OK)
{
NV_PRINTF(LEVEL_INFO, "%s: %s=0x%X\n", __FUNCTION__,
path, pLinkConnection[i]);
}
else
{
NV_PRINTF(LEVEL_INFO, "%s: osSimEscapeRead for '%s' failed (%u)\n",
__FUNCTION__, path, status);
return NV_ERR_NOT_SUPPORTED;
}
}
return NV_OK;
}
/*
* @brief Get the platform-suggested NVLink line rate
*
* NVLink uses this function to get the platform-suggested line rate,
* if one is available in the FRU or device tree.
*
* @param[in] pGpu GPU object pointer
* @param[out] lineRate Suggested line rate
*
* @return NV_OK or NV_ERR_NOT_SUPPORTED (platform line rate data not available)
*/
NV_STATUS
osGetPlatformNvlinkLinerate
(
OBJGPU *pGpu,
NvU32 *lineRate
)
{
#if defined(NVCPU_PPC64LE)
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
if (!pKernelNvlink)
return NV_ERR_INVALID_ARGUMENT;
return nv_get_nvlink_line_rate(nv, lineRate);
#else
//TODO : FRU based method to be filled out by Bug 200285656
//*lineRate = 0;
//return NV_OK;
return NV_ERR_NOT_SUPPORTED;
#endif
}
void
osSetNVLinkSysmemLinkState
(
OBJGPU *pGpu,
NvBool enabled
)
{
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
NV_ASSERT(enabled);
if (enabled)
nv_dma_enable_nvlink(nv->dma_dev);
}

View File

@@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/***************************** HW State Routines ***************************\
* *
* Fills in os specific function pointers for the Unix OS object. *
* *
\***************************************************************************/
#include <osfuncs.h>
#include <os/os.h>
static void initOSSpecificFunctionPointers(OBJOS *);
static void initMiscOSFunctionPointers(OBJOS *);
static void initUnixOSFunctionPointers(OBJOS *);
static void initOSSpecificProperties(OBJOS *);
void
osInitObjOS(OBJOS *pOS)
{
initOSSpecificFunctionPointers(pOS);
initOSSpecificProperties(pOS);
}
static void
initOSSpecificFunctionPointers(OBJOS *pOS)
{
initMiscOSFunctionPointers(pOS);
initUnixOSFunctionPointers(pOS);
}
static void
initMiscOSFunctionPointers(OBJOS *pOS)
{
pOS->osQueueWorkItem = osQueueWorkItem;
pOS->osQueueWorkItemWithFlags = osQueueWorkItemWithFlags;
pOS->osQueueSystemWorkItem = osQueueSystemWorkItem;
}
static void
initUnixOSFunctionPointers(OBJOS *pOS)
{
#if defined(NVCPU_X86_64)
pOS->osNv_rdcr4 = nv_rdcr4;
pOS->osNv_cpuid = nv_cpuid;
#endif
pOS->osCallACPI_DSM = osCallACPI_DSM;
pOS->osCallACPI_DDC = osCallACPI_DDC;
pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM;
pOS->osCallACPI_DOD = osCallACPI_DOD;
pOS->osCallACPI_MXDM = osCallACPI_MXDM;
pOS->osCallACPI_MXDS = osCallACPI_MXDS;
pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled;
}
static void
initOSSpecificProperties
(
OBJOS *pOS
)
{
pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE);
pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE);
pOS->setProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET, NV_TRUE);
}

View File

@@ -0,0 +1,705 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <nv-priv.h>
#include <nvos.h>
#if defined(DEBUG_REGISTRY)
#define DBG_REG_PRINTF(a, ...) \
NV_PRINTF(LEVEL_INFO, a, ##__VA_ARGS__)
#else
#define DBG_REG_PRINTF(a, ...)
#endif
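// Case-insensitive ASCII string comparison with a strcmp-style return value.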
static NvS32 stringCaseCompare(
const char *string1,
const char *string2
)
{
NvU8 c1, c2;
do
{
c1 = *string1, c2 = *string2;
if (c1 >= 'A' && c1 <= 'Z')
c1 += ('a' - 'A');
if (c2 >= 'A' && c2 <= 'Z')
c2 += ('a' - 'A');
string1++, string2++;
}
while ((c1 == c2) && (c1 != '\0'));
return (c1 - c2);
}
static nv_reg_entry_t *the_registry = NULL;
static nv_reg_entry_t* regCreateNewRegistryKey(
nv_state_t *nv,
const char *regParmStr
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *new_reg = NULL;
char *new_ParmStr = NULL;
NvU32 parm_size;
if (regParmStr == NULL)
{
DBG_BREAKPOINT();
return NULL;
}
new_reg = portMemAllocNonPaged(sizeof(nv_reg_entry_t));
if (NULL == new_reg)
{
NV_PRINTF(LEVEL_ERROR, "failed to grow registry\n");
return NULL;
}
portMemSet(new_reg, 0, sizeof(nv_reg_entry_t));
if (regParmStr != NULL)
{
parm_size = (portStringLength(regParmStr) + 1);
new_ParmStr = portMemAllocNonPaged(parm_size);
if (NULL == new_ParmStr)
{
NV_PRINTF(LEVEL_ERROR, "failed to allocate registry param string\n");
portMemFree(new_reg);
return NULL;
}
NV_ASSERT(parm_size <= NVOS38_MAX_REGISTRY_STRING_LENGTH);
if (portMemCopy(new_ParmStr, parm_size, regParmStr, parm_size) == NULL)
{
NV_PRINTF(LEVEL_ERROR, "failed to copy registry param string\n");
portMemFree(new_ParmStr);
portMemFree(new_reg);
return NULL;
}
}
new_reg->regParmStr = new_ParmStr;
new_reg->type = NV_REGISTRY_ENTRY_TYPE_UNKNOWN;
if (nvp != NULL)
{
new_reg->next = nvp->pRegistry;
nvp->pRegistry = new_reg;
DBG_REG_PRINTF("local registry now at 0x%p\n", nvp->pRegistry);
}
else
{
new_reg->next = the_registry;
the_registry = new_reg;
DBG_REG_PRINTF("global registry now at 0x%p\n", the_registry);
}
return new_reg;
}
static NV_STATUS regFreeEntry(nv_reg_entry_t *tmp)
{
portMemFree(tmp->regParmStr);
tmp->regParmStr = NULL;
{
portMemFree(tmp->pdata);
tmp->pdata = NULL;
tmp->len = 0;
}
portMemFree(tmp);
return NV_OK;
}
static nv_reg_entry_t* regFindRegistryEntry(
nv_state_t *nv,
const char *regParmStr,
NvU32 type,
NvBool *bGlobalEntry
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *tmp;
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
if (nvp != NULL)
{
tmp = nvp->pRegistry;
DBG_REG_PRINTF(" local registry at 0x%p\n", tmp);
while ((tmp != NULL) && (tmp->regParmStr != NULL))
{
DBG_REG_PRINTF(" Testing against %s\n",
tmp->regParmStr);
if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) &&
(type == tmp->type))
{
DBG_REG_PRINTF(" found a match!\n");
if (bGlobalEntry)
*bGlobalEntry = NV_FALSE;
return tmp;
}
tmp = tmp->next;
}
}
tmp = the_registry;
DBG_REG_PRINTF(" global registry at 0x%p\n", tmp);
while ((tmp != NULL) && (tmp->regParmStr != NULL))
{
DBG_REG_PRINTF(" Testing against %s\n",
tmp->regParmStr);
if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) &&
(type == tmp->type))
{
DBG_REG_PRINTF(" found a match!\n");
if (bGlobalEntry)
*bGlobalEntry = NV_TRUE;
return tmp;
}
tmp = tmp->next;
}
DBG_REG_PRINTF(" no match\n");
return NULL;
}
NV_STATUS RmWriteRegistryDword(
nv_state_t *nv,
const char *regParmStr,
NvU32 Data
)
{
nv_reg_entry_t *tmp;
NvBool bGlobalEntry;
if (regParmStr == NULL)
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s -> 0x%x\n", __FUNCTION__, regParmStr, Data);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_DWORD, &bGlobalEntry);
// Update the entry in place only when the scope of the request matches the
// scope of the entry found: a global request (nv == NULL) matched a global
// entry, or a per-GPU request matched a per-GPU entry.
if (tmp != NULL &&
((nv == NULL && bGlobalEntry) ||
(nv != NULL && !bGlobalEntry)))
{
tmp->data = Data;
if (stringCaseCompare(regParmStr, "ResmanDebugLevel") == 0)
{
os_dbg_set_level(Data);
}
return NV_OK;
}
tmp = regCreateNewRegistryKey(nv, regParmStr);
if (tmp == NULL)
return NV_ERR_GENERIC;
tmp->type = NV_REGISTRY_ENTRY_TYPE_DWORD;
tmp->data = Data;
return NV_OK;
}
NV_STATUS RmReadRegistryDword(
nv_state_t *nv,
const char *regParmStr,
NvU32 *Data
)
{
nv_reg_entry_t *tmp;
if ((regParmStr == NULL) || (Data == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_DWORD, NULL);
if (tmp == NULL)
{
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_BINARY, NULL);
if ((tmp != NULL) && (tmp->len >= sizeof(NvU32)))
{
*Data = *(NvU32 *)tmp->pdata;
}
else
{
DBG_REG_PRINTF(" not found\n");
return NV_ERR_GENERIC;
}
}
else
{
*Data = tmp->data;
}
DBG_REG_PRINTF(" found in the_registry: 0x%x\n", *Data);
return NV_OK;
}
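/*
 * Illustrative sketch (hypothetical key name, not a real registry key):
 * write a per-GPU DWORD and read it back. Passing nv == NULL would target
 * the global registry instead of the per-GPU one.
 */
static void regDwordRoundTripExample(nv_state_t *nv)
{
NvU32 value = 0;
if ((RmWriteRegistryDword(nv, "ExampleKey", 0x1234) == NV_OK) &&
(RmReadRegistryDword(nv, "ExampleKey", &value) == NV_OK))
{
NV_PRINTF(LEVEL_INFO, "ExampleKey = 0x%x\n", value);
}
}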
NV_STATUS RmReadRegistryBinary(
nv_state_t *nv,
const char *regParmStr,
NvU8 *Data,
NvU32 *cbLen
)
{
nv_reg_entry_t *tmp;
NV_STATUS status;
if ((regParmStr == NULL) || (Data == NULL) || (cbLen == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_BINARY, NULL);
if (tmp == NULL)
{
DBG_REG_PRINTF(" not found\n");
return NV_ERR_GENERIC;
}
DBG_REG_PRINTF(" found\n");
if (*cbLen >= tmp->len)
{
portMemCopy((NvU8 *)Data, *cbLen, (NvU8 *)tmp->pdata, tmp->len);
*cbLen = tmp->len;
status = NV_OK;
}
else
{
NV_PRINTF(LEVEL_ERROR,
"buffer (length: %u) is too small (data length: %u)\n",
*cbLen, tmp->len);
status = NV_ERR_GENERIC;
}
return status;
}
NV_STATUS RmWriteRegistryBinary(
nv_state_t *nv,
const char *regParmStr,
NvU8 *Data,
NvU32 cbLen
)
{
nv_reg_entry_t *tmp;
NvBool bGlobalEntry;
if ((regParmStr == NULL) || (Data == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_BINARY, &bGlobalEntry);
// Update in place only if the found entry matches the requested scope: a
// global entry when no GPU was specified (nv == NULL), or a per-GPU entry
// when one was (nv != NULL).
if (tmp != NULL &&
((nv == NULL && bGlobalEntry) ||
(nv != NULL && !bGlobalEntry)))
{
if (tmp->pdata != NULL)
{
portMemFree(tmp->pdata);
tmp->pdata = NULL;
tmp->len = 0;
}
}
else
{
tmp = regCreateNewRegistryKey(nv, regParmStr);
if (tmp == NULL)
{
NV_PRINTF(LEVEL_ERROR, "failed to create binary registry entry\n");
return NV_ERR_GENERIC;
}
}
tmp->pdata = portMemAllocNonPaged(cbLen);
if (NULL == tmp->pdata)
{
NV_PRINTF(LEVEL_ERROR, "failed to write binary registry entry\n");
return NV_ERR_GENERIC;
}
tmp->type = NV_REGISTRY_ENTRY_TYPE_BINARY;
tmp->len = cbLen;
portMemCopy((NvU8 *)tmp->pdata, tmp->len, (NvU8 *)Data, cbLen);
return NV_OK;
}
NV_STATUS RmWriteRegistryString(
nv_state_t *nv,
const char *regParmStr,
const char *buffer,
NvU32 bufferLength
)
{
nv_reg_entry_t *tmp;
NvBool bGlobalEntry;
if ((regParmStr == NULL) || (buffer == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_STRING, &bGlobalEntry);
// Update in place only if the found entry matches the requested scope: a
// global entry when no GPU was specified (nv == NULL), or a per-GPU entry
// when one was (nv != NULL).
if (tmp != NULL &&
((nv == NULL && bGlobalEntry) ||
(nv != NULL && !bGlobalEntry)))
{
if (tmp->pdata != NULL)
{
portMemFree(tmp->pdata);
tmp->len = 0;
tmp->pdata = NULL;
}
}
else
{
tmp = regCreateNewRegistryKey(nv, regParmStr);
if (tmp == NULL)
{
NV_PRINTF(LEVEL_ERROR,
"failed to allocate a string registry entry!\n");
return NV_ERR_INSUFFICIENT_RESOURCES;
}
}
tmp->pdata = portMemAllocNonPaged(bufferLength);
if (tmp->pdata == NULL)
{
NV_PRINTF(LEVEL_ERROR, "failed to write a string registry entry!\n");
return NV_ERR_NO_MEMORY;
}
tmp->type = NV_REGISTRY_ENTRY_TYPE_STRING;
tmp->len = bufferLength;
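//
// bufferLength is assumed to include room for the terminator: copy
// (bufferLength - 1) payload bytes and force NUL-termination below, so
// callers must pass bufferLength >= 1.
//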
portMemCopy((void *)tmp->pdata, tmp->len, buffer, (bufferLength - 1));
tmp->pdata[bufferLength-1] = '\0';
return NV_OK;
}
NV_STATUS RmReadRegistryString(
nv_state_t *nv,
const char *regParmStr,
NvU8 *buffer,
NvU32 *pBufferLength
)
{
NvU32 bufferLength;
nv_reg_entry_t *tmp;
if ((regParmStr == NULL) || (buffer == NULL) || (pBufferLength == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
bufferLength = *pBufferLength;
*pBufferLength = 0;
*buffer = '\0';
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_STRING, NULL);
if (tmp == NULL)
{
return NV_ERR_GENERIC;
}
if (bufferLength >= tmp->len)
{
portMemCopy((void *)buffer, bufferLength, (void *)tmp->pdata, tmp->len);
*pBufferLength = tmp->len;
}
else
{
NV_PRINTF(LEVEL_ERROR,
"buffer (length: %u) is too small (data length: %u)\n",
bufferLength, tmp->len);
return NV_ERR_BUFFER_TOO_SMALL;
}
return NV_OK;
}
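//
// Illustrative round trip through the DWORD registry interfaces above (a
// sketch, not code that ships in this file; "MyTestKey" and the surrounding
// function are hypothetical):
//
// static void exampleRegistryRoundTrip(nv_state_t *nv)
// {
//     NvU32 value = 0;
//
//     // Create or update a per-GPU DWORD entry (pass NULL for a global one).
//     if (RmWriteRegistryDword(nv, "MyTestKey", 0x1234) != NV_OK)
//         return;
//
//     // Read it back; falls back to a BINARY entry if no DWORD entry exists.
//     if (RmReadRegistryDword(nv, "MyTestKey", &value) == NV_OK)
//         NV_PRINTF(LEVEL_INFO, "MyTestKey = 0x%x\n", value);
// }
//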
NV_STATUS RmInitRegistry(void)
{
NV_STATUS rmStatus;
rmStatus = os_registry_init();
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "failed to initialize the OS registry!\n");
}
return rmStatus;
}
NV_STATUS RmDestroyRegistry(nv_state_t *nv)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *tmp;
if (nvp != NULL)
{
tmp = nvp->pRegistry;
nvp->pRegistry = NULL;
}
else
{
tmp = the_registry;
the_registry = NULL;
}
while (tmp != NULL)
{
nv_reg_entry_t *entry = tmp;
tmp = tmp->next;
regFreeEntry(entry);
}
return NV_OK;
}
static void regCountEntriesAndSize(
NvU32 *pNumEntries, // Pointer to number of entries
NvU32 *pSize, // Pointer to total size
nv_reg_entry_t *pRegEntry // Pointer to local or global registry
)
{
//
// Note that *pNumEntries and *pSize are not initialized here. This is so
// we can accumulate totals of both global and local registries.
//
NvU32 numEntries = *pNumEntries;
NvU32 size = *pSize;
while ((pRegEntry != NULL) && (pRegEntry->regParmStr != NULL))
{
size += portStringLength(pRegEntry->regParmStr) + 1 + pRegEntry->len;
numEntries++;
pRegEntry = pRegEntry->next;
}
*pNumEntries = numEntries;
*pSize = size;
}
static NV_STATUS regCopyEntriesToPackedBuffer(
PACKED_REGISTRY_TABLE *pRegTable, // Pointer to packed record
nv_reg_entry_t *pRegEntry, // Pointer to local or global registry
NvU32 *pEntryIndex, // Pointer to next index
NvU32 *pDataOffset // Pointer to offset of next data byte.
)
{
NvU8 *pByte = (NvU8 *)pRegTable; // Byte version of record pointer.
NV_STATUS nvStatus = NV_OK;
NvU32 entryIndex = *pEntryIndex;
NvU32 dataOffset = *pDataOffset;
// Walk the records and copy the data.
while ((pRegEntry != NULL) && (pRegEntry->regParmStr != NULL))
{
PACKED_REGISTRY_ENTRY *pEntry = &pRegTable->entries[entryIndex];
NvU32 slen = portStringLength(pRegEntry->regParmStr) + 1;
// Sanity check the data offset and index against counted totals.
if ((dataOffset + slen + pRegEntry->len > pRegTable->size) ||
(entryIndex >= pRegTable->numEntries))
{
// Something has changed since we counted them?
NV_PRINTF(LEVEL_ERROR, "Registry entry record is full\n");
nvStatus = NV_ERR_INVALID_STATE;
break;
}
// Copy registry entry name to data blob.
pEntry->nameOffset = dataOffset;
portMemCopy(&pByte[dataOffset], slen, pRegEntry->regParmStr, slen);
dataOffset += slen;
switch (pRegEntry->type)
{
case NV_REGISTRY_ENTRY_TYPE_DWORD:
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_DWORD;
pEntry->length = sizeof(NvU32);
pEntry->data = pRegEntry->data;
break;
case NV_REGISTRY_ENTRY_TYPE_BINARY:
case NV_REGISTRY_ENTRY_TYPE_STRING:
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_STRING;
if (pRegEntry->type == NV_REGISTRY_ENTRY_TYPE_BINARY)
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_BINARY;
pEntry->length = pRegEntry->len;
pEntry->data = dataOffset;
portMemCopy(&pByte[dataOffset], pEntry->length,
pRegEntry->pdata, pRegEntry->len);
dataOffset += pRegEntry->len;
break;
default:
// We should never get here.
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN;
pEntry->length = 0;
pEntry->data = 0;
DBG_BREAKPOINT();
break;
}
pRegEntry = pRegEntry->next;
entryIndex++;
}
*pEntryIndex = entryIndex;
*pDataOffset = dataOffset;
return nvStatus;
}
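//
// Resulting layout of a PACKED_REGISTRY_TABLE as assembled below (a sketch
// inferred from the packing code above; field names follow the structures
// used in this file):
//
//     +----------------------------+  offset 0
//     | size, numEntries (header)  |
//     +----------------------------+  NV_OFFSETOF(..., entries)
//     | entries[0..numEntries-1]   |  each entry: type, length,
//     |                            |  nameOffset, data
//     +----------------------------+  first data byte
//     | name strings and BINARY/   |  entry.nameOffset and, for
//     | STRING payloads            |  BINARY/STRING entries,
//     +----------------------------+  entry.data index into this blob
//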
// Package registry entries
NV_STATUS RmPackageRegistry(
nv_state_t *nv,
PACKED_REGISTRY_TABLE *pRegTable,
NvU32 *pSize
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *pLocalRegistry = NULL;
NV_STATUS nvStatus = NV_OK;
NvU32 totalSize;
NvU32 numEntries;
if (pSize == NULL)
return NV_ERR_INVALID_ARGUMENT;
// Use the local (per-device) registry if we have one.
if (nvp != NULL)
pLocalRegistry = nvp->pRegistry;
numEntries = 0;
totalSize = NV_OFFSETOF(PACKED_REGISTRY_TABLE, entries);
// Count the number of global entries and total size.
regCountEntriesAndSize(&numEntries, &totalSize, the_registry);
// Count the number of local entries and total size.
regCountEntriesAndSize(&numEntries, &totalSize, pLocalRegistry);
// Add table record size into total size.
totalSize += sizeof(PACKED_REGISTRY_ENTRY) * numEntries;
//
// If this function was called only to compute the total size of the
// registry table, then we are done here.
//
if (pRegTable == NULL)
{
*pSize = totalSize;
return NV_OK;
}
// Return warning if there are no registry entries.
if (numEntries == 0)
return NV_WARN_NOTHING_TO_DO;
if (totalSize > *pSize)
{
NV_PRINTF(LEVEL_ERROR, "Registry entries overflow RPC record\n");
return NV_ERR_BUFFER_TOO_SMALL;
}
// Fill in our new structure with the first pass (counting) values.
pRegTable->size = totalSize;
*pSize = totalSize;
pRegTable->numEntries = numEntries;
// Offset of first byte after the registry entry table.
totalSize = NV_OFFSETOF(PACKED_REGISTRY_TABLE, entries) +
(sizeof(PACKED_REGISTRY_ENTRY) * numEntries);
// Starting index in the registry entry table.
numEntries = 0;
// Walk the global registry and copy the data.
nvStatus = regCopyEntriesToPackedBuffer(pRegTable,
the_registry, &numEntries, &totalSize);
// Walk the local registry and copy the data.
if (nvStatus == NV_OK)
{
nvStatus = regCopyEntriesToPackedBuffer(pRegTable,
pLocalRegistry, &numEntries, &totalSize);
}
// Sanity check second pass against first pass.
if ((numEntries != pRegTable->numEntries) || (totalSize != pRegTable->size))
{
NV_PRINTF(LEVEL_ERROR, "First/second pass mismatch\n");
nvStatus = NV_ERR_INVALID_STATE;
}
return nvStatus;
}

View File

@@ -0,0 +1,812 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <os/os.h>
#include <osapi.h>
#include <core/thread_state.h>
#include "rmapi/nv_gpu_ops.h"
#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h"
NV_STATUS NV_API_CALL rm_gpu_ops_create_session(
nvidia_stack_t *sp,
struct gpuSession **session)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCreateSession(session);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (
nvidia_stack_t *sp, gpuSessionHandle session)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDestroySession(session);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_device_create (
nvidia_stack_t *sp,
nvgpuSessionHandle_t session,
const gpuInfo *pGpuInfo,
const NvProcessorUuid *gpuUuid,
nvgpuDeviceHandle_t *device,
NvBool bCreateSmcPartition)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDeviceCreate(session, pGpuInfo, gpuUuid, device, bCreateSmcPartition);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (
nvidia_stack_t *sp,
gpuDeviceHandle device)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDeviceDestroy(device);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create (
nvidia_stack_t *sp,
gpuDeviceHandle device,
NvU64 vaBase,
NvU64 vaSize,
gpuAddressSpaceHandle *vaSpace,
gpuAddressSpaceInfo *vaSpaceInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsAddressSpaceCreate(device, vaBase, vaSize, vaSpace,
vaSpaceInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(
nvidia_stack_t *sp,
gpuDeviceHandle device,
NvHandle hUserClient,
NvHandle hUserVASpace,
gpuAddressSpaceHandle *dupedVaspace,
gpuAddressSpaceInfo *vaSpaceInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDupAddressSpace(device, hUserClient, hUserVASpace,
dupedVaspace, vaSpaceInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *sp,
gpuAddressSpaceHandle vaspace)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsAddressSpaceDestroy(vaspace);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsMemoryAllocFb(vaspace, size, gpuOffset, allocInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *sp,
gpuDeviceHandle device1,
gpuDeviceHandle device2,
getP2PCapsParams *pP2pCapsParams)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetP2PCaps(device1, device2, pP2pCapsParams);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_sys(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsMemoryAllocSys(vaspace, size, gpuOffset, allocInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(
nvidia_stack_t *sp,
void *pPma,
pmaEvictPagesCb_t evictPages,
pmaEvictRangeCb_t evictRange,
void *callbackData)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
rmStatus = pmaRegisterEvictionCb(pPma, evictPages, evictRange, callbackData);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(
nvidia_stack_t *sp,
void *pPma)
{
THREAD_STATE_NODE threadState;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
pmaUnregisterEvictionCb(pPma);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(
nvidia_stack_t *sp,
gpuDeviceHandle device,
void **pPma,
const nvgpuPmaStatistics_t *pPmaPubStats)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetPmaObject(device, pPma,
(const UvmPmaStatistics **)pPmaPubStats);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(
nvidia_stack_t *sp, void *pPma,
NvLength pageCount, NvU32 pageSize,
nvgpuPmaAllocationOptions_t pPmaAllocOptions,
NvU64 *pPages)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPmaAllocPages(pPma, pageCount, pageSize,
pPmaAllocOptions, pPages);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(
nvidia_stack_t *sp, void *pPma,
NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPmaPinPages(pPma, pPages, pageCount, pageSize, flags);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(
nvidia_stack_t *sp, void *pPma,
NvU64 *pPages, NvLength pageCount, NvU32 pageSize)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPmaUnpinPages(pPma, pPages, pageCount, pageSize);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
NvU64 gpuOffset, NvLength length, void **cpuPtr, NvU32 pageSize)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsMemoryCpuMap(vaspace, gpuOffset, length, cpuPtr,
pageSize);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, void* cpuPtr)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsMemoryCpuUnMap(vaspace, cpuPtr);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *sp,
gpuAddressSpaceHandle vaspace,
const gpuChannelAllocParams *allocParams,
gpuChannelHandle *channel,
gpuChannelInfo *channelInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsChannelAllocate(vaspace, allocParams, channel,
channelInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t * sp,
nvgpuChannelHandle_t channel)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsChannelDestroy(channel);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *sp,
void *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsPmaFreePages(pPma, pPages, pageCount, pageSize, flags);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_free(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, NvU64 gpuOffset)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsMemoryFree(vaspace, gpuOffset);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuCaps * caps)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsQueryCaps(device, caps);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuCesCaps *caps)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsQueryCesCaps(device, caps);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *sp,
const NvProcessorUuid *pUuid,
const gpuClientInfo *pGpuClientInfo,
gpuInfo *pGpuInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetGpuInfo(pUuid, pGpuClientInfo, pGpuInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *sp,
gpuDeviceHandle device)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsServiceDeviceInterruptsRM(device);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *sp,
gpuAddressSpaceHandle vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsSetPageDirectory(vaSpace, physAddress, numEntries,
bVidMemAperture, pasid);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *sp,
gpuAddressSpaceHandle vaSpace)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsUnsetPageDirectory(vaSpace);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *sp,
gpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
gpuAddressSpaceHandle dstVaSpace,
NvU64 *dstAddress)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDupAllocation(srcVaSpace, srcAddress, dstVaSpace, dstAddress);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *sp,
gpuDeviceHandle device,
NvHandle hClient,
NvHandle hPhysMemory,
NvHandle *hDupMemory,
nvgpuMemoryInfo_t gpuMemoryInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDupMemory(device, hClient, hPhysMemory, hDupMemory, gpuMemoryInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_free_duped_handle (nvidia_stack_t *sp,
gpuDeviceHandle device,
NvHandle hPhysHandle)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsFreeDupedHandle(device, hPhysHandle);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_fb_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuFbInfo * fbInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetFbInfo(device, fbInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_ecc_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuEccInfo * eccInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetEccInfo(device, eccInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
//
// Please see the comments for nvUvmInterfaceOwnPageFaultIntr(), in
// nv_uvm_interface.h, for the recommended way to use this routine.
//
// How it works:
//
// The rmGpuLocksAcquire call generally saves the current GPU interrupt
// state, then disables interrupt generation for one (or all) GPUs.
// Likewise, the rmGpuLocksRelease call restores (re-enables) those
// interrupts to their previous state. However, the rmGpuLocksRelease
// call does NOT restore interrupts that RM does not own.
//
// This is rather hard to find in the code, so: very approximately, the
// following sequence happens: rmGpuLocksRelease, osEnableInterrupts,
// intrRestoreNonStall_HAL, intrEncodeIntrEn_HAL, and that last one skips
// over any interrupts that RM does not own.
//
// This means that things are a bit asymmetric, because this routine
// actually changes that ownership in between the rmGpuLocksAcquire and
// rmGpuLocksRelease calls. So:
//
// -- If you call this routine with bOwnInterrupts == NV_TRUE (UVM is
// taking ownership from the RM), then rmGpuLocksAcquire disables all
// GPU interrupts. Then the ownership is taken away from RM, so the
// rmGpuLocksRelease call leaves the replayable page fault interrupts
// disabled. It is then up to UVM (the caller) to enable replayable
// page fault interrupts when it is ready.
//
// -- If you call this routine with bOwnInterrupts == NV_FALSE (UVM is
// returning ownership to the RM), then rmGpuLocksAcquire disables
// all GPU interrupts that RM owns. Then the ownership is returned to
// RM, so the rmGpuLocksRelease call re-enables replayable page fault
// interrupts. So, that implies that you need to disable replayable page
// fault interrupts before calling this routine, in order to hand
// over a GPU to RM that is not generating interrupts, until RM is
// ready to handle the interrupts.
//
NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *sp,
struct gpuDevice *device,
NvBool bOwnInterrupts)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsOwnPageFaultIntr(device, bOwnInterrupts);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
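//
// A sketch of the intended call sequences, per the comment above (the sp and
// dev handle names are illustrative):
//
//   UVM takes ownership:
//     rm_gpu_ops_own_page_fault_intr(sp, dev, NV_TRUE);
//     // Replayable page fault interrupts are now left disabled;
//     // UVM enables them itself when it is ready to service faults.
//
//   UVM returns ownership:
//     // UVM first disables replayable page fault interrupts itself.
//     rm_gpu_ops_own_page_fault_intr(sp, dev, NV_FALSE);
//     // rmGpuLocksRelease re-enables them on RM's behalf.
//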
NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuFaultInfo *pFaultInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsInitFaultInfo(device, pFaultInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuFaultInfo *pFaultInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDestroyFaultInfo(device, pFaultInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
//
// Functions
//
// - rm_gpu_ops_has_pending_non_replayable_faults
// - rm_gpu_ops_get_non_replayable_faults
//
// cannot take the GPU/RM lock because they are called during fault
// servicing. Doing so could deadlock if the UVM bottom half got stuck
// behind a stalling interrupt that cannot be serviced while UVM holds the
// lock.
//
// They can nevertheless be called safely with no locks held, because they
// only access the given client shadow fault buffer, which is implemented
// as a lock-free queue. There is a separate client shadow fault buffer per
// GPU: the RM top half is the producer, the UVM top/bottom half the
// consumer.
NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *sp,
gpuFaultInfo *pFaultInfo,
NvBool *hasPendingFaults)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsHasPendingNonReplayableFaults(pFaultInfo, hasPendingFaults);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *sp,
gpuFaultInfo *pFaultInfo,
void *faultBuffer,
NvU32 *numFaults)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetNonReplayableFaults(pFaultInfo, faultBuffer, numFaults);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
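//
// Illustrative lock-free polling sequence built from the two calls above
// (faultBuffer sizing and the surrounding control flow are hypothetical):
//
//   NvBool pending;
//   if (rm_gpu_ops_has_pending_non_replayable_faults(sp, pFaultInfo,
//                                                    &pending) == NV_OK &&
//       pending)
//   {
//       NvU32 numFaults = 0;
//       rm_gpu_ops_get_non_replayable_faults(sp, pFaultInfo,
//                                            faultBuffer, &numFaults);
//   }
//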
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsInitAccessCntrInfo(device, accessCntrInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDestroyAccessCntrInfo(device, accessCntrInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo,
gpuAccessCntrConfig *accessCntrConfig)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsEnableAccessCntr(device, accessCntrInfo, accessCntrConfig);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDisableAccessCntr(device, accessCntrInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_p2p_object_create(nvidia_stack_t *sp,
gpuDeviceHandle device1,
gpuDeviceHandle device2,
NvHandle *hP2pObject)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsP2pObjectCreate(device1, device2, hP2pObject);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *sp,
nvgpuSessionHandle_t session,
NvHandle hP2pObject)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
nvGpuOpsP2pObjectDestroy(session, hP2pObject);
NV_EXIT_RM_RUNTIME(sp, fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t* sp,
nvgpuAddressSpaceHandle_t vaSpace,
NvHandle hDupedMemory,
NvU64 offset,
NvU64 size,
nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsGetExternalAllocPtes(vaSpace, hDupedMemory, offset, size,
gpuExternalMappingInfo);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_retain_channel(nvidia_stack_t* sp,
nvgpuAddressSpaceHandle_t vaSpace,
NvHandle hClient,
NvHandle hChannel,
void **retainedChannel,
nvgpuChannelInstanceInfo_t channelInstanceInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsRetainChannel(vaSpace, hClient, hChannel,
(gpuRetainedChannel **)retainedChannel,
channelInstanceInfo);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_bind_channel_resources(nvidia_stack_t* sp,
void *retainedChannel,
nvgpuChannelResourceBindParams_t channelResourceBindParams)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsBindChannelResources(retainedChannel,
channelResourceBindParams);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_release_channel(nvidia_stack_t *sp, void *retainedChannel)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
nvGpuOpsReleaseChannel(retainedChannel);
NV_EXIT_RM_RUNTIME(sp, fp);
}
void NV_API_CALL
rm_gpu_ops_stop_channel(nvidia_stack_t * sp,
void *retainedChannel,
NvBool bImmediate)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsStopChannel(retainedChannel, bImmediate);
NV_EXIT_RM_RUNTIME(sp, fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t* sp,
nvgpuAddressSpaceHandle_t vaSpace,
NvP64 resourceDescriptor,
NvU64 offset,
NvU64 size,
nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsGetChannelResourcePtes(vaSpace, resourceDescriptor,
offset, size,
gpuExternalMappingInfo);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *sp,
nvgpuDeviceHandle_t device,
const void *pFaultPacket)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsReportNonReplayableFault(device, pFaultPacket);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *sp,
gpuDeviceHandle device,
const gpuPagingChannelAllocParams *allocParams,
gpuPagingChannelHandle *channel,
gpuPagingChannelInfo *channelInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPagingChannelAllocate(device, allocParams, channel,
channelInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *sp,
gpuPagingChannelHandle channel)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsPagingChannelDestroy(channel);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channels_map(nvidia_stack_t *sp,
gpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
gpuDeviceHandle device,
NvU64 *dstAddress)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPagingChannelsMap(srcVaSpace, srcAddress, device, dstAddress);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *sp,
gpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
gpuDeviceHandle device)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsPagingChannelsUnmap(srcVaSpace, srcAddress, device);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *sp,
gpuPagingChannelHandle channel,
char *methodStream,
NvU32 methodStreamSize)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPagingChannelPushStream(channel, methodStream, methodStreamSize);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}

View File

@@ -0,0 +1,624 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file
*
* @brief Provides the RmExportObject, RmImportObject, RmFreeObjExportHandle
* and RmGetExportObjectInfo interfaces:
*
* These interfaces allow RM clients to export their objects into a
* unique RmObjExportHandle that another RM client can import, even
* after the source RM client has been destroyed.
*
* An RM device instance may be destroyed asynchronously, in which
* case exported objects residing on that device instance are also
* destroyed. Such objects can no longer be imported, but the
* RmObjExportHandle into which an object had been exported remains
* valid; it is simply never reassigned to another object.
*
* There are no init/fini routines; it is the responsibility of the
* rest of RM's eco-system to make sure that all RmObjExportHandles are
* freed during driver unload.
*
* The API lock is expected to be held before calling into
* rmobjexportimport.c; do not hold the GPU lock or any other lock.
*/
#include "rmobjexportimport.h"
#include "nvlimits.h"
#include "gpu/device/device.h"
#include "containers/map.h"
#include "rmapi/rmapi.h"
#include "rmapi/rs_utils.h"
#include "class/cl0080.h"
#include "class/cl2080.h"
#include <ctrl/ctrl0000/ctrl0000unix.h>
#include <ctrl/ctrl0000/ctrl0000client.h>
//
// A reference to an RmObjExportHandle
// generated by function RmGenerateObjExportHandle().
//
typedef struct
{
NvU32 deviceInstance;
} RmObjExportHandleRef;
MAKE_MAP(RmObjExportHandleMap, RmObjExportHandleRef);
//
// Memory allocator
//
PORT_MEM_ALLOCATOR *pMemAllocator;
//
// Map RmObjExportHandle -> RmObjExportHandleRef
//
RmObjExportHandleMap objExportHandleMap;
//
// RM client used to dup an object exported to an RmObjExportHandle. The
// minimal requirement for duping is to have a device object allocated. This
// RM client is treated like any other external RM client and gets no special
// handling.
//
// Keeping it like an external RM client has a useful consequence: when a
// GPU/device gets powered down or uninitialized, RM objects allocated by
// external RM clients on that GPU/device are freed by the OS layer. The code
// in this file therefore never needs to free exported objects located on
// that GPU/device itself.
//
NvHandle hObjExportRmClient;
//
// Tracker for device and subdevice handles. For now only one subdevice
// (instance 0) is supported per device.
//
typedef struct
{
NvHandle hRmDevice;
NvHandle hRmSubDevice;
} RmObjExportDevice;
RmObjExportDevice objExportDevice[NV_MAX_DEVICES];
//
// Usage reference count for the static objects in this file: the RM client
// used to dup exported objects, the memory allocator, the handle map, etc.
//
NvU64 objExportImportRefCount;
//
// Static functions for internal use by code in this file.
//
static NV_STATUS RmRefObjExportImport (void);
static void RmUnrefObjExportImport (void);
static RmObjExportHandle RmGenerateObjExportHandle (NvU32 deviceInstance);
static NV_STATUS RmUnrefObjExportHandle (RmObjExportHandle hObject);
//
// Free the RmObjExportHandle.
//
static NV_STATUS RmUnrefObjExportHandle(RmObjExportHandle hObject)
{
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
RmObjExportHandleRef *pHandleRef =
mapFind(&objExportHandleMap, hObject);
if (pHandleRef == NULL)
{
return NV_ERR_OBJECT_NOT_FOUND;
}
if (pRmApi->Free(pRmApi,
hObjExportRmClient,
(NvHandle)mapKey(&objExportHandleMap, pHandleRef)) != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
"Exported object trying to free was zombie in %s\n",
__FUNCTION__);
}
mapRemove(&objExportHandleMap, pHandleRef);
return NV_OK;
}
//
// Generate unique RmObjExportHandle.
//
static RmObjExportHandle RmGenerateObjExportHandle(NvU32 deviceInstance)
{
//
// Object export handles lie in the range 0 to (MAX_OBJ_EXPORT_HANDLES - 1).
//
// Handle 0 is treated as the invalid object handle, so this function
// generates handles in the range 1 to (MAX_OBJ_EXPORT_HANDLES - 1).
//
#define MAX_OBJ_EXPORT_HANDLES 0x80000
static NvHandle hObjExportHandleNext = 1;
RmObjExportHandle hStartHandle = hObjExportHandleNext;
RmObjExportHandle hObject = 0;
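//
// Linearly probe for a free handle, wrapping at MAX_OBJ_EXPORT_HANDLES and
// skipping handles already in the map (or colliding with the internal RM
// client handle); give up once a full cycle brings us back to the start.
//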
do
{
RmObjExportHandleRef *pHandleRef;
hObject = hObjExportHandleNext++;
/* Reset hObjExportHandleNext to next valid handle */
if (hObjExportHandleNext == MAX_OBJ_EXPORT_HANDLES) {
hObjExportHandleNext = 1;
}
pHandleRef = mapFind(&objExportHandleMap, hObject);
if (hObject != hObjExportRmClient && pHandleRef == NULL)
{
break;
}
else
{
hObject = 0;
}
} while(hObjExportHandleNext != hStartHandle);
if (hObject != 0)
{
RmObjExportHandleRef *pHandleRef =
mapInsertNew(&objExportHandleMap, hObject);
if (pHandleRef != NULL)
{
pHandleRef->deviceInstance = deviceInstance;
}
else
{
hObject = 0;
}
}
return hObject;
}
//
// Validate that the given hObject is not one of our internally used handles.
//
// Note that mapFind(&objExportHandleMap, hObject) could still fail; checking
// for that is the caller's responsibility.
//
static NvBool RmValidateHandleAgainstInternalHandles(RmObjExportHandle hObject)
{
NvU32 i;
//
// No external RmObjExportHandle could be valid if hObjExportRmClient has
// not been allocated yet, or if it is equal to any of the handles used
// internally by code in this file.
//
if (objExportImportRefCount == 0 || hObjExportRmClient == 0 ||
hObject == hObjExportRmClient)
{
return NV_FALSE;
}
for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++)
{
if (objExportDevice[i].hRmDevice != 0 &&
(hObject == objExportDevice[i].hRmDevice ||
hObject == objExportDevice[i].hRmSubDevice))
{
return NV_FALSE;
}
}
return NV_TRUE;
}
//
// Increment reference count of static objects internally
// used by code in this file.
//
static NV_STATUS RmRefObjExportImport(void)
{
NV_STATUS rmStatus = NV_OK;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
if ((objExportImportRefCount++) != 0)
{
NV_ASSERT(hObjExportRmClient != 0);
NV_ASSERT(pMemAllocator != NULL);
return NV_OK;
}
rmStatus = pRmApi->AllocWithHandle(pRmApi,
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_ROOT,
&hObjExportRmClient);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc root in %s\n", __FUNCTION__);
goto failed;
}
pMemAllocator = portMemAllocatorCreateNonPaged();
if (pMemAllocator == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Failed to alloc memory allocator in %s\n",
__FUNCTION__);
goto failed;
}
mapInit(&objExportHandleMap, pMemAllocator);
return NV_OK;
failed:
RmUnrefObjExportImport();
return rmStatus;
}
//
// Decrement reference count of static objects internally used by code in this
// file, and free them when the reference count reaches zero.
//
static void RmUnrefObjExportImport(void)
{
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
if ((--objExportImportRefCount) != 0)
{
return;
}
if (pMemAllocator != NULL)
{
NvU32 i;
for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++)
{
if (objExportDevice[i].hRmDevice != 0)
{
RmUnrefObjExportHandle(objExportDevice[i].hRmSubDevice);
objExportDevice[i].hRmSubDevice = 0;
RmUnrefObjExportHandle(objExportDevice[i].hRmDevice);
objExportDevice[i].hRmDevice = 0;
}
}
mapDestroy(&objExportHandleMap);
portMemAllocatorRelease(pMemAllocator);
pMemAllocator = NULL;
}
if (hObjExportRmClient != 0)
{
NV_STATUS rmStatus = pRmApi->Free(pRmApi,
hObjExportRmClient,
hObjExportRmClient);
NV_ASSERT(rmStatus == NV_OK);
hObjExportRmClient = 0;
}
}
NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance)
{
RmObjExportHandle hDstObject;
NvU32 deviceInstance = NV_MAX_DEVICES;
NvHandle hTmpObject;
NV_STATUS status;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
if (pDstObject == NULL)
{
return NV_ERR_INVALID_ARGUMENT;
}
//
// Find the device instance on which the rm object exists.
//
hTmpObject = hSrcObject;
do
{
RsResourceRef *pResourceRef;
status = serverutilGetResourceRef(hSrcClient, hTmpObject, &pResourceRef);
if (status != NV_OK)
return status;
Device *pDevice = dynamicCast(pResourceRef->pResource, Device);
if (pDevice != NULL)
{
deviceInstance = pDevice->deviceInst;
break;
}
hTmpObject = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0;
} while (hTmpObject != 0);
if ((hTmpObject == 0) || (deviceInstance >= NV_MAX_DEVICES))
{
return NV_ERR_OBJECT_NOT_FOUND;
}
status = RmRefObjExportImport();
if (status != NV_OK)
{
return status;
}
if (objExportDevice[deviceInstance].hRmDevice == 0 ||
serverutilValidateNewResourceHandle(hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice))
{
//
// Device object has not been created or it got destroyed in the
// teardown path of device instance destruction; allocate a fresh device
// object.
//
NV0080_ALLOC_PARAMETERS params;
NV2080_ALLOC_PARAMETERS subdevParams;
if (objExportDevice[deviceInstance].hRmDevice == 0)
{
NV_ASSERT(objExportDevice[deviceInstance].hRmSubDevice == 0);
objExportDevice[deviceInstance].hRmDevice =
RmGenerateObjExportHandle(deviceInstance);
objExportDevice[deviceInstance].hRmSubDevice =
RmGenerateObjExportHandle(deviceInstance);
if (objExportDevice[deviceInstance].hRmDevice == 0 ||
objExportDevice[deviceInstance].hRmSubDevice == 0)
{
NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handles in %s\n",
__FUNCTION__);
status = NV_ERR_NO_MEMORY;
goto done;
}
}
portMemSet(&params, 0, sizeof(NV0080_ALLOC_PARAMETERS));
params.deviceId = deviceInstance;
status = pRmApi->AllocWithHandle(pRmApi,
hObjExportRmClient,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice,
NV01_DEVICE_0,
&params);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc device in %s\n",
__FUNCTION__);
goto done;
}
portMemSet(&subdevParams, 0, sizeof(NV2080_ALLOC_PARAMETERS));
subdevParams.subDeviceId = 0;
status = pRmApi->AllocWithHandle(pRmApi,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice,
objExportDevice[deviceInstance].hRmSubDevice,
NV20_SUBDEVICE_0,
&subdevParams);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc subdevice in %s\n",
__FUNCTION__);
(void) pRmApi->Free(pRmApi, hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice);
goto done;
}
}
hDstObject = RmGenerateObjExportHandle(deviceInstance);
if (hDstObject == 0)
{
NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handle in %s\n",
__FUNCTION__);
status = NV_ERR_NO_MEMORY;
goto done;
}
// If duping under device handle fails, try subdevice handle.
status = pRmApi->DupObject(pRmApi,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice,
&hDstObject,
hSrcClient,
hSrcObject,
0 /* flags */);
if (status != NV_OK)
{
if (status == NV_ERR_INVALID_OBJECT_PARENT)
{
NV_PRINTF(LEVEL_INFO,
"pRmApi->DupObject(Dev, failed due to invalid parent in %s."
" Now attempting DupObject with Subdev handle.\n",
__FUNCTION__);
status = pRmApi->DupObject(pRmApi,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmSubDevice,
&hDstObject,
hSrcClient,
hSrcObject,
0 /* flags */);
if (status != NV_OK)
{
RmUnrefObjExportHandle(hDstObject);
NV_PRINTF(LEVEL_ERROR,
"pRmApi->DupObject(Subdev, failed with error code 0x%x in %s\n",
status, __FUNCTION__);
goto done;
}
}
else
{
RmUnrefObjExportHandle(hDstObject);
NV_PRINTF(LEVEL_ERROR,
"pRmApi->DupObject(Dev, failed with error code 0x%x in %s\n",
status, __FUNCTION__);
goto done;
}
}
if (pDeviceInstance != NULL)
{
*pDeviceInstance = deviceInstance;
}
*pDstObject = hDstObject;
done:
if (status != NV_OK)
{
RmUnrefObjExportImport();
}
return status;
}
void RmFreeObjExportHandle(RmObjExportHandle hObject)
{
if (!RmValidateHandleAgainstInternalHandles(hObject))
{
NV_PRINTF(LEVEL_ERROR, "Invalid handle to exported object in %s\n",
__FUNCTION__);
return;
}
RmUnrefObjExportHandle(hObject);
RmUnrefObjExportImport();
}
NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
NvHandle *phDstObject, RmObjExportHandle hSrcObject,
NvU8 *pObjectType)
{
NV_STATUS status;
NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
if (!RmValidateHandleAgainstInternalHandles(hSrcObject))
{
return NV_ERR_INVALID_ARGUMENT;
}
if (mapFind(&objExportHandleMap, hSrcObject) == NULL)
{
return NV_ERR_INVALID_ARGUMENT;
}
if (pObjectType != NULL)
{
params.hObject = hSrcObject;
params.mapFlags = 0;
params.addrSpaceType = \
NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID;
status = pRmApi->Control(pRmApi, hObjExportRmClient, hObjExportRmClient,
NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE,
&params, sizeof(params));
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"GET_ADDR_SPACE_TYPE failed with error code 0x%x in %s\n",
status, __FUNCTION__);
return status;
}
switch (params.addrSpaceType)
{
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM;
break;
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM;
break;
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC;
break;
default:
NV_ASSERT_OK_OR_RETURN(NV_ERR_INVALID_ARGUMENT);
}
}
status = pRmApi->DupObject(pRmApi, hDstClient, hDstParent, phDstObject,
hObjExportRmClient, hSrcObject,
0 /* flags */);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"pRmApi->DupObject(pRmApi, failed with error code 0x%x in %s\n",
status, __FUNCTION__);
return status;
}
return NV_OK;
}
NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance)
{
RmObjExportHandleRef *pHandleRef = NULL;
if (!RmValidateHandleAgainstInternalHandles(hSrcObject))
{
return NV_ERR_INVALID_ARGUMENT;
}
pHandleRef = mapFind(&objExportHandleMap, hSrcObject);
if (pHandleRef == NULL)
{
return NV_ERR_OBJECT_NOT_FOUND;
}
*deviceInstance = pHandleRef->deviceInstance;
return NV_OK;
}
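//
// Illustrative end-to-end flow across the interfaces above (the client and
// object handle names are hypothetical):
//
//   RmObjExportHandle hExport;
//   NvU32 deviceInstance;
//   NvHandle hImported;
//
//   // Producer: export hSrcObject owned by hSrcClient.
//   RmExportObject(hSrcClient, hSrcObject, &hExport, &deviceInstance);
//
//   // Consumer (possibly after the producer client is gone): import it.
//   RmImportObject(hDstClient, hDstParent, &hImported, hExport, NULL);
//
//   // When no longer needed, release the export handle.
//   RmFreeObjExportHandle(hExport);
//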

View File

@@ -0,0 +1,100 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h> // NV device driver interface
#include <nv-priv.h>
#include <os/os.h>
#include <nvos.h>
#include <osapi.h>
#include "gpu/gpu.h"
#include "gpu/gpu_resource.h"
#include "gpu/subdevice/subdevice.h"
#include <osfuncs.h>
#include <diagnostics/journal.h>
#include "gpu/mem_mgr/mem_desc.h"
#include "mem_mgr/mem.h"
#include <nvpcie.h>
#include <core/locks.h>
#include "rmapi/rs_utils.h"
#include "rmapi/client_resource.h"
#include <class/cl0000.h>
#include <class/cl90cd.h>
#include <class/cl0005.h> // NV01_EVENT
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl844c.h> // G84_PERFBUFFER
#include <ctrl/ctrl0000/ctrl0000gpu.h>
#include <ctrl/ctrl0000/ctrl0000unix.h>
#include <ctrl/ctrl2080/ctrl2080gpu.h>
#include <ctrl/ctrl2080/ctrl2080unix.h>
/*!
* @brief Implements the NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS
* RmControl request. It checks whether GPU video memory will remain
* persistent across a system suspend/resume cycle.
*
* @param[in] pSubdevice
* @param[in,out] pParams
*
* @return
* NV_OK Success
*/
NV_STATUS
subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS *pParams
)
{
OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
pParams->bVidmemPersistent = !gpuIsVidmemPreservationBrokenBug3172217(pGpu) &&
(nv->preserve_vidmem_allocations ||
nvp->s0ix_pm_enabled);
return NV_OK;
}
/*!
* @brief Implements the NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS
* RmControl request. It sets the restore-TGP flag, which is used
* to restore TGP limits when the client is killed.
*
* @param[in] pSubdevice
* @param[in] pParams
*
* @return
* NV_OK Success
*/
NV_STATUS
subdeviceCtrlCmdOsUnixUpdateTgpStatus_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS *pParams
)
{
pSubdevice->bUpdateTGP = pParams->bUpdateTGP;
return NV_OK;
}

View File

@@ -0,0 +1,76 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <core/locks.h>
#include <ctrl/ctrl0080/ctrl0080unix.h>
#include <gpu/device/device.h>
#include <gpu/gpu.h>
#include <gpu/mem_mgr/mem_mgr.h>
#include <gpu/mem_mgr/mem_desc.h>
#include <nv-priv.h>
#include <nv.h>
#include <osapi.h>
NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice,
NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams)
{
OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FB) == NV_OK)
{
// See if the console is on one of the subdevices of this device.
portMemSet(pParams, 0, sizeof(*pParams));
SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
if (memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager) != NULL)
{
NvU64 baseAddr;
// There should only be one.
NV_ASSERT(pParams->width == 0);
pParams->subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
// Console is either mapped to BAR1 or BAR2 + 16 MB
os_get_screen_info(&baseAddr, &pParams->width,
&pParams->height, &pParams->depth,
&pParams->pitch,
nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
}
SLI_LOOP_END
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
}
else
{
NV_PRINTF(LEVEL_INFO,"%s: Failed to acquire GPU lock", __FUNCTION__);
}
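//
// Note: if the GPU lock cannot be acquired, *pParams is left unmodified
// and NV_OK is still returned; callers see width == 0 only if they
// pre-cleared the structure themselves.
//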
return NV_OK;
}

View File

@@ -0,0 +1,570 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <nv-priv.h>
#include <osapi.h>
#include <core/thread_state.h>
#include <core/locks.h>
#include <gpu/gpu.h>
#include "kernel/gpu/intr/intr.h"
#include <gpu/bif/kernel_bif.h>
#include "gpu/disp/kern_disp.h"
#include "objtmr.h"
static NvBool osInterruptPending(
OBJGPU *pGpu,
NvBool *serviced,
THREAD_STATE_NODE *pThreadState
)
{
POBJDISP pDisp;
KernelDisplay *pKernelDisplay;
NvBool pending, sema_release;
THREAD_STATE_NODE threadState;
NvU32 gpuMask, gpuInstance;
Intr *pIntr = NULL;
MC_ENGINE_BITVECTOR intr0Pending;
MC_ENGINE_BITVECTOR intr1Pending;
*serviced = NV_FALSE;
pending = NV_FALSE;
sema_release = NV_TRUE;
OBJGPU *pDeviceLockGpu = pGpu;
NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer
PORT_MEM_ALLOCATOR *pIsrAllocator;
//
// GPU interrupt servicing ("top half")
//
// Top-level processing of GPU interrupts is performed using the
// steps below; although the code is straightforward, there
// are a few points to be aware of:
//
// 1) The GPUs lock is acquired for two reasons: to allow
// looping over GPUs atomically in SLI and to sanity
// check the PCI configuration space of any initialized
// GPUs. If the acquisition fails, the early return
// is acceptable since GPU interrupts are disabled while
// the lock is held; note that returning success
// in this case could interfere with the processing
// of third-party device interrupts if the IRQ is shared.
// Due to the above, some interrupts may be reported as
// unhandled if invocations of the ISR registered with
// the kernel are not serialized. This is bad, but
// ignored by currently supported kernels, provided most
// interrupts are handled.
//
// 2) Since acquisition of the lock disables interrupts
// on all initialized GPUs, NV_PMC_INTR_EN_0 can not be
// relied up on to determine whether interrupts are
// expected from a given GPU. The code below is therefore
// forced to rely on software state. NV_PMC_INTR_EN_0
// is read only as a sanity check to guard against
// invalid GPU state (lack of PCI memory access, etc.).
//
// 3) High priority interrupts (VBLANK, etc.), are serviced in
// this function, service of all other interrupts is
// deferred until a bottom half. If a bottom half needs
// to be scheduled, release of the GPUs lock is
// likewise deferred until completion of the bottom half.
//
// 4) To reduce the risk of starvation, an effort is made to
// consolidate processing of interrupts pending on
// all GPUs sharing a given IRQ.
//
// 5) Care is taken to ensure that the consolidated interrupt
// processing is performed in the context of a GPU
// that has interrupts pending. Otherwise, if additional ISR
// processing via a bottom half is required, this
// bottom-half ISR might race against the GPU's shut-down
// path.
//
pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
tlsIsrInit(pIsrAllocator);
// For SWRL granular locking process the countdown timer interrupt.
if (pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_SWRL_GRANULAR_LOCKING))
{
threadStateInitISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
gpuMask = gpumgrGetGpuMask(pDeviceLockGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
pIntr = GPU_GET_INTR(pGpu);
if (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr))
{
// If interrupt enable is garbage the GPU is probably in a bad state
if (intrGetIntrEnFromHw_HAL(pGpu, pIntr, &threadState) > INTERRUPT_TYPE_MAX)
{
continue;
}
intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState);
POBJTMR pTmr = GPU_GET_TIMER(pGpu);
*serviced = tmrServiceSwrlWrapper(pGpu, pTmr, &intr0Pending, &threadState);
}
}
threadStateFreeISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
}
// LOCK: try to acquire GPUs lock
if (rmDeviceGpuLocksAcquire(pDeviceLockGpu, GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_ISR) == NV_OK)
{
threadStateInitISRAndDeferredIntHandler(&threadState,
pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR);
gpuMask = gpumgrGetGpuMask(pDeviceLockGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
pIntr = GPU_GET_INTR(pGpu);
pDisp = GPU_GET_DISP(pGpu);
pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
if ((pDisp != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
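//
// Intentionally empty: on Tegra SoC NVDISPLAY the display interrupt is
// presumably handled outside this dGPU servicing path; this branch only
// keeps such GPUs out of the else-if below.
//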
}
else if ((pIntr != NULL) && INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr))
{
                // If the interrupt enable is garbage, the GPU is probably in a bad state.
                if (intrGetIntrEnFromHw_HAL(pGpu, pIntr, &threadState) > INTERRUPT_TYPE_MAX)
                {
                    continue;
                }
intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState);
if (bitVectorTest(&intr0Pending, MC_ENGINE_IDX_DISP))
{
if (pKernelDisplay != NULL)
{
kdispServiceVblank_HAL(pGpu, pKernelDisplay, 0,
(VBLANK_STATE_PROCESS_LOW_LATENCY |
VBLANK_STATE_PROCESS_CALLED_FROM_ISR),
&threadState);
*serviced = NV_TRUE;
intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState);
}
}
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{
pIntr = GPU_GET_INTR(pGpu);
if (pIntr != NULL)
{
NvBool bCtxswLog = NV_FALSE;
intrGetPendingNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState);
intrCheckFecsEventbufferPending(pGpu, pIntr, &intr1Pending, &bCtxswLog);
}
}
if (!bitVectorTestAllCleared(&intr0Pending) ||
!bitVectorTestAllCleared(&intr1Pending))
{
pending = NV_TRUE;
sema_release = NV_FALSE;
}
}
}
threadStateFreeISRAndDeferredIntHandler(&threadState,
pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR);
if (sema_release)
{
NV_ASSERT(!pending);
// UNLOCK: release GPUs lock
rmDeviceGpuLocksRelease(pDeviceLockGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
else
{
rmDeviceGpuLockSetOwner(pDeviceLockGpu, GPUS_LOCK_OWNER_PENDING_DPC_REFRESH);
}
}
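    //
    // If the alternate interrupt tree is handled locklessly, service any
    // pending non-stall interrupts here without holding the GPUs lock.
    //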
if (pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) &&
pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{
threadStateInitISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
gpuMask = gpumgrGetGpuMask(pDeviceLockGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
pIntr = GPU_GET_INTR(pGpu);
if ((pIntr != NULL) && (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr)))
{
NvBool bCtxswLog = NV_FALSE;
intrGetPendingNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState);
intrCheckFecsEventbufferPending(pGpu, pIntr, &intr1Pending, &bCtxswLog);
if (!bitVectorTestAllCleared(&intr1Pending))
{
intrServiceNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState);
*serviced = NV_TRUE;
}
}
}
threadStateFreeISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
}
tlsIsrDestroy(pIsrAllocator);
portMemAllocatorRelease(pIsrAllocator);
return pending;
}
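/*
 * Top-half interrupt service routine for a single GPU.
 *
 * @param[in] pGpu OBJGPU pointer
 *
 * @return NV_OK if pending interrupts were fully serviced,
 *         NV_WARN_MORE_PROCESSING_REQUIRED if a bottom half must be
 *         scheduled, NV_ERR_NO_INTR_PENDING if no interrupt was pending.
 */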
NV_STATUS osIsr(
OBJGPU *pGpu
)
{
NV_STATUS status = NV_OK;
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvBool pending = NV_FALSE;
NvBool serviced = NV_FALSE;
Intr *pIntr;
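    // Only attempt to service interrupts once GPU state load has completed.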
if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
pending = osInterruptPending(pGpu, &serviced, NULL /* threadstate */);
}
else
{
pIntr = GPU_GET_INTR(pGpu);
if (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr))
{
KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
pending = osInterruptPending(pGpu, &serviced, NULL /* threadstate */);
kbifCheckAndRearmMSI(pGpu, pKernelBif);
}
}
}
if (!pending && (IS_VIRTUAL(pGpu) || !serviced))
status = NV_ERR_NO_INTR_PENDING;
else if (pending)
status = NV_WARN_MORE_PROCESSING_REQUIRED;
return status;
}
/*
 * Helper function to determine when the RM SEMA/GPUS LOCK should toggle
 * interrupts, based on the state of the GPU; new cases must be added here
 * as they are discovered.
*
* Noteworthy special cases:
*
* - Suspend/resume: the GPU could still be suspended and not accessible
* on the bus, while passive-level threads need to grab the GPUs
* lock, or other GPUs are being resumed and triggering interrupts.
*
 *   - SLI state transitions: interrupts are disabled manually prior to
 *     removing GPUs from the lock mask leading up to SLI link/unlink
 *     operations on UNIX. Since, by design, the GPUs lock is not held in
 *     these paths, it must be ensured that GPUs lock acquisitions
 *     occurring asynchronously do not re-enable interrupts on any of the
 *     GPUs undergoing the SLI state transition.
*
* @param[in] pGpu OBJGPU pointer
*
* @return NV_TRUE if the RM SEMA/GPUS LOCK should toggle interrupts, NV_FALSE
* otherwise.
*/
NvBool osLockShouldToggleInterrupts(OBJGPU *pGpu)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
return NV_TRUE;
return (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) &&
gpuIsStateLoaded(pGpu) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SLI_LINK_CODEPATH));
}
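/*
 * Enable interrupt generation for the given GPU: through an OS call for
 * SOC display, otherwise by restoring the SW interrupt enable state to
 * hardware.
 *
 * @param[in] pGpu OBJGPU pointer
 */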
void osEnableInterrupts(OBJGPU *pGpu)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
        // Enable the IRQ through an OS call.
nv_control_soc_irqs(NV_GET_NV_STATE(pGpu), NV_TRUE);
return;
}
else
{
Intr *pIntr = GPU_GET_INTR(pGpu);
NvU32 intrEn;
if (!pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING))
NV_ASSERT(intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL) == INTERRUPT_TYPE_DISABLED);
intrEn = intrGetIntrEn(pIntr);
intrSetIntrEnInHw_HAL(pGpu, pIntr, intrEn, NULL);
        intrSetStall_HAL(pGpu, pIntr, intrEn, NULL);
        if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED))
        {
            intrRestoreNonStall_HAL(pGpu, pIntr, intrEn, NULL);
        }
}
}
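/*
 * Disable interrupt generation for the given GPU: through an OS call for
 * SOC display, otherwise by writing INTERRUPT_TYPE_DISABLED to hardware.
 *
 * @param[in] pGpu OBJGPU pointer
 * @param[in] bIsr caller-context flag (unused in this path)
 */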
void osDisableInterrupts(
OBJGPU *pGpu,
NvBool bIsr
)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
        // Disable the IRQ through an OS call.
nv_control_soc_irqs(NV_GET_NV_STATE(pGpu), NV_FALSE);
return;
}
else
{
Intr *pIntr = GPU_GET_INTR(pGpu);
NvU32 new_intr_en_0 = INTERRUPT_TYPE_DISABLED;
intrSetIntrEnInHw_HAL(pGpu, pIntr, new_intr_en_0, NULL);
        intrSetStall_HAL(pGpu, pIntr, new_intr_en_0, NULL);
        if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED))
        {
            if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
            {
                intrRestoreNonStall_HAL(pGpu, pIntr, intrGetIntrEn(pIntr), NULL);
            }
            else
            {
                intrRestoreNonStall_HAL(pGpu, pIntr, new_intr_en_0, NULL);
            }
        }
}
}
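/*
 * Bottom half for deferred interrupt processing. Entered with the GPUs
 * lock still held from the top half; services stall interrupts on each
 * GPU sharing the IRQ, then releases the lock.
 */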
static void RmIsrBottomHalf(
nv_state_t *pNv
)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
THREAD_STATE_NODE threadState;
OS_THREAD_HANDLE threadId;
NvU32 gpuMask, gpuInstance;
OBJGPU *pDeviceLockGpu = pGpu;
Intr *pIntr = NULL;
POBJDISP pDisp = NULL;
NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer
PORT_MEM_ALLOCATOR *pIsrAllocator;
pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
tlsIsrInit(pIsrAllocator);
//
    // The GPUs lock is acquired by the ISR but released by this bottom
    // half, so the owning thread changes. Refresh the threadId owner so
    // that it is correct for the bottom-half context.
//
osGetCurrentThread(&threadId);
rmDeviceGpuLockSetOwner(pDeviceLockGpu, threadId);
gpuMask = gpumgrGetGpuMask(pGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
threadStateInitISRAndDeferredIntHandler(&threadState,
pGpu, THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER);
pIntr = GPU_GET_INTR(pGpu);
pDisp = GPU_GET_DISP(pGpu);
        //
        // Call the display service in the case of SOC display.
        // TODO: With multi-interrupt handling based on the IRQ, aux
        // interrupts are serviced by dpAuxService. See JIRA task TDS-4253.
        //
if ((pDisp != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
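            // Currently nothing to service here; see the TODO above.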
}
else if ((pIntr != NULL) && (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr)))
{
intrServiceStall_HAL(pGpu, pIntr);
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{
MC_ENGINE_BITVECTOR intrPending;
intrServiceNonStall_HAL(pGpu, pIntr, &intrPending, &threadState);
}
}
threadStateFreeISRAndDeferredIntHandler(&threadState,
pGpu, THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER);
}
// UNLOCK: release GPUs lock
rmDeviceGpuLocksRelease(pDeviceLockGpu, GPUS_LOCK_FLAGS_NONE, NULL);
tlsIsrDestroy(pIsrAllocator);
portMemAllocatorRelease(pIsrAllocator);
}
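/*
 * Bottom half for non-stall interrupt processing, enqueued without the
 * GPUs lock held; the lock is therefore acquired here before the GPU is
 * touched.
 */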
static void RmIsrBottomHalfUnlocked(
nv_state_t *pNv
)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
Intr *pIntr;
THREAD_STATE_NODE threadState;
// In the GSP client scenario, the fatal fault interrupt is not shared
// by UVM and CPU-RM. Instead, it is handled entirely by GSP-RM. We
// therefore do not expect this function to be called. But if it is, bail
// without attempting to service interrupts.
if (IS_GSP_CLIENT(pGpu))
{
return;
}
    // Grab the GPU lock here, since this kthread item was enqueued without
    // holding it.
if (rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DPC) == NV_OK)
{
if (FULL_GPU_SANITY_CHECK(pGpu))
{
pIntr = GPU_GET_INTR(pGpu);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if (intrGetIntrEn(pIntr) != INTERRUPT_TYPE_DISABLED)
{
MC_ENGINE_BITVECTOR intrPending;
intrGetPendingStall_HAL(pGpu, pIntr, &intrPending, &threadState);
intrServiceNonStallBottomHalf(pGpu, pIntr, &intrPending, &threadState);
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
}
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
}
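/*
 * OS-facing top-half ISR entry point. Returns NV_TRUE if RM recognized
 * (and possibly serviced) the interrupt; *NeedBottomHalf is set when
 * further processing must be deferred to a bottom half.
 */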
NvBool NV_API_CALL rm_isr(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *NeedBottomHalf
)
{
NV_STATUS status;
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
OBJGPU *pGpu;
NvBool retval;
void *fp;
if ((nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) == 0)
{
return NV_FALSE;
}
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (pGpu == NULL)
{
return NV_FALSE;
}
NV_ENTER_RM_RUNTIME(sp,fp);
    // Call the actual ISR function.
status = isrWrapper(pGpu->testIntr, pGpu);
switch (status)
{
case NV_OK:
*NeedBottomHalf = NV_FALSE;
retval = NV_TRUE;
break;
case NV_WARN_MORE_PROCESSING_REQUIRED:
*NeedBottomHalf = NV_TRUE;
retval = NV_TRUE;
break;
case NV_ERR_NO_INTR_PENDING:
default:
*NeedBottomHalf = NV_FALSE;
retval = NV_FALSE;
break;
}
NV_EXIT_RM_RUNTIME(sp,fp);
return retval;
}
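/*
 * OS-facing entry point for the locked bottom half; see RmIsrBottomHalf.
 */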
void NV_API_CALL rm_isr_bh(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
RmIsrBottomHalf(pNv);
NV_EXIT_RM_RUNTIME(sp,fp);
}
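/*
 * OS-facing entry point for the unlocked bottom half; see
 * RmIsrBottomHalfUnlocked.
 */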
void NV_API_CALL rm_isr_bh_unlocked(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
RmIsrBottomHalfUnlocked(pNv);
NV_EXIT_RM_RUNTIME(sp,fp);
}