530.30.02

This commit is contained in:
Andy Ritger
2023-02-28 11:12:44 -08:00
parent e598191e8e
commit 4397463e73
928 changed files with 124728 additions and 88525 deletions

View File

@@ -36,53 +36,49 @@
// giving us a total of 128 LEAF registers.
// GPU vector: The 128 LEAF registers give us a total of (128*32) GPU vectors
// giving us a total of 4096 GPU vectors
//
//
//
// Given a subtree index, the below macros give us the index of the TOP level
// register and the bit within the TOP level register to program for that
// subtree.
//
#define NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(i) ((i) / 32)
#define NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i) ((i) % 32)
#define NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(i) (((NvU32)(i)) / 32)
#define NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i) (((NvU32)(i)) % 32)
//
// Given a subtree index, the below macros give us the two LEAF register indices
// that correspond to that subtree.
//
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(i) ((i)*2)
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(i) (((i)*2) + 1)
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(i) (((NvU32)(i)) * 2)
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(i) ((((NvU32)(i)) * 2) + 1)
#define NV_CTRL_INTR_LEAF_IDX_TO_SUBTREE(i) ((i)/2)
#define NV_CTRL_INTR_LEAF_IDX_TO_SUBTREE(i) (((NvU32)(i)) / 2)
//
// Given a LEAF register index, the below macros give us the range of GPU
// interrupt vectors that correspond to those leafs.
//
#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_START(i) ((i)*32)
#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_END(i) (((i)*32) + 31)
#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_START(i) (((NvU32)(i)) * 32)
#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_END(i) ((((NvU32)(i)) * 32) + 31)
//
// Given a GPU interrupt vector, the below macros give us the index of the
// LEAF register and the bit within the LEAF register to program for that
// GPU interrupt vector.
//
#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i) ((i) / 32)
#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(i) ((i) % 32)
#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i) (((NvU32)(i)) / 32)
#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(i) (((NvU32)(i)) % 32)
//
// Given a GPU interrupt vector, the below macro gives us the subtree in which
// it belongs.
//
#define NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(i) ((NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2)
#define NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(i) \
((NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2)
//
// The max number of leaf registers we expect
// This is enforced to be greater than or equal to
// (NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_STALL_SUBTREE_LAST) + 1)
// for the largest NV_CPU_INTR_STALL_SUBTREE_LAST
//
#define NV_MAX_INTR_LEAVES 12
#define NV_MAX_INTR_LEAVES 16
// In SW, this specifies an invalid interrupt vector
#define NV_INTR_VECTOR_INVALID (NV_U32_MAX)

View File

@@ -33,15 +33,7 @@
// RM uses channel 0 for FSP EMEM.
#define FSP_EMEM_CHANNEL_RM 0x0
// PMU/SOE use channel 4 for FSP EMEM.
#define FSP_EMEM_CHANNEL_PMU_SOE 0x4
#define FSP_EMEM_CHANNEL_MAX 0x8
// EMEM channel 0 (RM) is allocated 1K bytes.
#define FSP_EMEM_CHANNEL_RM_SIZE 1024
// EMEM channel 4 (PMU/SOE) is allocated 1K bytes.
#define FSP_EMEM_CHANNEL_PMU_SOE_SIZE 1024
#define FSP_EMEM_CHANNEL_PMU_SOE_OFFSET 4096
#endif // _FSP_EMEM_CHANNELS_H_

View File

@@ -39,5 +39,6 @@
#define NVDM_TYPE_FSP_RESPONSE 0x15
#define NVDM_TYPE_INFOROM 0x17
#define NVDM_TYPE_SMBPBI 0x18
#define NVDM_TYPE_UEFI_RM 0x1C
#endif // _FSP_NVDM_FORMAT_H_

View File

@@ -0,0 +1,83 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#pragma once
#ifndef GSP_FW_SR_META_H_
#define GSP_FW_SR_META_H_
/*!
* GSP firmware SR metadata
*
* Initialized by CPU-RM and kept in Sysmem.
* Verified by Booter.
*
*/
typedef struct
{
//
// Magic
// Use for verification by Booter
//
NvU64 magic; // = GSP_FW_SR_META_MAGIC;
//
// Revision number
// Bumped up when we change this interface so it is not backward compatible.
// Bumped up when we revoke GSP-RM ucode
//
NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
//
// ---- Members regarding data in SYSMEM ----------------------------
// Consumed by Booter for DMA
//
NvU64 sysmemAddrOfSuspendResumeData; // physical address of the S/R data blob
NvU64 sizeOfSuspendResumeData; // size in bytes of the S/R data blob
// ---- Members for crypto ops across S/R ---------------------------
//
// IV used for encryption of the Suspend/Resume data
// (32-byte buffer; exact cipher/IV width used by Booter not visible here)
//
NvU8 IV[32];
//
// Hash generated of the Suspend/Resume data
// (64-byte digest buffer; presumably a SHA-512-sized hash — TODO confirm)
//
NvU8 hash[64];
// ---- Unused members ----------------------------------------------
//
// Pad structure to exactly 256 bytes (1 DMA chunk). Can replace padding with additional
// fields without incrementing revision. Padding initialized to 0.
//
NvU32 padding[32];
} GspFwSRMeta;
#define GSP_FW_SR_META_REVISION 1
#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
#endif // GSP_FW_SR_META_H_

View File

@@ -139,12 +139,16 @@ typedef struct
NvU32 elfCodeSize;
NvU32 elfDataSize;
// Bit 0 is used to check if [VGPU-GSP] mode is active in init partition
NvU8 driverModel;
// Used during GSP-RM resume to check for revocation
NvU32 lsUcodeVersion;
// Number of VF partitions allocating sub-heaps from the WPR heap
// Used during boot to ensure the heap is adequately sized
NvU8 gspFwHeapVfPartitionCount;
// Pad structure to exactly 256 bytes. Can replace padding with additional
// fields without incrementing revision. Padding initialized to 0.
NvU8 padding[11];
NvU8 padding[7];
// BL to use for verification (i.e. Booter says OK to boot)
NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
@@ -154,7 +158,4 @@ typedef struct
#define GSP_FW_WPR_META_REVISION 1
#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
// Bit 0 is used to check if [VGPU-GSP] mode is active in init partition
#define DRIVERMODEL_VGPU 0
#endif // GSP_FW_WPR_META_H_

View File

@@ -77,9 +77,9 @@ typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS
*/
typedef struct GSP_RM_PARAMS
{
// Physical memory aperture through which bootArgsPa is accessed
// Physical memory aperture through which bootArgsOffset is accessed
GSP_DMA_TARGET target;
// Physical address that will be stuffed in NV_PGSP_FALCON_MAILBOX(0|1)
// Physical offset in the memory aperture that will be passed to GSP-RM
NvU64 bootArgsOffset;
} GSP_RM_PARAMS;

File diff suppressed because it is too large Load Diff

View File

@@ -26,6 +26,7 @@
#include "inforom/types.h"
#include "inforom/ifrnvl.h"
#include "inforom/ifrecc.h"
#include "inforom/ifrdem.h"
#include "inforom/omsdef.h"
@@ -68,7 +69,9 @@ struct INFOROM_OBD_OBJECT_V2_XX
inforom_U032 productLength;
inforom_U032 productWidth;
inforom_U032 productHeight;
inforom_U008 reserved[89];
inforom_U008 vbiosFactoryVersion[5];
inforom_U008 board965PartNumber[20];
inforom_U008 reserved[64];
};
#define INFOROM_OBD_OBJECT_V2_XX_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d111b3d89b"
typedef struct INFOROM_OBD_OBJECT_V2_XX INFOROM_OBD_OBJECT_V2_XX;

View File

@@ -81,12 +81,12 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_AD10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ad10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ga10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_tu10x.bin");
@@ -100,12 +100,12 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_AD10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ad10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ga10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_tu10x.bin");
@@ -125,7 +125,7 @@ static inline const char *nv_firmware_path(
// which will then be invoked (at the top-level) for each
// gsp_*.bin (but not gsp_log_*.bin)
#if defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ad10x.bin")
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ga10x.bin")
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_tu10x.bin")
#endif // defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)

View File

@@ -290,7 +290,6 @@ VENDORNAME vendorName[] =
{PCI_VENDOR_ID_FUJITSU, "Fujitsu"},
{PCI_VENDOR_ID_CADENCE, "Cadence"},
{PCI_VENDOR_ID_ARM, "ARM"},
{PCI_VENDOR_ID_ALIBABA, "Alibaba"},
{0, "Unknown"} // Indicates end of the table
};
@@ -346,7 +345,6 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_ARM, 0x0100, CS_ARM_NEOVERSEN1}, // Arm Neoverse N1
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN96XX}, // Marvell OCTEON CN96xx
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN98XX}, // Marvell OCTEON CN98xx
{PCI_VENDOR_ID_ALIBABA, 0x8000, CS_ALIBABA_YITIAN}, // Alibaba Yitian
// last element must have chipset CS_UNKNOWN (zero)
{0, 0, CS_UNKNOWN}

View File

@@ -1,699 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 200-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVDEVID_H
#define NVDEVID_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Private device ids defines - only defines ! *
* *
\***************************************************************************/
///////////////////////////////////////////////////////////////////////////////////////////
//
// VENDOR/SUBVENDOR IDS
// XXX Cleanup to do: change PCI_VENDOR_* to NV_PCI_SUBID_VENDOR_*
//
///////////////////////////////////////////////////////////////////////////////////////////
#define NV_PCI_SUBID_VENDOR 15:0 /* RW--F */
#define NV_PCI_SUBID_VENDOR_AMD 0x1022
#define NV_PCI_SUBID_VENDOR_ALI 0x10B9
#define NV_PCI_SUBID_VENDOR_NVIDIA 0x10DE
#define NV_PCI_SUBID_VENDOR_INTEL 0x8086
#define NV_PCI_SUBID_VENDOR_VIA 0x1106
#define NV_PCI_SUBID_VENDOR_RCC 0x1166
#define NV_PCI_SUBID_VENDOR_MICRON_1 0x1042
#define NV_PCI_SUBID_VENDOR_MICRON_2 0x1344
#define NV_PCI_SUBID_VENDOR_APPLE 0x106B
#define NV_PCI_SUBID_VENDOR_SIS 0x1039
#define NV_PCI_SUBID_VENDOR_ATI 0x1002
#define NV_PCI_SUBID_VENDOR_TRANSMETA 0x1279
#define NV_PCI_SUBID_VENDOR_HP 0x103C
#define NV_PCI_SUBID_VENDOR_DELL 0x1028
#define NV_PCI_SUBID_VENDOR_FUJITSU 0x10cf
#define NV_PCI_SUBID_VENDOR_ASUS 0x1043
#define NV_PCI_SUBID_VENDOR_MSI 0x1462
#define NV_PCI_SUBID_VENDOR_FOXCONN 0x105B
#define NV_PCI_SUBID_VENDOR_ECS 0x1019
#define NV_PCI_SUBID_VENDOR_DFI_1 0x106E
#define NV_PCI_SUBID_VENDOR_TOSHIBA 0x1179
#define NV_PCI_SUBID_VENDOR_DFI_2 0x15BD
#define NV_PCI_SUBID_VENDOR_ACER 0x1025
#define NV_PCI_SUBID_VENDOR_GIGABYTE 0x1458
#define NV_PCI_SUBID_VENDOR_EVGA 0x3842
#define NV_PCI_SUBID_VENDOR_BROADCOM 0x1166
#define NV_PCI_SUBID_VENDOR_SUPERMICRO 0x15D9
#define NV_PCI_SUBID_VENDOR_BIOSTAR 0x1565
#define NV_PCI_SUBID_VENDOR_XFX 0x1682
#define NV_PCI_SUBID_VENDOR_PCPARTNER 0x19DA
#define NV_PCI_SUBID_VENDOR_LENOVO 0x17AA
#define NV_PCI_SUBID_VENDOR_FSC 0x1734
#define NV_PCI_SUBID_VENDOR_FTS 0x1734
#define NV_PCI_SUBID_VENDOR_COLORFUL 0x7377
#define NV_PCI_SUBID_VENDOR_ASROCK 0x1849
#define NV_PCI_SUBID_VENDOR_SHUTTLE 0x1297
#define NV_PCI_SUBID_VENDOR_CLEVO 0x1558
#define NV_PCI_SUBID_VENDOR_PEGATRON 0x1B0A
#define NV_PCI_SUBID_VENDOR_JETWAY 0x16F3
#define NV_PCI_SUBID_VENDOR_HIGHGRADE 0x1C6C
#define NV_PCI_SUBID_VENDOR_GALAXY 0x1B4C
#define NV_PCI_SUBID_VENDOR_ZOTAC 0x19DA
#define NV_PCI_SUBID_VENDOR_ARIMA 0x161F
#define NV_PCI_SUBID_VENDOR_BFG 0x19F1
#define NV_PCI_SUBID_VENDOR_SONY 0x104D
#define NV_PCI_SUBID_VENDOR_BITLAND 0x1642
#define NV_PCI_SUBID_VENDOR_PC_PARTNER 0x174B
#define NV_PCI_SUBID_VENDOR_NZXT 0x1D96
// XXX CLEANUP TO REMOVE IN FAVOR OF NV_PCI_SUBID_VENDOR_*
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_VENDOR_ID_ALI 0x10B9
#define PCI_VENDOR_ID_NVIDIA 0x10DE
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_VENDOR_ID_RCC 0x1166
#define PCI_VENDOR_ID_MICRON_1 0x1042
#define PCI_VENDOR_ID_MICRON_2 0x1344
#define PCI_VENDOR_ID_APPLE 0x106B
#define PCI_VENDOR_ID_SIS 0x1039
#define PCI_VENDOR_ID_ATI 0x1002
#define PCI_VENDOR_ID_TRANSMETA 0x1279
#define PCI_VENDOR_ID_HP 0x103C
#define PCI_VENDOR_ID_DELL 0x1028
#define PCI_VENDOR_ID_FUJITSU 0x10cf
#define PCI_VENDOR_ID_ASUS 0x1043
#define PCI_VENDOR_ID_MSI 0x1462
#define PCI_VENDOR_ID_FOXCONN 0x105B
#define PCI_VENDOR_ID_ECS 0x1019
#define PCI_VENDOR_ID_DFI_1 0x106E
#define PCI_VENDOR_ID_TOSHIBA 0x1179
#define PCI_VENDOR_ID_DFI_2 0x15BD
#define PCI_VENDOR_ID_ACER 0x1025
#define PCI_VENDOR_ID_GIGABYTE 0x1458
#define PCI_VENDOR_ID_EVGA 0x3842
#define PCI_VENDOR_ID_BROADCOM 0x1166
#define PCI_VENDOR_ID_SUPERMICRO 0x15D9
#define PCI_VENDOR_ID_BIOSTAR 0x1565
#define PCI_VENDOR_ID_XFX 0x1682
#define PCI_VENDOR_ID_PCPARTNER 0x19DA
#define PCI_VENDOR_ID_LENOVO 0x17AA
#define PCI_VENDOR_ID_FSC 0x1734
#define PCI_VENDOR_ID_FTS 0x1734
#define PCI_VENDOR_ID_COLORFUL 0x7377
#define PCI_VENDOR_ID_ASROCK 0x1849
#define PCI_VENDOR_ID_SHUTTLE 0x1297
#define PCI_VENDOR_ID_CLEVO 0x1558
#define PCI_VENDOR_ID_PEGATRON 0x1B0A
#define PCI_VENDOR_ID_JETWAY 0x16F3
#define PCI_VENDOR_ID_HIGHGRADE 0x1C6C
#define PCI_VENDOR_ID_GALAXY 0x1B4C
#define PCI_VENDOR_ID_ZOTAC 0x19DA
#define PCI_VENDOR_ID_ARIMA 0x161F
#define PCI_VENDOR_ID_PC_PARTNER 0x174B
#define PCI_VENDOR_ID_APM 0x10E8
#define PCI_VENDOR_ID_IBM 0x1014
#define PCI_VENDOR_ID_NZXT 0x1D96
#define PCI_VENDOR_ID_MARVELL 0x177D
#define PCI_VENDOR_ID_REDHAT 0x1B36
#define PCI_VENDOR_ID_AMPERE 0x1DEF
#define PCI_VENDOR_ID_HUAWEI 0x19E5
#define PCI_VENDOR_ID_MELLANOX 0x15B3
#define PCI_VENDOR_ID_AMAZON 0x1D0F
#define PCI_VENDOR_ID_CADENCE 0x17CD
#define PCI_VENDOR_ID_ARM 0x13B5
#define PCI_VENDOR_ID_HYGON 0x1D94
#define PCI_VENDOR_ID_ALIBABA 0x1DED
#define NV_PCI_DEVID_DEVICE 31:16 /* RW--F */
#define NV_PCI_SUBID_DEVICE 31:16 /* RW--F */
///////////////////////////////////////////////////////////////////////////////////////////
//
// GPU DEVICE IDS
//
///////////////////////////////////////////////////////////////////////////////////////////
#define NV_PCI_DEVID_DEVICE_PG171_SKU200_PG179_SKU220 0x25B6 /* NVIDIA A16 / NVIDIA A2 */
#define NV_PCI_DEVID_DEVICE_PG189_SKU600 0x1EBA
///////////////////////////////////////////////////////////////////////////////////////////
//
// SUBDEVICE IDs
//
///////////////////////////////////////////////////////////////////////////////////////////
// A16
#define NV_PCI_SUBID_DEVICE_PG171_SKU200 0x14A9
///////////////////////////////////////////////////////////////////////////////////////////
//
// CHIPSET IDs
//
///////////////////////////////////////////////////////////////////////////////////////////
// Desktop flavor of X58
#define X58_DESKTOP_DEVIDS 0x3400, 0x3405
// Mobile version of X58
#define X58_MOBILE_DEVID 0x3405
#define X58_MOBILE_CLEVO_7200_SSDEVID 0x7200
// Sandy bridge CLEVO platform
#define SANDYBRIDGE_P180HM_SSDEVID 0x8000
#define SandyBridge_E_X79_P270WM_SSDEVID 0x270
#define IvyBridge_Z75_P370EM_SSDEVID 0x371
// Device ID's of Devices present on Patsburg's PCIE bus.
#define PATSBURG_PCIE_DEVICE_MIN_DEVID 0x1D10
#define PATSBURG_PCIE_DEVICE_MAX_DEVID 0x1D1F
#define PATSBURG_PCIE_DEVICE_DEVID 0x244E
// Tylersburg Configurations
#define TYLERSBURG_DEVID 0x3406
// Intel Grantsdale definitions
#define DEVICE_ID_INTEL_2580_HOST_BRIDGE 0x2580
#define DEVICE_ID_INTEL_2581_ROOT_PORT 0x2581
// Intel Alderwood definitions
#define DEVICE_ID_INTEL_2584_HOST_BRIDGE 0x2584
#define DEVICE_ID_INTEL_2585_ROOT_PORT 0x2585
// Intel Alviso definitions
#define DEVICE_ID_INTEL_2590_HOST_BRIDGE 0x2590
#define DEVICE_ID_INTEL_2591_ROOT_PORT 0x2591
// Intel Tumwater definitions
#define DEVICE_ID_INTEL_359E_HOST_BRIDGE 0x359E
#define DEVICE_ID_INTEL_3597_ROOT_PORT 0x3597
// Intel Stoakley definitions
#define INTEL_4000_SUBDEVICE_ID 0x021D
// Intel SkullTrail definitions
#define INTEL_4003_SUBDEVICE_ID 0x5358
// Intel Core I7 CPU
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I7 0x2C01
// Intel Core I5 CPU Lynnfield
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_L 0x2C81
#define INTEL_LYNNFIELD_ROOTPORT_CPU1 0xD138
#define INTEL_LYNNFIELD_ROOTPORT_CPU2 0xD13A
// Intel Core I5 CPU Auburndale
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_A 0x2D41
// Intel Core I5 CPU 650
#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_6 0x2D01
// Intel Poulsbo definitions
#define DEVICE_ID_INTEL_8100_HOST_BRIDGE 0x8100
#define DEVICE_ID_INTEL_8110_ROOT_PORT 0x8110
#define DEVICE_ID_INTEL_8112_ROOT_PORT 0x8112
// Intel TunnelCreek definitions
#define DEVICE_ID_INTEL_8180_ROOT_PORT 0x8180
#define DEVICE_ID_INTEL_8181_ROOT_PORT 0x8181
#define DEVICE_ID_INTEL_8184_ROOT_PORT 0x8184
#define DEVICE_ID_INTEL_8185_ROOT_PORT 0x8185
// Intel I/O Hub definitions
#define DEVICE_ID_INTEL_3408_ROOT_PORT 0x3408
#define DEVICE_ID_INTEL_3411_ROOT_PORT 0x3411
#define DEVICE_ID_INTEL_3420_ROOT_PORT 0x3420
#define DEVICE_ID_INTEL_3421_ROOT_PORT 0x3421
// Intel SandyBridge IIO definitions
#define DEVICE_ID_INTEL_3C02_ROOT_PORT 0x3c02
#define DEVICE_ID_INTEL_3C03_ROOT_PORT 0x3c03
#define DEVICE_ID_INTEL_3C04_ROOT_PORT 0x3c04
#define DEVICE_ID_INTEL_3C05_ROOT_PORT 0x3c05
#define DEVICE_ID_INTEL_3C06_ROOT_PORT 0x3c06
#define DEVICE_ID_INTEL_3C07_ROOT_PORT 0x3c07
#define DEVICE_ID_INTEL_3C08_ROOT_PORT 0x3c08
#define DEVICE_ID_INTEL_3C09_ROOT_PORT 0x3c09
#define DEVICE_ID_INTEL_3C0A_ROOT_PORT 0x3c0a
#define DEVICE_ID_INTEL_3C0B_ROOT_PORT 0x3c0b
// Intel Haswell-E definitions
#define DEVICE_ID_INTEL_2F00_HOST_BRIDGE 0x2f00
#define DEVICE_ID_INTEL_2F01_ROOT_PORT 0x2f01
#define DEVICE_ID_INTEL_2F02_ROOT_PORT 0x2f02
#define DEVICE_ID_INTEL_2F03_ROOT_PORT 0x2f03
#define DEVICE_ID_INTEL_2F04_ROOT_PORT 0x2f04
#define DEVICE_ID_INTEL_2F05_ROOT_PORT 0x2f05
#define DEVICE_ID_INTEL_2F06_ROOT_PORT 0x2f06
#define DEVICE_ID_INTEL_2F07_ROOT_PORT 0x2f07
#define DEVICE_ID_INTEL_2F08_ROOT_PORT 0x2f08
#define DEVICE_ID_INTEL_2F09_ROOT_PORT 0x2f09
#define DEVICE_ID_INTEL_2F0A_ROOT_PORT 0x2f0a
#define DEVICE_ID_INTEL_2F0B_ROOT_PORT 0x2f0b
#define DEVICE_ID_INTEL_0C01_ROOT_PORT 0x0c01
// Intel IvyTown definitions
#define DEVICE_ID_INTEL_0E02_ROOT_PORT 0x0e02
#define DEVICE_ID_INTEL_0E03_ROOT_PORT 0x0e03
#define DEVICE_ID_INTEL_0E04_ROOT_PORT 0x0e04
#define DEVICE_ID_INTEL_0E05_ROOT_PORT 0x0e05
#define DEVICE_ID_INTEL_0E06_ROOT_PORT 0x0e06
#define DEVICE_ID_INTEL_0E07_ROOT_PORT 0x0e07
#define DEVICE_ID_INTEL_0E08_ROOT_PORT 0x0e08
#define DEVICE_ID_INTEL_0E09_ROOT_PORT 0x0e09
#define DEVICE_ID_INTEL_0E0A_ROOT_PORT 0x0e0a
#define DEVICE_ID_INTEL_0E0B_ROOT_PORT 0x0e0b
// Intel Ivy Bridge E definitions
#define DEVICE_ID_INTEL_0E00_HOST_BRIDGE 0x0E00
// Intel Haswell definitions
#define DEVICE_ID_INTEL_0C00_HASWELL_HOST_BRIDGE 0x0C00
#define DEVICE_ID_INTEL_0C04_HASWELL_HOST_BRIDGE 0x0C04
// Intel PCH definitions
#define DEVICE_ID_INTEL_9D10_PCH_BRIDGE 0x9d10
#define DEVICE_ID_INTEL_9D18_PCH_BRIDGE 0x9d18
#define DEVICE_ID_INTEL_A117_PCH_BRIDGE 0xa117
#define DEVICE_ID_INTEL_A118_PCH_BRIDGE 0xa118
#define DEVICE_ID_INTEL_9C98_PCH_BRIDGE 0x9c98
// Intel Broadwell definitions
#define DEVICE_ID_INTEL_6F00_HOST_BRIDGE 0x6f00
#define DEVICE_ID_INTEL_6F01_ROOT_PORT 0x6f01
#define DEVICE_ID_INTEL_6F02_ROOT_PORT 0x6f02
#define DEVICE_ID_INTEL_6F03_ROOT_PORT 0x6f03
#define DEVICE_ID_INTEL_6F04_ROOT_PORT 0x6f04
#define DEVICE_ID_INTEL_6F05_ROOT_PORT 0x6f05
#define DEVICE_ID_INTEL_6F06_ROOT_PORT 0x6f06
#define DEVICE_ID_INTEL_6F07_ROOT_PORT 0x6f07
#define DEVICE_ID_INTEL_6F08_ROOT_PORT 0x6f08
#define DEVICE_ID_INTEL_6F09_ROOT_PORT 0x6f09
#define DEVICE_ID_INTEL_6F0A_ROOT_PORT 0x6f0A
#define DEVICE_ID_INTEL_6F0B_ROOT_PORT 0x6f0B
#define DEVICE_ID_INTEL_1601_ROOT_PORT 0x1601
#define DEVICE_ID_INTEL_1605_ROOT_PORT 0x1605
#define DEVICE_ID_INTEL_1609_ROOT_PORT 0x1609
#define DEVICE_ID_INTEL_BROADWELL_U_HOST_BRIDGE 0x1604
#define DEVICE_ID_INTEL_BROADWELL_H_HOST_BRIDGE 0x1614
// Intel Skylake definitions
#define DEVICE_ID_INTEL_1901_ROOT_PORT 0x1901
#define DEVICE_ID_INTEL_1905_ROOT_PORT 0x1905
#define DEVICE_ID_INTEL_1909_ROOT_PORT 0x1909
#define DEVICE_ID_INTEL_SKYLAKE_U_HOST_BRIDGE 0x1904
#define DEVICE_ID_INTEL_SKYLAKE_S_HOST_BRIDGE 0x191F
#define DEVICE_ID_INTEL_SKYLAKE_H_HOST_BRIDGE 0x1910
// Intel Skylake-E definitions
#define DEVICE_ID_INTEL_2030_ROOT_PORT 0x2030
#define DEVICE_ID_INTEL_2033_ROOT_PORT 0x2033
// Intel Kabylake definitions
#define DEVICE_ID_INTEL_KABYLAKE_U_HOST_BRIDGE 0x5904
#define DEVICE_ID_INTEL_KABYLAKE_H_HOST_BRIDGE 0x5910
// AMD Matisse, Rome definitions
#define DEVICE_ID_AMD_1483_ROOT_PORT 0x1483
// AMD Castle Peak definition
#define DEVICE_ID_AMD_1480_ROOT_PORT 0x1480
// AMD Renoir-H definition
#define DEVICE_ID_AMD_1630_ROOT_PORT 0x1630
// Dell SkullTrail definitions
#define DELL_4003_SUBDEVICE_ID 0x021D
// Dell Quicksilver MLK definitions
#define DELL_0040_SUBDEVICE_ID 0x043a
// HP Tylersburg definitions
#define TYLERSBURG_Z800_SSDEVID 0x130B
// HP Romley definitions
#define ROMLEY_Z820_SSDEVID 0x158B
#define ROMLEY_Z620_SSDEVID 0x158A
#define ROMLEY_Z420_SSDEVID 0x1589
// HP Grantley definitions
#define GRANTLEY_Z840_SSDEVID 0x2129
#define GRANTLEY_Z640_SSDEVID 0x212A
#define GRANTLEY_Z440_SSDEVID 0x212B
// HP PURELY definitions
#define HP_QUADRO_Z4GEN4_DEVID 0xA2D2
#define PURLEY_Z8GEN4_SSDEVID 0x81C7
#define PURLEY_Z6GEN4_SSDEVID 0x81C6
#define PURLEY_Z4GEN4_SSDEVID 0x81C5
// Lenovo Romley definitions
#define ROMLEY_C30_SSDEVID 0x1028
#define ROMLEY_D30_SSDEVID 0x1027
#define ROMLEY_S30_SSDEVID 0x1026
// Dell Romley definitions
#define ROMLEY_T7600_SSDEVID 0x0495
#define ROMLEY_T5600_SSDEVID 0x0496
#define ROMLEY_T3600_SSDEVID 0x0497
// Dell Romley + IVB-EP CPU Refresh
#define IVYTOWN_T7610_SSDEVID 0x05D4
#define IVYTOWN_T5610_SSDEVID 0x05D3
// Dell Romley (Ipanema)
#define ROMLEY_R7610_SSDEVID 0x05A1
// FTS Romley definitions
#define ROMLEY_R920_SSDEVID 0x11B6
// Lenovo Grantley (Messi, Pele, Ronaldo)
#define GRANTLEY_V40_SSDEVID 0x1031
#define GRANTLEY_D40_SSDEVID 0x1030
#define GRANTLEY_S40_SSDEVID 0x102F
// Dell Grantley (Avalon)
#define GRANTLEY_T7810_SSDEVID 0x0618
#define GRANTLEY_T7910_SSDEVID 0x0619
// Lenovo Purley (Nile, Volga)
#define PURLEY_P920_SSDEVID 0x1038
#define PURLEY_P720_SSDEVID 0x1037
#define PURLEY_P520_SSDEVID 0x1036
// Lenovo P520c
#define LENOVO_P520C_SSDEVID 0x103C
// Dell Purley(Matira)
#define PURLEY_MATIRA3X_DEVID 0xA2D2
#define PURLEY_MATIRA3X_SSDEVID 0x08B1
#define PURLEY_MATIRA3_SSDEVID 0x0738
#define PURLEY_MATIRA5_SSDEVID 0x0739
#define PURLEY_MATIRA7_SSDEVID 0x073A
//FTS Grantley
#define GRANTLEY_R940_SSDEVID 0x1201
//FTS Purley
#define PURLEY_R970_SSDEVID 0x1230
#define PURLEY_M770_SSDEVID 0x1231
// HP Arrandale, Clarksfield, X58 workstation definitions
#define ARRANDALE_Z200SFF_SSDEVID 0x304A
#define CLARKSFIELD_Z200_SSDEVID 0x170B
#define X58_Z400_SSDEVID 0x1309
// GIGABYTE Sniper 3 (Z77)
#define GIGABYTE_SNIPER_3_SSDEVID_1 0x5000
#define GIGABYTE_SNIPER_3_SSDEVID_2 0x5001
// Supermicro Quadro VCA definitions
#define SUPERMICRO_QUADRO_VCA_DEVID 0x8D44
#define SUPERMICRO_QUADRO_VCA_SSDEVID 0x7270
// Supermicro SYS-4027GR-TRT
#define SUPERMICRO_SYS_4027GR_TRT_DEVID 0x1D41
#define SUPERMICRO_SYS_4027GR_TRT_SSDEVID 0x0732
// Supermicro SYS-4029GP-TRT2
#define SUPERMICRO_SYS_4029GP_TRT2_DEVID 0xA1C2
#define SUPERMICRO_SYS_4029GP_TRT2_SSDEVID 0x7270
// Asus Quadro BOXX definitions
#define ASUS_QUADRO_BOXX_DEVID 0x8D44
#define ASUS_QUADRO_BOXX_SSDEVID 0x85F6
// APEXX8 Quadro BOXX definitions
#define APEXX8_QUADRO_BOXX_DEVID 0xA2D3
#define APEXX8_QUADRO_BOXX_SSDEVID 0x098e
// APEXX5 Quadro BOXX definitions
#define APEXX5_QUADRO_BOXX_DEVID 0xA2D3
#define APEXX5_QUADRO_BOXX_SSDEVID 0x1000
// ASUS X99-E-10G
#define ASUS_X99_E_10G_SSDEVID 0x8600
// VIA definitions
#define DEVICE_ID_VIA_VT8369B_HOST_BRIDGE 0x0308
// Foxconn Einstein 64 [8086:a1c1][105b:7270]
#define FOXCONN_EINSTEIN_64_DEVID 0xA1C1
#define FOXCONN_EINSTEIN_64_SSDEVID 0x7270
// Tyan Workstation
#define TYAN_B7100_DEVID 0xA1C1
#define TYAN_B7100_SSDEVID 0x7270
// ESC 4000 Series Workstation
#define ESC_4000_G4_DEVID 0xA1C1
#define ESC_4000_G4_SSDEVID 0x871E
// NVIDIA C51
#define NVIDIA_C51_DEVICE_ID_MIN 0x2F0
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_0 0x2F0
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_1 0x2F1
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_2 0x2F2
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_3 0x2F3
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_0 0x2F4
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_1 0x2F5
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_2 0x2F6
#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_3 0x2F7
#define NVIDIA_C51_DEVICE_ID_MAX 0x2F7
// NVIDIA MCP55
#define NVIDIA_MCP55_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0369
// NVIDIA MCP61
#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x03EA
#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_PA 0x03E2
// NVIDIA C55
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_PRO 0x03A0
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_PRO 0x03A0
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLIX16 0x03A1
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLI 0x03A3
#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_U 0x03A2
// NVIDIA MCP65
#define NVIDIA_MCP65_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0444
// NVIDIA MCP67/MCP68
#define NVIDIA_MCP67_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0547
// NVIDIA MCP73
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_PV 0x07C0
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_O 0x07C1
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_S 0x07C2
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_V 0x07C3
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_0 0x07C4
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_1 0x07C5
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_2 0x07C6
#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_D 0x07C7
// NVIDIA C73
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI2 0x0800
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI_ALL 0x0801
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLIX8 0x0802
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_U 0x0803
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_0 0x0804
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_1 0x0805
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_2 0x0806
#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_3 0x0807
// NVIDIA MCP77/78
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0754
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_1 0x0755
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_2 0x0756
#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_3 0x0757
#define NVIDIA_MCP77_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0752
// NVIDIA MCP79/7A
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_DEFAULT 0x0A80
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLIX16 0x0A81
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLI 0x0A82
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_U 0x0A83
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GM 0x0A84
#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GVM 0x0A85
#define NVIDIA_MCP79_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0AA2
// NVIDIA MCP89/P83
#define NVIDIA_MCP89_CPU_PCI_0_DEVICE_ID_DEFAULT 0x00000D60
///////////////////////////////////////////////////////////////////////////////////////////
//
// enumeration of chipset families
//
///////////////////////////////////////////////////////////////////////////////////////////
//
// When adding a variable to the following enum, please
// add it also to the following chipset_names[].
//
enum {
CS_UNKNOWN = 0x0000,
CS_UNKNOWN_PCIE = 0x1000
, CS_INTEL_2580
, CS_INTEL_2584
, CS_INTEL_2588
, CS_INTEL_2590
, CS_INTEL_25E0
, CS_INTEL_29X0
, CS_INTEL_29E0
, CS_INTEL_359E
, CS_INTEL_4000
, CS_INTEL_4003
, CS_INTEL_3400
, CS_INTEL_3B42
, CS_INTEL_2770
, CS_INTEL_2774
, CS_INTEL_277C
, CS_INTEL_2A40
, CS_INTEL_2E00
, CS_INTEL_0040
, CS_INTEL_1C10
, CS_INTEL_1C46
, CS_INTEL_1C49
, CS_INTEL_1D40
, CS_INTEL_8D47
, CS_INTEL_1E10
, CS_INTEL_8C4B
, CS_INTEL_8CC4
, CS_INTEL_A145
, CS_INTEL_A2C5
, CS_INTEL_A242
, CS_INTEL_A2D2
, CS_INTEL_A2C9
, CS_INTEL_A301
, CS_INTEL_0685
, CS_INTEL_4381
, CS_INTEL_7A82
, CS_NVIDIA_CK804
, CS_NVIDIA_C19
, CS_NVIDIA_C51
, CS_NVIDIA_MCP55
, CS_NVIDIA_MCP61
, CS_NVIDIA_C55
, CS_NVIDIA_MCP65
, CS_NVIDIA_MCP67
, CS_NVIDIA_MCP73
, CS_NVIDIA_C73
, CS_NVIDIA_MCP77
, CS_NVIDIA_MCP79
, CS_NVIDIA_MCP89
, CS_NVIDIA_TEGRA3
, CS_SIS_649
, CS_SIS_656
, CS_ATI_RS400
, CS_ATI_RS400_A21
, CS_ATI_RS480
, CS_ATI_RS480_A21
, CS_AMD_RS780
, CS_VIA_VT8369B
, CS_ATI_FX790
, CS_ATI_RD850
, CS_ATI_RD870
, CS_ATI_RD890
, CS_ATI_FX890
, CS_ATI_RX780
, CS_ATI_FX990
, CS_AMD_GX890
, CS_AMD_X370
, CS_VIA_VX900
, CS_APM_STORM
, CS_IBM_VENICE
, CS_NVIDIA_T124
, CS_NVIDIA_T210
, CS_NVIDIA_T186
, CS_NVIDIA_T194
, CS_NVIDIA_T234
, CS_NVIDIA_T23x
, CS_MARVELL_THUNDERX2
, CS_REDHAT_QEMU
, CS_AMPERE_EMAG
, CS_HUAWEI_KUNPENG920
, CS_MELLANOX_BLUEFIELD
, CS_AMAZON_GRAVITRON2
, CS_FUJITSU_A64FX
, CS_AMPERE_ALTRA
, CS_ARM_NEOVERSEN1
, CS_MARVELL_OCTEON_CN96XX
, CS_MARVELL_OCTEON_CN98XX
, CS_INTEL_C620
, CS_HYGON_C86
, CS_PHYTIUM_S2500
, CS_MELLANOX_BLUEFIELD2
, CS_ALIBABA_YITIAN
, CS_INTEL_1B81
, CS_INTEL_18DC
, CS_INTEL_7A04
, CS_MAX_PCIE
};
enum {
RP_UNKNOWN = 0
, RP_BROADCOM_HT2100
, RP_INTEL_2581
, RP_INTEL_2585
, RP_INTEL_2589
, RP_INTEL_2591
, RP_INTEL_3597
, RP_INTEL_2775
, RP_INTEL_2771
, RP_INTEL_8110
, RP_INTEL_8112
, RP_INTEL_8180
, RP_INTEL_8181
, RP_INTEL_8184
, RP_INTEL_8185
, RP_INTEL_3C02
, RP_INTEL_3C03
, RP_INTEL_3C04
, RP_INTEL_3C05
, RP_INTEL_3C06
, RP_INTEL_3C07
, RP_INTEL_3C08
, RP_INTEL_3C09
, RP_INTEL_3C0A
, RP_INTEL_3C0B
, RP_INTEL_2F04
, RP_INTEL_2F08
, RP_INTEL_0C01
, RP_INTEL_1601
, RP_INTEL_1605
, RP_INTEL_1609
, RP_INTEL_1901
, RP_INTEL_1905
, RP_INTEL_1909
, RP_INTEL_5904
, RP_NVIDIA_CK804
, RP_NVIDIA_C19
, RP_NVIDIA_C51
, RP_NVIDIA_MCP55
, RP_NVIDIA_MCP61
, RP_NVIDIA_C55
, RP_NVIDIA_MCP65
};
#endif //NVDEVID_H

View File

@@ -24,6 +24,8 @@
#ifndef NVPCIE_H
#define NVPCIE_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Private PCI Express related defines and structures. *
@@ -53,6 +55,8 @@
#define PCI_INVALID_SUBVENDORID 0xFFFF
#define PCI_INVALID_SUBDEVICEID 0xFFFF
#define PCI_IS_VENDORID_VALID(id) (((id) != 0x0000) && ((id) != 0xFFFF))
#define PCI_CLASS_BRIDGE_DEV 0x06
#define PCI_SUBCLASS_BR_HOST 0x00
#define PCI_MULTIFUNCTION 0x80
@@ -470,7 +474,8 @@ typedef struct
CL_PCIE_DC_CAPABILITY_MAP_ENTRY entries[PCI_MAX_CAPS];
} CL_PCIE_DC_CAPABILITY_MAP;
struct OBJCL;
typedef struct OBJCL OBJCL;
typedef struct OBJGPU OBJGPU;
// root port setup functions
NV_STATUS Broadcom_HT2100_setupFunc(OBJGPU *, OBJCL*);
@@ -494,6 +499,6 @@ NV_STATUS AMD_RP1630_setupFunc(OBJGPU *, OBJCL*);
NV_STATUS AMD_RP1483_setupFunc(OBJGPU *, OBJCL*);
// Determines if the GPU is in a multi-GPU board based on devid checks
NvBool gpuIsMultiGpuBoard(OBJGPU *, NvBool *);
NvBool gpuIsMultiGpuBoard(OBJGPU *);
#endif // NVPCIE_H

View File

@@ -112,6 +112,10 @@
* Changes to the define needs to be reflected in path [1]
* For new Falcon Id adding, we need to append to the end;
* don't insert the new falcon Id in the middle.
*
* @note If a newly added Falcon has multiple instances sharing
* the same Falcon Id, the LSF_FALCON_USES_INSTANCE macro
* needs to be updated.
*/
#define LSF_FALCON_ID_PMU (0U)
#define LSF_FALCON_ID_DPU (1U)
@@ -136,12 +140,15 @@
#define LSF_FALCON_ID_NVDEC_RISCV (18U)
#define LSF_FALCON_ID_NVDEC_RISCV_EB (19U)
#define LSF_FALCON_ID_NVJPG (20U)
#define LSF_FALCON_ID_END (21U)
#define LSF_FALCON_ID_FECS_RISCV (21U)
#define LSF_FALCON_ID_GPCCS_RISCV (22U)
#define LSF_FALCON_ID_NVJPG_RISCV_EB (23U)
#define LSF_FALCON_ID_END (24U)
#define LSF_FALCON_ID_INVALID (0xFFFFFFFFU)
//
// ************************ NOTIFICATION *********************************
// ************************ NOTIFICATION *********************************
// In case anyone needs to add a new LSF falconId, please recalculate the
// WPR header size per LSF_FALCON_ID_END. RM needs to call lsfmGetWprHeaderSizeMax_HAL
// to align with acrReadSubWprHeader_HAL in ACR. Otherwise, ACR can't get correct
@@ -162,7 +169,14 @@
#define LSF_FALCON_INSTANCE_INVALID (0xFFFFFFFFU)
#define LSF_FALCON_INDEX_MASK_DEFAULT_0 (0x0)
/*!
* Checks if the LSF Falcon specified by falconId uses a falconInstance to uniquely identify itself.
* Some Falcons (eg: NVENC) use separate FalconId for each instance while some (eg: NVJPG)
* share the same falconId across all instances of that engine. Those engines require a falconInstance
* to uniquely identify it.
* @note this macro should be updated as needed whenever LSF_FALCON_ID* defines are added. See Bug: 3833461
*/
#define LSF_FALCON_USES_INSTANCE(falconId) ((falconId == LSF_FALCON_ID_NVDEC_RISCV_EB) || (falconId == LSF_FALCON_ID_NVJPG) || (falconId == LSF_FALCON_ID_NVJPG_RISCV_EB))
/*!
* Size in entries of the ucode descriptor's dependency map.
@@ -358,8 +372,8 @@ typedef struct
// The PMU supports the ACR task on GM20X_thru_VOLTA profiles only.
// In order to prevent LSF_FALCON_ID_END changes to affect older / shipped PMU ucodes (increase of DMEM footprint)
// adding PMU specific ***_END define capturing value covering all PMU profiles that this with the ACR task.
//
#define LSF_FALCON_ID_END_PMU (LSF_FALCON_ID_FBFALCON + 1)
//
#define LSF_FALCON_ID_END_PMU (LSF_FALCON_ID_FBFALCON + 1)
#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX_PMU (NV_ALIGN_UP((sizeof(LSF_WPR_HEADER) * LSF_FALCON_ID_END_PMU), LSF_WPR_HEADER_ALIGNMENT))
// Maximum SUB WPR header size

View File

@@ -104,7 +104,7 @@ typedef struct nv_ioctl_rm_api_version
#define NV_RM_API_VERSION_CMD_STRICT 0
#define NV_RM_API_VERSION_CMD_RELAXED '1'
#define NV_RM_API_VERSION_CMD_OVERRIDE '2'
#define NV_RM_API_VERSION_CMD_QUERY '2'
#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0
#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1

View File

@@ -699,6 +699,22 @@
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableResizableBar
*
* Description:
*
* When this option is enabled, the NVIDIA driver will attempt to resize
* BAR1 to match framebuffer size, or the next largest available size on
* supported machines. This is currently only implemented for Linux.
*
* Possible values:
* 0 - Do not enable PCI BAR resizing
* 1 - Enable PCI BAR resizing
*/
#define __NV_ENABLE_RESIZABLE_BAR EnableResizableBar
#define NV_REG_ENABLE_RESIZABLE_BAR NV_REG_STRING(__NV_ENABLE_RESIZABLE_BAR)
/*
* Option: EnableGpuFirmware
*
@@ -825,6 +841,26 @@
#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001
/*
* Option: NVreg_RmNvlinkBandwidth
*
* Description:
*
* This option allows user to reduce the NVLINK P2P bandwidth to save power.
* The option is in the string format.
*
* Possible string values:
* OFF: 0% bandwidth
* MIN: 15%-25% bandwidth depending on the system's NVLink topology
* HALF: 50% bandwidth
* 3QUARTER: 75% bandwidth
* FULL: 100% bandwidth (default)
*
* This option is only for Hopper+ GPU with NVLINK version 4.0.
*/
#define __NV_RM_NVLINK_BW RmNvlinkBandwidth
#define NV_RM_NVLINK_BW NV_REG_STRING(__NV_RM_NVLINK_BW)
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
@@ -861,6 +897,7 @@ NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
@@ -870,6 +907,7 @@ NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL);
/*
*----------------registry database definition----------------------
@@ -910,6 +948,7 @@ nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_RESIZABLE_BAR),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),

View File

@@ -315,6 +315,7 @@ typedef enum
NV_SOC_IRQ_DPAUX_TYPE,
NV_SOC_IRQ_GPIO_TYPE,
NV_SOC_IRQ_HDACODEC_TYPE,
NV_SOC_IRQ_TCPC2DISP_TYPE,
NV_SOC_IRQ_INVALID_TYPE
} nv_soc_irq_type_t;
@@ -329,6 +330,7 @@ typedef struct nv_soc_irq_info_s {
NvU32 gpio_num;
NvU32 dpaux_instance;
} irq_data;
NvS32 ref_count;
} nv_soc_irq_info_t;
#define NV_MAX_SOC_IRQS 6
@@ -384,9 +386,11 @@ typedef struct nv_state_t
NvS32 current_soc_irq;
NvU32 num_soc_irqs;
NvU32 hdacodec_irq;
NvU32 tcpc2disp_irq;
NvU8 *soc_dcb_blob;
NvU32 soc_dcb_size;
NvU32 disp_sw_soc_chip_id;
NvBool soc_is_dpalt_mode_supported;
NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
NvU32 igpu_nonstall_irq;
@@ -649,7 +653,8 @@ static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->fb) && (offset >= nv->fb->cpu_address) &&
return ((nv->fb) && (nv->fb->size != 0) &&
(offset >= nv->fb->cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}
@@ -739,7 +744,7 @@ nv_state_t* NV_API_CALL nv_get_ctl_state (void);
void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 );
NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *);
NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **);
@@ -915,7 +920,6 @@ NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *
void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *);
char* NV_API_CALL rm_remove_spaces (const char *);
char* NV_API_CALL rm_string_token (char **, const char);
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *);
@@ -985,11 +989,12 @@ const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *,
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
NvBool NV_API_CALL rm_is_altstack_in_use(void);
/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);

View File

@@ -181,7 +181,6 @@ NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);

View File

@@ -39,13 +39,6 @@ OSDbgBreakpointEnabled osDbgBreakpointEnabled;
void* osGetStereoDongleInterface(void);
OSCallACPI_DSM osCallACPI_DSM;
OSCallACPI_DDC osCallACPI_DDC;
OSCallACPI_NVHG_ROM osCallACPI_NVHG_ROM;
OSCallACPI_DOD osCallACPI_DOD;
OSCallACPI_MXDS osCallACPI_MXDS;
OSCallACPI_MXDM osCallACPI_MXDM;
#if defined(NVCPU_X86_64)
OSnv_rdcr4 nv_rdcr4;
NvU64 nv_rdcr3(OBJOS *);

View File

@@ -0,0 +1,210 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <rmconfig.h>
#include <nvlog_inc.h>
#include <nv.h>
#include <nv-priv.h>
#include <nv-reg.h>
#include <nv_ref.h>
#include <osapi.h>
#include <gpu/mem_mgr/mem_mgr.h>
#include <core/locks.h>
#include "kernel/gpu/intr/intr.h"
//
// Select the fixed FBSR mode mask to support multiple variants such as
// GCOFF and CUDA S3/resume.
//
static void
RmUpdateFixedFbsrModes(OBJGPU *pGpu)
{
    nv_state_t    *nv             = NV_GET_NV_STATE(pGpu);
    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);

    // GCOFF entry takes priority: use the DMA FBSR type.
    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_GCOFF_STATE_ENTERING))
    {
        pMemoryManager->fixedFbsrModesMask = NVBIT(FBSR_TYPE_DMA);
        return;
    }

    // Otherwise, when video memory allocations must be preserved,
    // use the file-backed FBSR type.
    if (nv->preserve_vidmem_allocations)
    {
        pMemoryManager->fixedFbsrModesMask = NVBIT(FBSR_TYPE_FILE);
    }
}
//
// Perform the requested power-management transition on one GPU.
//
// For HIBERNATE/STANDBY: saves the current interrupt-enable state, disables
// interrupts, suppresses broadcast (BC) mode around the gpuEnter* call, then
// restores BC state. For RESUME: resumes from whichever state was recorded
// in nvp->pm_state.InHibernate, restores the saved interrupt-enable state,
// and (on mobile systems) re-reports the current AC/battery power source.
// Returns NV_ERR_INVALID_ARGUMENT for an unrecognized pmAction.
//
static NV_STATUS
RmPowerManagementInternal(
    OBJGPU *pGpu,
    nv_pm_action_t pmAction
)
{
    // default to NV_OK. there may be cases where resman is loaded, but
    // no devices are allocated (we're still at the console). in these
    // cases, it's fine to let the system do whatever it wants.
    NV_STATUS rmStatus = NV_OK;

    if (pGpu)
    {
        nv_state_t *nv = NV_GET_NV_STATE(pGpu);
        nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
        // Save BC status so it can be restored after the transition.
        NvBool bcState = gpumgrGetBcEnabledStatus(pGpu);
        Intr *pIntr = GPU_GET_INTR(pGpu);
        MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);

        if ((pmAction == NV_PM_ACTION_HIBERNATE) || (pmAction == NV_PM_ACTION_STANDBY))
        {
            //
            // pFb object stores the FBSR mode through which FB state unload has
            // happened, so the os layer doesn't need to set FBSR mode on resume.
            //
            RmUpdateFixedFbsrModes(pGpu);
        }

        switch (pmAction)
        {
            case NV_PM_ACTION_HIBERNATE:
                nvp->pm_state.InHibernate = NV_TRUE;
                // Remember interrupt-enable state so RESUME can restore it.
                nvp->pm_state.IntrEn = intrGetIntrEn(pIntr);
                intrSetIntrEn(pIntr, INTERRUPT_TYPE_DISABLED);
                gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);

                rmStatus = gpuEnterHibernate(pGpu);
                gpumgrSetBcEnabledStatus(pGpu, bcState);

                break;

            case NV_PM_ACTION_STANDBY:
                nvp->pm_state.InHibernate = NV_FALSE;
                // Remember interrupt-enable state so RESUME can restore it.
                nvp->pm_state.IntrEn = intrGetIntrEn(pIntr);
                intrSetIntrEn(pIntr, INTERRUPT_TYPE_DISABLED);
                gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);

                rmStatus = gpuEnterStandby(pGpu);
                gpumgrSetBcEnabledStatus(pGpu, bcState);

                break;

            case NV_PM_ACTION_RESUME:
                gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
                // Resume path must match the state recorded at suspend time.
                if (nvp->pm_state.InHibernate)
                {
                    gpuResumeFromHibernate(pGpu);
                }
                else
                {
                    gpuResumeFromStandby(pGpu);
                }
                intrSetIntrEn(pIntr, nvp->pm_state.IntrEn);
                gpumgrSetBcEnabledStatus(pGpu, bcState);

                NvU32 ac_plugged = 0;
                if (IsMobile(pGpu))
                {
                    if (nv_acpi_get_powersource(&ac_plugged) == NV_OK)
                    {
                        //
                        // As we have already acquired API and GPU lock here, we are
                        // directly calling RmPowerSourceChangeEvent.
                        //
                        RmPowerSourceChangeEvent(nv, !ac_plugged);
                    }
                }
                break;

            default:
                rmStatus = NV_ERR_INVALID_ARGUMENT;
                break;
        }

        // Clear any fixed FBSR mode selected above, on every path.
        pMemoryManager->fixedFbsrModesMask = 0;
    }
    return rmStatus;
}
//
// Dispatch a power-management action for pGpu.
// Thin wrapper that forwards directly to RmPowerManagementInternal.
//
static NV_STATUS
RmPowerManagement(
    OBJGPU *pGpu,
    nv_pm_action_t pmAction
)
{
    return RmPowerManagementInternal(pGpu, pmAction);
}
//
// OS-facing entry point for suspend/standby/hibernate/resume requests.
//
// Flushes the device work queue, then takes the API lock, a dynamic-power
// reference, and the GPUs lock (in that order) before performing the
// transition; each is released in reverse order. The work queue is flushed
// again after the transition. Returns the first failing status, or NV_OK.
//
NV_STATUS NV_API_CALL rm_power_management(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    nv_pm_action_t pmAction
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // Drain pending work items before changing power state.
    NV_ASSERT_OK(os_flush_work_queue(pNv->queue));

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
    {
        OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
        if (pGpu != NULL)
        {
            // Hold a fine-grained dynamic-power reference for the duration.
            if ((rmStatus = os_ref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE)) == NV_OK)
            {
                // LOCK: acquire GPUs lock
                if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
                {
                    {
                        rmStatus = RmPowerManagement(pGpu, pmAction);
                    }

                    // UNLOCK: release GPUs lock
                    rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
                }
                os_unref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE);
            }
        }
        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    // Drain any work queued during the transition.
    NV_ASSERT_OK(os_flush_work_queue(pNv->queue));

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

View File

@@ -70,7 +70,7 @@ static NvBool RmIsDeviceRefNeeded(NVOS54_PARAMETERS *pApi)
{
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
case NV00FD_CTRL_CMD_ATTACH_GPU:
return NV_TRUE;
default:
return NV_FALSE;
@@ -88,8 +88,8 @@ static NV_STATUS RmGetDeviceFd(NVOS54_PARAMETERS *pApi, NvS32 *pFd)
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
paramSize = sizeof(NV00FD_CTRL_ATTACH_MEM_PARAMS);
case NV00FD_CTRL_CMD_ATTACH_GPU:
paramSize = sizeof(NV00FD_CTRL_ATTACH_GPU_PARAMS);
break;
default:
return NV_ERR_INVALID_ARGUMENT;
@@ -103,8 +103,8 @@ static NV_STATUS RmGetDeviceFd(NVOS54_PARAMETERS *pApi, NvS32 *pFd)
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
*pFd = (NvS32)((NV00FD_CTRL_ATTACH_MEM_PARAMS *)pKernelParams)->devDescriptor;
case NV00FD_CTRL_CMD_ATTACH_GPU:
*pFd = (NvS32)((NV00FD_CTRL_ATTACH_GPU_PARAMS *)pKernelParams)->devDescriptor;
break;
default:
NV_ASSERT(0);

View File

@@ -83,15 +83,6 @@ NV_STATUS NV_API_CALL rm_transition_dynamic_power(
return NV_OK;
}
NV_STATUS NV_API_CALL rm_power_management(
nvidia_stack_t *sp,
nv_state_t *pNv,
nv_pm_action_t pmAction
)
{
return NV_OK;
}
const char* NV_API_CALL rm_get_vidmem_power_status(
nvidia_stack_t *sp,
nv_state_t *pNv

View File

@@ -447,11 +447,14 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
const NvU8 *pMdevUuid,
NvU32 vgpuTypeId,
NvU16 *vgpuId,
NvU32 gpuPciBdf
NvU32 gpuPciBdf,
NvBool *is_driver_vm
)
{
THREAD_STATE_NODE threadState;
OBJSYS *pSys = SYS_GET_INSTANCE();
void *fp = NULL;
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
NV_STATUS rmStatus = NV_OK;
NV_ENTER_RM_RUNTIME(sp,fp);
@@ -463,6 +466,8 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
rmStatus = kvgpumgrCreateRequestVgpu(pNv->gpu_id, pMdevUuid,
vgpuTypeId, vgpuId, gpuPciBdf);
*is_driver_vm = pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED);
// UNLOCK: release API lock
rmapiLockRelease();
}
@@ -979,6 +984,9 @@ NV_STATUS osIsVgpuVfioPresent(void)
void initVGXSpecificRegistry(OBJGPU *pGpu)
{
NvU32 data32;
osWriteRegistryDword(pGpu, NV_REG_STR_RM_POWER_FEATURES, 0x55455555);
osWriteRegistryDword(pGpu, NV_REG_STR_RM_INFOROM_DISABLE_BBX,
NV_REG_STR_RM_INFOROM_DISABLE_BBX_YES);
#if !defined(NVCPU_X86_64)
osWriteRegistryDword(pGpu, NV_REG_STR_RM_BAR2_APERTURE_SIZE_MB, 4);
#endif

View File

@@ -45,6 +45,7 @@
#include "mem_mgr/io_vaspace.h"
#include <diagnostics/journal.h>
#include "gpu/mem_mgr/mem_desc.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "core/thread_state.h"
#include <nvacpitypes.h>
#include <platform/acpi_common.h>
@@ -299,7 +300,13 @@ void* osMapKernelSpace(
offset = (Start & ~os_page_mask);
Start &= os_page_mask;
Size = ((Size + offset + ~os_page_mask) & os_page_mask);
if (!portSafeAddU64(Size, offset, &Size) ||
!portSafeAddU64(Size, ~os_page_mask, &Size))
{
return NULL;
}
Size &= os_page_mask;
ptr = os_map_kernel_space(Start, Size, Mode);
if (ptr != NULL)
@@ -892,6 +899,7 @@ NV_STATUS osAllocPagesInternal(
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
void *pMemData = NULL;
NV_STATUS status;
NvS32 nodeId = -1;
memdescSetAddress(pMemDesc, NvP64_NULL);
memdescSetMemData(pMemDesc, NULL, NULL);
@@ -923,16 +931,19 @@ NV_STATUS osAllocPagesInternal(
if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE)))
nv->force_dma32_alloc = NV_TRUE;
status = nv_alloc_pages(
NV_GET_NV_STATE(pGpu),
NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
memdescGetContiguity(pMemDesc, AT_CPU),
memdescGetCpuCacheAttrib(pMemDesc),
pSys->getProperty(pSys,
PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
unencrypted,
memdescGetPteArray(pMemDesc, AT_CPU),
&pMemData);
{
status = nv_alloc_pages(
NV_GET_NV_STATE(pGpu),
NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
memdescGetContiguity(pMemDesc, AT_CPU),
memdescGetCpuCacheAttrib(pMemDesc),
pSys->getProperty(pSys,
PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
unencrypted,
nodeId,
memdescGetPteArray(pMemDesc, AT_CPU),
&pMemData);
}
if (nv && nv->force_dma32_alloc)
nv->force_dma32_alloc = NV_FALSE;
@@ -942,7 +953,7 @@ NV_STATUS osAllocPagesInternal(
{
return status;
}
//
// If the OS layer doesn't think in RM page size, we need to inflate the
// PTE array into RM pages.
@@ -5240,3 +5251,4 @@ osDmabufIsSupported(void)
{
return os_dma_buf_enabled;
}

View File

@@ -404,7 +404,7 @@ void RmFreeUnusedClients(
NvU32 *pClientList;
NvU32 numClients, i;
NV_STATUS status;
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
//
// The 'nvfp' pointer uniquely identifies an open instance in kernel space
@@ -427,7 +427,7 @@ void RmFreeUnusedClients(
if (numClients != 0)
{
pRmApi->FreeClientList(pRmApi, pClientList, numClients);
pRmApi->DisableClients(pRmApi, pClientList, numClients);
portMemFree(pClientList);
}
@@ -1008,6 +1008,16 @@ static NV_STATUS RmPerformVersionCheck(
NvBool relaxed = NV_FALSE;
NvU32 i;
//
// rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within
// NV_RM_API_VERSION_STRING_LENGTH, so that:
//
// (1) If the versions don't match, we can return rmStr in
// pParams->versionString.
// (2) The below loop is guaranteed to not overrun rmStr.
//
ct_assert(sizeof(NV_VERSION_STRING) <= NV_RM_API_VERSION_STRING_LENGTH);
if (dataSize != sizeof(nv_ioctl_rm_api_version_t))
return NV_ERR_INVALID_ARGUMENT;
@@ -1020,11 +1030,11 @@ static NV_STATUS RmPerformVersionCheck(
pParams->reply = NV_RM_API_VERSION_REPLY_RECOGNIZED;
//
// the client requested to override the version check; just return
// success.
// the client is just querying the version, not verifying against expected.
//
if (pParams->cmd == NV_RM_API_VERSION_CMD_OVERRIDE)
if (pParams->cmd == NV_RM_API_VERSION_CMD_QUERY)
{
os_string_copy(pParams->versionString, rmStr);
return NV_OK;
}
@@ -1037,19 +1047,6 @@ static NV_STATUS RmPerformVersionCheck(
relaxed = NV_TRUE;
}
//
// rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within
// NV_RM_API_VERSION_STRING_LENGTH, so that:
//
// (1) If the versions don't match, we can return rmStr in
// pParams->versionString.
// (2) The below loop is guaranteed to not overrun rmStr.
//
if ((os_string_length(rmStr) + 1) > NV_RM_API_VERSION_STRING_LENGTH)
{
return NV_ERR_BUFFER_TOO_SMALL;
}
for (i = 0; i < NV_RM_API_VERSION_STRING_LENGTH; i++)
{
clientCh = pParams->versionString[i];
@@ -1353,6 +1350,24 @@ RmDmabufPutClientAndDevice(
NV_ASSERT_OK(kmigmgrDecRefCount(pKernelMIGGpuInstance->pShare));
}
//
// Deliver an NVPCF event notification to subdevice clients of this GPU.
// Acquires the RM locks via RmUnixRmApiPrologue; silently returns if the
// prologue fails (e.g. locks unavailable).
//
static void
RmHandleNvpcfEvents(
    nv_state_t *pNv
)
{
    OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    THREAD_STATE_NODE threadState;

    if (RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_ACPI) == NULL)
    {
        return;
    }

    gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_NVPCF_EVENTS, NULL, 0, 0, 0);

    RmUnixRmApiEpilogue(pNv, &threadState);
}
/*
* ---------------------------------------------------------------------------
*
@@ -2446,6 +2461,27 @@ NV_STATUS NV_API_CALL rm_ioctl(
return rmStatus;
}
//
// Work-item callback that incrementally frees disabled RM clients, up to
// pSys->clientListDeferredFreeLimit per invocation. Reschedules itself
// while more work remains; if rescheduling fails, falls back to freeing
// everything synchronously so no clients are leaked.
//
static void _deferredClientListFreeCallback(void *unused)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    NV_STATUS status = serverFreeDisabledClients(&g_resServ, 0, pSys->clientListDeferredFreeLimit);
    //
    // Possible return values:
    //   NV_WARN_MORE_PROCESSING_REQUIRED - Iteration limit reached, need to call again
    //   NV_ERR_IN_USE                    - Already running on another thread, try again later
    // In both cases, schedule a worker to clean up anything that remains
    //
    if (status != NV_OK)
    {
        status = osQueueSystemWorkItem(_deferredClientListFreeCallback, unused);
        if (status != NV_OK)
        {
            NV_PRINTF(LEVEL_NOTICE, "Failed to schedule deferred free callback. Freeing immediately.\n");
            // Limit of 0 frees all remaining disabled clients in one pass.
            serverFreeDisabledClients(&g_resServ, 0, 0);
        }
    }
}
void NV_API_CALL rm_cleanup_file_private(
nvidia_stack_t *sp,
nv_state_t *pNv,
@@ -2454,19 +2490,23 @@ void NV_API_CALL rm_cleanup_file_private(
{
THREAD_STATE_NODE threadState;
void *fp;
RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL);
RM_API *pRmApi;
RM_API_CONTEXT rmApiContext = {0};
NvU32 i;
OBJSYS *pSys = SYS_GET_INSTANCE();
NV_ENTER_RM_RUNTIME(sp,fp);
pRmApi = rmapiGetInterface(RMAPI_EXTERNAL);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
threadStateSetTimeoutOverride(&threadState, 10 * 1000);
if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK)
if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK) {
NV_EXIT_RM_RUNTIME(sp,fp);
return;
}
// LOCK: acquire API lock
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
// LOCK: acquire API lock. Low priority so cleanup doesn't block active threads
if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_LOW_PRIORITY, RM_LOCK_MODULES_OSAPI) == NV_OK)
{
// Unref any object which was exported on this file.
if (nvfp->handles != NULL)
@@ -2487,13 +2527,21 @@ void NV_API_CALL rm_cleanup_file_private(
nvfp->maxHandles = 0;
}
// Free any RM clients associated with this file.
// Disable any RM clients associated with this file.
RmFreeUnusedClients(pNv, nvfp);
// Unless configured otherwise, immediately free all disabled clients
if (!pSys->bUseDeferredClientListFree)
serverFreeDisabledClients(&g_resServ, RM_LOCK_STATES_API_LOCK_ACQUIRED, 0);
// UNLOCK: release API lock
rmapiLockRelease();
}
// Start the deferred free callback if necessary
if (pSys->bUseDeferredClientListFree)
_deferredClientListFreeCallback(NULL);
rmapiEpilogue(pRmApi, &rmApiContext);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -2929,7 +2977,7 @@ static NV_STATUS RmRunNanoTimerCallback(
THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
// Call timer event service
status = tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, (PTMR_EVENT)pTmrEvent);
status = tmrEventServiceTimer(pGpu, pTmr, (PTMR_EVENT)pTmrEvent);
// Out of conflicting thread
threadStateFreeISRAndDeferredIntHandler(&threadState,
@@ -5318,25 +5366,6 @@ void NV_API_CALL rm_dma_buf_put_client_and_device(
// NOTE: Used only on VMWware
//
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(
nvidia_stack_t *sp,
NvBool is_driver_vm
)
{
OBJSYS *pSys;
POBJHYPERVISOR pHypervisor;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
pSys = SYS_GET_INSTANCE();
pHypervisor = SYS_GET_HYPERVISOR(pSys);
pHypervisor->setProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED, is_driver_vm);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NvBool NV_API_CALL rm_is_altstack_in_use(void)
{
#if defined(__use_altstack__)
@@ -5345,3 +5374,21 @@ NvBool NV_API_CALL rm_is_altstack_in_use(void)
return NV_FALSE;
#endif
}
//
// OS-facing hook invoked on an ACPI NVPCF notification.
// Forwards the event to RmHandleNvpcfEvents for GPU instance 0.
// NOTE(review): only gpumgrGetGpu(0) is consulted — presumably NVPCF
// applies to a single-GPU (notebook) configuration; confirm for multi-GPU.
//
void NV_API_CALL rm_acpi_nvpcf_notify(
    nvidia_stack_t *sp
)
{
    void *fp;
    OBJGPU *pGpu = gpumgrGetGpu(0);

    NV_ENTER_RM_RUNTIME(sp,fp);

    // No-op when no GPU has been initialized yet.
    if (pGpu != NULL)
    {
        nv_state_t *nv = NV_GET_NV_STATE(pGpu);
        RmHandleNvpcfEvents(nv);
    }

    NV_EXIT_RM_RUNTIME(sp,fp);
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -665,14 +665,14 @@ osInitNvMapping(
"NV fb using linear address : 0x%p\n", pGpu->registerAccess.gpuFbAddr);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED, NV_TRUE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_FALSE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_TRUE);
if (osReadRegistryDword(pGpu,
NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR, &data) == NV_OK)
{
if (data == NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE)
if (data == NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_DISABLE)
{
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_TRUE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_FALSE);
}
}
@@ -680,6 +680,12 @@ osInitNvMapping(
{
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE);
}
else
{
{
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE);
}
}
if ((osReadRegistryDword(NULL,
NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS,
@@ -766,6 +772,11 @@ RmSetConsolePreservationParams(OBJGPU *pGpu)
if (os_is_vgx_hyper() || IS_VIRTUAL(pGpu))
return;
if (!gpuFuseSupportsDisplay_HAL(pGpu))
{
return;
}
//
// Check the OS layer for any video memory used by a console
// driver that should be reserved.
@@ -937,6 +948,9 @@ RmInitNvDevice(
return;
}
// Setup GPU scalability
(void) RmInitScalability(pGpu);
return;
}

View File

@@ -66,13 +66,6 @@ initUnixOSFunctionPointers(OBJOS *pOS)
pOS->osNv_cpuid = nv_cpuid;
#endif
pOS->osCallACPI_DSM = osCallACPI_DSM;
pOS->osCallACPI_DDC = osCallACPI_DDC;
pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM;
pOS->osCallACPI_DOD = osCallACPI_DOD;
pOS->osCallACPI_MXDM = osCallACPI_MXDM;
pOS->osCallACPI_MXDS = osCallACPI_MXDS;
pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled;
}

View File

@@ -570,6 +570,17 @@ NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *sp,
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *sp,
gpuDeviceHandle device)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsFlushReplayableFaultBuffer(device);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)

View File

@@ -303,9 +303,6 @@ NV_STATUS osIsr(
*/
NvBool osLockShouldToggleInterrupts(OBJGPU *pGpu)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
return NV_TRUE;
return (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) &&
gpuIsStateLoaded(pGpu) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SLI_LINK_CODEPATH));
@@ -591,10 +588,10 @@ NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(
goto done;
}
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
if (IS_GSP_CLIENT(pGpu))
{
status = NV_ERR_NOT_SUPPORTED;
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
status = NV_OK;
goto done;
}
@@ -614,11 +611,6 @@ static NV_STATUS _rm_gpu_copy_mmu_faults_unlocked(
THREAD_STATE_NODE *pThreadState
)
{
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
if (IS_GSP_CLIENT(pGpu))
{
return NV_ERR_NOT_SUPPORTED;
}
return NV_OK;
}
@@ -643,6 +635,7 @@ NV_STATUS rm_gpu_handle_mmu_faults(
if (pGpu == NULL)
{
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_ERR_OBJECT_NOT_FOUND;
}
@@ -666,7 +659,7 @@ NV_STATUS rm_gpu_handle_mmu_faults(
{
// We have to clear the top level interrupt bit here since otherwise
// the bottom half will attempt to service the interrupt on the CPU
// side before GSP recieves the notification and services it
// side before GSP receives the notification and services it
kgmmuClearNonReplayableFaultIntr_HAL(pGpu, pKernelGmmu, &threadState);
status = intrTriggerPrivDoorbell_HAL(pGpu, pIntr, NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_HANDLE);
@@ -681,7 +674,15 @@ NV_STATUS rm_gpu_handle_mmu_faults(
}
else
{
status = _rm_gpu_copy_mmu_faults_unlocked(pGpu, faultsCopied, &threadState);
if (IS_GSP_CLIENT(pGpu))
{
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
status = NV_OK;
}
else
{
status = _rm_gpu_copy_mmu_faults_unlocked(pGpu, faultsCopied, &threadState);
}
}
threadStateFreeISRLockless(&threadState, pGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);