560.28.03

This commit is contained in:
Gaurav Juvekar
2024-07-19 15:45:15 -07:00
parent 5fdf5032fb
commit 448d5cc656
859 changed files with 165424 additions and 91129 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -21,15 +21,14 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef GSP_ERROR_H
#define GSP_ERROR_H
#ifndef CC_KEYROTATION_H
#define CC_KEYROTATION_H
// Definitions for GSP-RM to report errors to CPU-RM via mailbox
#define NV_GSP_ERROR_CODE 7:0
#define NV_GSP_ERROR_REASON 15:8
#define NV_GSP_ERROR_TASK 23:16
#define NV_GSP_ERROR_SKIPPED 27:24
#define NV_GSP_ERROR_TAG 31:28
#define NV_GSP_ERROR_TAG_VAL 0xE
//
// Default threshold value derived from SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT
// Minimum threshold defined based on minimum in confComputeSetKeyRotation.
//
#define KEY_ROTATION_MINIMUM_INTERNAL_THRESHOLD (134217727u)
#define KEY_ROTATION_DEFAULT_INTERNAL_THRESHOLD (24296003999ull)
#endif // GSP_ERROR_H
#endif // CC_KEYROTATION_H

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -55,8 +55,8 @@ enum
CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT,
CC_LKEYID_CPU_GSP_RESERVED2,
CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT,
CC_LKEYID_GSP_SEC2_LOCKED_RPC,
CC_LKEYID_SEC2_GSP_LOCKED_RPC,
CC_LKEYID_GSP_SEC2_LOCKED_RPC,
CC_KEYSPACE_GSP_SIZE // This is always the last element.
};
// The fault buffers only support GPU-to-CPU encryption, so the CPU-to-GPU encryption slot
@@ -75,13 +75,17 @@ enum
CC_LKEYID_CPU_SEC2_HMAC_USER,
CC_LKEYID_CPU_SEC2_DATA_KERN,
CC_LKEYID_CPU_SEC2_HMAC_KERN,
CC_LKEYID_CPU_SEC2_DATA_SCRUBBER,
CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER,
CC_KEYSPACE_SEC2_SIZE // This is always the last element.
};
#define CC_LKEYID_CPU_SEC2_DATA_USER_STR "cpu_sec2_data_user"
#define CC_LKEYID_CPU_SEC2_HMAC_USER_STR "cpu_sec2_hmac_user"
#define CC_LKEYID_CPU_SEC2_DATA_KERN_STR "cpu_sec2_data_kernel"
#define CC_LKEYID_CPU_SEC2_HMAC_KERN_STR "cpu_sec2_hmac_kernel"
#define CC_LKEYID_CPU_SEC2_DATA_USER_STR "cpu_sec2_data_user"
#define CC_LKEYID_CPU_SEC2_HMAC_USER_STR "cpu_sec2_hmac_user"
#define CC_LKEYID_CPU_SEC2_DATA_KERN_STR "cpu_sec2_data_kernel"
#define CC_LKEYID_CPU_SEC2_HMAC_KERN_STR "cpu_sec2_hmac_kernel"
#define CC_LKEYID_CPU_SEC2_DATA_SCRUBBER_STR "cpu_sec2_data_scrubber"
#define CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER_STR "cpu_sec2_hmac_scrubber"
enum
{
@@ -188,7 +192,11 @@ enum
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_DATA_KERN) ? \
CC_LKEYID_CPU_SEC2_DATA_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_HMAC_KERN) ? \
CC_LKEYID_CPU_SEC2_HMAC_KERN_STR : NULL : \
CC_LKEYID_CPU_SEC2_HMAC_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_DATA_SCRUBBER) ? \
CC_LKEYID_CPU_SEC2_DATA_SCRUBBER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER) ? \
CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE0) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE0_H2D_USER_STR : \

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -21,20 +21,31 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef OSFUNCS_H
#define OSFUNCS_H
#ifndef FSP_CAPS_QUERY_RPC_H
#define FSP_CAPS_QUERY_RPC_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Declarations for the Operating System Specific Functions. *
* *
\***************************************************************************/
#include "fsp/nvdm_payload_cmd_response.h"
#include <os/os.h>
#pragma pack(1)
#if defined(NVCPU_X86_64)
OSnv_rdcr4 nv_rdcr4;
OSnv_cpuid nv_cpuid;
#endif
/*!
 * @brief Capability query payload command to FSP
 *
 * Packed to byte alignment (see the surrounding #pragma pack(1)) since it is
 * sent over the wire to FSP.
 */
typedef struct
{
NvU8 subMessageId;   // Sub-message ID of the capability query command — presumably an FSP-defined ID; confirm against the FSP RPC spec
} FSP_CAPS_QUERY_RPC_PAYLOAD_PARAMS;
#endif // OSFUNCS_H
/*!
 * @brief Capability query response payload for FSP capability query
 *
 * Packed to byte alignment (see the surrounding #pragma pack(1)) since it is
 * received over the wire from FSP.
 */
typedef struct
{
NvU8 nvdmType;                            // NVDM message type of the response — presumably one of the NVDM_TYPE_* values; confirm against fsp/nvdm_format.h
NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse; // Common command-response header (declared in fsp/nvdm_payload_cmd_response.h)
NvU8 rspPayload[4];                        // Raw 4-byte capability payload — layout defined by FSP; verify against the FSP caps spec
} FSP_CAPS_QUERY_RESPONSE_PAYLOAD_PARAMS;
#pragma pack()
#endif // FSP_CAPS_QUERY_RPC_H

View File

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef FSP_CLOCK_BOOST_RPC_H
#define FSP_CLOCK_BOOST_RPC_H
#define FSP_CLOCK_BOOST_FEATURE_DISABLE_SUBMESSAGE_ID 0x0
#define FSP_CLOCK_BOOST_FEATURE_ENABLE_SUBMESSAGE_ID 0x1
#define FSP_CLOCK_BOOST_TRIGGER_RESTORE_SUBMESSAGE_ID 0x2
#pragma pack(1)
/*!
 * @brief Clock Boost payload command to FSP
 *
 * Packed to byte alignment (see the surrounding #pragma pack(1)) since it is
 * sent over the wire to FSP.
 */
typedef struct
{
NvU8 subMessageId;   // One of the FSP_CLOCK_BOOST_*_SUBMESSAGE_ID values defined above (disable/enable/trigger-restore)
} FSP_CLOCK_BOOST_RPC_PAYLOAD_PARAMS;
#pragma pack()
#endif // FSP_CLOCK_BOOST_RPC_H

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -45,5 +45,6 @@
#define NVDM_TYPE_UEFI_RM 0x1C
#define NVDM_TYPE_UEFI_XTL_DEBUG_INTR 0x1D
#define NVDM_TYPE_TNVL 0x1F
#define NVDM_TYPE_CLOCK_BOOST 0x20
#endif // FSP_NVDM_FORMAT_H

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -54,6 +54,9 @@ static inline nv_firmware_chip_family_t nv_firmware_get_chip_family(
case GPU_ARCHITECTURE_HOPPER:
return NV_FIRMWARE_CHIP_FAMILY_GH100;
case GPU_ARCHITECTURE_BLACKWELL:
return NV_FIRMWARE_CHIP_FAMILY_GB10X;
}
return NV_FIRMWARE_CHIP_FAMILY_NULL;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -44,6 +44,7 @@ typedef enum
NV_FIRMWARE_CHIP_FAMILY_GA10X = 4,
NV_FIRMWARE_CHIP_FAMILY_AD10X = 5,
NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
NV_FIRMWARE_CHIP_FAMILY_GB10X = 8,
NV_FIRMWARE_CHIP_FAMILY_END,
} nv_firmware_chip_family_t;
@@ -52,6 +53,7 @@ static inline const char *nv_firmware_chip_family_to_string(
)
{
switch (fw_chip_family) {
case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x";
case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x";
@@ -66,13 +68,13 @@ static inline const char *nv_firmware_chip_family_to_string(
return NULL;
}
// The includer (presumably nv.c) may optionally define
// NV_FIRMWARE_PATH_FOR_FILENAME(filename)
// to return a string "path" given a gsp_*.bin or gsp_log_*.bin filename.
// The includer may optionally define
// NV_FIRMWARE_FOR_NAME(name)
// to return a platform-defined string for a given a gsp_* or gsp_log_* name.
//
// The function nv_firmware_path will then be available.
#if defined(NV_FIRMWARE_PATH_FOR_FILENAME)
static inline const char *nv_firmware_path(
// The function nv_firmware_for_chip_family will then be available.
#if defined(NV_FIRMWARE_FOR_NAME)
static inline const char *nv_firmware_for_chip_family(
nv_firmware_type_t fw_type,
nv_firmware_chip_family_t fw_chip_family
)
@@ -81,15 +83,16 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ga10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_ga10x");
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_tu10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_tu10x");
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
@@ -100,15 +103,16 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ga10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_log_ga10x");
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_tu10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_log_tu10x");
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
@@ -118,15 +122,15 @@ static inline const char *nv_firmware_path(
return "";
}
#endif // defined(NV_FIRMWARE_PATH_FOR_FILENAME)
#endif // defined(NV_FIRMWARE_FOR_NAME)
// The includer (presumably nv.c) may optionally define
// NV_FIRMWARE_DECLARE_GSP_FILENAME(filename)
// The includer may optionally define
// NV_FIRMWARE_DECLARE_GSP(name)
// which will then be invoked (at the top-level) for each
// gsp_*.bin (but not gsp_log_*.bin)
#if defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ga10x.bin")
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_tu10x.bin")
#endif // defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
// gsp_* (but not gsp_log_*)
#if defined(NV_FIRMWARE_DECLARE_GSP)
NV_FIRMWARE_DECLARE_GSP("gsp_ga10x")
NV_FIRMWARE_DECLARE_GSP("gsp_tu10x")
#endif // defined(NV_FIRMWARE_DECLARE_GSP)
#endif // NV_FIRMWARE_DECLARE_GSP_FILENAME
#endif // NV_FIRMWARE_DECLARE_GSP

View File

@@ -192,6 +192,7 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_INTEL, 0x7A8A, CS_INTEL_1B81, "Intel-SapphireRapids", NULL},
{PCI_VENDOR_ID_INTEL, 0x18DC, CS_INTEL_18DC, "Intel-IceLake", NULL},
{PCI_VENDOR_ID_INTEL, 0x7A04, CS_INTEL_7A04, "Intel-RaptorLake", Intel_7A04_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x5795, CS_INTEL_5795, "Intel-GraniteRapids", NULL},
{PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x0FAF, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
@@ -279,6 +280,7 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_AMPERE, 0xE205, CS_AMPERE_AMPEREONE, "Ampere AmpereOne", Ampere_AmpereOne_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE206, CS_AMPERE_AMPEREONE, "Ampere AmpereOne", Ampere_AmpereOne_setupFunc},
{PCI_VENDOR_ID_AMPERE, 0xE207, CS_AMPERE_AMPEREONE, "Ampere AmpereOne", Ampere_AmpereOne_setupFunc},
{PCI_VENDOR_ID_PHYTIUM, 0x5C16, CS_PHYTIUM_S5000, "Phytium S5000", NULL},
///////////////////////////////////////////////////////////////////////////////////////////////////
@@ -316,6 +318,7 @@ VENDORNAME vendorName[] =
{PCI_VENDOR_ID_ALIBABA, "Alibaba"},
{PCI_VENDOR_ID_SIFIVE, "SiFive"},
{PCI_VENDOR_ID_PLDA, "PLDA"},
{PCI_VENDOR_ID_PHYTIUM, "Phytium"},
{0, "Unknown"} // Indicates end of the table
};
@@ -360,7 +363,8 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD}, // Mellanox BlueField
{PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2
{PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2 Crypto disabled
{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3},// Mellanox BlueField 3
{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3},// Mellanox BlueField 3 Crypto disabled
{PCI_VENDOR_ID_MELLANOX, 0xA2DA, CS_MELLANOX_BLUEFIELD3},// Mellanox BlueField 3 Crypto enabled
{PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2}, // Amazon Gravitron2
{PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX}, // Fujitsu A64FX
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500}, // Phytium S2500
@@ -387,6 +391,7 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_AMPERE, 0xE205, CS_AMPERE_AMPEREONE}, // Ampere AmpereOne
{PCI_VENDOR_ID_AMPERE, 0xE206, CS_AMPERE_AMPEREONE}, // Ampere AmpereOne
{PCI_VENDOR_ID_AMPERE, 0xE207, CS_AMPERE_AMPEREONE}, // Ampere AmpereOne
{PCI_VENDOR_ID_PHYTIUM, 0x5C16, CS_PHYTIUM_S5000}, // Phytium S5000
// last element must have chipset CS_UNKNOWN (zero)
{0, 0, CS_UNKNOWN}

View File

@@ -344,6 +344,7 @@
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE 15:8
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_CORRECTABLE_ERROR 0
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_UNCORRECTABLE_ERROR 1
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_ECC_STATE_FLAGS 2
#define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_GPU 0x00000000
#define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_MODULE 0x00000003
@@ -968,6 +969,10 @@
#define NV_MSGBOX_DATA_CAP_5_MEMORY_CAPACITY_UTILIZATION_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_5_MEMORY_CAPACITY_UTILIZATION_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_5_SRAM_ERROR_THRESHOLD_EXCEEDED 9:9
#define NV_MSGBOX_DATA_CAP_5_SRAM_ERROR_THRESHOLD_EXCEEDED_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_5_SRAM_ERROR_THRESHOLD_EXCEEDED_AVAILABLE 0x00000001
/* ECC counters */
#define NV_MSGBOX_DATA_ECC_CNT_16BIT_DBE 31:16
#define NV_MSGBOX_DATA_ECC_CNT_16BIT_SBE 16:0
@@ -1002,6 +1007,13 @@
#define NV_MSGBOX_DATA_ECC_V5_METADATA_LOCATION_ID 26:22
#define NV_MSGBOX_DATA_ECC_V5_METADATA_SUBLOCATION_ID 31:27
/* ECC state flags */
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS 31:0
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS_SRAM_ERROR_THRESHOLD_EXCEEDED 0:0
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS_SRAM_ERROR_THRESHOLD_EXCEEDED_FALSE 0
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS_SRAM_ERROR_THRESHOLD_EXCEEDED_TRUE 1
/* NV_MSGBOX_CMD_OPCODE_SCRATCH_COPY src offset argument */
#define NV_MSGBOX_DATA_COPY_SRC_OFFSET 7:0

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -76,7 +76,7 @@ typedef struct _NV_SPDM_DESC_HEADER
#define NV_SPDM_MAX_TRANSCRIPT_BUFFER_SIZE (2 * NV_SPDM_MAX_SPDM_PAYLOAD_SIZE)
// Limited by the transport size, do not increase without increasing transport buffer.
#define NV_SPDM_MAX_RANDOM_MSG_BYTES (0x80)
#define NV_SPDM_MAX_RANDOM_MSG_BYTES (0x0)
#ifdef NVRM
#include "gpu/mem_mgr/mem_desc.h"

View File

@@ -110,15 +110,15 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN,
TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA,
TEGRASOC_WHICH_CLK_SPPLL0_VCO,
TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA,
TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB,
TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
TEGRASOC_WHICH_CLK_SPPLL0_DIV10,
TEGRASOC_WHICH_CLK_SPPLL0_DIV25,
TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
TEGRASOC_WHICH_CLK_SPPLL1_VCO,
TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
TEGRASOC_WHICH_CLK_VPLL0_REF,
TEGRASOC_WHICH_CLK_VPLL0,
TEGRASOC_WHICH_CLK_VPLL1,
@@ -132,7 +132,7 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_DSI_PIXEL,
TEGRASOC_WHICH_CLK_PRE_SOR0,
TEGRASOC_WHICH_CLK_PRE_SOR1,
TEGRASOC_WHICH_CLK_DP_LINK_REF,
TEGRASOC_WHICH_CLK_DP_LINKA_REF,
TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M,
@@ -143,7 +143,7 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_PLLHUB,
TEGRASOC_WHICH_CLK_SOR0,
TEGRASOC_WHICH_CLK_SOR1,
TEGRASOC_WHICH_CLK_SOR_PAD_INPUT,
TEGRASOC_WHICH_CLK_SOR_PADA_INPUT,
TEGRASOC_WHICH_CLK_PRE_SF0,
TEGRASOC_WHICH_CLK_SF0,
TEGRASOC_WHICH_CLK_SF1,
@@ -332,7 +332,9 @@ typedef struct nv_soc_irq_info_s {
#define NV_MAX_SOC_IRQS 6
#define NV_MAX_DPAUX_NUM_DEVICES 4
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2
#define NV_IGPU_LEGACY_STALL_IRQ 70
#define NV_IGPU_MAX_STALL_IRQS 3
@@ -495,12 +497,6 @@ typedef struct nv_state_t
} iommus;
} nv_state_t;
// These define need to be in sync with defines in system.h
#define OS_TYPE_LINUX 0x1
#define OS_TYPE_FREEBSD 0x2
#define OS_TYPE_SUNOS 0x3
#define OS_TYPE_VMWARE 0x4
#define NVFP_TYPE_NONE 0x0
#define NVFP_TYPE_REFCOUNTED 0x1
#define NVFP_TYPE_REGISTERED 0x2
@@ -893,8 +889,6 @@ void NV_API_CALL nv_cap_drv_exit(void);
NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *);
NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
NvU32 NV_API_CALL nv_get_os_type(void);
void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
void NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *);
@@ -1081,6 +1075,9 @@ NV_STATUS NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t
void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
// Host1x specific functions.
NV_STATUS NV_API_CALL nv_get_syncpoint_aperture(NvU32, NvU64 *, NvU64 *, NvU32 *);
#if defined(NVCPU_X86_64)
static inline NvU64 nv_rdtsc(void)

View File

@@ -151,6 +151,7 @@ void NV_API_CALL os_release_rwlock_read (void *);
void NV_API_CALL os_release_rwlock_write (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);

View File

@@ -30,7 +30,6 @@
*/
#include <os-interface.h>
#include <osfuncs.h>
// File modes, added for NVIDIA capabilities.
#define OS_RUSR 00400 // read permission, owner

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -121,9 +121,6 @@ NvBool RmGpuHasIOSpaceEnabled (nv_state_t *);
void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *);
NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
NV_STATUS RmAllocOsEvent (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS RmFreeOsEvent (NvHandle, NvU32);
void RmI2cAddGpuPorts(nv_state_t *);
NV_STATUS RmInitX86EmuState(OBJGPU *);
@@ -141,9 +138,6 @@ int amd_msr_c0011022_incompatible(OBJOS *);
NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *);
NV_STATUS rm_alloc_os_event (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS rm_free_os_event (NvHandle, NvU32);
NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *);
void rm_client_free_os_events (NvHandle);
NV_STATUS rm_create_mmap_context (NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32);
@@ -171,9 +165,8 @@ void RmUnInitAcpiMethods (OBJSYS *);
void RmInflateOsToRmPageArray (RmPhysAddr *, NvU64);
void RmDeflateRmToOsPageArray (RmPhysAddr *, NvU64);
void RmInitS0ixPowerManagement (nv_state_t *);
void RmInitDeferredDynamicPowerManagement (nv_state_t *);
void RmDestroyDeferredDynamicPowerManagement(nv_state_t *);
void RmInitPowerManagement (nv_state_t *);
void RmDestroyPowerManagement (nv_state_t *);
NV_STATUS os_ref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);
void os_unref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);

View File

@@ -27,10 +27,9 @@
#if defined(NVCPU_X86_64)
NvS32 nv_cpuid(
OBJOS *pOS,
NvS32 op,
NvS32 subop,
int osNv_cpuid(
int op,
int subop,
NvU32 *eax,
NvU32 *ebx,
NvU32 *ecx,

View File

@@ -27,7 +27,7 @@
#if defined(NVCPU_X86_64)
NvU32 nv_rdcr4(OBJOS *pOS)
NvU32 osNv_rdcr4(void)
{
NvU64 val;
asm volatile ("movq %%cr4,%0" : "=r" (val));

View File

@@ -88,7 +88,7 @@
#include <gpu/mem_sys/kern_mem_sys.h>
#include <gpu/subdevice/subdevice.h>
#include <ctrl/ctrl2080/ctrl2080unix.h>
#include <objtmr.h>
#include <gpu/timer/objtmr.h>
//
// Schedule timer based callback, to check for the complete GPU Idleness.
@@ -732,7 +732,7 @@ rmReadAndParseDynamicPowerRegkey
return NV_OK;
}
chipId = DRF_VAL(_PMC, _BOOT_42, _CHIP_ID, pNvp->pmc_boot_42);
chipId = gpuGetChipIdFromPmcBoot42(pNvp->pmc_boot_42);
// From GA102+, we enable RTD3 only if system is found to be Notebook
if ((chipId >= NV_PMC_BOOT_42_CHIP_ID_GA102) &&
@@ -1258,7 +1258,7 @@ static NvBool RmCheckForGcxSupportOnCurrentState(
* finished.
*
* Queue with lock flags:
* OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW
* OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE
*
* @param[in] gpuInstance GPU instance ID.
* @param[in] pArgs Unused callback closure.
@@ -1297,12 +1297,20 @@ static void timerCallbackToRemoveIdleHoldoff(
void *pCallbackData
)
{
OBJGPU *pGpu = reinterpretCast(pCallbackData, OBJGPU *);
NV_STATUS status = NV_OK;
OBJGPU *pGpu = reinterpretCast(pCallbackData, OBJGPU *);
osQueueWorkItemWithFlags(pGpu,
RmRemoveIdleHoldoff,
NULL,
OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW);
status = osQueueWorkItemWithFlags(pGpu,
RmRemoveIdleHoldoff,
NULL,
OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"Queuing of remove idle holdoff work item failed with status : 0x%x\n",
status);
}
}
/*!
@@ -1852,7 +1860,9 @@ static void RmScheduleCallbackToRemoveIdleHoldoff(
* @param[in] pGpu OBJGPU pointer.
*/
static NvBool RmCheckRtd3GcxSupport(
nv_state_t *pNv
nv_state_t *pNv,
NvBool *bGC6Support,
NvBool *bGCOFFSupport
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(pNv);
@@ -1860,31 +1870,21 @@ static NvBool RmCheckRtd3GcxSupport(
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
NV_STATUS status;
NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS virtModeParams = { 0 };
NvBool bGC6Support = NV_FALSE;
NvBool bGCOFFSupport = NV_FALSE;
if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED))
{
NV_PRINTF(LEVEL_NOTICE, "RTD3/GC6 is not supported for this arch\n");
NV_PRINTF(LEVEL_NOTICE, "RTD3 is not supported for this arch\n");
return NV_FALSE;
}
if (nvp->b_mobile_config_enabled)
{
bGC6Support = pGpu->getProperty(pGpu, PDB_PROP_GPU_RTD3_GC6_SUPPORTED);
bGCOFFSupport = bGC6Support;
}
else
{
bGC6Support = pGpu->getProperty(pGpu, PDB_PROP_GPU_RTD3_GC6_SUPPORTED);
bGCOFFSupport = pGpu->getProperty(pGpu, PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED);
}
*bGC6Support = pGpu->getProperty(pGpu, PDB_PROP_GPU_RTD3_GC6_SUPPORTED);
*bGCOFFSupport = nvp->b_mobile_config_enabled ? *bGC6Support :
pGpu->getProperty(pGpu, PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED);
if (!bGC6Support && !bGCOFFSupport)
if (!(*bGC6Support) && !(*bGCOFFSupport))
{
NV_PRINTF(LEVEL_NOTICE,
"Disabling RTD3. [GC6 support=%d GCOFF support=%d]\n",
bGC6Support, bGCOFFSupport);
*bGC6Support, *bGCOFFSupport);
return NV_FALSE;
}
@@ -1903,7 +1903,7 @@ static NvBool RmCheckRtd3GcxSupport(
if ((virtModeParams.virtualizationMode != NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE) &&
(virtModeParams.virtualizationMode != NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS))
{
NV_PRINTF(LEVEL_NOTICE, "RTD3/GC6 is not supported on VM\n");
NV_PRINTF(LEVEL_NOTICE, "RTD3 is not supported on VM\n");
return NV_FALSE;
}
@@ -1916,47 +1916,42 @@ static NvBool RmCheckRtd3GcxSupport(
*
* @param[in] nv nv_state_t pointer.
*/
void RmInitDeferredDynamicPowerManagement(
nv_state_t *nv
static void RmInitDeferredDynamicPowerManagement(
nv_state_t *nv,
NvBool bRtd3Support
)
{
NV_STATUS status;
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
// LOCK: acquire GPUs lock
if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
if (nvp->dynamic_power.mode == NV_DYNAMIC_PM_FINE)
{
if (nvp->dynamic_power.mode == NV_DYNAMIC_PM_FINE)
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (!bRtd3Support)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (!RmCheckRtd3GcxSupport(nv))
{
nvp->dynamic_power.mode = NV_DYNAMIC_PM_NEVER;
nvp->dynamic_power.b_fine_not_supported = NV_TRUE;
NV_PRINTF(LEVEL_NOTICE, "RTD3/GC6 is not supported\n");
goto unlock;
}
osAddGpuDynPwrSupported(gpuGetInstance(pGpu));
nvp->dynamic_power.b_fine_not_supported = NV_FALSE;
status = CreateDynamicPowerCallbacks(pGpu);
if (status == NV_OK)
{
RmScheduleCallbackForIdlePreConditionsUnderGpuLock(pGpu);
nvp->dynamic_power.deferred_idle_enabled = NV_TRUE;
// RM's default is GCOFF allow
nvp->dynamic_power.clients_gcoff_disallow_refcount = 0;
}
nvp->dynamic_power.mode = NV_DYNAMIC_PM_NEVER;
nvp->dynamic_power.b_fine_not_supported = NV_TRUE;
NV_PRINTF(LEVEL_NOTICE, "RTD3 is not supported.\n");
return;
}
unlock:
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
}
osAddGpuDynPwrSupported(gpuGetInstance(pGpu));
nvp->dynamic_power.b_fine_not_supported = NV_FALSE;
status = CreateDynamicPowerCallbacks(pGpu);
if (status != NV_OK)
NV_PRINTF(LEVEL_ERROR, "Failed to register for dynamic power callbacks\n");
if (status == NV_OK)
{
RmScheduleCallbackForIdlePreConditionsUnderGpuLock(pGpu);
nvp->dynamic_power.deferred_idle_enabled = NV_TRUE;
// RM's default is GCOFF allow
nvp->dynamic_power.clients_gcoff_disallow_refcount = 0;
}
else
{
NV_PRINTF(LEVEL_ERROR, "Failed to register for dynamic power callbacks\n");
}
}
}
/*!
@@ -2610,7 +2605,7 @@ static void RmQueueIdleSustainedWorkitem(
status = osQueueWorkItemWithFlags(pGpu,
RmHandleIdleSustained,
NULL,
OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW);
OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
@@ -2625,14 +2620,15 @@ static void RmQueueIdleSustainedWorkitem(
/*
* Allocate resources needed for S0ix-based system power management.
*/
void
static void
RmInitS0ixPowerManagement(
nv_state_t *nv
nv_state_t *nv,
NvBool bRtd3Support,
NvBool bGC6Support
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvU32 data;
NvBool bRtd3Gc6Support = NV_FALSE;
// S0ix-based S2Idle, on desktops, is not supported yet. Return early for desktop SKUs
if (!nvp->b_mobile_config_enabled)
@@ -2640,15 +2636,6 @@ RmInitS0ixPowerManagement(
return;
}
// LOCK: acquire GPUs lock
if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK)
{
bRtd3Gc6Support = RmCheckRtd3GcxSupport(nv);
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
}
/*
* The GPU S0ix-based system power management will be enabled
* only if all the following necessary requirements are met:
@@ -2657,7 +2644,7 @@ RmInitS0ixPowerManagement(
* 2. The platform has support for s0ix.
* 3. Feature regkey EnableS0ixPowerManagement is enabled.
*/
if (bRtd3Gc6Support &&
if (bRtd3Support && bGC6Support &&
nv_platform_supports_s0ix() &&
(osReadRegistryDword(NULL, NV_REG_ENABLE_S0IX_POWER_MANAGEMENT,
&data) == NV_OK) && (data == 1))
@@ -2678,6 +2665,32 @@ RmInitS0ixPowerManagement(
}
}
/*!
 * @brief Initialize GPU power management (RTD3 dynamic power and S0ix).
 *
 * Probes RTD3/GC6/GCOFF support once under the GPUs lock, then initializes
 * both deferred dynamic power management and S0ix-based power management
 * from the single probe result.
 *
 * @param[in] nv  nv_state_t pointer for the GPU being initialized.
 *
 * NOTE(review): if rmGpuLocksAcquire() fails, all power-management init is
 * silently skipped — no error is logged or returned; confirm this is intended.
 */
void RmInitPowerManagement(
nv_state_t *nv
)
{
// LOCK: acquire GPUs lock
if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK)
{
NvBool bGC6Support = NV_FALSE;
NvBool bGCOFFSupport = NV_FALSE;
// Single support probe shared by both init paths below.
NvBool bRtd3Support = RmCheckRtd3GcxSupport(nv, &bGC6Support, &bGCOFFSupport);
RmInitDeferredDynamicPowerManagement(nv, bRtd3Support);
RmInitS0ixPowerManagement(nv, bRtd3Support, bGC6Support);
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
}
}
/*!
 * @brief Tear down GPU power management.
 *
 * Counterpart to RmInitPowerManagement(); currently only tears down the
 * deferred dynamic power management state (no S0ix teardown is visible here).
 *
 * @param[in] nv  nv_state_t pointer for the GPU being torn down.
 */
void RmDestroyPowerManagement(
nv_state_t *nv
)
{
RmDestroyDeferredDynamicPowerManagement(nv);
}
void NV_API_CALL rm_get_power_info(
nvidia_stack_t *sp,
nv_state_t *pNv,

View File

@@ -677,52 +677,6 @@ NV_STATUS RmIoctl(
break;
}
case NV_ESC_ALLOC_OS_EVENT:
{
nv_ioctl_alloc_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_alloc_os_event(pApi->hClient,
nvfp,
pApi->fd);
break;
}
case NV_ESC_FREE_OS_EVENT:
{
nv_ioctl_free_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_free_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd);
break;
}
case NV_ESC_RM_GET_EVENT_DATA:
{
NVOS41_PARAMETERS *pApi = data;
if (dataSize != sizeof(NVOS41_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->status = rm_get_event_data(nvfp,
pApi->pEvent,
&pApi->MoreEvents);
break;
}
case NV_ESC_STATUS_CODE:
{
nv_state_t *pNv;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a

View File

@@ -31,7 +31,7 @@
#include "os/os.h"
#include "nv.h"
#include "nv-priv.h"
#include <nvRmReg.h>
#include <nvrm_registry.h>
#include <virtualization/hypervisor/hypervisor.h>
#include "core/thread_state.h"
#include "core/locks.h"
@@ -40,7 +40,7 @@
#include "kernel/gpu/fifo/kernel_fifo.h"
#include "osapi.h"
#include "virtualization/kernel_hostvgpudeviceapi.h"
#include <objtmr.h>
#include "gpu/timer/objtmr.h"
#include "gpu/bif/kernel_bif.h"
#include "gpu/bus/kern_bus.h"
#include <nv_ref.h> // NV_PMC_BOOT_1_VGPU

View File

@@ -35,7 +35,6 @@
#include "gpu/gpu.h"
#include <gpu_mgr/gpu_mgr.h>
#include <osfuncs.h>
#include <platform/chipset/chipset.h>
#include "nverror.h"
@@ -63,6 +62,8 @@
#include "mem_mgr/mem.h"
#include "gpu/mem_mgr/virt_mem_allocator_common.h"
#include "vgpu/vgpu_util.h"
#include <acpidsmguids.h>
#include <pex.h>
#include "gps.h"
@@ -742,10 +743,10 @@ NV_STATUS osQueueWorkItemWithFlags(
pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW;
if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS)
pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS;
if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW)
pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW;
if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW)
pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW;
if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE)
pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE;
if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE)
pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE;
if (flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY)
pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY;
@@ -941,6 +942,9 @@ NV_STATUS osAllocPagesInternal(
memdescSetMemData(pMemDesc, pMemData, NULL);
if ((pGpu != NULL) && IS_VIRTUAL(pGpu))
NV_ASSERT_OK_OR_RETURN(vgpuUpdateGuestSysmemPfnBitMap(pGpu, pMemDesc, NV_TRUE));
return status;
}
@@ -951,6 +955,9 @@ void osFreePagesInternal(
OBJGPU *pGpu = pMemDesc->pGpu;
NV_STATUS rmStatus;
if ((pGpu != NULL) && IS_VIRTUAL(pGpu))
NV_ASSERT_OR_RETURN_VOID(vgpuUpdateGuestSysmemPfnBitMap(pGpu, pMemDesc, NV_FALSE) == NV_OK);
if (NV_RM_PAGE_SIZE < os_page_size &&
!memdescGetContiguity(pMemDesc, AT_CPU))
{
@@ -2698,7 +2705,6 @@ NV_STATUS osCallACPI_NVHG_ROM
void osInitSystemStaticConfig(SYS_STATIC_CONFIG *pConfig)
{
pConfig->bIsNotebook = rm_is_system_notebook();
pConfig->osType = nv_get_os_type();
pConfig->bOsCCEnabled = os_cc_enabled;
pConfig->bOsCCTdxEnabled = os_cc_tdx_enabled;
}
@@ -2871,6 +2877,11 @@ NV_STATUS osGetVersion(NvU32 *majorVer, NvU32 *minorVer, NvU32 *buildNum, NvU16
return rmStatus;
}
NV_STATUS osGetIsOpenRM(NvBool *bOpenRm)
{
return os_get_is_openrm(bOpenRm);
}
NV_STATUS
osGetCarveoutInfo
(
@@ -5213,7 +5224,7 @@ osGetSyncpointAperture
NvU32 *offset
)
{
return NV_ERR_NOT_SUPPORTED;
return nv_get_syncpoint_aperture(syncpointId, physAddr, limit, offset);
}
/*!

View File

@@ -25,6 +25,7 @@
#include <nv_ref.h>
#include <nv.h>
#include <nv_escape.h>
#include <nv-priv.h>
#include <os/os.h>
#include <osapi.h>
@@ -32,10 +33,9 @@
#include <rmosxfac.h> // Declares RmInitRm().
#include "gpu/gpu.h"
#include "gps.h"
#include <osfuncs.h>
#include <platform/chipset/chipset.h>
#include <objtmr.h>
#include <gpu/timer/objtmr.h>
#include <gpu/subdevice/subdevice.h>
#include <mem_mgr/mem.h>
#include "kernel/gpu/mem_mgr/mem_mgr.h"
@@ -406,6 +406,39 @@ static void free_os_events(
portSyncSpinlockRelease(nv->event_spinlock);
}
static NV_STATUS get_os_event_data(
nv_file_private_t *nvfp,
NvP64 pEvent,
NvU32 *MoreEvents
)
{
nv_event_t nv_event;
NvUnixEvent *nv_unix_event;
NV_STATUS status;
status = os_alloc_mem((void**)&nv_unix_event, sizeof(NvUnixEvent));
if (status != NV_OK)
return status;
status = nv_get_event(nvfp, &nv_event, MoreEvents);
if (status != NV_OK)
{
status = NV_ERR_OPERATING_SYSTEM;
goto done;
}
os_mem_set(nv_unix_event, 0, sizeof(NvUnixEvent));
nv_unix_event->hObject = nv_event.hObject;
nv_unix_event->NotifyIndex = nv_event.index;
nv_unix_event->info32 = nv_event.info32;
nv_unix_event->info16 = nv_event.info16;
status = os_memcpy_to_user(NvP64_VALUE(pEvent), nv_unix_event, sizeof(NvUnixEvent));
done:
os_free_mem(nv_unix_event);
return status;
}
void rm_client_free_os_events(
NvHandle client
)
@@ -482,6 +515,12 @@ static NV_STATUS allocate_os_event(
goto done;
}
new_event->hParent = hParent;
new_event->nvfp = nvfp;
new_event->fd = fd;
new_event->active = NV_TRUE;
new_event->refcount = 0;
portSyncSpinlockAcquire(nv->event_spinlock);
for (event = nv->event_list; event; event = event->next)
{
@@ -496,45 +535,26 @@ static NV_STATUS allocate_os_event(
new_event->next = nv->event_list;
nv->event_list = new_event;
nvfp->bCleanupRmapi = NV_TRUE;
portSyncSpinlockRelease(nv->event_spinlock);
done:
if (status == NV_OK)
{
new_event->hParent = hParent;
new_event->nvfp = nvfp;
new_event->fd = fd;
new_event->active = NV_TRUE;
new_event->refcount = 0;
nvfp->bCleanupRmapi = NV_TRUE;
NV_PRINTF(LEVEL_INFO, "allocated OS event:\n");
NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent);
NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd);
}
else
{
NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event: 0x%08x\n", status);
status = NV_ERR_INSUFFICIENT_RESOURCES;
portMemFree(new_event);
}
return status;
}
NV_STATUS RmAllocOsEvent(
NvHandle hParent,
nv_file_private_t *nvfp,
NvU32 fd
)
{
if (NV_OK != allocate_os_event(hParent, nvfp, fd))
{
NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event\n");
return NV_ERR_INSUFFICIENT_RESOURCES;
}
return NV_OK;
}
static NV_STATUS free_os_event(
NvHandle hParent,
NvU32 fd
@@ -585,18 +605,6 @@ static NV_STATUS free_os_event(
return result;
}
NV_STATUS RmFreeOsEvent(
NvHandle hParent,
NvU32 fd
)
{
if (NV_OK != free_os_event(hParent, fd))
{
return NV_ERR_INVALID_EVENT;
}
return NV_OK;
}
static void RmExecuteWorkItem(
void *pWorkItem
)
@@ -607,8 +615,8 @@ static void RmExecuteWorkItem(
if (!(pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) &&
((pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS) ||
(pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) ||
(pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) ||
(pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE) ||
(pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE) ||
(pWi->flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY)))
{
// Requesting one of the GPU locks without providing a GPU instance
@@ -656,40 +664,6 @@ done:
portMemFree((void *)pWi);
}
static NV_STATUS RmGetEventData(
nv_file_private_t *nvfp,
NvP64 pEvent,
NvU32 *MoreEvents,
NvBool bUserModeArgs
)
{
NV_STATUS RmStatus;
NvUnixEvent *pKernelEvent = NULL;
nv_event_t nv_event;
RMAPI_PARAM_COPY paramCopy;
RmStatus = nv_get_event(nvfp, &nv_event, MoreEvents);
if (RmStatus != NV_OK)
return NV_ERR_OPERATING_SYSTEM;
// setup for access to client's parameters
RMAPI_PARAM_COPY_INIT(paramCopy, pKernelEvent, pEvent, 1, sizeof(NvUnixEvent));
RmStatus = rmapiParamsAcquire(&paramCopy, bUserModeArgs);
if (RmStatus != NV_OK)
return NV_ERR_OPERATING_SYSTEM;
pKernelEvent->hObject = nv_event.hObject;
pKernelEvent->NotifyIndex = nv_event.index;
pKernelEvent->info32 = nv_event.info32;
pKernelEvent->info16 = nv_event.info16;
// release client buffer access, with copyout as needed
if (rmapiParamsRelease(&paramCopy) != NV_OK)
return NV_ERR_OPERATING_SYSTEM;
return NV_OK;
}
static NV_STATUS RmAccessRegistry(
NvHandle hClient,
NvHandle hObject,
@@ -1016,6 +990,7 @@ static NV_STATUS RmUpdateDeviceMappingInfo(
goto done;
if ((objDynamicCastById(pMappableRef->pResource, classId(Memory)) == NULL) &&
(objDynamicCastById(pMappableRef->pResource, classId(KernelCcuApi)) == NULL) &&
(objDynamicCastById(pMappableRef->pResource, classId(KernelChannel)) == NULL))
{
status = NV_ERR_INVALID_OBJECT_HANDLE;
@@ -2107,7 +2082,7 @@ static NV_STATUS RmGetMmapPteArray(
}
else
{
// Offset is accounted in mmap_start.
// Offset is accounted in nvuap->mmap_start.start.
for (nvuap->page_array[0] = nvuap->mmap_start, i = 1;
i < pages; i++)
{
@@ -2738,16 +2713,68 @@ NV_STATUS NV_API_CALL rm_ioctl(
NvU32 dataSize
)
{
NV_STATUS rmStatus;
NV_STATUS rmStatus = NV_OK;
THREAD_STATE_NODE threadState;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize);
//
// Some ioctls are handled entirely inside the OS layer and don't need to
// suffer the overhead of calling into RM core.
//
switch (Command)
{
case NV_ESC_ALLOC_OS_EVENT:
{
nv_ioctl_alloc_os_event_t *pApi = pData;
if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
pApi->Status = allocate_os_event(pApi->hClient, nvfp, pApi->fd);
break;
}
case NV_ESC_FREE_OS_EVENT:
{
nv_ioctl_free_os_event_t *pApi = pData;
if (dataSize != sizeof(nv_ioctl_free_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
pApi->Status = free_os_event(pApi->hClient, pApi->fd);
break;
}
case NV_ESC_RM_GET_EVENT_DATA:
{
NVOS41_PARAMETERS *pApi = pData;
if (dataSize != sizeof(NVOS41_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
pApi->status = get_os_event_data(nvfp,
pApi->pEvent,
&pApi->MoreEvents);
break;
}
default:
{
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
break;
}
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
@@ -2882,65 +2909,6 @@ void NV_API_CALL rm_unbind_lock(
NV_EXIT_RM_RUNTIME(sp,fp);
}
NV_STATUS rm_alloc_os_event(
NvHandle hClient,
nv_file_private_t *nvfp,
NvU32 fd
)
{
NV_STATUS RmStatus;
// LOCK: acquire API lock
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
{
RmStatus = RmAllocOsEvent(hClient, nvfp, fd);
// UNLOCK: release API lock
rmapiLockRelease();
}
return RmStatus;
}
NV_STATUS rm_free_os_event(
NvHandle hClient,
NvU32 fd
)
{
NV_STATUS RmStatus;
// LOCK: acquire API lock
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
{
RmStatus = RmFreeOsEvent(hClient, fd);
// UNLOCK: release API lock
rmapiLockRelease();
}
return RmStatus;
}
NV_STATUS rm_get_event_data(
nv_file_private_t *nvfp,
NvP64 pEvent,
NvU32 *MoreEvents
)
{
NV_STATUS RmStatus;
// LOCK: acquire API lock
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
{
RmStatus = RmGetEventData(nvfp, pEvent, MoreEvents, NV_TRUE);
// UNLOCK: release API lock
rmapiLockRelease();
}
return RmStatus;
}
NV_STATUS NV_API_CALL rm_read_registry_dword(
nvidia_stack_t *sp,
nv_state_t *nv,
@@ -3254,43 +3222,52 @@ NV_STATUS NV_API_CALL rm_run_rc_callback(
return NV_OK;
}
static void _tmrEventServiceTimerWorkItem
(
NvU32 gpuInstance,
void *pArgs
)
{
OBJGPU *pGpu = gpumgrGetGpu(gpuInstance);
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
NV_STATUS status = NV_OK;
status = tmrEventServiceTimer(pGpu, pTmr, (TMR_EVENT *)pArgs);
if (status != NV_OK)
NV_PRINTF(LEVEL_ERROR, "Timer event failed from OS timer callback workitem with status :0x%x\n", status);
}
static NV_STATUS RmRunNanoTimerCallback(
OBJGPU *pGpu,
void *pTmrEvent
)
{
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
THREAD_STATE_NODE threadState;
NV_STATUS status = NV_OK;
// LOCK: try to acquire GPUs lock
if ((status = rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK)
{
TMR_EVENT *pEvent = (TMR_EVENT *)pTmrEvent;
NvU64 timeNs = pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH) ?
TIMER_RESCHED_TIME_DURING_PM_RESUME_NS :
osGetTickResolution();
//
// We failed to acquire the lock - depending on what's holding it,
// the lock could be held for a while, so try again soon, but not too
// soon to prevent the owner from making forward progress indefinitely.
//
return osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->pOSTmrCBdata, timeNs);
}
threadStateInitISRAndDeferredIntHandler(&threadState, pGpu,
THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
// Call timer event service
status = tmrEventServiceTimer(pGpu, pTmr, (PTMR_EVENT)pTmrEvent);
//
// OS timers come in ISR context, hence schedule workitem for timer event service.
// GPU timer events are also handled in same mammer allowing us to have same functionality
// for callback functions.
//
status = osQueueWorkItemWithFlags(pGpu,
_tmrEventServiceTimerWorkItem,
pTmrEvent,
(OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE |
OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS));
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,"Queuing workitem for timer event failed with status :0x%x\n", status);
}
// Out of conflicting thread
threadStateFreeISRAndDeferredIntHandler(&threadState,
pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu);
return status;
}
@@ -3542,7 +3519,7 @@ NV_STATUS NV_API_CALL rm_is_supported_device(
bIsFirmwareCapable = gpumgrIsDeviceRmFirmwareCapable(pmc_boot_42,
NV_IS_SOC_DISPLAY_DEVICE(pNv),
NULL,
NV_FALSE /* isMCDM */);
NV_FALSE /* bIsTccOrMcdm */);
if (!bIsFirmwareCapable)
{
if (hypervisorIsVgxHyper())

View File

@@ -42,7 +42,7 @@
#include <core/system.h>
#include <os/os.h>
#include "gpu/gpu.h"
#include <objtmr.h>
#include <gpu/timer/objtmr.h>
#include "gpu/bus/kern_bus.h"
#include "nverror.h"
#include <gpu/bif/kernel_bif.h>
@@ -99,7 +99,6 @@ typedef enum
RM_INIT_GPU_PRE_INIT_FAILED,
RM_INIT_GPU_STATE_INIT_FAILED,
RM_INIT_GPU_LOAD_FAILED,
RM_INIT_GPU_UNIVERSAL_VALIDATION_FAILED,
RM_INIT_GPU_DMA_CONFIGURATION_FAILED,
RM_INIT_GPU_GPUMGR_EXPANDED_VISIBILITY_FAILED,
@@ -1130,14 +1129,6 @@ RmInitNvDevice(
}
nvp->flags |= NV_INIT_FLAG_GPU_STATE_LOAD;
status->rmStatus = gpuPerformUniversalValidation_HAL(pGpu);
if (status->rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "*** Failed universal validation\n");
RM_SET_ERROR(*status, RM_INIT_GPU_UNIVERSAL_VALIDATION_FAILED);
return;
}
// Setup GPU scalability
(void) RmInitScalability(pGpu);
@@ -1672,7 +1663,7 @@ static NV_STATUS RmFetchGspRmImages
{
nv_firmware_chip_family_t chipFamily;
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvU32 gpuArch = (DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, nvp->pmc_boot_42) <<
NvU32 gpuArch = (gpuGetArchitectureFromPmcBoot42(nvp->pmc_boot_42) <<
GPU_ARCH_SHIFT);
NvU32 gpuImpl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, nvp->pmc_boot_42);
@@ -1950,7 +1941,7 @@ NvBool RmInitAdapter(
nvp->status = NV_ERR_IRQ_NOT_FIRING;
break;
}
NV_PRINTF(LEVEL_ERROR, "RmVerifySystemEnvironment failed, bailing!\n");
NV_PRINTF(LEVEL_ERROR, "osVerifySystemEnvironment failed, bailing!\n");
goto shutdown;
}
@@ -2041,8 +2032,7 @@ NvBool RmInitAdapter(
pOS->setProperty(pOS, PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED, NV_TRUE);
RmInitS0ixPowerManagement(nv);
RmInitDeferredDynamicPowerManagement(nv);
RmInitPowerManagement(nv);
if (!NV_IS_SOC_DISPLAY_DEVICE(nv) && !NV_IS_SOC_IGPU_DEVICE(nv))
{
@@ -2139,7 +2129,7 @@ void RmShutdownAdapter(
//
NV_ASSERT_OK(gpumgrThreadEnableExpandedGpuVisibility());
RmDestroyDeferredDynamicPowerManagement(nv);
RmDestroyPowerManagement(nv);
freeNbsiTable(pGpu);

View File

@@ -28,47 +28,10 @@
* *
\***************************************************************************/
#include <osfuncs.h>
#include <os/os.h>
static void initOSSpecificFunctionPointers(OBJOS *);
static void initMiscOSFunctionPointers(OBJOS *);
static void initUnixOSFunctionPointers(OBJOS *);
static void initOSSpecificProperties(OBJOS *);
#include <os/os.h>
void
osInitObjOS(OBJOS *pOS)
{
initOSSpecificFunctionPointers(pOS);
initOSSpecificProperties(pOS);
}
static void
initOSSpecificFunctionPointers(OBJOS *pOS)
{
initMiscOSFunctionPointers(pOS);
initUnixOSFunctionPointers(pOS);
}
static void
initMiscOSFunctionPointers(OBJOS *pOS)
{
}
static void
initUnixOSFunctionPointers(OBJOS *pOS)
{
#if defined(NVCPU_X86_64)
pOS->osNv_rdcr4 = nv_rdcr4;
pOS->osNv_cpuid = nv_cpuid;
#endif
}
static void
initOSSpecificProperties
(
OBJOS *pOS
)
{
pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE);
pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE);

View File

@@ -848,18 +848,6 @@ rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *sp,
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_key_rotation_channel_disable(nvidia_stack_t *sp,
gpuChannelHandle channelList[],
NvU32 channeListCount)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsKeyRotationChannelDisable(channelList, channeListCount);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *sp,
struct ccslContext_t **ctx,
gpuChannelHandle channel)
@@ -883,14 +871,14 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *sp,
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_update(nvidia_stack_t *sp,
UvmCslContext *contextList[],
NvU32 contextListCount)
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *sp,
UvmCslContext *contextList[],
NvU32 contextListCount)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslContextUpdate(contextList, contextListCount);
rmStatus = nvGpuOpsCcslRotateKey(contextList, contextListCount);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
@@ -943,6 +931,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *sp,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
@@ -951,7 +940,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *sp,
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, outputBuffer,
rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, keyRotationId, outputBuffer,
addAuthData, addAuthDataSize, authTagData);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
@@ -999,14 +988,15 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *sp,
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU32 bufferSize)
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU8 direction,
NvU32 bufferSize)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsLogDeviceEncryption(ctx, bufferSize);
rmStatus = nvGpuOpsLogEncryption(ctx, direction, bufferSize);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}

View File

@@ -29,7 +29,6 @@
#include "gpu/gpu.h"
#include "gpu/gpu_resource.h"
#include "gpu/subdevice/subdevice.h"
#include <osfuncs.h>
#include <diagnostics/journal.h>
#include "gpu/mem_mgr/mem_desc.h"
#include "mem_mgr/mem.h"

View File

@@ -103,10 +103,15 @@ RmSaveDisplayState
preUnixConsoleParams.bSave = NV_TRUE;
preUnixConsoleParams.bUseVbios = use_vbios;
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice,
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice,
NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE,
&preUnixConsoleParams, sizeof(preUnixConsoleParams)), done);
if (preUnixConsoleParams.bReturnEarly)
{
goto done;
}
if (use_vbios)
{
//
@@ -128,10 +133,10 @@ RmSaveDisplayState
postUnixConsoleParams.bSave = NV_TRUE;
postUnixConsoleParams.bUseVbios = use_vbios;
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, pRmApi->Control(pRmApi, nv->rmapi.hClient,
nv->rmapi.hSubDevice,
NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE,
&postUnixConsoleParams, sizeof(postUnixConsoleParams)), done);
NV_CHECK_OK(status, LEVEL_ERROR,
pRmApi->Control(pRmApi, nv->rmapi.hClient,nv->rmapi.hSubDevice,
NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE,
&postUnixConsoleParams, sizeof(postUnixConsoleParams)));
done:
os_enable_console_access();
@@ -195,6 +200,11 @@ static void RmRestoreDisplayState
NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE,
&preUnixConsoleParams, sizeof(preUnixConsoleParams)), done);
if (preUnixConsoleParams.bReturnEarly)
{
goto done;
}
if (use_vbios)
{
eax = 0x4f02;
@@ -209,10 +219,10 @@ static void RmRestoreDisplayState
postUnixConsoleParams.bSave = NV_FALSE;
postUnixConsoleParams.bUseVbios = use_vbios;
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, pRmApi->Control(pRmApi, nv->rmapi.hClient,
nv->rmapi.hSubDevice,
NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE,
&postUnixConsoleParams, sizeof(postUnixConsoleParams)), done);
NV_CHECK_OK(status, LEVEL_ERROR,
pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice,
NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE,
&postUnixConsoleParams, sizeof(postUnixConsoleParams)));
done:
os_enable_console_access();

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,6 +32,7 @@
#include "gpu/bif/kernel_bif.h"
#include "gpu/mmu/kern_gmmu.h"
#include "gpu/disp/kern_disp.h"
#include "gpu/disp/head/kernel_head.h"
#include <nv_sriov_defines.h>
static NvBool osInterruptPending(
@@ -251,12 +252,41 @@ static NvBool osInterruptPending(
{
if (pKernelDisplay != NULL)
{
NvU32 head = 0;
NvU32 headIntrMask = 0;
MC_ENGINE_BITVECTOR intrDispPending;
kdispServiceVblank_HAL(pGpu, pKernelDisplay, 0,
(VBLANK_STATE_PROCESS_LOW_LATENCY |
VBLANK_STATE_PROCESS_CALLED_FROM_ISR),
&threadState);
// handle RG line interrupt, if it is forwared from GSP.
if (IS_GSP_CLIENT(pGpu))
{
for (head = 0; head < kdispGetNumHeads(pKernelDisplay); ++head)
{
KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, head);
headIntrMask = kheadReadPendingRgLineIntr_HAL(pGpu, pKernelHead, &threadState);
if (headIntrMask != 0)
{
NvU32 clearIntrMask = 0;
kheadProcessRgLineCallbacks_HAL(pGpu,
pKernelHead,
head,
&headIntrMask,
&clearIntrMask,
osIsISR());
if (clearIntrMask != 0)
{
kheadResetRgLineIntrMask_HAL(pGpu, pKernelHead, clearIntrMask, &threadState);
}
}
}
}
*serviced = NV_TRUE;
bitVectorClr(&intr0Pending, MC_ENGINE_IDX_DISP);