535.43.02

This commit is contained in:
Andy Ritger
2023-05-30 10:11:36 -07:00
parent 6dd092ddb7
commit eb5c7665a1
1403 changed files with 295367 additions and 86235 deletions

View File

@@ -0,0 +1,293 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef CC_KEYSTORE_H
#define CC_KEYSTORE_H
//
// Keys are typically grouped in two's so that the first key is used for CPU-to-GPU encryption
// and the second key is used for GPU-to-CPU encryption.
//
// Keyspace identifiers.
// Keyspaces partition the key table by the engine that owns the keys:
// one keyspace for GSP, one for SEC2, and one per logical copy engine
// (LCE0..LCE7). Values are used as the upper half of a global key ID
// (see CC_GKEYID_GEN), so ordering must not change.
enum
{
CC_KEYSPACE_GSP,
CC_KEYSPACE_SEC2,
CC_KEYSPACE_LCE0,
CC_KEYSPACE_LCE1,
CC_KEYSPACE_LCE2,
CC_KEYSPACE_LCE3,
CC_KEYSPACE_LCE4,
CC_KEYSPACE_LCE5,
CC_KEYSPACE_LCE6,
CC_KEYSPACE_LCE7,
CC_KEYSPACE_SIZE // This is always the last element.
};
// Local key IDs within CC_KEYSPACE_GSP. Keys are grouped in pairs (one key
// per transfer direction). The fault buffers only support GPU-to-CPU
// encryption, so the opposite-direction slots are kept as RESERVED
// placeholders to preserve the pairing.
enum
{
CC_LKEYID_GSP_CPU_LOCKED_RPC,
CC_LKEYID_CPU_GSP_LOCKED_RPC,
CC_LKEYID_GSP_CPU_DMA,
CC_LKEYID_CPU_GSP_DMA,
CC_LKEYID_CPU_GSP_RESERVED1,
CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT,
CC_LKEYID_CPU_GSP_RESERVED2,
CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT,
CC_KEYSPACE_GSP_SIZE // This is always the last element.
};
// The fault buffers only support GPU-to-CPU encryption, so the CPU-to-GPU encryption slot
// is left reserved.
// Human-readable names for the CC_KEYSPACE_GSP local key IDs
// (returned by CC_GKEYID_GET_STR; the RESERVED slots have no string).
#define CC_LKEYID_GSP_CPU_LOCKED_RPC_STR "gsp_cpu_locked_rpc"
#define CC_LKEYID_CPU_GSP_LOCKED_RPC_STR "cpu_gsp_locked_rpc"
#define CC_LKEYID_GSP_CPU_DMA_STR "gsp_cpu_dma"
#define CC_LKEYID_CPU_GSP_DMA_STR "cpu_gsp_dma"
#define CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT_STR "gsp_cpu_replayable_fault"
#define CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT_STR "gsp_cpu_non_replayable_fault"
// Local key IDs within CC_KEYSPACE_SEC2: separate DATA (encryption) and
// HMAC keys for user-mode and kernel-mode clients.
enum
{
CC_LKEYID_CPU_SEC2_DATA_USER,
CC_LKEYID_CPU_SEC2_HMAC_USER,
CC_LKEYID_CPU_SEC2_DATA_KERN,
CC_LKEYID_CPU_SEC2_HMAC_KERN,
CC_KEYSPACE_SEC2_SIZE // This is always the last element.
};
// Human-readable names for the CC_KEYSPACE_SEC2 local key IDs.
#define CC_LKEYID_CPU_SEC2_DATA_USER_STR "cpu_sec2_data_user"
#define CC_LKEYID_CPU_SEC2_HMAC_USER_STR "cpu_sec2_hmac_user"
#define CC_LKEYID_CPU_SEC2_DATA_KERN_STR "cpu_sec2_data_kernel"
#define CC_LKEYID_CPU_SEC2_HMAC_KERN_STR "cpu_sec2_hmac_kernel"
// Local key IDs shared by every CC_KEYSPACE_LCEn keyspace: H2D/D2H
// (CPU-to-GPU / GPU-to-CPU) pairs for user, kernel and P2P traffic.
enum
{
CC_LKEYID_LCE_H2D_USER,
CC_LKEYID_LCE_D2H_USER,
CC_LKEYID_LCE_H2D_KERN,
CC_LKEYID_LCE_D2H_KERN,
CC_LKEYID_LCE_H2D_P2P,
CC_LKEYID_LCE_D2H_P2P,
CC_KEYSPACE_LCE_SIZE // This is always the last element.
};
// Total number of key slots across all keyspaces: GSP + SEC2 + one LCE
// keyspace per copy engine. The LCE count is derived from the keyspace
// enum (LCE0..LCE7 are contiguous, ending just before CC_KEYSPACE_SIZE)
// instead of hard-coding 8, so it stays in sync if LCE keyspaces are
// added or removed.
#define CC_KEYSPACE_TOTAL_SIZE (CC_KEYSPACE_GSP_SIZE + CC_KEYSPACE_SEC2_SIZE + ((CC_KEYSPACE_SIZE - CC_KEYSPACE_LCE0) * CC_KEYSPACE_LCE_SIZE))
// Human-readable names for the per-LCE local key IDs, one set per LCE
// keyspace (LCE0..LCE7); returned by CC_GKEYID_GET_STR.
#define CC_LKEYID_LCE0_H2D_USER_STR "Lce0_h2d_user"
#define CC_LKEYID_LCE0_D2H_USER_STR "Lce0_d2h_user"
#define CC_LKEYID_LCE0_H2D_KERN_STR "Lce0_h2d_kernel"
#define CC_LKEYID_LCE0_D2H_KERN_STR "Lce0_d2h_kernel"
#define CC_LKEYID_LCE0_H2D_P2P_STR "Lce0_h2d_p2p"
#define CC_LKEYID_LCE0_D2H_P2P_STR "Lce0_d2h_p2p"
#define CC_LKEYID_LCE1_H2D_USER_STR "Lce1_h2d_user"
#define CC_LKEYID_LCE1_D2H_USER_STR "Lce1_d2h_user"
#define CC_LKEYID_LCE1_H2D_KERN_STR "Lce1_h2d_kernel"
#define CC_LKEYID_LCE1_D2H_KERN_STR "Lce1_d2h_kernel"
#define CC_LKEYID_LCE1_H2D_P2P_STR "Lce1_h2d_p2p"
#define CC_LKEYID_LCE1_D2H_P2P_STR "Lce1_d2h_p2p"
#define CC_LKEYID_LCE2_H2D_USER_STR "Lce2_h2d_user"
#define CC_LKEYID_LCE2_D2H_USER_STR "Lce2_d2h_user"
#define CC_LKEYID_LCE2_H2D_KERN_STR "Lce2_h2d_kernel"
#define CC_LKEYID_LCE2_D2H_KERN_STR "Lce2_d2h_kernel"
#define CC_LKEYID_LCE2_H2D_P2P_STR "Lce2_h2d_p2p"
#define CC_LKEYID_LCE2_D2H_P2P_STR "Lce2_d2h_p2p"
#define CC_LKEYID_LCE3_H2D_USER_STR "Lce3_h2d_user"
#define CC_LKEYID_LCE3_D2H_USER_STR "Lce3_d2h_user"
#define CC_LKEYID_LCE3_H2D_KERN_STR "Lce3_h2d_kernel"
#define CC_LKEYID_LCE3_D2H_KERN_STR "Lce3_d2h_kernel"
#define CC_LKEYID_LCE3_H2D_P2P_STR "Lce3_h2d_p2p"
#define CC_LKEYID_LCE3_D2H_P2P_STR "Lce3_d2h_p2p"
#define CC_LKEYID_LCE4_H2D_USER_STR "Lce4_h2d_user"
#define CC_LKEYID_LCE4_D2H_USER_STR "Lce4_d2h_user"
#define CC_LKEYID_LCE4_H2D_KERN_STR "Lce4_h2d_kernel"
#define CC_LKEYID_LCE4_D2H_KERN_STR "Lce4_d2h_kernel"
#define CC_LKEYID_LCE4_H2D_P2P_STR "Lce4_h2d_p2p"
#define CC_LKEYID_LCE4_D2H_P2P_STR "Lce4_d2h_p2p"
#define CC_LKEYID_LCE5_H2D_USER_STR "Lce5_h2d_user"
#define CC_LKEYID_LCE5_D2H_USER_STR "Lce5_d2h_user"
#define CC_LKEYID_LCE5_H2D_KERN_STR "Lce5_h2d_kernel"
#define CC_LKEYID_LCE5_D2H_KERN_STR "Lce5_d2h_kernel"
#define CC_LKEYID_LCE5_H2D_P2P_STR "Lce5_h2d_p2p"
#define CC_LKEYID_LCE5_D2H_P2P_STR "Lce5_d2h_p2p"
#define CC_LKEYID_LCE6_H2D_USER_STR "Lce6_h2d_user"
#define CC_LKEYID_LCE6_D2H_USER_STR "Lce6_d2h_user"
#define CC_LKEYID_LCE6_H2D_KERN_STR "Lce6_h2d_kernel"
#define CC_LKEYID_LCE6_D2H_KERN_STR "Lce6_d2h_kernel"
#define CC_LKEYID_LCE6_H2D_P2P_STR "Lce6_h2d_p2p"
#define CC_LKEYID_LCE6_D2H_P2P_STR "Lce6_d2h_p2p"
#define CC_LKEYID_LCE7_H2D_USER_STR "Lce7_h2d_user"
#define CC_LKEYID_LCE7_D2H_USER_STR "Lce7_d2h_user"
#define CC_LKEYID_LCE7_H2D_KERN_STR "Lce7_h2d_kernel"
#define CC_LKEYID_LCE7_D2H_KERN_STR "Lce7_d2h_kernel"
#define CC_LKEYID_LCE7_H2D_P2P_STR "Lce7_h2d_p2p"
#define CC_LKEYID_LCE7_D2H_P2P_STR "Lce7_d2h_p2p"
// Generate a global key ID from a keyspace (a) and local key ID (b):
// keyspace in the upper 16 bits, local key ID in the lower 16.
// NOTE(review): (b) is assumed to fit in 16 bits — confirm with callers.
// Expansions are fully parenthesized so the macros compose safely with
// any surrounding operator (CERT PRE01-C / PRE02-C).
#define CC_GKEYID_GEN(a, b) ((NvU32)(((a) << 16) | (b)))
// Get the keyspace component from a global key ID.
#define CC_GKEYID_GET_KEYSPACE(a) ((NvU16)((a) >> 16))
// Get the local key ID from a global key ID.
#define CC_GKEYID_GET_LKEYID(a) ((NvU16)((a) & 0xffff))
// Get the unique string from a global key ID.
// Maps a global key ID to its human-readable *_STR name, or NULL when the
// keyspace / local key ID combination has no string (e.g. reserved slots).
// NOTE: expands to a large nested conditional and evaluates 'a' many
// times, so do not pass an expression with side effects.
// The whole expansion is wrapped in parentheses so the macro parses as a
// single expression in any context (the trailing ": NULL : NULL" would
// otherwise re-associate with operators following the macro invocation).
#define CC_GKEYID_GET_STR(a) ( \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_GSP) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_GSP_CPU_LOCKED_RPC) ? \
CC_LKEYID_GSP_CPU_LOCKED_RPC_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_GSP_LOCKED_RPC) ? \
CC_LKEYID_CPU_GSP_LOCKED_RPC_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_GSP_CPU_DMA) ? \
CC_LKEYID_GSP_CPU_DMA_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_GSP_DMA) ? \
CC_LKEYID_CPU_GSP_DMA_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT) ? \
CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT) ? \
CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_SEC2) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_DATA_USER) ? \
CC_LKEYID_CPU_SEC2_DATA_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_HMAC_USER) ? \
CC_LKEYID_CPU_SEC2_HMAC_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_DATA_KERN) ? \
CC_LKEYID_CPU_SEC2_DATA_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_HMAC_KERN) ? \
CC_LKEYID_CPU_SEC2_HMAC_KERN_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE0) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE0_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE0_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE0_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE0_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE0_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE0_D2H_P2P_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE1) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE1_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE1_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE1_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE1_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE1_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE1_D2H_P2P_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE2) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE2_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE2_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE2_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE2_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE2_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE2_D2H_P2P_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE3) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE3_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE3_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE3_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE3_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE3_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE3_D2H_P2P_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE4) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE4_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE4_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE4_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE4_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE4_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE4_D2H_P2P_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE5) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE5_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE5_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE5_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE5_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE5_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE5_D2H_P2P_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE6) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE6_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE6_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE6_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE6_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE6_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE6_D2H_P2P_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE7) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE7_H2D_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_USER) ? \
CC_LKEYID_LCE7_D2H_USER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_KERN) ? \
CC_LKEYID_LCE7_H2D_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_KERN) ? \
CC_LKEYID_LCE7_D2H_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_P2P) ? \
CC_LKEYID_LCE7_H2D_P2P_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_D2H_P2P) ? \
CC_LKEYID_LCE7_D2H_P2P_STR : NULL : NULL)
// Size in bytes of the export master key (32 bytes = 256 bits).
#define CC_EXPORT_MASTER_KEY_SIZE_BYTES 32
#endif // CC_KEYSTORE_H

View File

@@ -31,10 +31,6 @@
#include "nvtypes.h"
#include "flcnretval.h"
#ifndef NV_SIZEOF32
#define NV_SIZEOF32(v) (sizeof(v))
#endif
#ifndef NV_ARRAY_ELEMENTS
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
#endif

View File

@@ -289,4 +289,7 @@ typedef NvU8 FLCN_STATUS;
// Arithmetic errors
#define FLCN_ERR_ARITHMETIC_OVERFLOW (0xFAU)
// Pri errors
#define FLCN_ERR_OUTSTANDING_PRI_ERROR (0xFBU)
#endif // FLCNRETVAL_H

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,6 +39,8 @@
#define NVDM_TYPE_FSP_RESPONSE 0x15
#define NVDM_TYPE_INFOROM 0x17
#define NVDM_TYPE_SMBPBI 0x18
#define NVDM_TYPE_ROMREAD 0x1A
#define NVDM_TYPE_UEFI_RM 0x1C
#define NVDM_TYPE_UEFI_XTL_DEBUG_INTR 0x1D
#endif // _FSP_NVDM_FORMAT_H_

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,9 @@
#ifndef GSP_FW_SR_META_H_
#define GSP_FW_SR_META_H_
#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
#define GSP_FW_SR_META_REVISION 2
/*!
* GSP firmware SR metadata
*
@@ -39,14 +42,14 @@ typedef struct
// Magic
// Use for verification by Booter
//
NvU64 magic; // = GSP_FW_SR_META_MAGIC;
NvU64 magic; // = GSP_FW_SR_META_MAGIC;
//
// Revision number
// Bumped up when we change this interface so it is not backward compatible.
// Bumped up when we revoke GSP-RM ucode
//
NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
//
// ---- Members regarding data in SYSMEM ----------------------------
@@ -58,26 +61,26 @@ typedef struct
// ---- Members for crypto ops across S/R ---------------------------
//
// IV used for encryption of the Suspend/Resume data
// HMAC over the entire GspFwSRMeta structure (including padding)
// with the hmac field itself zeroed.
//
NvU8 IV[32];
NvU8 hmac[32];
// Hash over GspFwWprMeta structure
NvU8 wprMetaHash[32];
// Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
NvU8 heapFreeListHash[32];
// Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
NvU8 dataHash[32];
//
// Hash generated of the Suspend/Resume data
// Pad structure to exactly 256 bytes (1 DMA chunk).
// Padding initialized to zero.
//
NvU8 hash[64];
// ---- Unused members ----------------------------------------------
//
// Pad structure to exactly 256 bytes (1 DMA chunk). Can replace padding with additional
// fields without incrementing revision. Padding initialized to 0.
//
NvU32 padding[32];
NvU32 padding[24];
} GspFwSRMeta;
#define GSP_FW_SR_META_REVISION 1
#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
#endif // GSP_FW_SR_META_H_

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -80,8 +80,26 @@ typedef struct
NvU64 bootloaderDataOffset;
NvU64 bootloaderManifestOffset;
NvU64 sysmemAddrOfSignature;
NvU64 sizeOfSignature;
union
{
// Used only at initial boot
struct
{
NvU64 sysmemAddrOfSignature;
NvU64 sizeOfSignature;
};
//
// Used at suspend/resume to read GspFwHeapFreeList
// Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
//
struct
{
NvU32 gspFwHeapFreeListWprOffset;
NvU32 unused0;
NvU64 unused1;
};
};
// ---- Members describing FB layout --------------------------------
NvU64 gspFwRsvdStart;
@@ -158,4 +176,25 @@ typedef struct
#define GSP_FW_WPR_META_REVISION 1
#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
#define GSP_FW_WPR_HEAP_FREE_REGION_COUNT 128
typedef struct
{
//
// offset relative to GspFwWprMeta FBMEM PA
// describes a region at [offs, offs + length)
//
NvU32 offs; // start, inclusive
NvU32 length;
} GspFwHeapFreeRegion;
typedef struct
{
NvU64 magic;
NvU32 nregions;
GspFwHeapFreeRegion regions[GSP_FW_WPR_HEAP_FREE_REGION_COUNT];
} GspFwHeapFreeList;
#define GSP_FW_HEAP_FREE_LIST_MAGIC 0x4845415046524545ULL
#endif // GSP_FW_WPR_META_H_

View File

@@ -0,0 +1,39 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGSP_PROXY_REG_H
#define NVGSP_PROXY_REG_H
//
// GSP proxy register field definitions. Field positions use high:low
// bit-range notation (NVIDIA DRF style — presumably consumed by the DRF
// field-manipulation macros; confirm against callers).
//
// Bit 0: confidential-compute mode enable/disable.
#define NVGSP_PROXY_REG_CONFIDENTIAL_COMPUTE 0:0
#define NVGSP_PROXY_REG_CONFIDENTIAL_COMPUTE_DISABLE 0x00000000
#define NVGSP_PROXY_REG_CONFIDENTIAL_COMPUTE_ENABLE 0x00000001
// Bit 1: confidential-compute early-init enable/disable.
#define NVGSP_PROXY_REG_CONF_COMPUTE_EARLY_INIT 1:1
#define NVGSP_PROXY_REG_CONF_COMPUTE_EARLY_INIT_DISABLE 0x00000000
#define NVGSP_PROXY_REG_CONF_COMPUTE_EARLY_INIT_ENABLE 0x00000001
// Bit 2: confidential-compute developer-mode enable/disable.
#define NVGSP_PROXY_REG_CONF_COMPUTE_DEV_MODE 2:2
#define NVGSP_PROXY_REG_CONF_COMPUTE_DEV_MODE_DISABLE 0x00000000
#define NVGSP_PROXY_REG_CONF_COMPUTE_DEV_MODE_ENABLE 0x00000001
#endif // NVGSP_PROXY_REG_H

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -52,8 +52,8 @@ static inline nv_firmware_chip_family_t nv_firmware_get_chip_family(
return NV_FIRMWARE_CHIP_FAMILY_AD10X;
case GPU_ARCHITECTURE_HOPPER:
if (gpuImpl == GPU_IMPLEMENTATION_GH100)
return NV_FIRMWARE_CHIP_FAMILY_GH100;
return NV_FIRMWARE_CHIP_FAMILY_GH100;
}
return NV_FIRMWARE_CHIP_FAMILY_NULL;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -81,11 +81,11 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ga10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
@@ -100,11 +100,11 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ga10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,7 +62,6 @@ CHIPSET_SETUP_FUNC(Intel_A2D2_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A2C9_setupFunc)
CHIPSET_SETUP_FUNC(Intel_A301_setupFunc)
CHIPSET_SETUP_FUNC(Intel_0685_setupFunc)
CHIPSET_SETUP_FUNC(Intel_IceLake_setupFunc)
CHIPSET_SETUP_FUNC(Intel_4381_setupFunc)
CHIPSET_SETUP_FUNC(Intel_7A82_setupFunc)
CHIPSET_SETUP_FUNC(Intel_7A04_setupFunc)
@@ -91,6 +90,7 @@ CHIPSET_SETUP_FUNC(Ampere_Altra_setupFunc)
CHIPSET_SETUP_FUNC(Arm_NeoverseN1_setupFunc)
CHIPSET_SETUP_FUNC(Nvidia_T210_setupFunc)
CHIPSET_SETUP_FUNC(Nvidia_T194_setupFunc)
CHIPSET_SETUP_FUNC(Nvidia_TH500_setupFunc)
// Keep string length <=32 (including termination) to avoid string copy overflow
@@ -179,13 +179,13 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_INTEL, 0xA30D, CS_INTEL_A2C9, "IntelH370", Intel_A2C9_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA301, CS_INTEL_A301, "Intel-CannonLake", Intel_A301_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x0685, CS_INTEL_0685, "Intel-CometLake", Intel_0685_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1CB, CS_INTEL_C620, "Intel-IceLake", Intel_IceLake_setupFunc},
{PCI_VENDOR_ID_INTEL, 0xA1CB, CS_INTEL_C620, "Intel-IceLake", NULL},
{PCI_VENDOR_ID_INTEL, 0x4381, CS_INTEL_4381, "Intel-RocketLake", Intel_4381_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x4385, CS_INTEL_4381, "Intel-RocketLake", Intel_4381_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x7A82, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x7A84, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1B81, CS_INTEL_1B81, "Intel-SapphireRapids", NULL},
{PCI_VENDOR_ID_INTEL, 0x18DC, CS_INTEL_18DC, "Intel-IceLake", Intel_IceLake_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x18DC, CS_INTEL_18DC, "Intel-IceLake", NULL},
{PCI_VENDOR_ID_INTEL, 0x7A04, CS_INTEL_7A04, "Intel-RaptorLake", Intel_7A04_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
@@ -200,6 +200,12 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_NVIDIA, 0x229E, CS_NVIDIA_T234, "T234", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22C2, CS_NVIDIA_T23x, "T23x", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22C3, CS_NVIDIA_T23x, "T23x", Nvidia_T194_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22B1, CS_NVIDIA_TH500, "TH500", Nvidia_TH500_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22B2, CS_NVIDIA_TH500, "TH500", Nvidia_TH500_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22B3, CS_NVIDIA_TH500, "TH500", Nvidia_TH500_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22B4, CS_NVIDIA_TH500, "TH500", Nvidia_TH500_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22B8, CS_NVIDIA_TH500, "TH500", Nvidia_TH500_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x22B9, CS_NVIDIA_TH500, "TH500", Nvidia_TH500_setupFunc},
{PCI_VENDOR_ID_SIS, 0x0649, CS_SIS_649, "649", SiS_656_setupFunc},
{PCI_VENDOR_ID_SIS, 0x0656, CS_SIS_656, "656", SiS_656_setupFunc},
@@ -238,6 +244,7 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD, "Mellanox BlueField", Mellanox_BlueField_setupFunc},
{PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2", NULL},
{PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2 Crypto disabled", NULL},
{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3, "Mellanox BlueField 3", NULL},
{PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2, "Amazon Gravitron2", Amazon_Gravitron2_setupFunc},
{PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX, "Fujitsu A64FX", Fujitsu_A64FX_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
@@ -290,6 +297,7 @@ VENDORNAME vendorName[] =
{PCI_VENDOR_ID_FUJITSU, "Fujitsu"},
{PCI_VENDOR_ID_CADENCE, "Cadence"},
{PCI_VENDOR_ID_ARM, "ARM"},
{PCI_VENDOR_ID_ALIBABA, "Alibaba"},
{0, "Unknown"} // Indicates end of the table
};
@@ -312,6 +320,12 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_NVIDIA, 0x229E, CS_NVIDIA_T234}, // NVIDIA Tegra Orin RP2
{PCI_VENDOR_ID_NVIDIA, 0x22C2, CS_NVIDIA_T23x}, // NVIDIA Tegra RP0
{PCI_VENDOR_ID_NVIDIA, 0x22C3, CS_NVIDIA_T23x}, // NVIDIA Tegra RP1
{PCI_VENDOR_ID_NVIDIA, 0x22B1, CS_NVIDIA_TH500}, // NVIDIA TH500 RP for GH100 GPU in GH180.
{PCI_VENDOR_ID_NVIDIA, 0x22B2, CS_NVIDIA_TH500}, // NVIDIA TH500 RP x16
{PCI_VENDOR_ID_NVIDIA, 0x22B3, CS_NVIDIA_TH500}, // NVIDIA TH500 RP x4
{PCI_VENDOR_ID_NVIDIA, 0x22B4, CS_NVIDIA_TH500}, // NVIDIA TH500 RP x1
{PCI_VENDOR_ID_NVIDIA, 0x22B8, CS_NVIDIA_TH500}, // NVIDIA TH500 RP x8
{PCI_VENDOR_ID_NVIDIA, 0x22B9, CS_NVIDIA_TH500}, // NVIDIA TH500 RP x2
{PCI_VENDOR_ID_APM, 0xe004, CS_APM_STORM}, // Applied Micro X-Gene "Storm"
{PCI_VENDOR_ID_MARVELL, 0xAF00, CS_MARVELL_THUNDERX2}, // Marvell ThunderX2
@@ -328,6 +342,7 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD}, // Mellanox BlueField
{PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2
{PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2 Crypto disabled
{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3},// Mellanox BlueField 3
{PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2}, // Amazon Gravitron2
{PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX}, // Fujitsu A64FX
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500}, // Phytium S2500
@@ -345,6 +360,7 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_ARM, 0x0100, CS_ARM_NEOVERSEN1}, // Arm Neoverse N1
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN96XX}, // Marvell OCTEON CN96xx
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN98XX}, // Marvell OCTEON CN98xx
{PCI_VENDOR_ID_ALIBABA, 0x8000, CS_ALIBABA_YITIAN}, // Alibaba Yitian
// last element must have chipset CS_UNKNOWN (zero)
{0, 0, CS_UNKNOWN}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2010-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -112,6 +112,7 @@
#define NV_MSGBOX_CMD_ARG1_POWER_TOTAL 0x00000000
#define NV_MSGBOX_CMD_ARG1_SMBPBI_POWER 0x00000001
#define NV_MSGBOX_CMD_ARG1_POWER_FB 0x00000002
#define NV_MSGBOX_CMD_ARG1_POWER_MODULE 0x00000003
/* SysId info type encodings for opcode NV_MSGBOX_CMD_OPCODE_GET_SYS_ID_DATA (0x05) */
#define NV_MSGBOX_CMD_ARG1_BOARD_PART_NUM_V1 0x00000000
#define NV_MSGBOX_CMD_ARG1_OEM_INFO_V1 0x00000001
@@ -134,7 +135,8 @@
#define NV_MSGBOX_CMD_ARG1_PCIE_SPEED_V1 0x00000012
#define NV_MSGBOX_CMD_ARG1_PCIE_WIDTH_V1 0x00000013
#define NV_MSGBOX_CMD_ARG1_TGP_LIMIT_V1 0x00000014
#define NV_MSGBOX_CMD_ARG1_SYS_ID_DATA_TYPE_MAX 0x00000014 /* Adjust, when adding new types */
#define NV_MSGBOX_CMD_ARG1_MODULE_POWER_LIMIT_V1 0x00000016
#define NV_MSGBOX_CMD_ARG1_SYS_ID_DATA_TYPE_MAX 0x00000016 /* Adjust, when adding new types */
#define NV_MSGBOX_CMD_ARG1_REGISTER_ACCESS_WRITE 0x00000000
#define NV_MSGBOX_CMD_ARG1_REGISTER_ACCESS_READ 0x00000001
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_TARGET 0x00000000
@@ -142,6 +144,11 @@
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_SHUTDN 0x00000002
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_HBM_SLOWDN 0x00000003
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_SW_SLOWDN 0x00000004
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_TARGET_TLIMIT 0x00000005
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_HW_SLOWDN_TLIMIT 0x00000006
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_SHUTDN_TLIMIT 0x00000007
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_HBM_SLOWDN_TLIMIT 0x00000008
#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_SW_SLOWDN_TLIMIT 0x00000009
#define NV_MSGBOX_CMD_ARG1_GET_MISC_ECC_ENABLED_STATE 0x00000000
#define NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_RESET_REQUIRED 0x00000001
#define NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_FLAGS_PAGE_0 0x00000000
@@ -181,6 +188,10 @@
#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_1 0x00000001
#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_2 0x00000002
#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_3 0x00000003
#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_4 0x00000004
#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_6 0x00000006
#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_8 0x00000008
#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_9 0x00000009
/* Async requests */
#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_GET \
@@ -219,6 +230,12 @@
0x00000010
#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_CONFIGURE_PROGRAMMABLE_EDPP \
0x00000011
#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_MODULE_LIMIT_CONTROL_GET \
0x00000012
#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_MODULE_LIMIT_CONTROL_SET \
0x00000013
#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_MODULE_LIMIT_INFO_GET \
0x00000014
#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_POLL 0x000000ff
@@ -324,6 +341,9 @@
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_CORRECTABLE_ERROR 0
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_UNCORRECTABLE_ERROR 1
#define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_GPU 0x00000000
#define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_MODULE 0x00000003
// Query type of _GET_POWER_HINT_INFO
#define NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_CLK 0x00000000
#define NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_TEMP 0x00000001
@@ -439,6 +459,34 @@
#define NV_MSGBOX_CMD_ARG2_REMAP_ROWS_STATE_FLAGS_PAGE0 0x00000000
/*!
* Arg2 for _GET_PCIE_LINK_INFO
* Arg1 == _GET_PCIE_LINK_INFO_PAGE_8
* Return TX EQ parameters
*/
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_8_LANE_IDX 3:0
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_8_SPEED_SELECT 5:4
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_8_SPEED_SELECT_GEN_3 \
0x00000000
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_8_SPEED_SELECT_GEN_4 \
0x00000001
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_8_SPEED_SELECT_GEN_5 \
0x00000002
/*!
* Arg2 for _GET_PCIE_LINK_INFO
* Arg1 == _GET_PCIE_LINK_INFO_PAGE_9
* Return RX EQ parameters
*/
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_9_LANE_IDX 3:0
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_9_SPEED_SELECT 5:4
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_9_SPEED_SELECT_GEN_3 \
0x00000000
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_9_SPEED_SELECT_GEN_4 \
0x00000001
#define NV_MSGBOX_CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_9_SPEED_SELECT_GEN_5 \
0x00000002
/*!
* Arg2 for _GET_POWER_HINT_INFO
* ARG1 == _GET_POWER_HINT_INFO_CLK
@@ -614,6 +662,18 @@
#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS_ADT7473 0x00000002
#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS_SFXP11_5 0x00000005
#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS_SFXP24_8 0x00000008
#define NV_MSGBOX_DATA_CAP_0_GET_ENERGY_COUNTER_MODULE 12:12
#define NV_MSGBOX_DATA_CAP_0_GET_ENERGY_COUNTER_MODULE_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_GET_ENERGY_COUNTER_MODULE_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_CONTROL_GET 13:13
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_CONTROL_GET_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_CONTROL_GET_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_CONTROL_SET 14:14
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_CONTROL_SET_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_CONTROL_SET_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_INFO_GET 15:15
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_INFO_GET_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_MODULE_LIMIT_INFO_GET_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_POWER_TOTAL 16:16
#define NV_MSGBOX_DATA_CAP_0_POWER_TOTAL_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_POWER_TOTAL_AVAILABLE 0x00000001
@@ -623,7 +683,24 @@
#define NV_MSGBOX_DATA_CAP_0_GPU_SYSCONTROL 18:18
#define NV_MSGBOX_DATA_CAP_0_GPU_SYSCONTROL_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_GPU_SYSCONTROL_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_THERMP_BITS 28:24 // Adjust when adding new bits
#define NV_MSGBOX_DATA_CAP_0_THERMP_BITS 28:19 // Adjust when adding new bits
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_TLIMIT_BITS 23:19 // Adjust when adding new bits
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_BITS 28:24 // Adjust when adding new bits
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC_TLIMIT 19:19
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC_TLIMIT_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC_TLIMIT_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SLOWDN_TLIMIT 20:20
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SLOWDN_TLIMIT_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SLOWDN_TLIMIT_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SHUTDN_TLIMIT 21:21
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SHUTDN_TLIMIT_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SHUTDN_TLIMIT_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_MEMORY_TLIMIT 22:22
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_MEMORY_TLIMIT_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_MEMORY_TLIMIT_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_GPU_SW_SLOWDOWN_TLIMIT 23:23
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_GPU_SW_SLOWDOWN_TLIMIT_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_GPU_SW_SLOWDOWN_TLIMIT_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC 24:24
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC_AVAILABLE 0x00000001
@@ -645,6 +722,9 @@
#define NV_MSGBOX_DATA_CAP_0_POWER_FB 30:30
#define NV_MSGBOX_DATA_CAP_0_POWER_FB_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_POWER_FB_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_POWER_MODULE 31:31
#define NV_MSGBOX_DATA_CAP_0_POWER_MODULE_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_POWER_MODULE_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_1 1
#define NV_MSGBOX_DATA_CAP_1_BOARD_PART_NUM_V1 0:0
@@ -692,6 +772,9 @@
#define NV_MSGBOX_DATA_CAP_1_INFOROM_VER_V1 14:14
#define NV_MSGBOX_DATA_CAP_1_INFOROM_VER_V1_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_1_INFOROM_VER_V1_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_1_MODULE_LIMIT_V1 15:15
#define NV_MSGBOX_DATA_CAP_1_MODULE_LIMIT_V1_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_1_MODULE_LIMIT_V1_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_1_ECC_V1 16:16
#define NV_MSGBOX_DATA_CAP_1_ECC_V1_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_1_ECC_V1_AVAILABLE 0x00000001
@@ -1070,16 +1153,14 @@
* Response to
* NV_MSGBOX_CMD_ARG1_GET_CLOCK_THROTTLE_REASON
*/
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON 31:0
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_NONE 0x00000000
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_POWER_CAP 0x00000001
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_SLOWDOWN 0x00000002
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SYNC_BOOST 0x00000004
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN_TLIMIT 0x00000008
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN_TAVG 0x00000010
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN_TMEM 0x00000020
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_THERMAL_SLOWDOWN 0x00000040
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_POWER_BREAK_SLOWDOWN 0x00000080
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON 31:0
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_NONE 0x00000000
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_POWER_CAP 0x00000001
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_SLOWDOWN 0x00000002
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_THERMAL_SLOWDOWN 0x00000004
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_POWER_BREAK_SLOWDOWN 0x00000008
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SYNC_BOOST 0x00000010
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN 0x00000020
/*
* Number of Nvlink data outputs (dataOut, extData) for
@@ -1105,6 +1186,8 @@
#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_SAFE 0x00000001
#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_ACTIVE 0x00000002
#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_ERROR 0x00000003
#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_L1_LOW_POWER 0x00000004
#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_NVLINK_DISABLED 0x00000005
#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_INVALID 0x000000ff
/* Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_BANDWIDTH (in Mps) */
@@ -1171,7 +1254,7 @@
#define NV_MSGBOX_DATA_REMAP_ROW_HISTOGRAM_LOW_AVAILABILITY 15:0
#define NV_MSGBOX_DATA_REMAP_ROW_HISTOGRAM_PARTIAL_AVAILABILITY 31:16
/* Respones to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_0 */
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_0 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED 2:0
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_UNKNOWN 0x00000000
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_2500_MTPS 0x00000001
@@ -1191,13 +1274,61 @@
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_FATAL_ERROR_COUNT 23:16
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_UNSUPP_REQ_COUNT 31:24
/* Respones to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_1 */
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_1 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_1_L0_TO_RECOVERY_COUNT 31:0
/* Respones to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_2 */
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_2 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_2_REPLAY_ROLLOVER_COUNT 15:0
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_2_NAKS_RCVD_COUNT 31:16
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_3 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED 2:0
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_UNKNOWN 0x00000000
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_2500_MTPS 0x00000001
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_5000_MTPS 0x00000002
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_8000_MTPS 0x00000003
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_16000_MTPS 0x00000004
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_32000_MTPS 0x00000005
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_RESERVED 2:0
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_4 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_4_TX_COUNT 31:0
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_6 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE 4:0
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_DETECT 0x00000000
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_POLLING 0x00000001
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_CONFIGURATION 0x00000002
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_RECOVERY 0x00000003
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_RECOVERY_EQZN 0x00000004
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L0 0x00000005
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L0S 0x00000006
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L1 0x00000007
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L1_PLL_PD 0x00000008
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L2 0x00000009
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L1_CPM 0x0000000a
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L1_1 0x0000000b
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_L1_2 0x0000000c
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_HOT_RESET 0x0000000d
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_LOOPBACK 0x0000000e
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_DISABLED 0x0000000f
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_LINK_DOWN 0x00000010
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_LINK_READY 0x00000011
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_LANES_IN_SLEEP 0x00000012
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_6_LTSSM_STATE_ILLEGAL 0x0000001f
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_8 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_8_EQ_TX_LOCAL_PRESET 3:0
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_8_EQ_TX_LOCAL_USE_PRESET 4:4
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_8_EQ_TX_LOCAL_FS 10:5
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_8_EQ_TX_LOCAL_LF 16:11
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_9 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_9_EQ_RX_REMOTE_PRESET 3:0
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_9_EQ_RX_REMOTE_USE_PRESET 4:4
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_9_EQ_RX_REMOTE_FS 10:5
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_9_EQ_RX_REMOTE_LF 16:11
/*
* Input for NV_MSGBOX_CMD_OPCODE_GPU_PERFORMANCE_MONITORING. Value is valid
* only if Arg2 != GPM_PARTITION_AGGREGATE and Arg2.Bit7 == 1
@@ -1224,15 +1355,18 @@
/* Respones to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_2 */
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_2_NAKS_SENT_COUNT 15:0
/* Respones to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_3 */
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED 2:0
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_UNKNOWN 0x00000000
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_2500_MTPS 0x00000001
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_5000_MTPS 0x00000002
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_8000_MTPS 0x00000003
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_16000_MTPS 0x00000004
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_32000_MTPS 0x00000005
#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_RESERVED 2:0
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_4 */
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_4_RX_COUNT 31:0
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_8 */
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_8_EQ_TX_LOCAL_PRECUR 5:0
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_8_EQ_TX_LOCAL_MAINCUR 11:6
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_8_EQ_TX_LOCAL_POSTCUR 17:12
/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_9 */
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_9_EQ_RX_REMOTE_PRECUR 5:0
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_9_EQ_RX_REMOTE_MAINCUR 11:6
#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_9_EQ_RX_REMOTE_POSTCUR 17:12
/* Response to NV_MSGBOX_CMD_ARG1_REMAP_ROWS_HISTOGRAM */
#define NV_MSGBOX_EXT_DATA_REMAP_ROW_HISTOGRAM_MAX_AVAILABILITY 31:16
@@ -1341,6 +1475,7 @@ typedef enum
NV_MSGBOX_EVENT_TYPE_MIG_TOGGLE_SUCCESS,
NV_MSGBOX_EVENT_TYPE_SERVER_RESTART_WARM,
NV_MSGBOX_EVENT_TYPE_DRIVER_ERROR_MESSAGE_NEW,
NV_MSGBOX_EVENT_TYPE_MODULE_LIMIT_SET_SUCCESS,
NV_MSGBOX_NUM_EVENTS, /* insert new event types before this line */
} NvMsgboxEventType;
@@ -1680,8 +1815,10 @@ typedef enum
/*!
* This structure is used to hold parameters for
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_GET and
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_SET
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_GET,
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_SET,
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_MODULE_LIMIT_CONTROL_GET and
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_MODULE_LIMIT_CONTROL_SET
*/
typedef struct
{
@@ -1693,12 +1830,12 @@ typedef struct
* If flags:_CLEAR is _ON, it will clear the TGP limit. The
* persistence still depends on persist flag.
*/
#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST 0:0
#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST_OFF 0x00000000
#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST_ON 0x00000001
#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR 1:1
#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR_OFF 0x00000000
#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR_ON 0x00000001
#define NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST 0:0
#define NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST_OFF 0x00000000
#define NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST_ON 0x00000001
#define NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR 1:1
#define NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR_OFF 0x00000000
#define NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR_ON 0x00000001
/*!
* Current total GPU power limit value to enforce, requested by the
@@ -1714,11 +1851,12 @@ typedef struct
* in milliwatts.
*/
NvU32 limitCurrOutput;
} NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS;
} NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS;
/*!
* This structure is used to hold parameters for
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_INFO_GET
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_INFO_GET and
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_MODULE_LIMIT_INFO_GET
*/
typedef struct
{
@@ -1726,13 +1864,14 @@ typedef struct
* Current total GPU power limit lower and upper bounds and the
* default setting, expressed in milliwatts.
* These constraints must be observed, when the limit
* is being set with
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_SET.
* is being set with either
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_SET or
* NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_MODULE_LIMIT_CONTROL_SET.
*/
NvU32 limitMin;
NvU32 limitMax;
NvU32 limitDefault;
} NV_MSGBOX_PMGR_PWR_TGP_LIMIT_INFO_PARAMS;
} NV_MSGBOX_PMGR_PWR_POWER_LIMIT_INFO_PARAMS;
/*!
* This structure is used to hold parameters for
@@ -1917,6 +2056,7 @@ typedef struct
//<! have been lost
//<! bit 1: the text message has been
//<! truncated
NvU8 xidIdExt; //<! event type Id (Xid) extension
NvU32 seqNumber; //<! record sequential number
NvU32 timeStamp; //<! seconds since the epoch UTC
@@ -2118,7 +2258,7 @@ typedef struct
* amount of space parameter blocks can take.
*/
typedef union {
NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS tgpLimitControl;
NV_MSGBOX_PMGR_PWR_POWER_LIMIT_CONTROL_PARAMS tgpLimitControl;
NV_MSGBOX_THERMAL_FAN_V1_COUNT_PARAMS fanCountV1Get;
NV_MSGBOX_THERMAL_FAN_V1_INFO_PARAMS fanCountV1Info;
NV_MSGBOX_THERMAL_FAN_V1_STATUS_PARAMS fanCountV1Status;
@@ -2544,12 +2684,45 @@ typedef union {
FLD_SET_DRF(_MSGBOX, _CMD, _COPY_DATA, _ON, (cmd)) \
)
#define NV_MSGBOX_CMD_GPM_GET_METRIC(type, metric, partition) \
#define NV_MSGBOX_CMD_GET_PCIE_LINK_INFO_1(page) \
( \
NV_MSGBOX_CMD(_GET_PCIE_LINK_INFO, \
(page), \
0) \
)
// We use the fact here that for pages ## 8 and 9 Arg2 definitions are similar
#define NV_MSGBOX_CMD_GET_PCIE_LINK_INFO_2(page, lane, speed) \
( \
NV_MSGBOX_CMD(_GET_PCIE_LINK_INFO, \
(page), \
DRF_NUM(_MSGBOX, _CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_8, \
_LANE_IDX, (lane)) | \
DRF_NUM(_MSGBOX, _CMD_ARG2_GET_PCIE_LINK_INFO_PAGE_8, \
_SPEED_SELECT, (speed))) \
)
#define NV_MSGBOX_CMD_GPM_GET_METRIC(type, metric, req_ci_metrics, partition) \
( \
NV_MSGBOX_CMD(_GPU_PERFORMANCE_MONITORING, 0, 0) | \
DRF_DEF(_MSGBOX, _CMD, _ARG1_GPM_ACTION, type) | \
DRF_NUM(_MSGBOX, _CMD, _ARG1_GPM_METRIC, metric) | \
DRF_NUM(_MSGBOX, _CMD, _ARG2_GPM_PARTITION_INDEX, partition) \
DRF_NUM(_MSGBOX, _CMD, _ARG2_GPM_CI_METRICS_REQUESTED, req_ci_metrics) | \
DRF_NUM(_MSGBOX, _CMD, _ARG2_GPM_PARTITION_INDEX, partition) \
)
#define NV_MSGBOX_CMD_GPM_GET_METRIC_AGGREGATE(type, metric) \
( \
NV_MSGBOX_CMD(_GPU_PERFORMANCE_MONITORING, 0, 0) | \
DRF_DEF(_MSGBOX, _CMD, _ARG1_GPM_ACTION, type) | \
DRF_NUM(_MSGBOX, _CMD, _ARG1_GPM_METRIC, metric) | \
DRF_DEF(_MSGBOX, _CMD, _ARG2, _GPM_PARTITION_AGGREGATE) \
)
#define NV_MSGBOX_DATA_IN_GPM(instance_id, ci_index) \
( \
DRF_NUM(_MSGBOX, _DATA, _GPM_NVDEC_INSTANCE, instance_id) | \
DRF_NUM(_MSGBOX, _DATA, _GPM_COMPUTE_INSTANCE_INDEX, ci_index) \
)
#define NV_MSGBOX_CMD_GPM_SET_MULTIPLIER(multiplier) \

View File

@@ -166,14 +166,31 @@ typedef enum GSP_SEQUENCER_BUFFER_ERR
// Sequencer implementation of FLD_WR_DRF_DEF()
#define GSP_SEQ_FLD_WR_DRF_DEF(gpu, gsp, d, r, f, c) \
{ \
GSP_SEQUENCER_BUFFER_CMD cmd; \
cmd.opCode = GSP_SEQ_BUF_OPCODE_REG_MODIFY; \
cmd.payload.regModify.addr = NV##d##r; \
cmd.payload.regModify.mask = DRF_MASK(NV##d##r##f) << DRF_SHIFT(NV##d##r##f); \
cmd.payload.regModify.val = DRF_DEF(d, r, f, c); \
(void)gspAppendToSequencerBuffer(gpu, gsp, &cmd); \
}
{ \
GSP_SEQUENCER_BUFFER_CMD cmd; \
cmd.opCode = GSP_SEQ_BUF_OPCODE_REG_MODIFY; \
cmd.payload.regModify.addr = NV##d##r; \
cmd.payload.regModify.mask = DRF_MASK(NV##d##r##f) \
<< DRF_SHIFT(NV##d##r##f); \
cmd.payload.regModify.val = DRF_DEF(d, r, f, c); \
(void)gspAppendToSequencerBuffer(gpu, gsp, &cmd); \
}
//
// Sequencer implementation similar to REG_FLD_WR_DRF_DEF() but with a base
// address specified instead of an aperture.
//
#define GSP_SEQ_BASE_FLD_WR_DRF_DEF(gpu, gsp, b, d, r, f, c) \
{ \
GSP_SEQUENCER_BUFFER_CMD cmd; \
cmd.opCode = GSP_SEQ_BUF_OPCODE_REG_MODIFY; \
cmd.payload.regModify.addr = (b) + NV##d##r; \
cmd.payload.regModify.mask = DRF_MASK(NV##d##r##f) \
<< DRF_SHIFT(NV##d##r##f); \
cmd.payload.regModify.val = DRF_DEF(d, r, f, c); \
(void)gspAppendToSequencerBuffer(gpu, gsp, &cmd); \
}
/*!
* Forward references

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -83,6 +83,11 @@
*/
#define LSF_VPR_REGION_ID (0x3U)
/*!
* Expected REGION ID to be used for the CPR region with conf compute.
*/
#define LSF_CPR_REGION_ID (0x3U)
/*!
* Size of the separate bootloader data that could be present in WPR region.
*/
@@ -143,10 +148,20 @@
#define LSF_FALCON_ID_FECS_RISCV (21U)
#define LSF_FALCON_ID_GPCCS_RISCV (22U)
#define LSF_FALCON_ID_NVJPG_RISCV_EB (23U)
#define LSF_FALCON_ID_END (24U)
#define LSF_FALCON_ID_OFA_RISCV_EB (24U)
#define LSF_FALCON_ID_NVENC_RISCV_EB (25U)
#define LSF_FALCON_ID_END (26U)
#define LSF_FALCON_ID_INVALID (0xFFFFFFFFU)
//
// TODO: Remove below Alias and add _EB Patching to macro LSF_FALCON_ID_FECS_RISCV, similarly for GPCCS,
// and similar cleanups in RM since RISCV based CTXSW engines are to be booted externally.
// Tracking in Bug 3808599
//
#define LSF_FALCON_ID_FECS_RISCV_EB (LSF_FALCON_ID_FECS_RISCV)
#define LSF_FALCON_ID_GPCCS_RISCV_EB (LSF_FALCON_ID_GPCCS_RISCV)
//
// ************************ NOTIFICATION *********************************
// In case anyone needs to add new LSF falconId, please must calculate
@@ -176,7 +191,7 @@
* to uniquely identify it.
* @note this macro should be updated as needed whenever LSF_FALCON_ID* defines are added. See Bug: 3833461
*/
#define LSF_FALCON_USES_INSTANCE(falconId) ((falconId == LSF_FALCON_ID_NVDEC_RISCV_EB) || (falconId == LSF_FALCON_ID_NVJPG) || (falconId == LSF_FALCON_ID_NVJPG_RISCV_EB))
#define LSF_FALCON_USES_INSTANCE(falconId) ((falconId == LSF_FALCON_ID_NVDEC_RISCV_EB) || (falconId == LSF_FALCON_ID_NVJPG) || (falconId == LSF_FALCON_ID_NVJPG_RISCV_EB) || (falconId == LSF_FALCON_ID_NVENC_RISCV_EB))
/*!
* Size in entries of the ucode descriptor's dependency map.
@@ -376,6 +391,13 @@ typedef struct
#define LSF_FALCON_ID_END_PMU (LSF_FALCON_ID_FBFALCON + 1)
#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX_PMU (NV_ALIGN_UP((sizeof(LSF_WPR_HEADER) * LSF_FALCON_ID_END_PMU), LSF_WPR_HEADER_ALIGNMENT))
//
// In order to prevent LSF_FALCON_ID_END changes to affect older / shipped SEC2/ACR ucodes (increase of DMEM footprint)
// adding SEC2/ACR specific ***_END define covering all supported falcons in pre-hopper SEC2-RTOS/ACR ucode.
//
#define LSF_FALCON_ID_END_ACR_ON_SEC2 (LSF_FALCON_ID_NVJPG + 1)
#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX_ACR_ON_SEC2 (NV_ALIGN_UP((sizeof(LSF_WPR_HEADER) * LSF_FALCON_ID_END_ACR_ON_SEC2), LSF_WPR_HEADER_ALIGNMENT))
// Maximum SUB WPR header size
#define LSF_SUB_WPR_HEADERS_TOTAL_SIZE_MAX (NV_ALIGN_UP((sizeof(LSF_SHARED_SUB_WPR_HEADER) * LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX), LSF_SUB_WPR_HEADER_ALIGNMENT))

View File

@@ -0,0 +1,63 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RM_SPDM_TRANSPORT_H_
#define _RM_SPDM_TRANSPORT_H_
/* ------------------------- Macros and Defines ----------------------------- */
// TODO CONFCOMP-1277: All these defines should be reviewed and simplified or removed.
//
// The following defines and macros are shared for any message-related constants
// shared between RM and endpoint.
//
#pragma pack(1)
#define NV_SPDM_MESSAGE_TYPE_NORMAL (0)
#define NV_SPDM_MESSAGE_TYPE_SECURED (1)
// SPDM Command Types
#define CC_CTRL_CODE_UNDEFINED (0)
#define CC_CTRL_CODE_SPDM_MESSAGE_PROCESS (1)
#define CC_CTRL_CODE_SESSION_MESSAGE_PROCESS (2)
#define CC_CTRL_CODE_APPLICATION_MESSAGE_PROCESS (3)
#define CC_SPDM_ENDPOINT_ID_INVALID (0xFFFFFFFF)
#define CC_SPDM_GUEST_ID_INVALID (0xFFFFFFFF)
#define NV_SPDM_DESC_HEADER_SIZE_IN_BYTE (NvU32)sizeof(NV_SPDM_DESC_HEADER)
#define NV_SPDM_DESC_HEADER_VERSION_1_0 (0x10)
#define NV_SPDM_DESC_HEADER_VERSION_CURRENT NV_SPDM_DESC_HEADER_VERSION_1_0
#define NV_SPDM_DESC_HEADER_ALIGNMENT (256)
#define NV_SPDM_RM_SURFACE_SIZE_IN_BYTE (0x2000)
#define NV_RM_BUFFER_SIZE_IN_BYTE (NV_SPDM_RM_SURFACE_SIZE_IN_BYTE - NV_SPDM_DESC_HEADER_SIZE_IN_BYTE)
#define NV_SPDM_UNPROTECTED_REGION_ID (0x0U)
typedef struct _NV_SPDM_DESC_HEADER
{
NvU32 msgType;
NvU32 msgSizeByte;
} NV_SPDM_DESC_HEADER, *PNV_SPDM_DESC_HEADER;
#pragma pack()
#endif // _RM_SPDM_TRANSPORT_H_

View File

@@ -0,0 +1,48 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMSPDMVENDORDEF_H_
#define _RMSPDMVENDORDEF_H_
#include <nvtypes.h>
/*!
* Defines structures and interfaces for any SPDM Vendor-defined behavior.
*/
/* ------------------------- NVIDIA Export Secrets -------------------------- */
/* ------------------------- Macros ----------------------------------------- */
#define SPDM_ATTESTATION_REPORT_MAX_SIZE (0x2000)
#define SPDM_MAX_MESSAGE_BUFFER_SIZE (0x1000)
#define SPDM_MAX_EXCHANGE_BUFFER_SIZE (2 * SPDM_MAX_MESSAGE_BUFFER_SIZE)
#define SPDM_MAX_CERT_CHAIN_SIZE (SPDM_MAX_MESSAGE_BUFFER_SIZE)
#define SPDM_CERT_DEFAULT_SLOT_ID (0)
#define SPDM_CAPABILITIES_CT_EXPONENT_MAX (0xFF)
#define BIN_STR_CONCAT_BUFFER_MAX_BYTES (128)
#define NV_BYTE_TO_BIT_OVERFLOW_MASK_UINT32 (0xE0000000)
#define IS_BYTE_TO_BIT_OVERFLOW_UINT32(a) \
((a & NV_BYTE_TO_BIT_OVERFLOW_MASK_UINT32) != 0)
#endif // _RMSPDMVENDORDEF_H_

View File

@@ -21,10 +21,15 @@
* DEALINGS IN THE SOFTWARE.
*/
//
// This file holds Unix-specific NVIDIA driver options
//
#ifndef _RM_REG_H_
#define _RM_REG_H_
#include "nvtypes.h"
#include "nv-firmware-registry.h"
/*
* use NV_REG_STRING to stringify a registry key when using that registry key
@@ -723,72 +728,33 @@
* When this option is enabled, the NVIDIA driver will enable use of GPU
* firmware.
*
* Possible mode values:
* 0 - Do not enable GPU firmware
* 1 - Enable GPU firmware
* 2 - (Default) Use the default enablement policy for GPU firmware
*
* Setting this to anything other than 2 will alter driver firmware-
* enablement policies, possibly disabling GPU firmware where it would
* have otherwise been enabled by default.
*
* If this key is set globally to the system, the driver may still attempt
* to apply some policies to maintain uniform firmware modes across all
* GPUS. This may result in the driver failing initialization on some GPUs
* to maintain such a policy.
*
*
* If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
* will attempt to honor whatever configuration is specified without applying
* additional policies. This may also result in failed GPU initialzations if
* the configuration is not possible (for example if the firmware is missing
* from the filesystem, or the GPU is not capable).
*
* Policy bits:
*
* POLICY_ALLOW_FALLBACK:
* As the normal behavior is to fail GPU initialization if this registry
* entry is set in such a way that results in an invalid configuration, if
* instead the user would like the driver to automatically try to fallback
* to initializing the failing GPU with firmware disabled, then this bit can
* be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
* Note that this can result in a mixed mode configuration (ex: GPU0 has
* firmware enabled, but GPU1 does not).
* from the filesystem, or the GPU is not capable).
*
* NOTE: More details for this regkey can be found in nv-firmware-registry.h
*/
#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
/*
* Option: EnableGpuFirmwareLogs
*
* When this option is enabled, the NVIDIA driver will send GPU firmware logs
* to the system log, when possible.
*
* Possible values:
* 0 - Do not send GPU firmware logs to the system log
* 1 - Enable sending of GPU firmware logs to the system log
* 2 - (Default) Enable sending of GPU firmware logs to the system log for
* the debug kernel driver build only
* NOTE: More details for this regkey can be found in nv-firmware-registry.h
*/
#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
/*
* Option: EnableDbgBreakpoint
*

View File

@@ -347,6 +347,12 @@ typedef struct nv_soc_irq_info_s {
/* DMA-capable device data, defined by kernel interface layer */
typedef struct nv_dma_device nv_dma_device_t;
typedef struct nv_phys_addr_range
{
NvU64 addr;
NvU64 len;
} nv_phys_addr_range_t;
typedef struct nv_state_t
{
void *priv; /* private data */
@@ -466,6 +472,9 @@ typedef struct nv_state_t
/* Bool to check if ISO iommu enabled */
NvBool iso_iommu_present;
/* Bool to check if NISO iommu enabled */
NvBool niso_iommu_present;
/* Bool to check if dma-buf is supported */
NvBool dma_buf_supported;
@@ -477,6 +486,8 @@ typedef struct nv_state_t
/* Bool to check if the device received a shutdown notification */
NvBool is_shutdown;
/* Bool to check if the GPU has a coherent sysmem link */
NvBool coherent;
} nv_state_t;
// These define need to be in sync with defines in system.h
@@ -505,6 +516,8 @@ struct nv_file_private_t
typedef struct gpuSession *nvgpuSessionHandle_t;
typedef struct gpuDevice *nvgpuDeviceHandle_t;
typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t;
typedef struct gpuTsg *nvgpuTsgHandle_t;
typedef struct UvmGpuTsgAllocParams_tag nvgpuTsgAllocParams_t;
typedef struct gpuChannel *nvgpuChannelHandle_t;
typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t;
typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t;
@@ -531,7 +544,7 @@ typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocPara
typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t;
typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t;
typedef enum UvmPmaGpuMemoryType_tag nvgpuGpuMemoryType_t;
typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU64, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t);
/*
@@ -599,6 +612,8 @@ typedef enum
#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
((nv)->iso_iommu_present)
#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv) \
((nv)->niso_iommu_present)
/*
* GPU add/remove events
*/
@@ -813,6 +828,7 @@ nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void NV_API_CALL nv_put_file_private(void *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
@@ -920,6 +936,7 @@ NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *
void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *);
char* NV_API_CALL rm_remove_spaces (const char *);
char* NV_API_CALL rm_string_token (char **, const char);
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *);
@@ -951,12 +968,12 @@ NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, N
NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *);
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU32, NvU32, NvU64 *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *);
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, NvU64 *);
NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, void *, nv_phys_addr_range_t **, NvU32 *);
void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, nv_phys_addr_range_t **, NvU32);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
@@ -994,7 +1011,7 @@ void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
NvBool NV_API_CALL rm_is_altstack_in_use(void);
/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -188,7 +188,7 @@ NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
@@ -207,6 +207,9 @@ enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
extern NvU32 os_page_size;
extern NvU64 os_page_mask;

View File

@@ -0,0 +1,211 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for debug definitions.
*
****************************************************************************/
#ifndef __X86EMU_DEBUG_H
#define __X86EMU_DEBUG_H
/*---------------------- Macros and type definitions ----------------------*/
/* checks to be enabled for "runtime" */
/* Bit flags stored in M.x86.check selecting which runtime sanity checks run. */
#define CHECK_IP_FETCH_F 0x1
#define CHECK_SP_ACCESS_F 0x2
#define CHECK_MEM_ACCESS_F 0x4 /*using regular linear pointer */
#define CHECK_DATA_ACCESS_F 0x8 /*using segment:offset*/
/*
 * CHECK_* predicates: test the corresponding bit in M.x86.check when the
 * emulator is built with X86EMU_DEBUG; compile to constant 0 otherwise so
 * the checking code is dead-stripped in release builds.
 */
#ifdef X86EMU_DEBUG
# define CHECK_IP_FETCH() (M.x86.check & CHECK_IP_FETCH_F)
# define CHECK_SP_ACCESS() (M.x86.check & CHECK_SP_ACCESS_F)
# define CHECK_MEM_ACCESS() (M.x86.check & CHECK_MEM_ACCESS_F)
# define CHECK_DATA_ACCESS() (M.x86.check & CHECK_DATA_ACCESS_F)
#else
# define CHECK_IP_FETCH() 0
# define CHECK_SP_ACCESS() 0
# define CHECK_MEM_ACCESS() 0
# define CHECK_DATA_ACCESS() 0
#endif
/*
 * DEBUG_* predicates: test a per-category bit in M.x86.debug (flag constants
 * DEBUG_*_F are defined elsewhere); constant 0 in non-debug builds.
 */
#ifdef X86EMU_DEBUG
# define DEBUG_INSTRUMENT() (M.x86.debug & DEBUG_INSTRUMENT_F)
# define DEBUG_DECODE() (M.x86.debug & DEBUG_DECODE_F)
# define DEBUG_TRACE() (M.x86.debug & DEBUG_TRACE_F)
# define DEBUG_STEP() (M.x86.debug & DEBUG_STEP_F)
# define DEBUG_DISASSEMBLE() (M.x86.debug & DEBUG_DISASSEMBLE_F)
# define DEBUG_BREAK() (M.x86.debug & DEBUG_BREAK_F)
# define DEBUG_SVC() (M.x86.debug & DEBUG_SVC_F)
# define DEBUG_SAVE_IP_CS() (M.x86.debug & DEBUG_SAVE_IP_CS_F)
# define DEBUG_FS() (M.x86.debug & DEBUG_FS_F)
# define DEBUG_PROC() (M.x86.debug & DEBUG_PROC_F)
# define DEBUG_SYSINT() (M.x86.debug & DEBUG_SYSINT_F)
# define DEBUG_TRACECALL() (M.x86.debug & DEBUG_TRACECALL_F)
# define DEBUG_TRACECALLREGS() (M.x86.debug & DEBUG_TRACECALL_REGS_F)
# define DEBUG_SYS() (M.x86.debug & DEBUG_SYS_F)
# define DEBUG_MEM_TRACE() (M.x86.debug & DEBUG_MEM_TRACE_F)
# define DEBUG_IO_TRACE() (M.x86.debug & DEBUG_IO_TRACE_F)
# define DEBUG_DECODE_NOPRINT() (M.x86.debug & DEBUG_DECODE_NOPRINT_F)
#else
# define DEBUG_INSTRUMENT() 0
# define DEBUG_DECODE() 0
# define DEBUG_TRACE() 0
# define DEBUG_STEP() 0
# define DEBUG_DISASSEMBLE() 0
# define DEBUG_BREAK() 0
# define DEBUG_SVC() 0
# define DEBUG_SAVE_IP_CS() 0
# define DEBUG_FS() 0
# define DEBUG_PROC() 0
# define DEBUG_SYSINT() 0
# define DEBUG_TRACECALL() 0
# define DEBUG_TRACECALLREGS() 0
# define DEBUG_SYS() 0
# define DEBUG_MEM_TRACE() 0
# define DEBUG_IO_TRACE() 0
# define DEBUG_DECODE_NOPRINT() 0
#endif
#ifdef X86EMU_DEBUG
/*
 * NOTE(review): DECODE_PRINTF / DECODE_PRINTF2 expand to a bare `if` with no
 * braces and no trailing else; a following `else` at the call site would bind
 * to this hidden `if`. Historical upstream x86emu style — left as-is.
 */
# define DECODE_PRINTF(x) if (DEBUG_DECODE()) \
x86emu_decode_printf(x)
# define DECODE_PRINTF2(x,y) if (DEBUG_DECODE()) \
x86emu_decode_printf2(x,y)
/*
 * The following allow us to look at the bytes of an instruction. The
 * first INCR_INSTRN_LEN, is called everytime bytes are consumed in
 * the decoding process. The SAVE_IP_CS is called initially when the
 * major opcode of the instruction is accessed.
 */
#define INC_DECODED_INST_LEN(x) \
if (DEBUG_DECODE()) \
x86emu_inc_decoded_inst_len(x)
/*
 * NOTE(review): the condition below uses bitwise `|` rather than logical
 * `||`. Equivalent in effect here (each operand is 0 or a nonzero mask,
 * and the operands have no side effects), just loses short-circuiting.
 */
#define SAVE_IP_CS(x,y) \
if (DEBUG_DECODE() | DEBUG_TRACECALL() | DEBUG_BREAK() \
| DEBUG_IO_TRACE() | DEBUG_SAVE_IP_CS()) { \
M.x86.saved_cs = x; \
M.x86.saved_ip = y; \
}
#else
# define INC_DECODED_INST_LEN(x)
# define DECODE_PRINTF(x)
# define DECODE_PRINTF2(x,y)
# define SAVE_IP_CS(x,y)
#endif
#ifdef X86EMU_DEBUG
/*
 * In disassemble-only mode, print the instruction and jump past execution
 * to the EndOfTheInstructionProcedure label emitted by END_OF_INSTR().
 */
#define TRACE_REGS() \
if (DEBUG_DISASSEMBLE()) { \
x86emu_just_disassemble(); \
goto EndOfTheInstructionProcedure; \
} \
if (DEBUG_TRACE() || DEBUG_DECODE()) X86EMU_trace_regs()
#else
# define TRACE_REGS()
#endif
#ifdef X86EMU_DEBUG
# define SINGLE_STEP() if (DEBUG_STEP()) x86emu_single_step()
#else
# define SINGLE_STEP()
#endif
#define TRACE_AND_STEP() \
TRACE_REGS(); \
SINGLE_STEP()
#ifdef X86EMU_DEBUG
# define START_OF_INSTR()
/* END_OF_INSTR defines the label TRACE_REGS() jumps to in debug builds. */
# define END_OF_INSTR() EndOfTheInstructionProcedure: x86emu_end_instr();
# define END_OF_INSTR_NO_TRACE() x86emu_end_instr();
#else
# define START_OF_INSTR()
# define END_OF_INSTR()
# define END_OF_INSTR_NO_TRACE()
#endif
#ifdef X86EMU_DEBUG
#include <os/os.h>
/*
 * NOTE(review): CALL_TRACE / RETURN_TRACE expand to multiple statements and
 * are not wrapped in do { } while (0); they are only safe where a compound
 * statement is acceptable at the call site. Upstream style — left as-is.
 */
# define CALL_TRACE(u,v,w,x,s) \
if (DEBUG_TRACECALLREGS()) \
x86emu_dump_regs(); \
if (DEBUG_TRACECALL()) \
NV_PRINTF(LEVEL_INFO, "%04x:%04x: CALL %s%04x:%04x\n", u , v, s, w, x);
# define RETURN_TRACE(n,u,v) \
if (DEBUG_TRACECALLREGS()) \
x86emu_dump_regs(); \
if (DEBUG_TRACECALL()) \
NV_PRINTF(LEVEL_INFO, "%04x:%04x: %s\n",u,v,n);
#else
# define CALL_TRACE(u,v,w,x,s)
# define RETURN_TRACE(n,u,v)
#endif
/* DB(x): emit x only in debug builds (wraps debug-only statements). */
#ifdef X86EMU_DEBUG
#define DB(x) x
#else
#define DB(x)
#endif
/*-------------------------- Function Prototypes --------------------------*/
#ifdef __cplusplus
extern "C" { /* Use "C" linkage when in C++ mode */
#endif
extern void x86emu_inc_decoded_inst_len (int x);
extern void x86emu_decode_printf (const char *x);
extern void x86emu_decode_printf2 (const char *x, int y);
extern void x86emu_just_disassemble (void);
extern void x86emu_single_step (void);
extern void x86emu_end_instr (void);
extern void x86emu_dump_regs (void);
extern void x86emu_dump_xregs (void);
extern void x86emu_print_int_vect (u16 iv);
extern void x86emu_instrument_instruction (void);
extern void x86emu_check_ip_access (void);
extern void x86emu_check_sp_access (void);
extern void x86emu_check_mem_access (u32 p);
extern void x86emu_check_data_access (uint s, uint o);
#ifdef __cplusplus
} /* End of "C" linkage for C++ */
#endif
#endif /* __X86EMU_DEBUG_H */

View File

@@ -0,0 +1,88 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for instruction decoding logic.
*
****************************************************************************/
#ifndef __X86EMU_DECODE_H
#define __X86EMU_DECODE_H
/*---------------------- Macros and type definitions ----------------------*/
/* Instruction Decoding Stuff */
/* Convenience wrappers that pass decode output variables by address. */
#define FETCH_DECODE_MODRM(mod,rh,rl) fetch_decode_modrm(&mod,&rh,&rl)
#define DECODE_RM_BYTE_REGISTER(r) decode_rm_byte_register(r)
#define DECODE_RM_WORD_REGISTER(r) decode_rm_word_register(r)
#define DECODE_RM_LONG_REGISTER(r) decode_rm_long_register(r)
/* Clear segment-override / prefix state in M.x86.mode between instructions. */
#define DECODE_CLEAR_SEGOVR() M.x86.mode &= ~SYSMODE_CLRMASK
/*-------------------------- Function Prototypes --------------------------*/
#ifdef __cplusplus
extern "C" { /* Use "C" linkage when in C++ mode */
#endif
/* Raise a software interrupt of the given vector type inside the emulator. */
void x86emu_intr_raise (u8 type);
/* Split the next mod/rm byte into its mod, reg-high and reg-low fields. */
void fetch_decode_modrm (int *mod,int *regh,int *regl);
/* Fetch immediate operands from the instruction stream (advances IP). */
u8 fetch_byte_imm (void);
u16 fetch_word_imm (void);
u32 fetch_long_imm (void);
/* Data fetch: plain forms use the current data segment; _abs forms take an
 * explicit segment:offset pair. */
u8 fetch_data_byte (uint offset);
u8 fetch_data_byte_abs (uint segment, uint offset);
u16 fetch_data_word (uint offset);
u16 fetch_data_word_abs (uint segment, uint offset);
u32 fetch_data_long (uint offset);
u32 fetch_data_long_abs (uint segment, uint offset);
/* Data store counterparts of the fetch functions above. */
void store_data_byte (uint offset, u8 val);
void store_data_byte_abs (uint segment, uint offset, u8 val);
void store_data_word (uint offset, u16 val);
void store_data_word_abs (uint segment, uint offset, u16 val);
void store_data_long (uint offset, u32 val);
void store_data_long_abs (uint segment, uint offset, u32 val);
/* Map a reg field number to a pointer into the emulated register file. */
u8* decode_rm_byte_register(int reg);
u16* decode_rm_word_register(int reg);
u32* decode_rm_long_register(int reg);
u16* decode_rm_seg_register(int reg);
/* Compute the effective address for each mod value of the mod/rm byte. */
u32 decode_rm00_address(int rm);
u32 decode_rm01_address(int rm);
u32 decode_rm10_address(int rm);
u32 decode_sib_address(int sib, int mod);
#ifdef __cplusplus
} /* End of "C" linkage for C++ */
#endif
#endif /* __X86EMU_DECODE_H */

View File

@@ -0,0 +1,61 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for FPU instruction decoding.
*
****************************************************************************/
#ifndef __X86EMU_FPU_H
#define __X86EMU_FPU_H
#ifdef __cplusplus
extern "C" { /* Use "C" linkage when in C++ mode */
#endif
/* these have to be defined, whether 8087 support compiled in or not. */
/*
 * Handlers for the x87 coprocessor escape opcodes 0xD8-0xDF; op1 is the
 * opcode byte. Without FPU support they still exist so the opcode
 * dispatch tables always have a valid entry.
 */
extern void x86emuOp_esc_coprocess_d8 (u8 op1);
extern void x86emuOp_esc_coprocess_d9 (u8 op1);
extern void x86emuOp_esc_coprocess_da (u8 op1);
extern void x86emuOp_esc_coprocess_db (u8 op1);
extern void x86emuOp_esc_coprocess_dc (u8 op1);
extern void x86emuOp_esc_coprocess_dd (u8 op1);
extern void x86emuOp_esc_coprocess_de (u8 op1);
extern void x86emuOp_esc_coprocess_df (u8 op1);
#ifdef __cplusplus
} /* End of "C" linkage for C++ */
#endif
#endif /* __X86EMU_FPU_H */

View File

@@ -0,0 +1,119 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for FPU register definitions.
*
****************************************************************************/
#ifndef __X86EMU_FPU_REGS_H
#define __X86EMU_FPU_REGS_H
#ifdef X86_FPU_SUPPORT
#ifdef PACK
# pragma PACK
#endif
/* Basic 8087 register can hold any of the following values: */
union x86_fpu_reg_u {
s8 tenbytes[10];
double dval;
float fval;
s16 sval;
s32 lval;
};
/* One FPU stack slot: the value plus a tag recording its storage format. */
struct x86_fpu_reg {
union x86_fpu_reg_u reg;
char tag;
};
/*
 * Since we are not going to worry about the problems of aliasing
 * registers, every time a register is modified, its result type is
 * set in the tag fields for that register. If some operation
 * attempts to access the type in a way inconsistent with its current
 * storage format, then we flag the operation. If common, we'll
 * attempt the conversion.
 */
/* Tag encoding: high bit = slot holds a valid value; low 7 bits = format. */
#define X86_FPU_VALID 0x80
#define X86_FPU_REGTYP(r) ((r) & 0x7F)
#define X86_FPU_WORD 0x0
#define X86_FPU_SHORT 0x1
#define X86_FPU_LONG 0x2
#define X86_FPU_FLOAT 0x3
#define X86_FPU_DOUBLE 0x4
#define X86_FPU_LDBL 0x5
#define X86_FPU_BSD 0x6
#define X86_FPU_STKTOP 0
/* Full x87 state: 8-slot register stack plus status/config and stack bounds. */
struct x86_fpu_registers {
struct x86_fpu_reg x86_fpu_stack[8];
int x86_fpu_flags;
int x86_fpu_config; /* rounding modes, etc. */
short x86_fpu_tos, x86_fpu_bos;
};
#ifdef END_PACK
# pragma END_PACK
#endif
/*
 * There are two versions of the following macro.
 *
 * One version is for opcode D9, for which there are more than 32
 * instructions encoded in the second byte of the opcode.
 *
 * The other version, deals with all the other 7 i87 opcodes, for
 * which there are only 32 strings needed to describe the
 * instructions.
 */
#endif /* X86_FPU_SUPPORT */
/* Decode-time mnemonic printing helpers; no-ops outside debug builds. */
#ifdef X86EMU_DEBUG
# define DECODE_PRINTINSTR32(t,mod,rh,rl) \
DECODE_PRINTF(t[(mod<<3)+(rh)]);
# define DECODE_PRINTINSTR256(t,mod,rh,rl) \
DECODE_PRINTF(t[(mod<<6)+(rh<<3)+(rl)]);
#else
# define DECODE_PRINTINSTR32(t,mod,rh,rl)
# define DECODE_PRINTINSTR256(t,mod,rh,rl)
#endif
#endif /* __X86EMU_FPU_REGS_H */

View File

@@ -0,0 +1,45 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for operand decoding functions.
*
****************************************************************************/
#ifndef __X86EMU_OPS_H
#define __X86EMU_OPS_H
/*
 * Opcode dispatch tables, indexed by opcode byte: x86emu_optab for
 * single-byte opcodes, x86emu_optab2 for the second byte of two-byte
 * (0x0F-prefixed) opcodes. Each entry receives the opcode byte itself.
 */
extern void (*x86emu_optab[0x100])(u8 op1);
extern void (*x86emu_optab2[0x100])(u8 op2);
#endif /* __X86EMU_OPS_H */

View File

@@ -0,0 +1,141 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for primitive operation functions.
*
****************************************************************************/
#ifndef __X86EMU_PRIM_OPS_H
#define __X86EMU_PRIM_OPS_H
#ifdef __cplusplus
extern "C" { /* Use "C" linkage when in C++ mode */
#endif
/*
 * Primitive ALU operations backing the opcode handlers. Naming follows
 * <mnemonic>_<width>: byte = u8, word = u16, long = u32. `d` is the
 * destination operand, `s` the source; the result is returned.
 */
/* BCD adjust helpers (AAA/AAS/AAD/AAM). */
u16 aaa_word (u16 d);
u16 aas_word (u16 d);
u16 aad_word (u16 d);
u16 aam_word (u8 d);
/* Arithmetic and logic with two operands. */
u8 adc_byte (u8 d, u8 s);
u16 adc_word (u16 d, u16 s);
u32 adc_long (u32 d, u32 s);
u8 add_byte (u8 d, u8 s);
u16 add_word (u16 d, u16 s);
u32 add_long (u32 d, u32 s);
u8 and_byte (u8 d, u8 s);
u16 and_word (u16 d, u16 s);
u32 and_long (u32 d, u32 s);
u8 cmp_byte (u8 d, u8 s);
u16 cmp_word (u16 d, u16 s);
u32 cmp_long (u32 d, u32 s);
/* BCD adjust after add/subtract (DAA/DAS). */
u8 daa_byte (u8 d);
u8 das_byte (u8 d);
/* Single-operand increment/decrement/negate/complement. */
u8 dec_byte (u8 d);
u16 dec_word (u16 d);
u32 dec_long (u32 d);
u8 inc_byte (u8 d);
u16 inc_word (u16 d);
u32 inc_long (u32 d);
u8 or_byte (u8 d, u8 s);
u16 or_word (u16 d, u16 s);
u32 or_long (u32 d, u32 s);
u8 neg_byte (u8 s);
u16 neg_word (u16 s);
u32 neg_long (u32 s);
u8 not_byte (u8 s);
u16 not_word (u16 s);
u32 not_long (u32 s);
/* Rotates and shifts; `s` is the shift/rotate count. */
u8 rcl_byte (u8 d, u8 s);
u16 rcl_word (u16 d, u8 s);
u32 rcl_long (u32 d, u8 s);
u8 rcr_byte (u8 d, u8 s);
u16 rcr_word (u16 d, u8 s);
u32 rcr_long (u32 d, u8 s);
u8 rol_byte (u8 d, u8 s);
u16 rol_word (u16 d, u8 s);
u32 rol_long (u32 d, u8 s);
u8 ror_byte (u8 d, u8 s);
u16 ror_word (u16 d, u8 s);
u32 ror_long (u32 d, u8 s);
u8 shl_byte (u8 d, u8 s);
u16 shl_word (u16 d, u8 s);
u32 shl_long (u32 d, u8 s);
u8 shr_byte (u8 d, u8 s);
u16 shr_word (u16 d, u8 s);
u32 shr_long (u32 d, u8 s);
u8 sar_byte (u8 d, u8 s);
u16 sar_word (u16 d, u8 s);
u32 sar_long (u32 d, u8 s);
/* Double-precision shifts (SHLD/SHRD); `fill` supplies the shifted-in bits. */
u16 shld_word (u16 d, u16 fill, u8 s);
u32 shld_long (u32 d, u32 fill, u8 s);
u16 shrd_word (u16 d, u16 fill, u8 s);
u32 shrd_long (u32 d, u32 fill, u8 s);
u8 sbb_byte (u8 d, u8 s);
u16 sbb_word (u16 d, u16 s);
u32 sbb_long (u32 d, u32 s);
u8 sub_byte (u8 d, u8 s);
u16 sub_word (u16 d, u16 s);
u32 sub_long (u32 d, u32 s);
/* TEST: flag-setting AND with no stored result. */
void test_byte (u8 d, u8 s);
void test_word (u16 d, u16 s);
void test_long (u32 d, u32 s);
u8 xor_byte (u8 d, u8 s);
u16 xor_word (u16 d, u16 s);
u32 xor_long (u32 d, u32 s);
/* Multiply/divide; single-operand forms operate on the accumulator
 * registers implicitly, matching the x86 MUL/IMUL/DIV/IDIV encodings. */
void imul_byte (u8 s);
void imul_word (u16 s);
void imul_long (u32 s);
void imul_long_direct(u32 *res_lo, u32* res_hi,u32 d, u32 s);
void mul_byte (u8 s);
void mul_word (u16 s);
void mul_long (u32 s);
void idiv_byte (u8 s);
void idiv_word (u16 s);
void idiv_long (u32 s);
void div_byte (u8 s);
void div_word (u16 s);
void div_long (u32 s);
/* String I/O (INS/OUTS); `size` is the element width in bytes. */
void ins (int size);
void outs (int size);
u16 mem_access_word (int addr);
/* Stack operations on the emulated SP. */
void push_word (u16 w);
void push_long (u32 w);
u16 pop_word (void);
u32 pop_long (void);
#ifdef __cplusplus
} /* End of "C" linkage for C++ */
#endif
#endif /* __X86EMU_PRIM_OPS_H */

View File

@@ -0,0 +1,340 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for x86 register definitions.
*
****************************************************************************/
#ifndef __X86EMU_REGS_H
#define __X86EMU_REGS_H
#include "x86emu/debug.h"
/*---------------------- Macros and type definitions ----------------------*/
#ifdef PACK
# pragma PACK
#endif
/*
* General EAX, EBX, ECX, EDX type registers. Note that for
* portability, and speed, the issue of byte swapping is not addressed
* in the registers. All registers are stored in the default format
* available on the host machine. The only critical issue is that the
* registers should line up EXACTLY in the same manner as they do in
* the 386. That is:
*
* EAX & 0xff === AL
* EAX & 0xffff == AX
*
* etc. The result is that alot of the calculations can then be
* done using the native instruction set fully.
*/
#ifdef __BIG_ENDIAN__
typedef struct {
u32 e_reg;
} I32_reg_t;
typedef struct {
u16 filler0, x_reg;
} I16_reg_t;
typedef struct {
u8 filler0, filler1, h_reg, l_reg;
} I8_reg_t;
#else /* !__BIG_ENDIAN__ */
typedef struct {
u32 e_reg;
} I32_reg_t;
typedef struct {
u16 x_reg;
} I16_reg_t;
typedef struct {
u8 l_reg, h_reg;
} I8_reg_t;
#endif /* __BIG_ENDIAN__ */
typedef union {
I32_reg_t I32_reg;
I16_reg_t I16_reg;
I8_reg_t I8_reg;
} i386_general_register;
struct i386_general_regs {
i386_general_register A, B, C, D;
};
typedef struct i386_general_regs Gen_reg_t;
struct i386_special_regs {
i386_general_register SP, BP, SI, DI, IP;
u32 FLAGS;
};
/*
* Segment registers here represent the 16 bit quantities
* CS, DS, ES, SS.
*/
struct i386_segment_regs {
u16 CS, DS, SS, ES, FS, GS;
};
/* 8 bit registers */
#define R_AH gen.A.I8_reg.h_reg
#define R_AL gen.A.I8_reg.l_reg
#define R_BH gen.B.I8_reg.h_reg
#define R_BL gen.B.I8_reg.l_reg
#define R_CH gen.C.I8_reg.h_reg
#define R_CL gen.C.I8_reg.l_reg
#define R_DH gen.D.I8_reg.h_reg
#define R_DL gen.D.I8_reg.l_reg
/* 16 bit registers */
#define R_AX gen.A.I16_reg.x_reg
#define R_BX gen.B.I16_reg.x_reg
#define R_CX gen.C.I16_reg.x_reg
#define R_DX gen.D.I16_reg.x_reg
/* 32 bit extended registers */
#define R_EAX gen.A.I32_reg.e_reg
#define R_EBX gen.B.I32_reg.e_reg
#define R_ECX gen.C.I32_reg.e_reg
#define R_EDX gen.D.I32_reg.e_reg
/*
 * 16-bit views of the special registers (SP/BP/SI/DI/IP and the flags
 * word). The original header defined this identical block twice in a
 * row; the redundant duplicate has been removed — identical macro
 * redefinitions are legal C but serve no purpose.
 */
#define R_SP spc.SP.I16_reg.x_reg
#define R_BP spc.BP.I16_reg.x_reg
#define R_SI spc.SI.I16_reg.x_reg
#define R_DI spc.DI.I16_reg.x_reg
#define R_IP spc.IP.I16_reg.x_reg
#define R_FLG spc.FLAGS
/* special registers */
#define R_ESP spc.SP.I32_reg.e_reg
#define R_EBP spc.BP.I32_reg.e_reg
#define R_ESI spc.SI.I32_reg.e_reg
#define R_EDI spc.DI.I32_reg.e_reg
#define R_EIP spc.IP.I32_reg.e_reg
#define R_EFLG spc.FLAGS
/* segment registers */
#define R_CS seg.CS
#define R_DS seg.DS
#define R_SS seg.SS
#define R_ES seg.ES
#define R_FS seg.FS
#define R_GS seg.GS
/* flag conditions */
#define FB_CF 0x0001 /* CARRY flag */
#define FB_PF 0x0004 /* PARITY flag */
#define FB_AF 0x0010 /* AUX flag */
#define FB_ZF 0x0040 /* ZERO flag */
#define FB_SF 0x0080 /* SIGN flag */
#define FB_TF 0x0100 /* TRAP flag */
#define FB_IF 0x0200 /* INTERRUPT ENABLE flag */
#define FB_DF 0x0400 /* DIR flag */
#define FB_OF 0x0800 /* OVERFLOW flag */
/* 80286 and above always have bit#1 set */
#define F_ALWAYS_ON (0x0002) /* flag bits always on */
/*
 * Define a mask for only those flag bits we will ever pass back
 * (via PUSHF)
 */
#define F_MSK (FB_CF|FB_PF|FB_AF|FB_ZF|FB_SF|FB_TF|FB_IF|FB_DF|FB_OF)
/* following bits masked in to a 16bit quantity */
/*
 * NOTE: the F_* names below duplicate the FB_* values above bit-for-bit;
 * both spellings are kept because both are used by existing code.
 */
#define F_CF 0x0001 /* CARRY flag */
#define F_PF 0x0004 /* PARITY flag */
#define F_AF 0x0010 /* AUX flag */
#define F_ZF 0x0040 /* ZERO flag */
#define F_SF 0x0080 /* SIGN flag */
#define F_TF 0x0100 /* TRAP flag */
#define F_IF 0x0200 /* INTERRUPT ENABLE flag */
#define F_DF 0x0400 /* DIR flag */
#define F_OF 0x0800 /* OVERFLOW flag */
/*
 * Flag manipulation helpers.  Each operates on the emulator flag word
 * reached through M.x86.R_FLG.
 */
#define TOGGLE_FLAG(flag) (M.x86.R_FLG ^= (flag))
#define SET_FLAG(flag) (M.x86.R_FLG |= (flag))
#define CLEAR_FLAG(flag) (M.x86.R_FLG &= ~(flag))
#define ACCESS_FLAG(flag) (M.x86.R_FLG & (flag))
/* NOTE: the parameter 'm' is ignored; all flag bits are cleared. */
#define CLEARALL_FLAG(m) (M.x86.R_FLG = 0)
/*
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * and is safe inside unbraced if/else bodies (the previous bare if/else
 * expansion produced a dangling-else syntax hazard).
 */
#define CONDITIONAL_SET_FLAG(COND,FLAG) \
do { if (COND) SET_FLAG(FLAG); else CLEAR_FLAG(FLAG); } while (0)
/* Delayed ("lazy") flag computation markers; values above the 16-bit flag range. */
#define F_PF_CALC 0x010000 /* PARITY flag has been calced */
#define F_ZF_CALC 0x020000 /* ZERO flag has been calced */
#define F_SF_CALC 0x040000 /* SIGN flag has been calced */
#define F_ALL_CALC 0xff0000 /* All have been calced */
/*
* Emulator machine state.
* Segment usage control.
*/
#define SYSMODE_SEG_DS_SS 0x00000001
/* active segment-override prefix, one bit per segment register */
#define SYSMODE_SEGOVR_CS 0x00000002
#define SYSMODE_SEGOVR_DS 0x00000004
#define SYSMODE_SEGOVR_ES 0x00000008
#define SYSMODE_SEGOVR_FS 0x00000010
#define SYSMODE_SEGOVR_GS 0x00000020
#define SYSMODE_SEGOVR_SS 0x00000040
/* active instruction prefixes */
#define SYSMODE_PREFIX_REPE 0x00000080
#define SYSMODE_PREFIX_REPNE 0x00000100
#define SYSMODE_PREFIX_DATA 0x00000200
#define SYSMODE_PREFIX_ADDR 0x00000400
/* interrupt / halt state */
#define SYSMODE_INTR_PENDING 0x10000000
#define SYSMODE_EXTRN_INTR 0x20000000
#define SYSMODE_HALTED 0x40000000
/* all segment-selection and segment-override bits */
#define SYSMODE_SEGMASK (SYSMODE_SEG_DS_SS | \
SYSMODE_SEGOVR_CS | \
SYSMODE_SEGOVR_DS | \
SYSMODE_SEGOVR_ES | \
SYSMODE_SEGOVR_FS | \
SYSMODE_SEGOVR_GS | \
SYSMODE_SEGOVR_SS)
/* segment-selection bits plus the data/address size prefix bits */
#define SYSMODE_CLRMASK (SYSMODE_SEG_DS_SS | \
SYSMODE_SEGOVR_CS | \
SYSMODE_SEGOVR_DS | \
SYSMODE_SEGOVR_ES | \
SYSMODE_SEGOVR_FS | \
SYSMODE_SEGOVR_GS | \
SYSMODE_SEGOVR_SS | \
SYSMODE_PREFIX_DATA | \
SYSMODE_PREFIX_ADDR)
/* interrupt source flags -- presumably bits of X86EMU_regs.intr; TODO confirm */
#define INTR_SYNCH 0x1
#define INTR_ASYNCH 0x2
#define INTR_HALTED 0x4
typedef struct {
struct i386_general_regs gen; /* general registers A..D (accessed via R_EAX etc.) */
struct i386_special_regs spc; /* SP/BP/SI/DI/IP and FLAGS */
struct i386_segment_regs seg; /* segment registers CS/DS/SS/ES/FS/GS */
/*
 * MODE contains information on:
 *   REPE prefix          2 bits  repe,repne
 *   SEGMENT overrides    5 bits  normal,DS,SS,CS,ES
 *   Delayed flag set     3 bits  (zero, signed, parity)
 *   reserved             6 bits
 *   interrupt #          8 bits  instruction raised interrupt
 *   BIOS video segregs   4 bits
 *   Interrupt Pending    1 bits
 *   Extern interrupt     1 bits
 *   Halted               1 bits
 */
u32 mode; /* SYSMODE_* state bits (see layout above) */
volatile int intr; /* mask of pending interrupts */
int debug; /* debug option mask -- presumably DEBUG_*_F bits; TODO confirm */
#ifdef X86EMU_DEBUG
int check;
u16 saved_ip;
u16 saved_cs;
int enc_pos;
int enc_str_pos;
char decode_buf[32]; /* encoded byte stream */
char decoded_buf[256]; /* disassembled strings */
char debug_cmd[64]; /* input for x86emu_single_step */
#endif
u8 intno; /* number of the interrupt raised by the last instruction */
u8 __pad[3]; /* explicit padding to a 4-byte boundary */
} X86EMU_regs;
/****************************************************************************
REMARKS:
Structure maintaining the emulator machine state.
MEMBERS:
mem_base - Base real mode memory for the emulator
mem_size - Size of the real mode memory block for the emulator
private - private data pointer
x86 - X86 registers
****************************************************************************/
typedef struct {
unsigned long mem_base; /* base of the real-mode memory block for the emulator */
unsigned long mem_size; /* size of the real-mode memory block */
/*
 * NOTE(review): 'private' is a C++ keyword, so this member cannot be
 * referenced from C++ translation units even though the header carries
 * extern "C" support; renaming would break existing C callers.
 */
void* private; /* user data pointer */
X86EMU_regs x86; /* emulated CPU register state */
} X86EMU_sysEnv;
#ifdef END_PACK
# pragma END_PACK
#endif
/*----------------------------- Global Variables --------------------------*/
#ifdef __cplusplus
extern "C" { /* Use "C" linkage when in C++ mode */
#endif
/* Global emulator machine state.
*
* We keep it global to avoid pointer dereferences in the code for speed.
*/
extern X86EMU_sysEnv _X86EMU_env;
#define M _X86EMU_env
/*-------------------------- Function Prototypes --------------------------*/
/* Function to log information at runtime */
void printk(const char *fmt, ...);
#ifdef __cplusplus
} /* End of "C" linkage for C++ */
#endif
#endif /* __X86EMU_REGS_H */

View File

@@ -0,0 +1,93 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for x86 emulator type definitions.
*
****************************************************************************/
#ifndef __X86EMU_TYPES_H
#define __X86EMU_TYPES_H
#ifndef NO_SYS_HEADERS
#include <sys/types.h>
#endif
/*
* The following kludge is an attempt to work around typedef conflicts with
* <sys/types.h>.
*/
#define u8 x86emuu8
#define u16 x86emuu16
#define u32 x86emuu32
#define u64 x86emuu64
#define s8 x86emus8
#define s16 x86emus16
#define s32 x86emus32
#define s64 x86emus64
#define uint x86emuuint
#define sint x86emusint
/*---------------------- Macros and type definitions ----------------------*/
/* Currently only for Linux/32bit */
#undef __HAS_LONG_LONG__
#if defined(__GNUC__) && !defined(NO_LONG_LONG)
#define __HAS_LONG_LONG__
#endif
#define NUM32 int
/* Fixed-width unsigned types used throughout the emulator. */
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned NUM32 u32;
#ifdef __HAS_LONG_LONG__
typedef unsigned long long u64;
#endif
/*
 * 'signed char' rather than plain 'char': the signedness of plain char is
 * implementation-defined (C11 6.2.5), and a signed 8-bit type must be
 * guaranteed on every platform.
 */
typedef signed char s8;
typedef short s16;
typedef NUM32 s32;
#ifdef __HAS_LONG_LONG__
typedef long long s64;
#endif
typedef unsigned int uint;
typedef int sint;
typedef u16 X86EMU_pioAddr; /* 16-bit I/O port address */
#undef NUM32
#endif /* __X86EMU_TYPES_H */

View File

@@ -0,0 +1,205 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for public specific functions.
* Any application linking against us should only
* include this header
*
****************************************************************************/
#ifndef __X86EMU_X86EMU_H
#define __X86EMU_X86EMU_H
// NV
#ifndef NO_SYS_HEADERS
#define NO_SYS_HEADERS
#endif
#ifdef SCITECH
#include "scitech.h"
#define X86API _ASMAPI
#define X86APIP _ASMAPIP
typedef int X86EMU_pioAddr;
#else
#include "x86emu/types.h"
#define X86API
#define X86APIP *
#endif
#include "x86emu/regs.h"
#include "x86emu/debug.h"
/*---------------------- Macros and type definitions ----------------------*/
#ifdef PACK
# pragma PACK /* Don't pack structs with function pointers! */
#endif
/****************************************************************************
REMARKS:
Data structure containing pointers to programmed I/O functions used by the
emulator. This is used so that the user program can hook all programmed
I/O for the emulator to be handled as necessary by the user program. By
default the emulator contains simple functions that do not access the
hardware in any way. To allow the emulator to access the hardware, you will
need to override the programmed I/O functions using the X86EMU_setupPioFuncs
function.
HEADER:
x86emu.h
MEMBERS:
inb - Function to read a byte from an I/O port
inw - Function to read a word from an I/O port
inl - Function to read a dword from an I/O port
outb - Function to write a byte to an I/O port
outw - Function to write a word to an I/O port
outl - Function to write a dword to an I/O port
****************************************************************************/
typedef struct {
u8 (X86APIP inb)(X86EMU_pioAddr addr); /* read a byte from an I/O port */
u16 (X86APIP inw)(X86EMU_pioAddr addr); /* read a word from an I/O port */
u32 (X86APIP inl)(X86EMU_pioAddr addr); /* read a dword from an I/O port */
void (X86APIP outb)(X86EMU_pioAddr addr, u8 val); /* write a byte to an I/O port */
void (X86APIP outw)(X86EMU_pioAddr addr, u16 val); /* write a word to an I/O port */
void (X86APIP outl)(X86EMU_pioAddr addr, u32 val); /* write a dword to an I/O port */
} X86EMU_pioFuncs;
/****************************************************************************
REMARKS:
Data structure containing pointers to memory access functions used by the
emulator. This is used so that the user program can hook all memory
access functions as necessary for the emulator. By default the emulator
contains simple functions that only access the internal memory of the
emulator. If you need specialised functions to handle access to different
types of memory (ie: hardware framebuffer accesses and BIOS memory access
etc), you will need to override this using the X86EMU_setupMemFuncs
function.
HEADER:
x86emu.h
MEMBERS:
rdb - Function to read a byte from an address
rdw - Function to read a word from an address
rdl - Function to read a dword from an address
wrb - Function to write a byte to an address
wrw - Function to write a word to an address
wrl - Function to write a dword to an address
****************************************************************************/
typedef struct {
u8 (X86APIP rdb)(u32 addr); /* read a byte from an address */
u16 (X86APIP rdw)(u32 addr); /* read a word from an address */
u32 (X86APIP rdl)(u32 addr); /* read a dword from an address */
void (X86APIP wrb)(u32 addr, u8 val); /* write a byte to an address */
void (X86APIP wrw)(u32 addr, u16 val); /* write a word to an address */
void (X86APIP wrl)(u32 addr, u32 val); /* write a dword to an address */
} X86EMU_memFuncs;
/****************************************************************************
Here are the default memory read and write
function in case they are needed as fallbacks.
***************************************************************************/
extern u8 X86API rdb(u32 addr);
extern u16 X86API rdw(u32 addr);
extern u32 X86API rdl(u32 addr);
extern void X86API wrb(u32 addr, u8 val);
extern void X86API wrw(u32 addr, u16 val);
extern void X86API wrl(u32 addr, u32 val);
#ifdef END_PACK
# pragma END_PACK
#endif
/*--------------------- type definitions -----------------------------------*/
typedef void (X86APIP X86EMU_intrFuncs)(int num);
extern X86EMU_intrFuncs _X86EMU_intrTab[256];
/*-------------------------- Function Prototypes --------------------------*/
#ifdef __cplusplus
extern "C" { /* Use "C" linkage when in C++ mode */
#endif
void X86EMU_setupMemFuncs(X86EMU_memFuncs *funcs);
void X86EMU_setupPioFuncs(X86EMU_pioFuncs *funcs);
void X86EMU_setupIntrFuncs(X86EMU_intrFuncs funcs[]);
void X86EMU_prepareForInt(int num);
/* decode.c */
void X86EMU_exec(void);
void X86EMU_halt_sys(void);
#ifdef X86EMU_DEBUG
#define HALT_SYS() do { \
NV_PRINTF(LEVEL_INFO, "halt_sys: file %s, line %d\n", __FILE__, __LINE__); \
X86EMU_halt_sys(); \
} while (0)
#else
#define HALT_SYS() X86EMU_halt_sys()
#endif
/* Debug options */
#define DEBUG_DECODE_F 0x000001 /* print decoded instruction */
#define DEBUG_TRACE_F 0x000002 /* dump regs before/after execution */
#define DEBUG_STEP_F 0x000004
#define DEBUG_DISASSEMBLE_F 0x000008
#define DEBUG_BREAK_F 0x000010
#define DEBUG_SVC_F 0x000020
#define DEBUG_SAVE_IP_CS_F 0x000040
#define DEBUG_FS_F 0x000080
#define DEBUG_PROC_F 0x000100
#define DEBUG_SYSINT_F 0x000200 /* bios system interrupts. */
#define DEBUG_TRACECALL_F 0x000400
#define DEBUG_INSTRUMENT_F 0x000800
#define DEBUG_MEM_TRACE_F 0x001000
#define DEBUG_IO_TRACE_F 0x002000
#define DEBUG_TRACECALL_REGS_F 0x004000
#define DEBUG_DECODE_NOPRINT_F 0x008000
#define DEBUG_EXIT 0x010000
#define DEBUG_SYS_F (DEBUG_SVC_F|DEBUG_FS_F|DEBUG_PROC_F)
void X86EMU_trace_regs(void);
void X86EMU_trace_xregs(void);
void X86EMU_dump_memory(u16 seg, u16 off, u32 amt);
int X86EMU_trace_on(void);
int X86EMU_trace_off(void);
#ifdef __cplusplus
} /* End of "C" linkage for C++ */
#endif
#endif /* __X86EMU_X86EMU_H */

View File

@@ -0,0 +1,113 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: Header file for system specific functions. These functions
* are always compiled and linked in the OS dependent libraries,
* and never in a binary portable driver.
*
****************************************************************************/
#ifndef __X86EMU_X86EMUI_H
#define __X86EMU_X86EMUI_H
/* If we are compiling in C++ mode, we can compile some functions as
* inline to increase performance (however the code size increases quite
* dramatically in this case).
*/
// NV
#ifndef NO_SYS_HEADERS
#define NO_SYS_HEADERS
#endif
#include <stddef.h> /* NULL */
#if defined(__cplusplus) && !defined(_NO_INLINE)
#define _INLINE inline
#else
#define _INLINE static
#endif
/* Get rid of unused parameters in C++ compilation mode */
#ifdef __cplusplus
#define X86EMU_UNUSED(v)
#else
#define X86EMU_UNUSED(v) v
#endif
#include "x86emu.h"
#include "x86emu/regs.h"
#include "x86emu/debug.h"
#include "x86emu/decode.h"
#include "x86emu/ops.h"
#include "x86emu/prim_ops.h"
#include "x86emu/fpu.h"
#include "x86emu/fpu_regs.h"
#ifndef NO_SYS_HEADERS
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#endif
// On x86_64 abs() is not recognized. I don't understand why.
#ifndef abs
#define abs(i) __builtin_abs(i)
#endif
/*--------------------------- Inline Functions ----------------------------*/
#ifdef __cplusplus
extern "C" { /* Use "C" linkage when in C++ mode */
#endif
extern u8 (X86APIP sys_rdb)(u32 addr);
extern u16 (X86APIP sys_rdw)(u32 addr);
extern u32 (X86APIP sys_rdl)(u32 addr);
extern void (X86APIP sys_wrb)(u32 addr,u8 val);
extern void (X86APIP sys_wrw)(u32 addr,u16 val);
extern void (X86APIP sys_wrl)(u32 addr,u32 val);
extern u8 (X86APIP sys_inb)(X86EMU_pioAddr addr);
extern u16 (X86APIP sys_inw)(X86EMU_pioAddr addr);
extern u32 (X86APIP sys_inl)(X86EMU_pioAddr addr);
extern void (X86APIP sys_outb)(X86EMU_pioAddr addr,u8 val);
extern void (X86APIP sys_outw)(X86EMU_pioAddr addr,u16 val);
extern void (X86APIP sys_outl)(X86EMU_pioAddr addr,u32 val);
#ifdef __cplusplus
} /* End of "C" linkage for C++ */
#endif
#endif /* __X86EMU_X86EMUI_H */

File diff suppressed because it is too large Load Diff

View File

@@ -22,205 +22,9 @@
*/
#include <nv.h>
#include "os/os.h"
#include <rmconfig.h>
#include <gpu/subdevice/subdevice.h>
#include <ctrl/ctrl0080/ctrl0080unix.h>
#include <ctrl/ctrl2080/ctrl2080unix.h>
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
return NV_OK;
}
void NV_API_CALL rm_init_dynamic_power_management(
nvidia_stack_t *sp,
nv_state_t *nv,
NvBool bPr3AcpiMethodPresent
)
{
}
void NV_API_CALL rm_cleanup_dynamic_power_management(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
}
void NV_API_CALL rm_enable_dynamic_power_management(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
}
NV_STATUS NV_API_CALL rm_ref_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
return NV_OK;
}
void NV_API_CALL rm_unref_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
}
NV_STATUS NV_API_CALL rm_transition_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
NvBool bEnter
)
{
return NV_OK;
}
const char* NV_API_CALL rm_get_vidmem_power_status(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return "?";
}
const char* NV_API_CALL rm_get_dynamic_power_management_status(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return "?";
}
const char* NV_API_CALL rm_get_gpu_gcx_support(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvBool bGcxTypeGC6
)
{
return "?";
}
NV_STATUS
subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS
subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS
subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams
)
{
return NV_OK;
}
void
RmUpdateGc6ConsoleRefCount
(
nv_state_t *nv,
NvBool bIncrease
)
{
}
void
RmInitS0ixPowerManagement
(
nv_state_t *nv
)
{
}
void
RmInitDeferredDynamicPowerManagement
(
nv_state_t *nv
)
{
}
void
RmDestroyDeferredDynamicPowerManagement
(
nv_state_t *nv
)
{
}
void RmHandleDisplayChange
(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
}
NV_STATUS
os_ref_dynamic_power
(
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
return NV_OK;
}
void
os_unref_dynamic_power
(
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
}
NV_STATUS
deviceCtrlCmdOsUnixVTSwitch_IMPL
(
Device *pDevice,
NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_save_low_res_mode(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS RmInitX86EmuState(OBJGPU *pGpu)
{
return NV_OK;
}
void RmFreeX86EmuState(OBJGPU *pGpu)
{
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,6 +62,16 @@ NvBool hypervisorIsVgxHyper_IMPL(void)
return os_is_vgx_hyper();
}
NvBool hypervisorIsAC_IMPL(void)
{
return NV_FALSE;
}
void hypervisorSetACSupported_IMPL(POBJHYPERVISOR pHypervisor)
{
pHypervisor->bIsACSupported = NV_TRUE;
}
NV_STATUS hypervisorInjectInterrupt_IMPL
(
POBJHYPERVISOR pHypervisor,
@@ -225,7 +235,6 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
{
THREAD_STATE_NODE threadState;
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJGPU *pGpu = NULL;
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
NV_STATUS rmStatus = NV_OK;
VGPU_TYPE *vgpuTypeInfo;
@@ -238,14 +247,6 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto exit;
}
if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex)) ==
NV_OK)
{
@@ -447,14 +448,11 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
const NvU8 *pMdevUuid,
NvU32 vgpuTypeId,
NvU16 *vgpuId,
NvU32 gpuPciBdf,
NvBool *is_driver_vm
NvU32 gpuPciBdf
)
{
THREAD_STATE_NODE threadState;
OBJSYS *pSys = SYS_GET_INSTANCE();
void *fp = NULL;
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
NV_STATUS rmStatus = NV_OK;
NV_ENTER_RM_RUNTIME(sp,fp);
@@ -466,8 +464,6 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
rmStatus = kvgpumgrCreateRequestVgpu(pNv->gpu_id, pMdevUuid,
vgpuTypeId, vgpuId, gpuPciBdf);
*is_driver_vm = pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED);
// UNLOCK: release API lock
rmapiLockRelease();
}
@@ -725,7 +721,8 @@ NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
{
if (pKernelHostVgpuDevice->gfid != 0)
{
rmStatus = kbifGetNumVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, numAreas);
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
numAreas, NULL, NULL);
if (rmStatus == NV_OK)
{
os_alloc_mem((void **)&vfRegionOffsets, sizeof(NvU64) * (*numAreas));
@@ -733,7 +730,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
if (vfRegionOffsets && vfRegionSizes)
{
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
vfRegionOffsets, vfRegionSizes);
numAreas, vfRegionOffsets, vfRegionSizes);
if (rmStatus == NV_OK)
{
*offsets = vfRegionOffsets;
@@ -917,7 +914,7 @@ NV_STATUS osVgpuRegisterMdev
)
{
NV_STATUS status = NV_OK;
vgpu_vfio_info vgpu_info;
vgpu_vfio_info vgpu_info = {0};
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
@@ -935,12 +932,22 @@ NV_STATUS osVgpuRegisterMdev
status = os_alloc_mem((void **)&vgpu_info.vgpuTypeIds,
((vgpu_info.numVgpuTypes) * sizeof(NvU32)));
if (status != NV_OK)
return status;
goto free_mem;
status = os_alloc_mem((void **)&vgpu_info.vgpuNames,
((vgpu_info.numVgpuTypes) * sizeof(char *)));
if (status != NV_OK)
goto free_mem;
vgpu_info.nv = pOsGpuInfo;
for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
{
status = os_alloc_mem((void *)&vgpu_info.vgpuNames[i], (VGPU_STRING_BUFFER_SIZE * sizeof(char)));
if (status != NV_OK)
goto free_mem;
vgpu_info.vgpuTypeIds[i] = pPhysGpuInfo->vgpuTypes[i]->vgpuTypeId;
os_snprintf((char *) vgpu_info.vgpuNames[i], VGPU_STRING_BUFFER_SIZE, "%s\n", pPhysGpuInfo->vgpuTypes[i]->vgpuName);
}
if ((!pPhysGpuInfo->sriovEnabled) ||
@@ -970,7 +977,22 @@ NV_STATUS osVgpuRegisterMdev
}
}
os_free_mem(vgpu_info.vgpuTypeIds);
free_mem:
if (vgpu_info.vgpuTypeIds)
os_free_mem(vgpu_info.vgpuTypeIds);
if (vgpu_info.vgpuNames)
{
for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
{
if (vgpu_info.vgpuNames[i])
{
os_free_mem(vgpu_info.vgpuNames[i]);
}
}
os_free_mem(vgpu_info.vgpuNames);
}
return status;
}
@@ -981,15 +1003,20 @@ NV_STATUS osIsVgpuVfioPresent(void)
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_PRESENT);
}
NV_STATUS osIsVfioPciCorePresent(void)
{
vgpu_vfio_info vgpu_info;
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VFIO_PCI_CORE_PRESENT);
}
void initVGXSpecificRegistry(OBJGPU *pGpu)
{
NvU32 data32;
osWriteRegistryDword(pGpu, NV_REG_STR_RM_POWER_FEATURES, 0x55455555);
osWriteRegistryDword(pGpu, NV_REG_STR_RM_INFOROM_DISABLE_BBX,
NV_REG_STR_RM_INFOROM_DISABLE_BBX_YES);
#if !defined(NVCPU_X86_64)
osWriteRegistryDword(pGpu, NV_REG_STR_RM_BAR2_APERTURE_SIZE_MB, 4);
#endif
osWriteRegistryDword(pGpu, NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR,
NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE);
if ((osReadRegistryDword(pGpu, NV_REG_STR_RM_DUMP_NVLOG, &data32) != NV_OK))

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -931,6 +931,20 @@ NV_STATUS osAllocPagesInternal(
if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE)))
nv->force_dma32_alloc = NV_TRUE;
//
// If AMD SEV is enabled but APM(Ampere protected) or CC(Confidential
// Compute) mode is not enabled on the GPU, all RM and client
// allocations must be an unprotected sysmem. If APM is enabled and RM
// is allocating sysmem for its internal use, then it has to be
// unprotected as protected sysmem is not accessible to GPU.
//
unencrypted = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_IN_UNPROTECTED_MEMORY);
if (pMemDesc->_addressSpace == ADDR_EGM)
{
nodeId = GPU_GET_MEMORY_MANAGER(pGpu)->localEgmNodeId;
}
if (NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount) > NV_U32_MAX)
{
status = NV_ERR_INVALID_LIMIT;
@@ -1220,14 +1234,6 @@ void osUnmapGPU(
}
}
NV_STATUS osDeviceClassToDeviceName(
NvU32 deviceInstance,
NvU8 *szName
)
{
return NV_ERR_GENERIC;
}
static void postEvent(
nv_event_t *event,
NvU32 hEvent,
@@ -1818,12 +1824,25 @@ void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *p
if (hypervisorIsVgxHyper())
{
//
// 1.8 seconds is chosen because it is 90% of the overall hard limit of 2.0
// seconds, imposed by WDDM driver rules.
// Currently primary use case of VGX is Windows, so setting 1.8 as default
//
*pTimeoutUs = 1.8 * 1000000;
if (IS_GSP_CLIENT(pGpu) && pGpu->getProperty(pGpu, PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX) &&
!pGpu->gspRmInitialized)
{
//
// For Hopper, 1.8 seconds is not enough to boot GSP-RM.
// To avoid this issue, 4 seconds timeout is set on initialization,
// and then it's going to be changed 1.8 seconds after GSP initialization.
//
*pTimeoutUs = 4 * 1000000;
}
else
{
//
// 1.8 seconds is chosen because it is 90% of the overall hard limit of 2.0
// seconds, imposed by WDDM driver rules.
// Currently primary use case of VGX is Windows, so setting 1.8 as default
//
*pTimeoutUs = 1.8 * 1000000;
}
}
else
{
@@ -2527,6 +2546,26 @@ NV_STATUS osCallACPI_DSM
pAcpiDsmGuid = (NvU8 *) &PCFG_DSM_GUID;
acpiDsmRev = PCFG_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_GPS_2X:
pAcpiDsmGuid = (NvU8 *) &GPS_DSM_GUID;
acpiDsmRev = GPS_2X_REVISION_ID;
acpiDsmInArgSize = (*pSize);
break;
case ACPI_DSM_FUNCTION_GPS:
if ((IsTU10X(pGpu)) ||
((gpuIsACPIPatchRequiredForBug2473619_HAL(pGpu)) &&
((acpiDsmSubFunction == GPS_FUNC_SUPPORT) ||
(acpiDsmSubFunction == GPS_FUNC_GETCALLBACKS))))
{
pAcpiDsmGuid = (NvU8 *) &GPS_DSM_GUID;
acpiDsmRev = GPS_REVISION_ID;
acpiDsmInArgSize = (*pSize);
}
else
{
return NV_ERR_NOT_SUPPORTED;
}
break;
case ACPI_DSM_FUNCTION_PEX:
pAcpiDsmGuid = (NvU8 *) &PEX_DSM_GUID;
acpiDsmRev = PEX_REVISION_ID;
@@ -3038,7 +3077,32 @@ osGC6PowerControl
NvU32 *pOut
)
{
return NV_ERR_NOT_SUPPORTED;
NV_STATUS status;
NvU32 inOut = cmd;
NvU16 rtnSize = sizeof(inOut);
if (FLD_TEST_DRF(_JT_FUNC, _POWERCONTROL, _GPU_POWER_CONTROL, _GSS, inOut))
{
if (!pOut)
{
return NV_ERR_INVALID_ARGUMENT;
}
}
status = osCallACPI_DSM(pGpu,
ACPI_DSM_FUNCTION_JT,
JT_FUNC_POWERCONTROL,
&inOut,
&rtnSize);
if ((status != NV_OK) || !pOut)
{
return status;
}
*pOut = inOut;
return NV_OK;
}
NvBool osTestPcieExtendedConfigAccess(void *handle, NvU32 offset)
@@ -3593,8 +3657,8 @@ osValidateClientTokens
PTOKEN_USER pClientTokenUser = (PTOKEN_USER)pClientSecurityToken;
PTOKEN_USER pCurrentTokenUser = (PTOKEN_USER)pCurrentSecurityToken;
NV_ASSERT_OR_RETURN((pClientTokenUser != NULL), NV_ERR_INVALID_POINTER);
NV_ASSERT_OR_RETURN((pCurrentTokenUser != NULL), NV_ERR_INVALID_POINTER);
if (pClientTokenUser == NULL || pCurrentTokenUser == NULL)
return NV_ERR_INVALID_POINTER;
if ((pClientTokenUser->euid != pCurrentTokenUser->euid) &&
(pClientTokenUser->pid != pCurrentTokenUser->pid))
@@ -3911,7 +3975,7 @@ osGetSmbiosTable
void *pMappedAddr = NULL;
NvU64 basePAddr = 0;
if (!NVCPU_IS_X86_64)
if (!NVCPU_IS_X86_64 && !NVCPU_IS_AARCH64)
{
return NV_ERR_NOT_SUPPORTED;
}
@@ -4192,7 +4256,7 @@ osWaitForIbmnpuRsync
nv_wait_for_ibmnpu_rsync(pOsGpuInfo);
}
NvU32
NvU64
osGetPageSize(void)
{
return os_page_size;
@@ -4784,6 +4848,8 @@ osRmCapInitDescriptor
* @brief Generates random bytes which can be used as a universally unique
* identifier.
*
* This function may sleep (interruptible).
*
* @param[out] pBytes Array of random bytes
* @param[in] numBytes Size of the array
*/
@@ -4794,9 +4860,7 @@ osGetRandomBytes
NvU16 numBytes
)
{
os_get_random_bytes(pBytes, numBytes);
return NV_OK;
return os_get_random_bytes(pBytes, numBytes);
}
/*
@@ -4879,6 +4943,46 @@ osReadPFPciConfigInVF
return NV_ERR_NOT_SUPPORTED;
}
/*!
* @brief Sends an MRQ (message-request) to BPMP
*
* The request, response, and ret parameters of this function correspond to the
* components of the tegra_bpmp_message struct, which BPMP uses to receive
* MRQs.
*
* @param[in] pOsGpuInfo OS specific GPU information pointer
* @param[in] mrq MRQ_xxx ID specifying what is requested
* @param[in] pRequestData Pointer to request input data
* @param[in] requestDataSize Size of structure pointed to by pRequestData
* @param[out] pResponseData Pointer to response output data
* @param[in] responseDataSize Size of structure pointed to by pResponseData
* @param[out] ret MRQ return code (from "ret" element of
* tegra_bpmp_message struct)
* @param[out] apiRet Return code from tegra_bpmp_transfer call
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available,
* NV_ERR_INVALID_POINTER if the tegra_bpmp struct pointer could not
* be obtained from nv, or
* NV_ERR_GENERIC if the tegra_bpmp_transfer call failed (see apiRet
* for Linux error code).
*/
NV_STATUS
osTegraSocBpmpSendMrq
(
OS_GPU_INFO *pOsGpuInfo,
NvU32 mrq,
const void *pRequestData,
NvU32 requestDataSize,
void *pResponseData,
NvU32 responseDataSize,
NvS32 *pRet,
NvS32 *pApiRet
)
{
return NV_ERR_NOT_SUPPORTED;
}
/*!
* @brief Returns IMP-relevant data collected from other modules
*
@@ -4988,7 +5092,7 @@ osCreateNanoTimer
*
* @param[in] pOsGpuInfo OS specific GPU information pointer
* @param[in] pTimer pointer to high resolution timer object
* @param[in] timens time in nano seconds
* @param[in] timeNs Relative time in nano seconds
*/
NV_STATUS
osStartNanoTimer
@@ -5251,9 +5355,89 @@ osReleaseGpuOsInfo
nv_put_file_private(pOsInfo);
}
/*!
* @brief Add GPU memory as a NUMA node.
*
* @param[in/out] pOsGpuInfo OS specific GPU information pointer
* @param[in] offset start offset of the partition within FB
* @param[in] size size of the partition
* @param[out] pNumaNodeId OS NUMA node id for the added memory.
*
* @returns NV_OK if all is okay. Otherwise an error-specific value.
*
*/
NV_STATUS
osNumaAddGpuMemory
(
OS_GPU_INFO *pOsGpuInfo,
NvU64 offset,
NvU64 size,
NvU32 *pNumaNodeId
)
{
nv_state_t *nv = pOsGpuInfo;
return os_numa_add_gpu_memory(nv->handle, offset, size, pNumaNodeId);
}
/*!
* @brief Remove a particular SMC partition's GPU memory from OS kernel.
*
* Remove GPU memory from the OS kernel that is earlier added as a NUMA node
* to the kernel in platforms where GPU is coherently connected to the CPU.
*
* @param[in/out] pOsGpuInfo OS_GPU_INFO OS specific GPU information pointer
* @param[in] offset start offset of the partition within FB
* @param[in] size size of the partition
* @param[in] numaNodeId OS NUMA node id of the memory to be removed.
*
*/
void
osNumaRemoveGpuMemory
(
OS_GPU_INFO *pOsGpuInfo,
NvU64 offset,
NvU64 size,
NvU32 numaNodeId
)
{
nv_state_t *nv = pOsGpuInfo;
NV_STATUS status = os_numa_remove_gpu_memory(nv->handle, offset, size, numaNodeId);
NV_ASSERT(status == NV_OK);
return;
}
NvBool
osDmabufIsSupported(void)
{
return os_dma_buf_enabled;
}
NV_STATUS
osGetEgmInfo
(
OBJGPU *pGpu,
NvU64 *pPhysAddr,
NvU64 *pSize,
NvS32 *pNodeId
)
{
return nv_get_egm_info(NV_GET_NV_STATE(pGpu), pPhysAddr, pSize, pNodeId);
}
/*!
* @brief Offline (i.e., blacklist) the page containing a given address from OS kernel.
*
* @param[in] address Address (SPA) of the page to be offlined
*
*/
NV_STATUS
osOfflinePageAtAddress
(
NvU64 address
)
{
return os_offline_page_at_address(address);
}

View File

@@ -59,6 +59,7 @@
#include <nv-kernel-rmapi-ops.h>
#include <rmobjexportimport.h>
#include "nv-reg.h"
#include "nv-firmware-registry.h"
#include "core/hal_mgr.h"
#include "gpu/device/device.h"
@@ -337,6 +338,11 @@ RmLogGpuCrash(OBJGPU *pGpu)
"NVRM: A GPU crash dump has been created. If possible, please run\n"
"NVRM: nvidia-bug-report.sh as root to collect this data before\n"
"NVRM: the NVIDIA kernel module is unloaded.\n");
if (hypervisorIsVgxHyper())
{
nv_printf(NV_DBG_ERRORS, "NVRM: Dumping nvlogs buffers\n");
nvlogDumpToKernelLog(NV_FALSE);
}
}
// Restore the disconnected properties, if they were reset
@@ -1214,12 +1220,13 @@ static void RmHandleDNotifierEvent(
static NV_STATUS
RmDmabufVerifyMemHandle(
OBJGPU *pGpu,
NvHandle hSrcClient,
NvHandle hMemory,
NvU64 offset,
NvU64 size,
void *pGpuInstanceInfo
OBJGPU *pGpu,
NvHandle hSrcClient,
NvHandle hMemory,
NvU64 offset,
NvU64 size,
void *pGpuInstanceInfo,
MEMORY_DESCRIPTOR **ppMemDesc
)
{
NV_STATUS status;
@@ -1282,6 +1289,8 @@ RmDmabufVerifyMemHandle(
return NV_ERR_INVALID_ARGUMENT;
}
*ppMemDesc = pMemDesc;
return NV_OK;
}
@@ -1662,6 +1671,43 @@ static void RmHandleGPSStatusChange
nv_state_t *pNv
)
{
NV_STATUS rmStatus;
RM_API *pRmApi;
THREAD_STATE_NODE threadState;
NV0000_CTRL_SYSTEM_GPS_CONTROL_PARAMS gpsControl = { 0 };
pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_ACPI);
if (pRmApi == NULL)
{
return;
}
gpsControl.command = NV0000_CTRL_CMD_SYSTEM_GPS_DATA_INIT_USING_SBIOS_AND_ACK;
gpsControl.locale = NV0000_CTRL_CMD_SYSTEM_GPS_LOCALE_SYSTEM;
//
// NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL is not a subdevice command.
// But, Inside GPS module, the first GPU, which has GPS
// enabled will be grabbed with GPU manager. The RMAPI_API_LOCK_INTERNAL
// will internally grab the GPU locks and wake-up all the GPUs. Ideally
// this control call should be GPU specific command, if internally
// it uses the GPU.
//
rmStatus = pRmApi->Control(pRmApi,
pNv->rmapi.hClient,
pNv->rmapi.hClient,
NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL,
(void *)&gpsControl,
sizeof(NV0000_CTRL_SYSTEM_GPS_CONTROL_PARAMS));
RmUnixRmApiEpilogue(pNv, &threadState);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"%s: Failed to handle ACPI GPS status change event, status=0x%x\n",
__FUNCTION__, rmStatus);
}
}
/*!
@@ -1918,15 +1964,20 @@ static NV_STATUS RmCreateMmapContextLocked(
{
if (CliSetGpuContext(hClient, hDevice, &pGpu, NULL) != NV_OK)
{
NvU32 tmp;
if (CliSetSubDeviceContext(hClient, hDevice, &tmp, &pGpu) != NV_OK)
{
//
// If this mapping isn't for a GPU then we don't need to
// create a context for it.
//
RsClient *pClient;
Subdevice *pSubdevice;
status = serverGetClientUnderLock(&g_resServ, hClient, &pClient);
if (status != NV_OK)
return status;
}
status = subdeviceGetByHandle(pClient, hDevice, &pSubdevice);
if (status != NV_OK)
return status;
pGpu = GPU_RES_GET_GPU(pSubdevice);
GPU_RES_SET_THREAD_BC_STATE(pSubdevice);
}
}
@@ -2125,6 +2176,7 @@ static NV_STATUS RmGetAllocPrivate(
switch (memdescGetAddressSpace(pMemDesc))
{
case ADDR_SYSMEM:
case ADDR_EGM:
break;
default:
rmStatus = NV_ERR_OBJECT_NOT_FOUND;
@@ -3147,37 +3199,6 @@ NV_STATUS rm_update_device_mapping_info(
return RmStatus;
}
static void rm_is_device_rm_firmware_capable(
nv_state_t *pNv,
NvU32 pmcBoot42,
NvBool *pbIsFirmwareCapable,
NvBool *pbEnableByDefault
)
{
NvBool bIsFirmwareCapable = NV_FALSE;
NvBool bEnableByDefault = NV_FALSE;
if (NV_IS_SOC_DISPLAY_DEVICE(pNv))
{
bIsFirmwareCapable = NV_TRUE;
}
else
{
bIsFirmwareCapable = gpumgrIsDeviceRmFirmwareCapable(pNv->pci_info.device_id,
pmcBoot42,
&bEnableByDefault);
}
if (pbIsFirmwareCapable != NULL)
{
*pbIsFirmwareCapable = bIsFirmwareCapable;
}
if (pbEnableByDefault != NULL)
{
*pbEnableByDefault = bEnableByDefault;
}
}
static NvBool NV_API_CALL rm_is_legacy_device(
NvU16 device_id,
NvU16 subsystem_vendor,
@@ -3268,10 +3289,10 @@ NV_STATUS NV_API_CALL rm_is_supported_device(
{
NvBool bIsFirmwareCapable;
rm_is_device_rm_firmware_capable(pNv,
pmc_boot_42,
&bIsFirmwareCapable,
NULL);
bIsFirmwareCapable = gpumgrIsDeviceRmFirmwareCapable(pNv->pci_info.device_id,
pmc_boot_42,
NV_IS_SOC_DISPLAY_DEVICE(pNv),
NULL);
if (!bIsFirmwareCapable)
{
nv_printf(NV_DBG_ERRORS,
@@ -4042,7 +4063,7 @@ NV_STATUS NV_API_CALL rm_p2p_dma_map_pages(
nvidia_stack_t *sp,
nv_dma_device_t *peer,
NvU8 *pGpuUuid,
NvU32 pageSize,
NvU64 pageSize,
NvU32 pageCount,
NvU64 *pDmaAddresses,
void **ppPriv
@@ -4444,25 +4465,14 @@ static void rm_set_firmware_logs(
nv_state_t *nv
)
{
NV_STATUS status;
NvU32 data;
void *fp;
NvU32 enableFirmwareLogsRegVal = NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG;
NV_ENTER_RM_RUNTIME(sp,fp);
status = RmReadRegistryDword(nv, NV_REG_ENABLE_GPU_FIRMWARE_LOGS, &data);
if (status == NV_OK)
{
if ((data == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE)
#if defined(DEBUG) || defined(DEVELOP)
|| (data == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG)
#endif
)
{
nv->enable_firmware_logs = NV_TRUE;
}
}
(void) RmReadRegistryDword(nv, NV_REG_ENABLE_GPU_FIRMWARE_LOGS,
&enableFirmwareLogsRegVal);
nv->enable_firmware_logs = gpumgrGetRmFirmwareLogsEnabled(enableFirmwareLogsRegVal);
NV_EXIT_RM_RUNTIME(sp,fp);
}
@@ -4677,7 +4687,6 @@ void RmInitAcpiMethods(OBJOS *pOS, OBJSYS *pSys, OBJGPU *pGpu)
// Check if NVPCF _DSM functions are implemented under NVPCF or GPU device scope.
RmCheckNvpcfDsmScope(pGpu);
acpiDsmInit(pGpu);
}
//
@@ -5094,7 +5103,8 @@ NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(
NvBool NV_API_CALL rm_disable_iomap_wc(void)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
return pSys->pCl->getProperty(pSys, PDB_PROP_CL_DISABLE_IOMAP_WC) == NV_TRUE;
OBJCL *pCl = pSys->pCl;
return pCl->getProperty(pCl, PDB_PROP_CL_DISABLE_IOMAP_WC) == NV_TRUE;
}
//
@@ -5112,9 +5122,11 @@ NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle(
NvHandle hMemory,
NvU64 offset,
NvU64 size,
NvHandle *phMemoryDuped
NvHandle *phMemoryDuped,
void **ppStaticMemInfo
)
{
MEMORY_DESCRIPTOR *pMemDesc;
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus;
OBJGPU *pGpu;
@@ -5130,7 +5142,8 @@ NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle(
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
rmStatus = RmDmabufVerifyMemHandle(pGpu, hSrcClient, hMemory,
offset, size, pGpuInstanceInfo);
offset, size, pGpuInstanceInfo,
&pMemDesc);
if (rmStatus == NV_OK)
{
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
@@ -5164,6 +5177,7 @@ NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle(
*phMemoryDuped = hMemoryDuped;
}
}
*ppStaticMemInfo = (void *) pMemDesc;
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -5205,39 +5219,102 @@ void NV_API_CALL rm_dma_buf_undup_mem_handle(
NV_EXIT_RM_RUNTIME(sp,fp);
}
//
// Maps a handle to BAR1.
// Must be called with API lock and GPU lock held.
//
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
nvidia_stack_t *sp,
nv_state_t *nv,
NvHandle hClient,
NvHandle hMemory,
NvU64 offset,
NvU64 size,
NvU64 *pBar1Va
nvidia_stack_t *sp,
nv_state_t *nv,
NvHandle hClient,
NvHandle hMemory,
NvU64 offset,
NvU64 size,
void *pStaticMemInfo,
nv_phys_addr_range_t **ppRanges,
NvU32 *pRangeCount
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus;
NV_STATUS rmStatus = NV_OK;
OBJGPU *pGpu;
KernelBus *pKernelBus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
NV_ASSERT_OR_GOTO(((ppRanges != NULL) && (pRangeCount != NULL)), Done);
pGpu = NV_GET_NV_PRIV_PGPU(nv);
pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
NV_ASSERT(rmapiLockIsOwner());
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING))
{
KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR *) pStaticMemInfo;
NvU32 pageSize = 0;
NvU32 pageCount = 0;
NvU32 i = 0;
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
pageSize = memdescGetPageSize(pMemDesc, AT_GPU);
pageCount = size / pageSize;
rmStatus = kbusMapFbApertureByHandle(pGpu, pKernelBus, hClient,
hMemory, offset, size, pBar1Va);
rmStatus = os_alloc_mem((void **) ppRanges,
pageCount * sizeof(nv_phys_addr_range_t));
if (rmStatus != NV_OK)
{
goto Done;
}
for (i = 0; i < pageCount; i++)
{
NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, offset);
(*ppRanges)[i].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
(*ppRanges)[i].len = pageSize;
offset += pageSize;
}
*pRangeCount = pageCount;
}
else
{
Device *pDevice;
RsClient *pClient;
KernelBus *pKernelBus;
NvU64 bar1Va;
NV_ASSERT(rmapiLockIsOwner());
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
NV_ASSERT_OK_OR_GOTO(rmStatus,
serverGetClientUnderLock(&g_resServ, hClient, &pClient),
Done);
NV_ASSERT_OK_OR_GOTO(rmStatus,
deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice),
Done);
pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
rmStatus = kbusMapFbApertureByHandle(pGpu, pKernelBus, hClient,
hMemory, offset, size, &bar1Va,
pDevice);
if (rmStatus != NV_OK)
{
goto Done;
}
// Adjust this alloc when discontiguous BAR1 is supported
rmStatus = os_alloc_mem((void **) ppRanges,
sizeof(nv_phys_addr_range_t));
if (rmStatus != NV_OK)
{
kbusUnmapFbApertureByHandle(pGpu, pKernelBus, hClient,
hMemory, bar1Va);
goto Done;
}
(*ppRanges)[0].addr = bar1Va;
(*ppRanges)[0].len = size;
*pRangeCount = 1;
}
Done:
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
@@ -5248,38 +5325,52 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
// Unmaps a handle from BAR1.
// Must be called with API lock and GPU lock held.
//
NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle(
nvidia_stack_t *sp,
nv_state_t *nv,
NvHandle hClient,
NvHandle hMemory,
NvU64 size,
NvU64 bar1Va
void NV_API_CALL rm_dma_buf_unmap_mem_handle(
nvidia_stack_t *sp,
nv_state_t *nv,
NvHandle hClient,
NvHandle hMemory,
NvU64 size,
nv_phys_addr_range_t **ppRanges,
NvU32 rangeCount
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus;
NV_STATUS rmStatus = NV_OK;
OBJGPU *pGpu;
KernelBus *pKernelBus;
NvU32 i;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
NV_ASSERT_OR_GOTO(((ppRanges != NULL) && (rangeCount != 0)), Done);
pGpu = NV_GET_NV_PRIV_PGPU(nv);
pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
NV_ASSERT(rmapiLockIsOwner());
if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING))
{
KernelBus *pKernelBus;
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
NV_ASSERT(rmapiLockIsOwner());
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
rmStatus = kbusUnmapFbApertureByHandle(pGpu, pKernelBus, hClient,
hMemory, bar1Va);
pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
for (i = 0; i < rangeCount; i++)
{
rmStatus = kbusUnmapFbApertureByHandle(pGpu, pKernelBus, hClient,
hMemory, (*ppRanges)[i].addr);
NV_ASSERT_OK(rmStatus);
}
}
os_free_mem(*ppRanges);
*ppRanges = NULL;
Done:
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(
@@ -5289,7 +5380,8 @@ NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(
NvHandle *phClient,
NvHandle *phDevice,
NvHandle *phSubdevice,
void **ppGpuInstanceInfo
void **ppGpuInstanceInfo,
NvBool *pbStaticPhysAddrs
)
{
THREAD_STATE_NODE threadState;
@@ -5310,6 +5402,12 @@ NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(
{
rmStatus = RmDmabufGetClientAndDevice(pGpu, hClient, phClient, phDevice,
phSubdevice, ppGpuInstanceInfo);
if (rmStatus == NV_OK)
{
// Note: revisit this when BAR1 static map is supported.
*pbStaticPhysAddrs = pGpu->getProperty(pGpu,
PDB_PROP_GPU_COHERENT_CPU_MAPPING);
}
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
@@ -5369,6 +5467,25 @@ void NV_API_CALL rm_dma_buf_put_client_and_device(
// NOTE: Used only on VMWware
//
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(
nvidia_stack_t *sp,
NvBool is_driver_vm
)
{
OBJSYS *pSys;
POBJHYPERVISOR pHypervisor;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
pSys = SYS_GET_INSTANCE();
pHypervisor = SYS_GET_HYPERVISOR(pSys);
pHypervisor->setProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED, is_driver_vm);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NvBool NV_API_CALL rm_is_altstack_in_use(void)
{
#if defined(__use_altstack__)

View File

@@ -80,6 +80,8 @@
#include <class/cl2080.h>
#include <class/cl402c.h>
#include <gpu/conf_compute/conf_compute.h>
#include <gpu/dce_client/dce_client.h>
// RMCONFIG: need definition of REGISTER_ALL_HALS()
#include "g_hal_register.h"
@@ -476,6 +478,8 @@ RmInitGpuInfoWithRmApi
NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES);
}
nv->coherent = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING);
portMemFree(pGpuInfoParams);
// UNLOCK: release GPUs lock
@@ -1381,7 +1385,8 @@ static NvBool RmUnixAllocRmApi(
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_ROOT,
&nv->rmapi.hClient) != NV_OK)
&nv->rmapi.hClient,
sizeof(nv->rmapi.hClient)) != NV_OK)
{
goto fail;
}
@@ -1405,7 +1410,8 @@ static NvBool RmUnixAllocRmApi(
nv->rmapi.hClient,
&nv->rmapi.hDevice,
NV01_DEVICE_0,
&deviceParams) != NV_OK)
&deviceParams,
sizeof(deviceParams)) != NV_OK)
{
goto fail;
}
@@ -1418,7 +1424,8 @@ static NvBool RmUnixAllocRmApi(
nv->rmapi.hDevice,
&nv->rmapi.hSubDevice,
NV20_SUBDEVICE_0,
&subDeviceParams) != NV_OK)
&subDeviceParams,
sizeof(subDeviceParams)) != NV_OK)
{
goto fail;
}
@@ -1433,7 +1440,8 @@ static NvBool RmUnixAllocRmApi(
nv->rmapi.hSubDevice,
&nv->rmapi.hI2C,
NV40_I2C,
NULL) != NV_OK)
NULL,
0) != NV_OK)
{
nv->rmapi.hI2C = 0;
}
@@ -1449,7 +1457,8 @@ static NvBool RmUnixAllocRmApi(
nv->rmapi.hDevice,
&nv->rmapi.hDisp,
NV04_DISPLAY_COMMON,
NULL) != NV_OK)
NULL,
0) != NV_OK)
{
nv->rmapi.hDisp = 0;
}
@@ -1461,6 +1470,59 @@ fail:
return NV_FALSE;
}
static NV_STATUS RmFetchGspRmImages
(
nv_state_t *nv,
GSP_FIRMWARE *pGspFw,
const void **gspFwHandle,
const void **gspFwLogHandle
)
{
nv_firmware_chip_family_t chipFamily;
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvU32 gpuArch = (DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, nvp->pmc_boot_42) <<
GPU_ARCH_SHIFT);
NvU32 gpuImpl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, nvp->pmc_boot_42);
chipFamily = nv_firmware_get_chip_family(gpuArch, gpuImpl);
portMemSet(pGspFw, 0, sizeof(*pGspFw));
*gspFwHandle = nv_get_firmware(nv, NV_FIRMWARE_TYPE_GSP,
chipFamily,
&pGspFw->pBuf,
&pGspFw->size);
if (*gspFwHandle == NULL &&
!nv->allow_fallback_to_monolithic_rm)
{
NV_PRINTF(LEVEL_ERROR, "No firmware image found\n");
return NV_ERR_NOT_SUPPORTED;
}
else if (*gspFwHandle != NULL)
{
#if LIBOS_LOG_DECODE_ENABLE
if (nv->enable_firmware_logs)
{
*gspFwLogHandle = nv_get_firmware(nv, NV_FIRMWARE_TYPE_GSP_LOG,
chipFamily,
&pGspFw->pLogElf,
&pGspFw->logElfSize);
if (*gspFwLogHandle == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Failed to load gsp_log_*.bin, no GSP-RM logs will be printed (non-fatal)\n");
}
}
#endif
nv->request_fw_client_rm = NV_TRUE;
}
else
{
nv->request_fw_client_rm = NV_FALSE;
}
return NV_OK;
}
NvBool RmInitAdapter(
nv_state_t *nv
)
@@ -1524,46 +1586,14 @@ NvBool RmInitAdapter(
//
if (nv->request_firmware)
{
NvU32 gpuArch = (DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, nvp->pmc_boot_42) <<
GPU_ARCH_SHIFT);
NvU32 gpuImpl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, nvp->pmc_boot_42);
nv_firmware_chip_family_t chipFamily = nv_firmware_get_chip_family(gpuArch, gpuImpl);
nv_set_dma_address_size(nv, NV_GSP_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);
gspFwHandle = nv_get_firmware(nv, NV_FIRMWARE_TYPE_GSP,
chipFamily,
&gspFw.pBuf,
&gspFw.size);
if (gspFwHandle == NULL &&
!nv->allow_fallback_to_monolithic_rm)
status.rmStatus = RmFetchGspRmImages(nv, &gspFw, &gspFwHandle, &gspFwLogHandle);
if (status.rmStatus != NV_OK)
{
RM_SET_ERROR(status, RM_INIT_FIRMWARE_FETCH_FAILED);
goto shutdown;
}
else if (gspFwHandle != NULL)
{
#if LIBOS_LOG_DECODE_ENABLE
if (nv->enable_firmware_logs)
{
gspFwLogHandle = nv_get_firmware(nv, NV_FIRMWARE_TYPE_GSP_LOG,
chipFamily,
&gspFw.pLogElf,
&gspFw.logElfSize);
if (gspFwLogHandle == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Failed to load gsp_log_*.bin, no GSP-RM logs will be printed (non-fatal)\n");
}
}
#endif
nv->request_fw_client_rm = NV_TRUE;
}
else
{
nv->request_fw_client_rm = NV_FALSE;
}
}
// initialize the RM device register mapping
@@ -1606,6 +1636,7 @@ NvBool RmInitAdapter(
if (status.rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "FSP boot command failed.\n");
RM_SET_ERROR(status, RM_INIT_FIRMWARE_INIT_FAILED);
goto shutdown;
}
}
@@ -1661,6 +1692,17 @@ NvBool RmInitAdapter(
initNbsiTable(pGpu);
}
//
// Load GSP proxy if early init is required. We need to do this
// before we trigger a full gpuStateInit and gpuStateLoad in
// RmInitNvDevice
// TODO: Check bug 200744430
//
if (gpuIsCCFeatureEnabled(pGpu))
{
confComputeEarlyInit(pGpu, GPU_GET_CONF_COMPUTE(pGpu));
}
// finally, initialize the device
RmInitNvDevice(devicereference, &status);
if (! RM_INIT_SUCCESS(status.initStatus) )
@@ -1710,6 +1752,14 @@ NvBool RmInitAdapter(
intrSetIntrEn(pIntr, INTERRUPT_TYPE_HARDWARE);
}
// LOCK: acquire GPUs lock
status.rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE,
RM_LOCK_MODULES_INIT);
if (status.rmStatus != NV_OK)
{
goto shutdown;
}
KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu);
// initialize the watchdog (disabled by default)
status.rmStatus = pKernelRc != NULL ? krcWatchdogInit_HAL(pGpu, pKernelRc) :
@@ -1729,8 +1779,13 @@ NvBool RmInitAdapter(
RM_SET_ERROR(status, RM_INIT_WATCHDOG_FAILED);
NV_PRINTF(LEVEL_ERROR,
"krcWatchdogInit failed, bailing out of RmInitAdapter\n");
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
goto shutdown;
}
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
nv_start_rc_timer(nv);
nvp->status = NV_OK;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2012-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -71,9 +71,10 @@ osCreateMemFromOsDescriptor
NV_STATUS rmStatus;
void *pPrivate;
pClient = serverutilGetClientUnderLock(hClient);
if ((pDescriptor == NvP64_NULL) ||
(*pLimit == 0) ||
(serverutilGetClientUnderLock(hClient, &pClient) != NV_OK))
(pClient == NULL))
{
return NV_ERR_INVALID_PARAM_STRUCT;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -95,6 +95,15 @@ static void NV_API_CALL osNvlinkFreeAltStack(nvidia_stack_t *sp)
#endif
}
static NV_STATUS NV_API_CALL osNvlinkGetAltStack(nvidia_stack_t **sp)
{
return osNvlinkAllocAltStack(sp);
}
static void NV_API_CALL osNvlinkPutAltStack(nvidia_stack_t *sp)
{
osNvlinkFreeAltStack(sp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_add_link
(
struct nvlink_link *link
@@ -102,10 +111,9 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_add_link
{
void *fp;
NvlStatus status;
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp;
if (NV_OK != osNvlinkAllocAltStack(&sp))
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
@@ -116,14 +124,7 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_add_link
NV_EXIT_RM_RUNTIME(sp, fp);
if (status == NVL_SUCCESS)
{
pLink->pOsInfo = sp;
}
else
{
osNvlinkFreeAltStack(sp);
}
osNvlinkPutAltStack(sp);
return status;
}
@@ -135,10 +136,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_remove_link
{
void *fp;
NvlStatus status;
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
pLink->pOsInfo = NULL;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -146,7 +149,7 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_remove_link
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkFreeAltStack(sp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -159,8 +162,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_lock_link
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -170,6 +177,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_lock_link
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -180,8 +189,12 @@ static void NV_API_CALL rm_nvlink_ops_unlock_link
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -190,6 +203,8 @@ static void NV_API_CALL rm_nvlink_ops_unlock_link
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_queue_link_change
@@ -200,8 +215,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_queue_link_change
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link_change->master->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -211,6 +230,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_queue_link_change
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -224,8 +245,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_dl_link_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -235,6 +260,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_dl_link_mode
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -247,8 +274,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_dl_link_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -258,6 +289,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_dl_link_mode
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -271,8 +304,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_tl_link_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -281,6 +318,7 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_tl_link_mode
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -294,8 +332,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_tl_link_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -305,6 +347,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_tl_link_mode
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -318,8 +362,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_tx_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -329,6 +377,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_tx_mode
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -342,8 +392,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_tx_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -353,6 +407,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_tx_mode
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -366,8 +422,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -377,6 +437,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_mode
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -390,8 +452,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_mode
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -401,6 +467,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_mode
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -413,8 +481,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_detect
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -424,6 +496,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_detect
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -435,8 +509,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_detect
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -446,6 +524,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_detect
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -457,8 +537,12 @@ static void NV_API_CALL rm_nvlink_get_uphy_load
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -467,6 +551,8 @@ static void NV_API_CALL rm_nvlink_get_uphy_load
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_read_link_discovery_token
@@ -478,8 +564,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_read_link_discovery_token
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -489,6 +579,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_read_link_discovery_token
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -501,8 +593,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_write_link_discovery_token
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -512,6 +608,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_write_link_discovery_token
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}
@@ -522,8 +620,12 @@ static void NV_API_CALL rm_nvlink_ops_training_complete
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t *sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -532,6 +634,8 @@ static void NV_API_CALL rm_nvlink_ops_training_complete
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_ali_training
@@ -542,8 +646,12 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_ali_training
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK * pLink = link->link_info;
nvidia_stack_t * sp = (nvidia_stack_t *)pLink->pOsInfo;
nvidia_stack_t * sp;
if (osNvlinkGetAltStack(&sp) != NV_OK)
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
@@ -552,6 +660,8 @@ static NvlStatus NV_API_CALL rm_nvlink_ops_ali_training
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkPutAltStack(sp);
return status;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -214,7 +214,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(
nvidia_stack_t *sp, void *pPma,
NvLength pageCount, NvU32 pageSize,
NvLength pageCount, NvU64 pageSize,
nvgpuPmaAllocationOptions_t pPmaAllocOptions,
NvU64 *pPages)
{
@@ -229,7 +229,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(
nvidia_stack_t *sp, void *pPma,
NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
NvU64 *pPages, NvLength pageCount, NvU64 pageSize, NvU32 flags)
{
NV_STATUS rmStatus;
void *fp;
@@ -241,7 +241,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(
nvidia_stack_t *sp, void *pPma,
NvU64 *pPages, NvLength pageCount, NvU32 pageSize)
NvU64 *pPages, NvLength pageCount, NvU64 pageSize)
{
NV_STATUS rmStatus;
void *fp;
@@ -253,7 +253,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
NvU64 gpuOffset, NvLength length, void **cpuPtr, NvU32 pageSize)
NvU64 gpuOffset, NvLength length, void **cpuPtr, NvU64 pageSize)
{
NV_STATUS rmStatus;
void *fp;
@@ -274,8 +274,31 @@ NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(
return NV_OK;
}
// Allocates a TSG (time-slice group) in the given GPU VA space.
// Thin OS-interface wrapper: enters the RM runtime on the caller-supplied
// alternate stack, forwards to nvGpuOpsTsgAllocate(), and returns its status.
// On success *tsg receives the new TSG handle (owned by the caller, released
// via rm_gpu_ops_tsg_destroy()).
NV_STATUS NV_API_CALL rm_gpu_ops_tsg_allocate(nvidia_stack_t *sp,
                                              gpuAddressSpaceHandle vaspace,
                                              const gpuTsgAllocParams *allocParams,
                                              gpuTsgHandle *tsg)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsTsgAllocate(vaspace, allocParams, tsg);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_tsg_destroy(nvidia_stack_t * sp,
nvgpuTsgHandle_t tsg)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsTsgDestroy(tsg);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *sp,
gpuAddressSpaceHandle vaspace,
const gpuTsgHandle tsg,
const gpuChannelAllocParams *allocParams,
gpuChannelHandle *channel,
gpuChannelInfo *channelInfo)
@@ -283,7 +306,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *sp,
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsChannelAllocate(vaspace, allocParams, channel,
rmStatus = nvGpuOpsChannelAllocate(tsg, allocParams, channel,
channelInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
@@ -300,7 +323,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t * sp,
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *sp,
void *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
void *pPma, NvU64 *pPages, NvLength pageCount, NvU64 pageSize, NvU32 flags)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
@@ -583,12 +606,13 @@ NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)
gpuAccessCntrInfo *accessCntrInfo,
NvU32 accessCntrIndex)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsInitAccessCntrInfo(device, accessCntrInfo);
rmStatus = nvGpuOpsInitAccessCntrInfo(device, accessCntrInfo, accessCntrIndex);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
@@ -822,3 +846,136 @@ rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *sp,
return rmStatus;
}
// Creates a CCSL (Confidential Computing security library) context bound to
// the given channel; *ctx receives the new context used by the other
// rm_gpu_ops_ccsl_* wrappers and is released via
// rm_gpu_ops_ccsl_context_clear().  Pure runtime-stack wrapper around
// nvGpuOpsCcslContextInit().
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *sp,
                                                   struct ccslContext_t **ctx,
                                                   gpuChannelHandle channel)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslContextInit(ctx, channel);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *sp,
struct ccslContext_t *ctx)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslContextClear(ctx);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_acquire_encryption_iv(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU8 *encryptIv)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslAcquireEncryptionIv(ctx, encryptIv);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU8 *decryptIv)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslLogDeviceEncryption(ctx, decryptIv);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU8 direction)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslRotateIv(ctx, direction);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *encryptIv,
NvU8 *outputBuffer,
NvU8 *authTagData)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslEncryptWithIv(ctx, bufferSize, inputBuffer, encryptIv, outputBuffer, authTagData);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 *authTagData)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslEncrypt(ctx, bufferSize, inputBuffer, outputBuffer, authTagData);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
// Decrypts bufferSize bytes from inputBuffer into outputBuffer using the CCSL
// context, the supplied decryptIv, and authTagData (presumably an AEAD
// authentication tag checked by the callee — verify in nvGpuOpsCcslDecrypt).
// Pure runtime-stack wrapper; returns the callee's status.
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *sp,
                                              struct ccslContext_t *ctx,
                                              NvU32 bufferSize,
                                              NvU8 const *inputBuffer,
                                              NvU8 const *decryptIv,
                                              NvU8 *outputBuffer,
                                              NvU8 const *authTagData)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, outputBuffer, authTagData);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagData)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslSign(ctx, bufferSize, inputBuffer, authTagData);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU8 direction,
NvU64 *messageNum)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsQueryMessagePool(ctx, direction, messageNum);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}

View File

@@ -262,7 +262,8 @@ static NV_STATUS RmRefObjExportImport(void)
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_ROOT,
&hObjExportRmClient);
&hObjExportRmClient,
sizeof(hObjExportRmClient));
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc root in %s\n", __FUNCTION__);
@@ -426,7 +427,8 @@ NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice,
NV01_DEVICE_0,
&params);
&params,
sizeof(params));
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc device in %s\n",
@@ -443,7 +445,8 @@ NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
objExportDevice[deviceInstance].hRmDevice,
objExportDevice[deviceInstance].hRmSubDevice,
NV20_SUBDEVICE_0,
&subdevParams);
&subdevParams,
sizeof(subdevParams));
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc subdevice in %s\n",

View File

@@ -30,6 +30,291 @@
#include <osapi.h>
#include <gpu/mem_mgr/mem_mgr.h>
#include <vgpu/rpc.h>
#include "vgpu/vgpu_events.h"
//
// Execute an int10h VBIOS call through the x86 real-mode emulator.
// *eax / *ebx carry the VESA arguments in and the results out.
//
// Returns NV_ERR_NOT_SUPPORTED on non-x86_64 builds or UEFI systems (no
// legacy int10h services), NV_ERR_GENERIC when the VESA call does not report
// success (AX != 0x004f), or the status from nv_vbios_call() otherwise.
//
static NV_STATUS
unixCallVideoBIOS
(
    OBJGPU *pGpu,
    NvU32  *eax,
    NvU32  *ebx
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (NVCPU_IS_X86_64)
    {
        // Preserve the inputs so the failure message can show both the
        // request and the VBIOS's reply.
        NvU32 eax_in = *eax;
        NvU32 ebx_in = *ebx;

        // UEFI systems have no legacy VBIOS int10h entry point to call.
        if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_UEFI))
        {
            return NV_ERR_NOT_SUPPORTED;
        }

        // Fixed typo in the log message: "vga_satus" -> "vga_status".
        NV_PRINTF(LEVEL_INFO, "unixCallVideoBIOS: 0x%x 0x%x, vga_status = %d\n",
                  *eax, *ebx, NV_PRIMARY_VGA(NV_GET_NV_STATE(pGpu)));

        status = nv_vbios_call(pGpu, eax, ebx);

        // VESA functions signal success by returning 0x004f in AX; treat
        // anything else as failure even if the emulator ran to completion.
        // this was originally changed for nt in changelist 644223
        if (*eax != 0x4f)
        {
            NV_PRINTF(LEVEL_ERROR,
                      "int10h(%04x, %04x) vesa call failed! (%04x, %04x)\n",
                      eax_in, ebx_in, *eax, *ebx);
            status = NV_ERR_GENERIC;
        }
    }

    return status;
}
//
// Save the console display state before the driver takes over the display
// (e.g. on VT switch away from the console).  Issues the internal
// DISPLAY_UNIX_CONSOLE save control and, on VBIOS-managed consoles, records
// the current VESA mode so RmRestoreDisplayState() can re-set it later.
// Console access is disabled for the duration of the save.
//
static void
RmSaveDisplayState
(
    OBJGPU *pGpu
)
{
    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
    nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
    // Only use the VBIOS path on the primary VGA device with I/O space enabled.
    NvBool use_vbios = NV_PRIMARY_VGA(nv) && RmGpuHasIOSpaceEnabled(nv);
    NvU32 eax, ebx;
    NV_STATUS status;
    NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE_PARAMS unixConsoleParams = {0};
    // Nothing to save for vGPU guests (host owns the hardware) or when there
    // is no display engine.
    if (IS_VIRTUAL(pGpu) || pKernelDisplay == NULL)
    {
        return;
    }
    os_disable_console_access();
    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_UEFI))
    {
        NV_PRINTF(LEVEL_INFO, "RM fallback doesn't support saving of efifb console\n");
        goto done;
    }
    unixConsoleParams.bSaveOrRestore = NV_TRUE;
    unixConsoleParams.bUseVbios = use_vbios;
    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice,
                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE,
                        &unixConsoleParams, sizeof(unixConsoleParams)), done);
    if (use_vbios)
    {
        //
        // Attempt to identify the currently set VESA mode; assume
        // vanilla VGA text if the VBIOS call fails.
        //
        // VESA 4f03h = "Return current VBE mode"; mode number comes back in BX.
        eax = 0x4f03;
        ebx = 0;
        if (NV_OK == unixCallVideoBIOS(pGpu, &eax, &ebx))
        {
            // Low 14 bits are the mode number (flag bits masked off).
            nvp->vga.vesaMode = (ebx & 0x3fff);
        }
        else
        {
            // Mode 3 is standard 80x25 VGA text.
            nvp->vga.vesaMode = 3;
        }
    }
done:
    os_enable_console_access();
}
//
// Restore the console display state previously captured by
// RmSaveDisplayState(): replay the saved console state through the internal
// DISPLAY_UNIX_CONSOLE control and, for VBIOS-managed consoles, restore the
// saved VESA mode with an int10h set-mode call.  Console access is disabled
// for the duration of the restore.
//
static void RmRestoreDisplayState
(
    OBJGPU *pGpu
)
{
    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
    nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
    // Only use the VBIOS path on the primary VGA device with I/O space
    // enabled.  (Removed a stray double semicolon here.)
    NvBool use_vbios = NV_PRIMARY_VGA(nv) && RmGpuHasIOSpaceEnabled(nv);
    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
    NV_STATUS status;
    NvU32 eax, ebx;
    NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE_PARAMS unixConsoleParams = {0};
    NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE_PARAMS restoreParams = {0};
    NV_ASSERT_OR_RETURN_VOID(pKernelDisplay != NULL);
    //
    // vGPU:
    //
    // Since vGPU does all real hardware management in the
    // host, there is nothing to do at this point in the
    // guest OS (where IS_VIRTUAL(pGpu) is true).
    //
    if (IS_VIRTUAL(pGpu))
    {
        // we don't have VGA state that's needing to be restored.
        NV_PRINTF(LEVEL_INFO, "skipping RestoreDisplayState on VGPU (0x%x)\n",
                  pGpu->gpuId);
        return;
    }
    os_disable_console_access();
    //
    // Fix up DCB index VBIOS scratch registers.
    // The strategies employed are:
    //
    // SBIOS/VBIOS:
    //   Clear the DCB index, and set the previous DCB index to the original
    //   value.  This allows the VBIOS (during the int10h mode-set) to
    //   determine which display to enable, and to set the head-enabled bit
    //   as needed (see bugs #264873 and #944398).
    //
    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_UEFI))
    {
        NV_PRINTF(LEVEL_INFO, "RM fallback doesn't support efifb console restore\n");
        goto done;
    }
    unixConsoleParams.bUseVbios = use_vbios;
    unixConsoleParams.bSaveOrRestore = NV_FALSE;
    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, pRmApi->Control(pRmApi, nv->rmapi.hClient,
                        nv->rmapi.hSubDevice,
                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE,
                        &unixConsoleParams, sizeof(unixConsoleParams)), done);
    // VESA 4f02h = "Set VBE mode" to the mode captured at save time; only ask
    // the post-restore control to write CRTC state if int10h succeeded.
    eax = 0x4f02;
    ebx = nvp->vga.vesaMode;
    if (NV_OK == unixCallVideoBIOS(pGpu, &eax, &ebx))
    {
        restoreParams.bWriteCr = NV_TRUE;
    }
    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, pRmApi->Control(pRmApi, nv->rmapi.hClient,
                        nv->rmapi.hSubDevice,
                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE,
                        &restoreParams, sizeof(restoreParams)), done);
done:
    // (Removed an empty "if (PDB_PROP_GPU_IS_UEFI) { }" block here that had
    // no effect.)
    os_enable_console_access();
}
//
// Switch the console between hires (driver-owned) and lores (console/VGA)
// modes across all GPUs in the SLI loop.  hires == NV_TRUE saves the display
// state; hires == NV_FALSE restores it (and, on vGPU guests, RPCs the host
// to perform the actual switch to VGA).
//
static void
RmChangeResMode
(
    OBJGPU *pGpu,
    NvBool hires
)
{
    if (hires)
    {
        SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
            RmSaveDisplayState(pGpu);
        SLI_LOOP_END
    }
    else
    {
        SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
            RmRestoreDisplayState(pGpu);
            //
            // vGPU:
            //
            // Since vGPU does all real hardware management in the host, if we
            // are in guest OS (where IS_VIRTUAL(pGpu) is true), do an RPC to
            // the host to trigger switch from HIRES to (LORES)VGA.
            //
            if (IS_VIRTUAL(pGpu))
            {
                NV_STATUS status = NV_OK;
                NV_RM_RPC_SWITCH_TO_VGA(pGpu, status);
            }
        SLI_LOOP_END
    }
}
// OS-interface entry point: capture the current console display state so it
// can be restored later.  Runs RmSaveDisplayState() inside the RM runtime
// with a fresh thread state.  Always returns NV_OK; failures inside the save
// are logged by the callee, not propagated.
NV_STATUS NV_API_CALL
rm_save_low_res_mode
(
    nvidia_stack_t *sp,
    nv_state_t *pNv
)
{
    THREAD_STATE_NODE threadState;
    OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
    RmSaveDisplayState(pGpu);
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}
// NV0080 control handler for VT (virtual terminal) switches.
// SAVE_VT_STATE saves the console state (hires = TRUE), RESTORE_VT_STATE
// restores it (hires = FALSE), and CONSOLE_RESTORED only updates the GC6
// console refcount without touching the display.  Returns
// NV_ERR_INVALID_ARGUMENT for unknown commands; otherwise NV_OK even when
// the GPU lock could not be acquired (failure is only logged).
NV_STATUS
deviceCtrlCmdOsUnixVTSwitch_IMPL
(
    Device *pDevice,
    NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
    NvBool hires;
    NvBool bChangeResMode = NV_TRUE;
    switch (pParams->cmd)
    {
        case NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE:
            hires = NV_TRUE;
            break;
        case NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE:
            hires = NV_FALSE;
            break;
        case NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED:
            bChangeResMode = NV_FALSE;
            break;
        default:
            return NV_ERR_INVALID_ARGUMENT;
    }
    // Second argument is NV_TRUE for any command other than SAVE_VT_STATE
    // (presumably "console is/will be visible" — verify in
    // RmUpdateGc6ConsoleRefCount).  Done before taking any GPU lock.
    RmUpdateGc6ConsoleRefCount(nv,
        pParams->cmd != NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE);
    if (!bChangeResMode)
    {
        return NV_OK;
    }
    if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FB) == NV_OK)
    {
        RmChangeResMode(pGpu, hires);
        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
    }
    else
    {
        // Lock failure is logged but deliberately not reported to the caller.
        NV_PRINTF(LEVEL_INFO,"%s: Failed to acquire GPU lock", __FUNCTION__);
    }
    return NV_OK;
}
NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL
(
Device *pDevice,

View File

@@ -174,6 +174,18 @@ static NvBool osInterruptPending(
}
}
if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) &&
!IS_VIRTUAL(pGpu) && bitVectorTest(&intr0Pending, MC_ENGINE_IDX_TMR))
{
// We have to clear the top level interrupt bit here since otherwise
// the bottom half will attempt to service the interrupt on the CPU
// side before GSP receives the notification and services it
intrClearLeafVector_HAL(pGpu, pIntr, MC_ENGINE_IDX_TMR, &threadState);
bitVectorClr(&intr0Pending, MC_ENGINE_IDX_TMR);
NV_ASSERT_OK(intrTriggerPrivDoorbell_HAL(pGpu, pIntr, NV_DOORBELL_NOTIFY_LEAF_SERVICE_TMR_HANDLE));
}
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{

View File

@@ -0,0 +1,457 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/******************************************************************************
*
* Description:
* This file provides the glue layer between the RM's OS object
* unixCallVideoBIOS() method and the x86 real mode emulator
*
******************************************************************************/
#include <core/core.h>
#include <nv.h>
#include <nv-priv.h>
#include <osapi.h>
#include <os/os.h>
#include <x86emu/x86emu.h>
#define IO_LOG(port, val)
#define NV_ROMLEY_VGA_PHYS_ADDR 0x9d000
#define NV_VGA_PHYS_ADDR 0xa0000
#define NV_VIDEO_ROM_PHYS_ADDR 0xc0000
#define NV_EXPANSION_VIDEO_ROM_PHYS_ADDR 0xc8000
#define NV_SYSTEM_ROM_PHYS_ADDR 0xf0000
#define X86EMU_LOWMEM 0x600 // Interrupt vectors, BIOS data size
#define X86EMU_STACK_ADDR 0x10000
#define X86EMU_STACK_SIZE 0x1000
#define X86EMU_BUF_ADDR 0x20000 // EDID buffer for VESA int10 0x4f15
#define X86EMU_BUF_SIZE 128
#define OP_HLT 0xf4 // HLT instruction - causes the emulator to return
// This is where IRET from int10 will take us
#define X86EMU_IRET_SEG (X86EMU_STACK_ADDR >> 4)
#define X86EMU_IRET_OFF 0
#define X86EMU_DFLT_FLAGS (F_IF)
static NvBool x86emuReady;
static void *x86emuStack;
static struct x86emu_mem_seg {
NvU32 start;
NvU32 end;
void *vaddr;
} x86emu_mem_map[] = {
{NV_ROMLEY_VGA_PHYS_ADDR, NV_VGA_PHYS_ADDR - 1},
{NV_VGA_PHYS_ADDR, NV_VIDEO_ROM_PHYS_ADDR - 1}, // vga
{NV_VIDEO_ROM_PHYS_ADDR, NV_EXPANSION_VIDEO_ROM_PHYS_ADDR -1}, // vbios, sbios
{NV_EXPANSION_VIDEO_ROM_PHYS_ADDR, NV_SYSTEM_ROM_PHYS_ADDR -1}, // vbios, sbios
{NV_SYSTEM_ROM_PHYS_ADDR, 0xFFFFF}, // Motherboard BIOS
{X86EMU_STACK_ADDR, X86EMU_STACK_ADDR + X86EMU_STACK_SIZE - 1}, // emulator stack
{0, X86EMU_LOWMEM - 1}, // Interrupt vectors, BIOS data
{X86EMU_BUF_ADDR, X86EMU_BUF_ADDR + X86EMU_BUF_SIZE - 1},
};
enum {
X86EMU_SEG_ROMLEY_VGA = 0,
X86EMU_SEG_VGA,
X86EMU_SEG_ROM,
X86EMU_SEG_ROM_EXPANSION,
X86EMU_SEG_ROM_MOTHERBOARD_BIOS,
X86EMU_SEG_EMULATOR_STACK,
X86EMU_SEG_BIOS_DATA,
X86EMU_SEG_EDID_BUFFER,
X86EMU_NUM_SEGS
};
ct_assert(X86EMU_NUM_SEGS == NV_ARRAY_ELEMENTS(x86emu_mem_map));
static NvU8
x_inb(NvU16 port)
{
NvU8 val;
val = os_io_read_byte(port);
IO_LOG(port, val);
return val;
}
static void
x_outb(NvU16 port, NvU8 val)
{
IO_LOG(port, val);
os_io_write_byte(port, val);
}
static NvU16
x_inw(NvU16 port)
{
NvU16 val;
val = os_io_read_word(port);
IO_LOG(port, val);
return val;
}
static void
x_outw(NvU16 port, NvU16 val)
{
IO_LOG(port, val);
os_io_write_word(port, val);
}
static u32
x_inl(X86EMU_pioAddr port)
{
NvU32 val;
val = os_io_read_dword(port);
IO_LOG(port, val);
return val;
}
static void
x_outl(X86EMU_pioAddr port, u32 val)
{
IO_LOG(port, val);
os_io_write_dword(port, val);
}
// Translate an emulated physical address to a kernel virtual address by
// searching the mapped segment table.  The whole access [addr, addr+sz)
// must fall inside a single mapped segment; otherwise the emulator is
// halted via X86EMU_halt_sys() and NULL is returned (callers treat NULL as
// "read 0 / drop write").
static void *
Mem_addr_xlat(NvU32 addr, NvU32 sz)
{
    int i;
    struct x86emu_mem_seg *pseg;
    for (i = 0; i < X86EMU_NUM_SEGS; ++i)
    {
        pseg = x86emu_mem_map + i;
        // Segment must be mapped (vaddr set) and fully contain the access.
        if (pseg->vaddr != 0 && addr >= pseg->start && addr + sz - 1 <= pseg->end)
            return (void *)((char *)pseg->vaddr + addr - pseg->start);
    }
    X86EMU_halt_sys();
    return 0;
}
static NvU8
Mem_rb(u32 addr)
{
NvU8 *va = Mem_addr_xlat(addr, 1);
return va != 0 ? *va : 0;
}
static NvU16
Mem_rw(u32 addr)
{
NvU16 *va = Mem_addr_xlat(addr, 2);
return va != 0 ? *va : 0;
}
static u32
Mem_rl(u32 addr)
{
NvU32 *va = Mem_addr_xlat(addr, 4);
return va != 0 ? *va : 0;
}
static void
Mem_wb(u32 addr, NvU8 val)
{
NvU8 *va = Mem_addr_xlat(addr, 1);
if (va != 0)
*va = val;
}
static void
Mem_ww(u32 addr, NvU16 val)
{
NvU16 *va = Mem_addr_xlat(addr, 2);
if (va != 0)
*va = val;
}
static void
Mem_wl(u32 addr, u32 val)
{
NvU32 *va = Mem_addr_xlat(addr, 4);
if (va != 0)
*va = val;
}
static NvU16
get_int_seg(int i)
{
return Mem_rw(i * 4 + 2);
}
static NvU16
get_int_off(int i)
{
return Mem_rw(i * 4);
}
static void
pushw(NvU16 i)
{
M.x86.R_ESP -= 2;
Mem_ww((M.x86.R_SS << 4) + M.x86.R_ESP, i);
}
static void
x86emu_do_int(int num)
{
#if 0
Int10Current->num = num;
if (!int_handler(Int10Current)) {
X86EMU_halt_sys();
}
#else
if ((num == 0x15) && (M.x86.R_AX == 0x5f80))
{
//
// Handle the MXM_SYS_INFO_CALLBACK_NUM int 15h SBIOS
// callback: disclaim support by returning a value
// other than 005fh (MXM_SYS_INFO_CALLBACK_FUNC_SUPPORTED)
// to the caller.
//
M.x86.R_AX = 0;
}
else
{
NV_PRINTF(LEVEL_ERROR, "x86emu: int $%d (eax = %08x)\n",
num, M.x86.R_EAX);
DBG_BREAKPOINT();
X86EMU_halt_sys();
}
#endif
}
// One-time setup of the x86 real-mode emulator for the primary VGA GPU:
// allocates the emulator stack plus a 256-entry interrupt dispatch table in
// a single non-paged allocation, maps the ROM/VGA/BIOS-data physical ranges
// into kernel space, and registers the memory/PIO/interrupt callbacks with
// the emulator.  Returns NV_OK immediately on non-primary GPUs.
// NOTE(review): on a failed os_map_kernel_space(), earlier mappings and
// x86emuStack are not unwound here — presumably RmFreeX86EmuState() cleanup
// is expected; verify the error path at the call site.
NV_STATUS
RmInitX86EmuState(OBJGPU *pGpu) {
    int i;
    struct x86emu_mem_seg *pseg;
    X86EMU_intrFuncs *intFuncs;
    // Port-I/O callbacks: forward emulated in/out instructions to real I/O.
    X86EMU_pioFuncs pioFuncs = {
        (&x_inb),
        (&x_inw),
        (&x_inl),
        (&x_outb),
        (&x_outw),
        (&x_outl)
    };
    // Memory callbacks: translate emulated physical addresses via the
    // segment map (Mem_addr_xlat).
    X86EMU_memFuncs memFuncs = {
        (&Mem_rb),
        (&Mem_rw),
        (&Mem_rl),
        (&Mem_wb),
        (&Mem_ww),
        (&Mem_wl)
    };
    if (!NV_PRIMARY_VGA(NV_GET_NV_STATE(pGpu))) // not the primary GPU
        return NV_OK;
    NV_ASSERT(!x86emuReady);
    // Single allocation: emulator stack followed by the interrupt table.
    x86emuStack = portMemAllocNonPaged(
        X86EMU_STACK_SIZE + sizeof(X86EMU_intrFuncs*) * 256);
    if (x86emuStack == NULL)
        return NV_ERR_NO_MEMORY;
    // Interrupt dispatch table
    intFuncs = (void *)((NvU8 *)x86emuStack + X86EMU_STACK_SIZE);
    // Fill virtual addresses in the memory map
    for (i = 0; i < X86EMU_NUM_SEGS; ++i)
    {
        pseg = x86emu_mem_map + i;
        switch (i)
        {
            case X86EMU_SEG_ROM:
            case X86EMU_SEG_ROM_EXPANSION:
            case X86EMU_SEG_ROM_MOTHERBOARD_BIOS:
                // Platform hook may relocate the ROM window bounds.
                nv_get_updated_emu_seg(&pseg->start, &pseg->end);
                /* fallthrough */
            case X86EMU_SEG_VGA:
            case X86EMU_SEG_BIOS_DATA:
                pseg->vaddr = os_map_kernel_space(pseg->start,
                                                  pseg->end - pseg->start + 1,
                                                  NV_MEMORY_CACHED);
                if (pseg->vaddr == 0)
                {
                    NV_PRINTF(LEVEL_ERROR, "x86emu can't map phys addr 0x%05x\n",
                              pseg->start);
                    return NV_ERR_GENERIC;
                }
                break;
            case X86EMU_SEG_EMULATOR_STACK:
                // Backed by the allocation above, not a physical mapping.
                pseg->vaddr = x86emuStack;
                break;
            default:
                // ROMLEY_VGA / EDID buffer are mapped lazily elsewhere.
                pseg->vaddr = 0;
                break;
        }
    }
    X86EMU_setupMemFuncs(&memFuncs);
    M.mem_base = 0;
    M.mem_size = 1024*1024;   // 1MB real-mode address space
    X86EMU_setupPioFuncs(&pioFuncs);
    // All software interrupts funnel through x86emu_do_int().
    for (i=0;i<256;i++)
        intFuncs[i] = x86emu_do_int;
    X86EMU_setupIntrFuncs(intFuncs);
    x86emuReady = NV_TRUE;
    return NV_OK;
}
// Tear down emulator state created by RmInitX86EmuState(): free the combined
// stack/interrupt-table allocation and unmap every ROM/VGA/BIOS-data segment
// that was mapped (including the ROMLEY_VGA range mapped lazily in
// nv_vbios_call()).  No-op unless the emulator was initialized on this
// (primary VGA) GPU.
void
RmFreeX86EmuState(OBJGPU *pGpu)
{
    int i;
    struct x86emu_mem_seg *pseg;
    if (! x86emuReady || !NV_PRIMARY_VGA(NV_GET_NV_STATE(pGpu))) // not the primary GPU
        return;
    portMemFree(x86emuStack);
    x86emuStack = 0;
    for (i = 0; i < X86EMU_NUM_SEGS; ++i)
    {
        pseg = x86emu_mem_map + i;
        switch (i)
        {
            // Only these segments were mapped with os_map_kernel_space();
            // the emulator stack was freed above and the EDID buffer is
            // never mapped here.
            case X86EMU_SEG_ROMLEY_VGA:
            case X86EMU_SEG_VGA:
            case X86EMU_SEG_ROM:
            case X86EMU_SEG_ROM_EXPANSION:
            case X86EMU_SEG_ROM_MOTHERBOARD_BIOS:
            case X86EMU_SEG_BIOS_DATA:
                if (pseg->vaddr != 0)
                    os_unmap_kernel_space(pseg->vaddr, pseg->end - pseg->start + 1);
        }
        pseg->vaddr = 0;
    }
    x86emuReady = NV_FALSE;
}
//
// Execute a real-mode int10h VBIOS call through the x86 emulator.
// *eax / *ebx carry the VESA function arguments in and the register results
// out.  Returns NV_ERR_GENERIC when this is not the primary VGA device, when
// the INT10 vector does not point into a mapped ROM region, or when a lazy
// mapping fails; NV_OK once the emulator has run to completion (the caller
// must still check AX for the VESA status).
//
NV_STATUS
nv_vbios_call(
    OBJGPU *pGpu,
    NvU32 *eax,
    NvU32 *ebx)
{
    NvU16 seg;
    NvU16 off;
    struct x86emu_mem_seg *pseg;
    if (!NV_PRIMARY_VGA(NV_GET_NV_STATE(pGpu)))
        return NV_ERR_GENERIC;
    // Fetch the int10h handler address from the real-mode IVT.
    seg = get_int_seg(0x10);
    off = get_int_off(0x10);
    pseg = &x86emu_mem_map[X86EMU_SEG_VGA];
    if (seg < (pseg->start >> 4))
    {
        // Vector lies below the VGA ROM window: try the Romley-specific
        // shadow range, mapping it lazily on first use.
        pseg = &x86emu_mem_map[X86EMU_SEG_ROMLEY_VGA];
        if (seg < (pseg->start >> 4))
        {
            NV_PRINTF(LEVEL_ERROR,
                      "cannot call the VBIOS. INT10 vector not in ROM: %04x:%04x\n",
                      seg, off);
            return NV_ERR_GENERIC;
        }
        else if (pseg->vaddr == NULL)
        {
            pseg->vaddr = os_map_kernel_space(pseg->start,
                                              (pseg->end - pseg->start + 1),
                                              NV_MEMORY_CACHED);
            if (pseg->vaddr == NULL)
            {
                NV_PRINTF(LEVEL_ERROR, "x86emu can't map phys addr 0x%05x\n",
                          pseg->start);
                return NV_ERR_GENERIC;
            }
        }
    }
    // Reset the CPU
    portMemSet(&M, 0, sizeof(M));
    M.x86.R_SS = X86EMU_STACK_ADDR >> 4;
    M.x86.R_ESP = X86EMU_STACK_SIZE;
    M.x86.R_CS = seg;
    M.x86.R_EIP = off;
    M.x86.R_EFLG = X86EMU_DFLT_FLAGS;
    M.x86.R_EAX = *eax;
    M.x86.R_EBX = *ebx;
    M.x86.R_ECX = 0;
    M.x86.R_EDX = 0;
    M.x86.R_ES = X86EMU_BUF_ADDR >> 4;
    X86EMU_trace_on();
    // Map ES:EDI to buffer. Used by vesa intr 4f15 - read EDID
    // NOTE(review): vaddr is cleared, not mapped, here — accesses to the
    // EDID buffer would halt the emulator; verify the 4f15 path maps it.
    pseg = &x86emu_mem_map[X86EMU_SEG_EDID_BUFFER];
    pseg->vaddr = NULL;
    // Prepare the code for IRET to jump to
    // (an HLT at the IRET target makes the emulator return after the call)
    Mem_wb((X86EMU_IRET_SEG << 4) + X86EMU_IRET_OFF, OP_HLT);
    // Fake an interrupt frame: flags, return CS, return IP.
    pushw(X86EMU_DFLT_FLAGS);
    pushw(X86EMU_IRET_SEG);
    pushw(X86EMU_IRET_OFF);
    // Run the emulator
    X86EMU_exec();
    *eax = M.x86.R_EAX;
    *ebx = M.x86.R_EBX;
    return NV_OK;
}

View File

@@ -0,0 +1,547 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: This file contains the code to handle debugging of the
* emulator.
*
****************************************************************************/
#include "x86emu/x86emui.h"
#ifndef NO_SYS_HEADERS
#include <stdarg.h>
#include <stdlib.h>
#endif
/*----------------------------- Implementation ----------------------------*/
#ifdef X86EMU_DEBUG
static void print_encoded_bytes (u16 s, u16 o);
static void print_decoded_instruction (void);
static int parse_line (char *s, int *ps, int *n);
/* should look something like debug's output. */
void X86EMU_trace_regs (void)
{
if (DEBUG_TRACE()) {
x86emu_dump_regs();
}
if (DEBUG_DECODE() && ! DEBUG_DECODE_NOPRINT()) {
NV_PRINTF(LEVEL_INFO, "%04x:%04x ", M.x86.saved_cs,
M.x86.saved_ip);
print_encoded_bytes( M.x86.saved_cs, M.x86.saved_ip);
print_decoded_instruction();
}
}
void X86EMU_trace_xregs (void)
{
if (DEBUG_TRACE()) {
x86emu_dump_xregs();
}
}
void x86emu_just_disassemble (void)
{
/*
* This routine called if the flag DEBUG_DISASSEMBLE is set kind
* of a hack!
*/
NV_PRINTF(LEVEL_INFO, "%04x:%04x ", M.x86.saved_cs,
M.x86.saved_ip);
print_encoded_bytes( M.x86.saved_cs, M.x86.saved_ip);
print_decoded_instruction();
}
static void disassemble_forward (u16 seg, u16 off, int n)
{
X86EMU_sysEnv tregs;
int i;
u8 op1;
/*
* hack, hack, hack. What we do is use the exact machinery set up
* for execution, except that now there is an additional state
* flag associated with the "execution", and we are using a copy
* of the register struct. All the major opcodes, once fully
* decoded, have the following two steps: TRACE_REGS(r,m);
* SINGLE_STEP(r,m); which disappear if DEBUG is not defined to
* the preprocessor. The TRACE_REGS macro expands to:
*
* if (debug&DEBUG_DISASSEMBLE)
* {just_disassemble(); goto EndOfInstruction;}
* if (debug&DEBUG_TRACE) trace_regs(r,m);
*
* ...... and at the last line of the routine.
*
* EndOfInstruction: end_instr();
*
* Up to the point where TRACE_REG is expanded, NO modifications
* are done to any register EXCEPT the IP register, for fetch and
* decoding purposes.
*
* This was done for an entirely different reason, but makes a
* nice way to get the system to help debug codes.
*/
tregs = M;
tregs.x86.R_IP = off;
tregs.x86.R_CS = seg;
/* reset the decoding buffers */
tregs.x86.enc_str_pos = 0;
tregs.x86.enc_pos = 0;
/* turn on the "disassemble only, no execute" flag */
tregs.x86.debug |= DEBUG_DISASSEMBLE_F;
/* DUMP NEXT n instructions to screen in straight_line fashion */
/*
* This looks like the regular instruction fetch stream, except
* that when this occurs, each fetched opcode, upon seeing the
* DEBUG_DISASSEMBLE flag set, exits immediately after decoding
* the instruction. XXX --- CHECK THAT MEM IS NOT AFFECTED!!!
* Note the use of a copy of the register structure...
*/
for (i=0; i<n; i++) {
op1 = (*sys_rdb)(((u32)M.x86.R_CS<<4) + (M.x86.R_IP++));
(x86emu_optab[op1])(op1);
}
/* end major hack mode. */
}
void x86emu_check_ip_access (void)
{
/* NULL as of now */
}
void x86emu_check_sp_access (void)
{
}
void x86emu_check_mem_access (u32 dummy)
{
/* check bounds, etc */
}
void x86emu_check_data_access (uint dummy1, uint dummy2)
{
/* check bounds, etc */
}
void x86emu_inc_decoded_inst_len (int x)
{
M.x86.enc_pos += x;
}
// Append text to the per-instruction decode buffer used by the disassembly
// trace.  Fix: the old code advanced enc_str_pos by the full source length
// even when nvDbgSnprintf truncated, so the cursor could run past
// decoded_buf and make (sizeof(buf) - pos) underflow to a huge unsigned
// size on the next call.  Clamp both the guard and the advance.
void x86emu_decode_printf (const char *x)
{
    NvU32 max_size;
    NvU32 len;

    // Buffer already full: drop further text rather than underflowing below.
    if (M.x86.enc_str_pos >= sizeof(M.x86.decoded_buf))
        return;

    max_size = sizeof(M.x86.decoded_buf) - M.x86.enc_str_pos;
    nvDbgSnprintf(M.x86.decoded_buf + M.x86.enc_str_pos, max_size, "%s", x);

    // Advance only by what actually fit (snprintf always NUL-terminates).
    len = portStringLength(x);
    if (len >= max_size)
        len = max_size - 1;
    M.x86.enc_str_pos += len;
}
void x86emu_decode_printf2 (const char *x, int y)
{
char temp[100];
nvDbgSnprintf(temp, sizeof(temp), x, y);
x86emu_decode_printf(temp);
}
void x86emu_end_instr (void)
{
M.x86.enc_str_pos = 0;
M.x86.enc_pos = 0;
}
static void print_encoded_bytes (u16 s, u16 o)
{
int i;
char buf1[64];
for (i=0; i< M.x86.enc_pos; i++) {
nvDbgSnprintf(buf1 + 2 * i, sizeof(buf1) - (2 * i),
"%02x", fetch_data_byte_abs(s, o + i));
}
NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "%-20s", buf1);
}
static void print_decoded_instruction (void)
{
NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "%s", M.x86.decoded_buf);
}
// Print the real-mode interrupt vector entry for vector 'iv' as read from
// the IVT at linear address iv*4.  Valid vector numbers are 0..255; the old
// bound ('iv > 256') let iv == 256 read two words past the 1KB vector table.
// NOTE(review): seg is read from iv*4 and off from iv*4+2, the opposite of
// get_int_seg()/get_int_off() in the main emulator file — display-only, but
// verify against upstream x86emu.
void x86emu_print_int_vect (u16 iv)
{
    u16 seg,off;

    if (iv >= 256) return;   // was 'iv > 256' (off-by-one past the IVT)
    seg = fetch_data_word_abs(0,iv*4);
    off = fetch_data_word_abs(0,iv*4+2);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "%04x:%04x ", seg, off);
}
void X86EMU_dump_memory (u16 seg, u16 off, u32 amt)
{
u32 start = off & 0xfffffff0;
u32 end = (off+16) & 0xfffffff0;
u32 i;
while (end <= off + amt) {
NV_PRINTF(LEVEL_INFO, "%04x:%04x ", seg, start);
for (i=start; i< off; i++)
NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " ");
for ( ; i< end; i++)
NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "%02x ", fetch_data_byte_abs(seg, i));
NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n");
start = end;
end = start + 16;
}
}
void x86emu_single_step (void)
{
int ps[10];
int ntok;
int cmd;
int done;
int segment;
int offset;
static int breakpoint;
static int noDecode = 1;
if (DEBUG_BREAK()) {
if (M.x86.saved_ip != breakpoint) {
return;
} else {
M.x86.debug &= ~DEBUG_DECODE_NOPRINT_F;
M.x86.debug |= DEBUG_TRACE_F;
M.x86.debug &= ~DEBUG_BREAK_F;
print_decoded_instruction ();
X86EMU_trace_regs();
}
}
done=0;
offset = M.x86.saved_ip;
while (!done) {
NV_PRINTF(LEVEL_INFO, "-");
/*
* Set _X86EMU_env.x86.debug_cmd from a kernel debugger to a command to
* control the emulator debugger
*/
if (M.x86.debug_cmd[0] == '\0')
{
DBG_BREAKPOINT();
}
cmd = parse_line(M.x86.debug_cmd, ps, &ntok);
switch(cmd) {
case 'u':
disassemble_forward(M.x86.saved_cs,(u16)offset,10);
break;
case 'd':
if (ntok == 2) {
segment = M.x86.saved_cs;
offset = ps[1];
X86EMU_dump_memory(segment,(u16)offset,16);
offset += 16;
} else if (ntok == 3) {
segment = ps[1];
offset = ps[2];
X86EMU_dump_memory(segment,(u16)offset,16);
offset += 16;
} else {
segment = M.x86.saved_cs;
X86EMU_dump_memory(segment,(u16)offset,16);
offset += 16;
}
break;
case 'c':
M.x86.debug ^= DEBUG_TRACECALL_F;
break;
case 's':
M.x86.debug ^= DEBUG_SVC_F | DEBUG_SYS_F | DEBUG_SYSINT_F;
break;
case 'r':
X86EMU_trace_regs();
break;
case 'x':
X86EMU_trace_xregs();
break;
case 'g':
if (ntok == 2) {
breakpoint = ps[1];
if (noDecode) {
M.x86.debug |= DEBUG_DECODE_NOPRINT_F;
} else {
M.x86.debug &= ~DEBUG_DECODE_NOPRINT_F;
}
M.x86.debug &= ~DEBUG_TRACE_F;
M.x86.debug |= DEBUG_BREAK_F;
done = 1;
}
break;
case 'q':
M.x86.debug |= DEBUG_EXIT;
return;
case 'P':
noDecode = (noDecode)?0:1;
NV_PRINTF(LEVEL_INFO, "Toggled decoding to %s\n",
(noDecode) ? "FALSE" : "TRUE");
break;
case 'a':
X86EMU_trace_off();
/* fall thru */
case 't':
case 0:
done = 1;
break;
}
portMemSet(&M.x86.debug_cmd, 0, sizeof(M.x86.debug_cmd));
}
}
int X86EMU_trace_on(void)
{
return M.x86.debug |= DEBUG_STEP_F | DEBUG_DECODE_F | DEBUG_TRACE_F;
}
int X86EMU_trace_off(void)
{
return M.x86.debug &= ~(DEBUG_STEP_F | DEBUG_DECODE_F | DEBUG_TRACE_F);
}
// Parse a hexadecimal number from 's' (with or without a leading "0x") into
// *pval, stopping at the first non-hex character.
// Fix: letter digits were converted as (c - 'a') / (c - 'A'), mapping
// 'a'..'f' to 0..5 instead of 10..15 — e.g. "0x1f" parsed as 0x15.
// Added the missing "+ 10" in both letter branches.
static void parse_hex (char *s, int *pval)
{
    unsigned int val = 0;
    unsigned char c;

    // Skip an optional "0x" prefix.
    if (s[0] == '0' && s[1] == 'x')
        s += 2;
    while ((c = *(s++)))
    {
        if (c >= '0' && c <= '9')
        {
            val <<= 4;
            val |= c - '0';
            continue;
        }
        else if (c >= 'a' && c <= 'f')
        {
            val <<= 4;
            val |= c - 'a' + 10;   // fix: 'a' has value 10, not 0
            continue;
        }
        else if (c >= 'A' && c <= 'F')
        {
            val <<= 4;
            val |= c - 'A' + 10;   // fix: 'A' has value 10, not 0
            continue;
        }
        break;   // first non-hex character ends the number
    }
    *pval = val;
}
// Parse one debugger command line.  Returns the command character (or 0 for
// an empty line) and fills ps[] with the command character followed by any
// hex arguments; *n receives the number of entries written.  Tokens are
// separated by spaces/tabs; parsing stops at newline or NUL.
static int parse_line (char *s, int *ps, int *n)
{
    int cmd;
    *n = 0;
    // Skip leading whitespace; first non-blank char is the command.
    while(*s == ' ' || *s == '\t') s++;
    ps[*n] = *s;
    switch (*s) {
      case '\n':
        *n += 1;
        return 0;   // empty line: no command
      default:
        cmd = *s;
        *n += 1;
    }
    // Remaining tokens are hex arguments appended to ps[].
    while (1)
    {
        // Skip over the current token.
        while (*s != ' ' && *s != '\t' && *s != '\n' && *s != '\0')
            s++;
        if (*s == '\n' || *s == '\0')
            return cmd;
        // Skip the separator, then parse the next argument.
        while(*s == ' ' || *s == '\t')
            s++;
        parse_hex(s, &ps[*n]);
        *n += 1;
    }
}
/*
 * Print the 16-bit general, segment and instruction-pointer registers
 * followed by the eight status flags in the classic DEBUG-style two-letter
 * notation (OV/NV, DN/UP, EI/DI, NG/PL, ZR/NZ, AC/NA, PE/PO, CY/NC).
 */
void x86emu_dump_regs (void)
{
    /* Flag bits in display order, with the strings used when set/clear. */
    static const struct {
        int flag;
        const char *set_name;
        const char *clr_name;
    } flag_desc[] = {
        { F_OF, "OV ", "NV " },
        { F_DF, "DN ", "UP " },
        { F_IF, "EI ", "DI " },
        { F_SF, "NG ", "PL " },
        { F_ZF, "ZR ", "NZ " },
        { F_AF, "AC ", "NA " },
        { F_PF, "PE ", "PO " },
        { F_CF, "CY ", "NC " },
    };
    unsigned i;
    NV_PRINTF(LEVEL_INFO, "\tAX=%04x ", M.x86.R_AX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "BX=%04x ", M.x86.R_BX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "CX=%04x ", M.x86.R_CX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "DX=%04x ", M.x86.R_DX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "SP=%04x ", M.x86.R_SP);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "BP=%04x ", M.x86.R_BP);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "SI=%04x ", M.x86.R_SI);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "DI=%04x\n", M.x86.R_DI);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\tDS=%04x ", M.x86.R_DS);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "ES=%04x ", M.x86.R_ES);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "SS=%04x ", M.x86.R_SS);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "CS=%04x ", M.x86.R_CS);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "IP=%04x ", M.x86.R_IP);
    for (i = 0; i < sizeof(flag_desc) / sizeof(flag_desc[0]); i++) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "%s",
                     ACCESS_FLAG(flag_desc[i].flag) ? flag_desc[i].set_name
                                                    : flag_desc[i].clr_name);
    }
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n");
}
/*
 * Print the 32-bit extended registers, segment registers and EIP, followed
 * by the status flags in the same two-letter notation as x86emu_dump_regs.
 */
void x86emu_dump_xregs (void)
{
    /* Flag bits in display order, with the strings used when set/clear. */
    static const struct {
        int flag;
        const char *set_name;
        const char *clr_name;
    } flag_desc[] = {
        { F_OF, "OV ", "NV " },
        { F_DF, "DN ", "UP " },
        { F_IF, "EI ", "DI " },
        { F_SF, "NG ", "PL " },
        { F_ZF, "ZR ", "NZ " },
        { F_AF, "AC ", "NA " },
        { F_PF, "PE ", "PO " },
        { F_CF, "CY ", "NC " },
    };
    unsigned i;
    NV_PRINTF(LEVEL_INFO, "\tEAX=%08x ", M.x86.R_EAX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "EBX=%08x ", M.x86.R_EBX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "ECX=%08x ", M.x86.R_ECX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "EDX=%08x \n", M.x86.R_EDX);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\tESP=%08x ", M.x86.R_ESP);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "EBP=%08x ", M.x86.R_EBP);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "ESI=%08x ", M.x86.R_ESI);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "EDI=%08x\n", M.x86.R_EDI);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\tDS=%04x ", M.x86.R_DS);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "ES=%04x ", M.x86.R_ES);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "SS=%04x ", M.x86.R_SS);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "CS=%04x ", M.x86.R_CS);
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "EIP=%08x\n\t", M.x86.R_EIP);
    for (i = 0; i < sizeof(flag_desc) / sizeof(flag_desc[0]); i++) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "%s",
                     ACCESS_FLAG(flag_desc[i].flag) ? flag_desc[i].set_name
                                                    : flag_desc[i].clr_name);
    }
    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n");
}
#else
/*
 * X86EMU_DEBUG disabled: provide empty stubs so that callers can be
 * compiled without sprinkling #ifdefs at every call site.
 */
void x86emu_dump_regs (void)
{
}
void x86emu_dump_xregs (void)
{
}
/* Tracing is unavailable in non-debug builds; report a zero debug mask. */
int X86EMU_trace_on(void)
{
    return 0;
}
int X86EMU_trace_off(void)
{
    return 0;
}
#endif /* X86EMU_DEBUG */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,965 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: This file contains the code to implement the decoding and
* emulation of the FPU instructions.
*
****************************************************************************/
#include "x86emu/x86emui.h"
/*----------------------------- Implementation ----------------------------*/
/* opcode=0xd8 */
/*
 * ESC D8 coprocessor escape.  No FPU is executed here: the opcode is
 * logged (debug builds) and otherwise treated as a no-op.
 *
 * NOTE(review): unlike the D9-DF handlers this one never fetches the
 * ModR/M byte, so the byte following D8 will be decoded as the next
 * opcode — confirm this is intentional.
 */
void x86emuOp_esc_coprocess_d8(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("ESC D8\n");
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}
#ifdef X86EMU_DEBUG
/*
 * Disassembly mnemonics for the D9 memory forms: one 8-entry row per
 * mod value 0-2 (the rows are identical), selected by rh.
 */
static char *x86emu_fpu_op_d9_tab[] = {
    "FLD\tDWORD PTR ", "ESC_D9\t", "FST\tDWORD PTR ", "FSTP\tDWORD PTR ",
    "FLDENV\t", "FLDCW\t", "FSTENV\t", "FSTCW\t",
    "FLD\tDWORD PTR ", "ESC_D9\t", "FST\tDWORD PTR ", "FSTP\tDWORD PTR ",
    "FLDENV\t", "FLDCW\t", "FSTENV\t", "FSTCW\t",
    "FLD\tDWORD PTR ", "ESC_D9\t", "FST\tDWORD PTR ", "FSTP\tDWORD PTR ",
    "FLDENV\t", "FLDCW\t", "FSTENV\t", "FSTCW\t",
};
/*
 * Disassembly mnemonics for the D9 register forms (mod == 3), indexed
 * by (rh << 3) + rl — 64 entries covering the register exchange, stack
 * constant and transcendental sub-opcodes.
 */
static char *x86emu_fpu_op_d9_tab1[] = {
    "FLD\t", "FLD\t", "FLD\t", "FLD\t",
    "FLD\t", "FLD\t", "FLD\t", "FLD\t",
    "FXCH\t", "FXCH\t", "FXCH\t", "FXCH\t",
    "FXCH\t", "FXCH\t", "FXCH\t", "FXCH\t",
    "FNOP", "ESC_D9", "ESC_D9", "ESC_D9",
    "ESC_D9", "ESC_D9", "ESC_D9", "ESC_D9",
    "FSTP\t", "FSTP\t", "FSTP\t", "FSTP\t",
    "FSTP\t", "FSTP\t", "FSTP\t", "FSTP\t",
    "FCHS", "FABS", "ESC_D9", "ESC_D9",
    "FTST", "FXAM", "ESC_D9", "ESC_D9",
    "FLD1", "FLDL2T", "FLDL2E", "FLDPI",
    "FLDLG2", "FLDLN2", "FLDZ", "ESC_D9",
    "F2XM1", "FYL2X", "FPTAN", "FPATAN",
    "FXTRACT", "ESC_D9", "FDECSTP", "FINCSTP",
    "FPREM", "FYL2XP1", "FSQRT", "ESC_D9",
    "FRNDINT", "FSCALE", "ESC_D9", "ESC_D9",
};
#endif /* X86EMU_DEBUG */
/* opcode=0xd9 */
/*
 * ESC D9 coprocessor group: FLD/FST/FSTP DWORD and FLDENV/FLDCW/FSTENV/
 * FSTCW memory forms, plus the register, constant-load and transcendental
 * forms (FCHS, FABS, FLD1, F2XM1, FSQRT, ...).  Without X86EMU_FPU_PRESENT
 * the instruction is decoded (and optionally disassembled) but ignored.
 *
 * BUGFIXES relative to the inherited x86emu code (all inside the
 * X86EMU_FPU_PRESENT execute path, which is compiled out by default):
 *  - the rh == 2 (FNOP) inner switch was missing a break and fell
 *    through into the FSTP case;
 *  - the memory-form "default:" branch was nested inside the exhaustive
 *    register-form switch (rh), making it unreachable; it now belongs to
 *    switch (mod) like in every other D8-DF handler.
 */
void x86emuOp_esc_coprocess_d9(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset = 0;        /* effective address for memory forms */
    u8 stkelem = 0;             /* ST(i) index for register forms */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef X86EMU_DEBUG
    if (mod != 3) {
        DECODE_PRINTINSTR32(x86emu_fpu_op_d9_tab, mod, rh, rl);
    } else {
        DECODE_PRINTF(x86emu_fpu_op_d9_tab1[(rh << 3) + rl]);
    }
#endif
    switch (mod) {
    case 0:
        destoffset = decode_rm00_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 1:
        destoffset = decode_rm01_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 2:
        destoffset = decode_rm10_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 3: /* register to register */
        stkelem = (u8)rl;
        if (rh < 4) {
            DECODE_PRINTF2("ST(%d)\n", stkelem);
        } else {
            DECODE_PRINTF("\n");
        }
        break;
    }
#ifdef X86EMU_FPU_PRESENT
    /* execute */
    switch (mod) {
    case 3:
        switch (rh) {
        case 0:
            x86emu_fpu_R_fld(X86EMU_FPU_STKTOP, stkelem);
            break;
        case 1:
            x86emu_fpu_R_fxch(X86EMU_FPU_STKTOP, stkelem);
            break;
        case 2:
            switch (rl) {
            case 0:
                x86emu_fpu_R_nop();
                break;
            default:
                x86emu_fpu_illegal();
                break;
            }
            break;  /* BUGFIX: previously fell through into FSTP */
        case 3:
            x86emu_fpu_R_fstp(X86EMU_FPU_STKTOP, stkelem);
            break;
        case 4:
            switch (rl) {
            case 0:
                x86emu_fpu_R_fchs(X86EMU_FPU_STKTOP);
                break;
            case 1:
                x86emu_fpu_R_fabs(X86EMU_FPU_STKTOP);
                break;
            case 4:
                x86emu_fpu_R_ftst(X86EMU_FPU_STKTOP);
                break;
            case 5:
                x86emu_fpu_R_fxam(X86EMU_FPU_STKTOP);
                break;
            default:
                /* 2,3,6,7 */
                x86emu_fpu_illegal();
                break;
            }
            break;
        case 5:
            switch (rl) {
            case 0:
                x86emu_fpu_R_fld1(X86EMU_FPU_STKTOP);
                break;
            case 1:
                x86emu_fpu_R_fldl2t(X86EMU_FPU_STKTOP);
                break;
            case 2:
                x86emu_fpu_R_fldl2e(X86EMU_FPU_STKTOP);
                break;
            case 3:
                x86emu_fpu_R_fldpi(X86EMU_FPU_STKTOP);
                break;
            case 4:
                x86emu_fpu_R_fldlg2(X86EMU_FPU_STKTOP);
                break;
            case 5:
                x86emu_fpu_R_fldln2(X86EMU_FPU_STKTOP);
                break;
            case 6:
                x86emu_fpu_R_fldz(X86EMU_FPU_STKTOP);
                break;
            default:
                /* 7 */
                x86emu_fpu_illegal();
                break;
            }
            break;
        case 6:
            switch (rl) {
            case 0:
                x86emu_fpu_R_f2xm1(X86EMU_FPU_STKTOP);
                break;
            case 1:
                x86emu_fpu_R_fyl2x(X86EMU_FPU_STKTOP);
                break;
            case 2:
                x86emu_fpu_R_fptan(X86EMU_FPU_STKTOP);
                break;
            case 3:
                x86emu_fpu_R_fpatan(X86EMU_FPU_STKTOP);
                break;
            case 4:
                x86emu_fpu_R_fxtract(X86EMU_FPU_STKTOP);
                break;
            case 5:
                x86emu_fpu_illegal();
                break;
            case 6:
                x86emu_fpu_R_decstp();
                break;
            case 7:
                x86emu_fpu_R_incstp();
                break;
            }
            break;
        case 7:
            switch (rl) {
            case 0:
                x86emu_fpu_R_fprem(X86EMU_FPU_STKTOP);
                break;
            case 1:
                x86emu_fpu_R_fyl2xp1(X86EMU_FPU_STKTOP);
                break;
            case 2:
                x86emu_fpu_R_fsqrt(X86EMU_FPU_STKTOP);
                break;
            case 3:
                x86emu_fpu_illegal();
                break;
            case 4:
                x86emu_fpu_R_frndint(X86EMU_FPU_STKTOP);
                break;
            case 5:
                x86emu_fpu_R_fscale(X86EMU_FPU_STKTOP);
                break;
            case 6:
            case 7:
            default:
                x86emu_fpu_illegal();
                break;
            }
            break;
        }
        break;
    default:
        /* BUGFIX: memory forms now execute for mod 0-2. */
        switch (rh) {
        case 0:
            x86emu_fpu_M_fld(X86EMU_FPU_FLOAT, destoffset);
            break;
        case 1:
            x86emu_fpu_illegal();
            break;
        case 2:
            x86emu_fpu_M_fst(X86EMU_FPU_FLOAT, destoffset);
            break;
        case 3:
            x86emu_fpu_M_fstp(X86EMU_FPU_FLOAT, destoffset);
            break;
        case 4:
            x86emu_fpu_M_fldenv(X86EMU_FPU_WORD, destoffset);
            break;
        case 5:
            x86emu_fpu_M_fldcw(X86EMU_FPU_WORD, destoffset);
            break;
        case 6:
            x86emu_fpu_M_fstenv(X86EMU_FPU_WORD, destoffset);
            break;
        case 7:
            x86emu_fpu_M_fstcw(X86EMU_FPU_WORD, destoffset);
            break;
        }
    }
#else
    (void)destoffset;
    (void)stkelem;
#endif /* X86EMU_FPU_PRESENT */
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}
#ifdef X86EMU_DEBUG
/*
 * Disassembly mnemonics for DA: three identical 8-entry rows for the
 * mod 0-2 integer-DWORD memory forms, plus one row of "ESC_DA" for the
 * mod == 3 register forms (which this emulator treats as illegal).
 */
char *x86emu_fpu_op_da_tab[] = {
    "FIADD\tDWORD PTR ", "FIMUL\tDWORD PTR ", "FICOM\tDWORD PTR ",
    "FICOMP\tDWORD PTR ",
    "FISUB\tDWORD PTR ", "FISUBR\tDWORD PTR ", "FIDIV\tDWORD PTR ",
    "FIDIVR\tDWORD PTR ",
    "FIADD\tDWORD PTR ", "FIMUL\tDWORD PTR ", "FICOM\tDWORD PTR ",
    "FICOMP\tDWORD PTR ",
    "FISUB\tDWORD PTR ", "FISUBR\tDWORD PTR ", "FIDIV\tDWORD PTR ",
    "FIDIVR\tDWORD PTR ",
    "FIADD\tDWORD PTR ", "FIMUL\tDWORD PTR ", "FICOM\tDWORD PTR ",
    "FICOMP\tDWORD PTR ",
    "FISUB\tDWORD PTR ", "FISUBR\tDWORD PTR ", "FIDIV\tDWORD PTR ",
    "FIDIVR\tDWORD PTR ",
    "ESC_DA ", "ESC_DA ", "ESC_DA ", "ESC_DA ",
    "ESC_DA ", "ESC_DA ", "ESC_DA ", "ESC_DA ",
};
#endif /* X86EMU_DEBUG */
/* opcode=0xda */
/*
 * ESC DA group: integer (DWORD) arithmetic memory forms FIADD/FIMUL/
 * FICOM/FICOMP/FISUB/FISUBR/FIDIV/FIDIVR.  Register forms are treated
 * as illegal.  Without X86EMU_FPU_PRESENT the instruction is decoded
 * (and the effective address computed) but otherwise ignored.
 */
void x86emuOp_esc_coprocess_da(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset = 0;        /* effective address for memory forms */
    u8 stkelem = 0;             /* ST(i) index for register forms */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
    DECODE_PRINTINSTR32(x86emu_fpu_op_da_tab, mod, rh, rl);
    switch (mod) {
    case 0:
        destoffset = decode_rm00_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 1:
        destoffset = decode_rm01_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 2:
        destoffset = decode_rm10_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 3: /* register to register */
        stkelem = (u8)rl;
        DECODE_PRINTF2("\tST(%d),ST\n", stkelem);
        break;
    }
#ifdef X86EMU_FPU_PRESENT
    switch (mod) {
    case 3:
        /* DA register forms are not implemented by this model. */
        x86emu_fpu_illegal();
        break;
    default:
        switch (rh) {
        case 0:
            x86emu_fpu_M_iadd(X86EMU_FPU_SHORT, destoffset);
            break;
        case 1:
            x86emu_fpu_M_imul(X86EMU_FPU_SHORT, destoffset);
            break;
        case 2:
            x86emu_fpu_M_icom(X86EMU_FPU_SHORT, destoffset);
            break;
        case 3:
            x86emu_fpu_M_icomp(X86EMU_FPU_SHORT, destoffset);
            break;
        case 4:
            x86emu_fpu_M_isub(X86EMU_FPU_SHORT, destoffset);
            break;
        case 5:
            x86emu_fpu_M_isubr(X86EMU_FPU_SHORT, destoffset);
            break;
        case 6:
            x86emu_fpu_M_idiv(X86EMU_FPU_SHORT, destoffset);
            break;
        case 7:
            x86emu_fpu_M_idivr(X86EMU_FPU_SHORT, destoffset);
            break;
        }
    }
#else
    (void)destoffset;
    (void)stkelem;
#endif
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}
#ifdef X86EMU_DEBUG
/*
 * Disassembly mnemonics for the DB memory forms: one identical 8-entry
 * row per mod value 0-2, selected by rh.
 */
char *x86emu_fpu_op_db_tab[] = {
    "FILD\tDWORD PTR ", "ESC_DB\t19", "FIST\tDWORD PTR ", "FISTP\tDWORD PTR ",
    "ESC_DB\t1C", "FLD\tTBYTE PTR ", "ESC_DB\t1E", "FSTP\tTBYTE PTR ",
    "FILD\tDWORD PTR ", "ESC_DB\t19", "FIST\tDWORD PTR ", "FISTP\tDWORD PTR ",
    "ESC_DB\t1C", "FLD\tTBYTE PTR ", "ESC_DB\t1E", "FSTP\tTBYTE PTR ",
    "FILD\tDWORD PTR ", "ESC_DB\t19", "FIST\tDWORD PTR ", "FISTP\tDWORD PTR ",
    "ESC_DB\t1C", "FLD\tTBYTE PTR ", "ESC_DB\t1E", "FSTP\tTBYTE PTR ",
};
#endif /* X86EMU_DEBUG */
/* opcode=0xdb */
/*
 * ESC DB group: FILD/FIST/FISTP DWORD and FLD/FSTP TBYTE memory forms,
 * and the FENI/FDISI/FCLEX/FINIT control register forms (mod == 3,
 * rh == 4).  Without X86EMU_FPU_PRESENT the instruction is decoded and
 * otherwise ignored.
 */
void x86emuOp_esc_coprocess_db(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset = 0;        /* effective address for memory forms */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef X86EMU_DEBUG
    if (mod != 3) {
        DECODE_PRINTINSTR32(x86emu_fpu_op_db_tab, mod, rh, rl);
    } else if (rh == 4) { /* === 11 10 0 nnn */
        switch (rl) {
        case 0:
            DECODE_PRINTF("FENI\n");
            break;
        case 1:
            DECODE_PRINTF("FDISI\n");
            break;
        case 2:
            DECODE_PRINTF("FCLEX\n");
            break;
        case 3:
            DECODE_PRINTF("FINIT\n");
            break;
        }
    } else {
        DECODE_PRINTF2("ESC_DB %0x\n", (mod << 6) + (rh << 3) + (rl));
    }
#endif /* X86EMU_DEBUG */
    switch (mod) {
    case 0:
        destoffset = decode_rm00_address(rl);
        break;
    case 1:
        destoffset = decode_rm01_address(rl);
        break;
    case 2:
        destoffset = decode_rm10_address(rl);
        break;
    case 3: /* register to register */
        break;
    }
#ifdef X86EMU_FPU_PRESENT
    /* execute */
    switch (mod) {
    case 3:
        switch (rh) {
        case 4:
            /* FPU control operations (no operand). */
            switch (rl) {
            case 0:
                x86emu_fpu_R_feni();
                break;
            case 1:
                x86emu_fpu_R_fdisi();
                break;
            case 2:
                x86emu_fpu_R_fclex();
                break;
            case 3:
                x86emu_fpu_R_finit();
                break;
            default:
                x86emu_fpu_illegal();
                break;
            }
            break;
        default:
            x86emu_fpu_illegal();
            break;
        }
        break;
    default:
        switch (rh) {
        case 0:
            x86emu_fpu_M_fild(X86EMU_FPU_SHORT, destoffset);
            break;
        case 1:
            x86emu_fpu_illegal();
            break;
        case 2:
            x86emu_fpu_M_fist(X86EMU_FPU_SHORT, destoffset);
            break;
        case 3:
            x86emu_fpu_M_fistp(X86EMU_FPU_SHORT, destoffset);
            break;
        case 4:
            x86emu_fpu_illegal();
            break;
        case 5:
            x86emu_fpu_M_fld(X86EMU_FPU_LDBL, destoffset);
            break;
        case 6:
            x86emu_fpu_illegal();
            break;
        case 7:
            x86emu_fpu_M_fstp(X86EMU_FPU_LDBL, destoffset);
            break;
        }
    }
#else
    (void)destoffset;
#endif
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}
#ifdef X86EMU_DEBUG
/*
 * Disassembly mnemonics for DC: three identical rows for the mod 0-2
 * QWORD memory forms, plus one row for the mod == 3 register forms.
 * For the register row FSUB/FSUBR and FDIV/FDIVR are deliberately
 * swapped relative to the memory row — presumably to match the x87
 * DC-opcode operand-order reversal; verify against the opcode map.
 */
char *x86emu_fpu_op_dc_tab[] = {
    "FADD\tQWORD PTR ", "FMUL\tQWORD PTR ", "FCOM\tQWORD PTR ",
    "FCOMP\tQWORD PTR ",
    "FSUB\tQWORD PTR ", "FSUBR\tQWORD PTR ", "FDIV\tQWORD PTR ",
    "FDIVR\tQWORD PTR ",
    "FADD\tQWORD PTR ", "FMUL\tQWORD PTR ", "FCOM\tQWORD PTR ",
    "FCOMP\tQWORD PTR ",
    "FSUB\tQWORD PTR ", "FSUBR\tQWORD PTR ", "FDIV\tQWORD PTR ",
    "FDIVR\tQWORD PTR ",
    "FADD\tQWORD PTR ", "FMUL\tQWORD PTR ", "FCOM\tQWORD PTR ",
    "FCOMP\tQWORD PTR ",
    "FSUB\tQWORD PTR ", "FSUBR\tQWORD PTR ", "FDIV\tQWORD PTR ",
    "FDIVR\tQWORD PTR ",
    "FADD\t", "FMUL\t", "FCOM\t", "FCOMP\t",
    "FSUBR\t", "FSUB\t", "FDIVR\t", "FDIV\t",
};
#endif /* X86EMU_DEBUG */
/* opcode=0xdc */
/*
 * ESC DC group: double-precision (QWORD) arithmetic memory forms and the
 * ST(i),ST register forms.  Note that the register forms dispatch to the
 * reversed-operand routines (fsubr/fsub and fdivr/fdiv swapped) compared
 * with the memory forms.  Without X86EMU_FPU_PRESENT the instruction is
 * decoded and otherwise ignored.
 */
void x86emuOp_esc_coprocess_dc(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset = 0;        /* effective address for memory forms */
    u8 stkelem = 0;             /* ST(i) index for register forms */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
    DECODE_PRINTINSTR32(x86emu_fpu_op_dc_tab, mod, rh, rl);
    switch (mod) {
    case 0:
        destoffset = decode_rm00_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 1:
        destoffset = decode_rm01_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 2:
        destoffset = decode_rm10_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 3: /* register to register */
        stkelem = (u8)rl;
        DECODE_PRINTF2("\tST(%d),ST\n", stkelem);
        break;
    }
#ifdef X86EMU_FPU_PRESENT
    /* execute */
    switch (mod) {
    case 3:
        switch (rh) {
        case 0:
            x86emu_fpu_R_fadd(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 1:
            x86emu_fpu_R_fmul(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 2:
            x86emu_fpu_R_fcom(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 3:
            x86emu_fpu_R_fcomp(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 4:
            x86emu_fpu_R_fsubr(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 5:
            x86emu_fpu_R_fsub(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 6:
            x86emu_fpu_R_fdivr(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 7:
            x86emu_fpu_R_fdiv(stkelem, X86EMU_FPU_STKTOP);
            break;
        }
        break;
    default:
        switch (rh) {
        case 0:
            x86emu_fpu_M_fadd(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 1:
            x86emu_fpu_M_fmul(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 2:
            x86emu_fpu_M_fcom(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 3:
            x86emu_fpu_M_fcomp(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 4:
            x86emu_fpu_M_fsub(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 5:
            x86emu_fpu_M_fsubr(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 6:
            x86emu_fpu_M_fdiv(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 7:
            x86emu_fpu_M_fdivr(X86EMU_FPU_DOUBLE, destoffset);
            break;
        }
    }
#else
    (void)destoffset;
    (void)stkelem;
#endif
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}
#ifdef X86EMU_DEBUG
/*
 * Disassembly mnemonics for DD: three identical rows for the mod 0-2
 * QWORD / save-area memory forms, plus one row for the mod == 3
 * FFREE/FXCH/FST/FSTP register forms.
 */
static char *x86emu_fpu_op_dd_tab[] = {
    "FLD\tQWORD PTR ", "ESC_DD\t29,", "FST\tQWORD PTR ", "FSTP\tQWORD PTR ",
    "FRSTOR\t", "ESC_DD\t2D,", "FSAVE\t", "FSTSW\t",
    "FLD\tQWORD PTR ", "ESC_DD\t29,", "FST\tQWORD PTR ", "FSTP\tQWORD PTR ",
    "FRSTOR\t", "ESC_DD\t2D,", "FSAVE\t", "FSTSW\t",
    "FLD\tQWORD PTR ", "ESC_DD\t29,", "FST\tQWORD PTR ", "FSTP\tQWORD PTR ",
    "FRSTOR\t", "ESC_DD\t2D,", "FSAVE\t", "FSTSW\t",
    "FFREE\t", "FXCH\t", "FST\t", "FSTP\t",
    "ESC_DD\t2C,", "ESC_DD\t2D,", "ESC_DD\t2E,", "ESC_DD\t2F,",
};
#endif /* X86EMU_DEBUG */
/* opcode=0xdd */
/*
 * ESC DD group: FLD/FST/FSTP QWORD (double) and FRSTOR/FSAVE/FSTSW
 * memory forms, plus the FFREE/FXCH/FST/FSTP register forms.  Without
 * X86EMU_FPU_PRESENT the instruction is decoded and otherwise ignored.
 */
void x86emuOp_esc_coprocess_dd(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset = 0;        /* effective address for memory forms */
    u8 stkelem = 0;             /* ST(i) index for register forms */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
    DECODE_PRINTINSTR32(x86emu_fpu_op_dd_tab, mod, rh, rl);
    switch (mod) {
    case 0:
        destoffset = decode_rm00_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 1:
        destoffset = decode_rm01_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 2:
        destoffset = decode_rm10_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 3: /* register to register */
        stkelem = (u8)rl;
        DECODE_PRINTF2("\tST(%d),ST\n", stkelem);
        break;
    }
#ifdef X86EMU_FPU_PRESENT
    switch (mod) {
    case 3:
        switch (rh) {
        case 0:
            x86emu_fpu_R_ffree(stkelem);
            break;
        case 1:
            x86emu_fpu_R_fxch(stkelem);
            break;
        case 2:
            x86emu_fpu_R_fst(stkelem); /* register version */
            break;
        case 3:
            x86emu_fpu_R_fstp(stkelem); /* register version */
            break;
        default:
            x86emu_fpu_illegal();
            break;
        }
        break;
    default:
        switch (rh) {
        case 0:
            x86emu_fpu_M_fld(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 1:
            x86emu_fpu_illegal();
            break;
        case 2:
            x86emu_fpu_M_fst(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 3:
            x86emu_fpu_M_fstp(X86EMU_FPU_DOUBLE, destoffset);
            break;
        case 4:
            x86emu_fpu_M_frstor(X86EMU_FPU_WORD, destoffset);
            break;
        case 5:
            x86emu_fpu_illegal();
            break;
        case 6:
            x86emu_fpu_M_fsave(X86EMU_FPU_WORD, destoffset);
            break;
        case 7:
            x86emu_fpu_M_fstsw(X86EMU_FPU_WORD, destoffset);
            break;
        }
    }
#else
    (void)destoffset;
    (void)stkelem;
#endif
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}
#ifdef X86EMU_DEBUG
/*
 * Disassembly mnemonics for DE: three identical rows for the mod 0-2
 * integer-WORD memory forms, plus one row for the mod == 3 pop forms
 * (FADDP ... FDIVP).
 */
static char *x86emu_fpu_op_de_tab[] =
{
    "FIADD\tWORD PTR ", "FIMUL\tWORD PTR ", "FICOM\tWORD PTR ",
    "FICOMP\tWORD PTR ",
    "FISUB\tWORD PTR ", "FISUBR\tWORD PTR ", "FIDIV\tWORD PTR ",
    "FIDIVR\tWORD PTR ",
    "FIADD\tWORD PTR ", "FIMUL\tWORD PTR ", "FICOM\tWORD PTR ",
    "FICOMP\tWORD PTR ",
    "FISUB\tWORD PTR ", "FISUBR\tWORD PTR ", "FIDIV\tWORD PTR ",
    "FIDIVR\tWORD PTR ",
    "FIADD\tWORD PTR ", "FIMUL\tWORD PTR ", "FICOM\tWORD PTR ",
    "FICOMP\tWORD PTR ",
    "FISUB\tWORD PTR ", "FISUBR\tWORD PTR ", "FIDIV\tWORD PTR ",
    "FIDIVR\tWORD PTR ",
    "FADDP\t", "FMULP\t", "FCOMP\t", "FCOMPP\t",
    "FSUBRP\t", "FSUBP\t", "FDIVRP\t", "FDIVP\t",
};
#endif /* X86EMU_DEBUG */
/* opcode=0xde */
/*
 * ESC DE group: integer (WORD) arithmetic memory forms and the pop
 * register forms (FADDP...FDIVP; FCOMPP only with ST(1)).  Without
 * X86EMU_FPU_PRESENT the instruction is decoded and otherwise ignored.
 */
void x86emuOp_esc_coprocess_de(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset = 0;        /* effective address for memory forms */
    u8 stkelem = 0;             /* ST(i) index for register forms */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
    DECODE_PRINTINSTR32(x86emu_fpu_op_de_tab, mod, rh, rl);
    switch (mod) {
    case 0:
        destoffset = decode_rm00_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 1:
        destoffset = decode_rm01_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 2:
        destoffset = decode_rm10_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 3: /* register to register */
        stkelem = (u8)rl;
        DECODE_PRINTF2("\tST(%d),ST\n", stkelem);
        break;
    }
#ifdef X86EMU_FPU_PRESENT
    switch (mod) {
    case 3:
        switch (rh) {
        case 0:
            x86emu_fpu_R_faddp(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 1:
            x86emu_fpu_R_fmulp(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 2:
            x86emu_fpu_R_fcomp(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 3:
            /* FCOMPP is only encoded with ST(1). */
            if (stkelem == 1)
                x86emu_fpu_R_fcompp(stkelem, X86EMU_FPU_STKTOP);
            else
                x86emu_fpu_illegal();
            break;
        case 4:
            x86emu_fpu_R_fsubrp(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 5:
            x86emu_fpu_R_fsubp(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 6:
            x86emu_fpu_R_fdivrp(stkelem, X86EMU_FPU_STKTOP);
            break;
        case 7:
            x86emu_fpu_R_fdivp(stkelem, X86EMU_FPU_STKTOP);
            break;
        }
        break;
    default:
        switch (rh) {
        case 0:
            x86emu_fpu_M_fiadd(X86EMU_FPU_WORD, destoffset);
            break;
        case 1:
            x86emu_fpu_M_fimul(X86EMU_FPU_WORD, destoffset);
            break;
        case 2:
            x86emu_fpu_M_ficom(X86EMU_FPU_WORD, destoffset);
            break;
        case 3:
            x86emu_fpu_M_ficomp(X86EMU_FPU_WORD, destoffset);
            break;
        case 4:
            x86emu_fpu_M_fisub(X86EMU_FPU_WORD, destoffset);
            break;
        case 5:
            x86emu_fpu_M_fisubr(X86EMU_FPU_WORD, destoffset);
            break;
        case 6:
            x86emu_fpu_M_fidiv(X86EMU_FPU_WORD, destoffset);
            break;
        case 7:
            x86emu_fpu_M_fidivr(X86EMU_FPU_WORD, destoffset);
            break;
        }
    }
#else
    (void)destoffset;
    (void)stkelem;
#endif
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}
#ifdef X86EMU_DEBUG
/* Disassembly mnemonics for DF, one 8-entry row per mod value. */
static char *x86emu_fpu_op_df_tab[] = {
    /* mod == 00 */
    "FILD\tWORD PTR ", "ESC_DF\t39\n", "FIST\tWORD PTR ", "FISTP\tWORD PTR ",
    "FBLD\tTBYTE PTR ", "FILD\tQWORD PTR ", "FBSTP\tTBYTE PTR ",
    "FISTP\tQWORD PTR ",
    /* mod == 01 */
    "FILD\tWORD PTR ", "ESC_DF\t39 ", "FIST\tWORD PTR ", "FISTP\tWORD PTR ",
    "FBLD\tTBYTE PTR ", "FILD\tQWORD PTR ", "FBSTP\tTBYTE PTR ",
    "FISTP\tQWORD PTR ",
    /* mod == 10 */
    "FILD\tWORD PTR ", "ESC_DF\t39 ", "FIST\tWORD PTR ", "FISTP\tWORD PTR ",
    "FBLD\tTBYTE PTR ", "FILD\tQWORD PTR ", "FBSTP\tTBYTE PTR ",
    "FISTP\tQWORD PTR ",
    /* mod == 11 */
    "FFREE\t", "FXCH\t", "FST\t", "FSTP\t",
    "ESC_DF\t3C,", "ESC_DF\t3D,", "ESC_DF\t3E,", "ESC_DF\t3F,"
};
#endif /* X86EMU_DEBUG */
/* opcode=0xdf */
/*
 * ESC DF group: FILD/FIST/FISTP WORD and QWORD, FBLD/FBSTP packed-BCD
 * memory forms, plus the FFREE/FXCH/FST/FSTP register forms.  Without
 * X86EMU_FPU_PRESENT the instruction is decoded and otherwise ignored.
 */
void x86emuOp_esc_coprocess_df(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset = 0;        /* effective address for memory forms */
    u8 stkelem = 0;             /* ST(i) index for register forms */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
    DECODE_PRINTINSTR32(x86emu_fpu_op_df_tab, mod, rh, rl);
    switch (mod) {
    case 0:
        destoffset = decode_rm00_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 1:
        destoffset = decode_rm01_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 2:
        destoffset = decode_rm10_address(rl);
        DECODE_PRINTF("\n");
        break;
    case 3: /* register to register */
        stkelem = (u8)rl;
        DECODE_PRINTF2("\tST(%d)\n", stkelem);
        break;
    }
#ifdef X86EMU_FPU_PRESENT
    switch (mod) {
    case 3:
        switch (rh) {
        case 0:
            x86emu_fpu_R_ffree(stkelem);
            break;
        case 1:
            x86emu_fpu_R_fxch(stkelem);
            break;
        case 2:
            x86emu_fpu_R_fst(stkelem); /* register version */
            break;
        case 3:
            x86emu_fpu_R_fstp(stkelem); /* register version */
            break;
        default:
            x86emu_fpu_illegal();
            break;
        }
        break;
    default:
        switch (rh) {
        case 0:
            x86emu_fpu_M_fild(X86EMU_FPU_WORD, destoffset);
            break;
        case 1:
            x86emu_fpu_illegal();
            break;
        case 2:
            x86emu_fpu_M_fist(X86EMU_FPU_WORD, destoffset);
            break;
        case 3:
            x86emu_fpu_M_fistp(X86EMU_FPU_WORD, destoffset);
            break;
        case 4:
            x86emu_fpu_M_fbld(X86EMU_FPU_BSD, destoffset);
            break;
        case 5:
            x86emu_fpu_M_fild(X86EMU_FPU_LONG, destoffset);
            break;
        case 6:
            x86emu_fpu_M_fbstp(X86EMU_FPU_BSD, destoffset);
            break;
        case 7:
            x86emu_fpu_M_fistp(X86EMU_FPU_LONG, destoffset);
            break;
        }
    }
#else
    (void)destoffset;
    (void)stkelem;
#endif
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR_NO_TRACE();
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,663 @@
/****************************************************************************
*
* Realmode X86 Emulator Library
*
* Copyright (C) 1996-1999 SciTech Software, Inc.
* Copyright (C) David Mosberger-Tang
* Copyright (C) 1999 Egbert Eich
*
* ========================================================================
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of the authors not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. The authors makes no
* representations about the suitability of this software for any purpose.
* It is provided "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* ========================================================================
*
* Language: ANSI C
* Environment: Any
* Developer: Kendall Bennett
*
* Description: This file includes subroutines which are related to
* programmed I/O and memory access. Included in this module
* are default functions with limited usefulness. For real
* uses these functions will most likely be overriden by the
* user library.
*
****************************************************************************/
#include "x86emu/x86emu.h"
#include "x86emu/x86emui.h"
#include "x86emu/regs.h"
#include "x86emu/debug.h"
#include "x86emu/prim_ops.h"
#ifndef NO_SYS_HEADERS
#include <string.h>
#endif
/*------------------------- Global Variables ------------------------------*/
/* Single shared machine state for the emulator (accessed via the M macro). */
X86EMU_sysEnv _X86EMU_env; /* Global emulator machine state */
/* Software interrupt handlers, one slot per interrupt vector 0x00-0xFF. */
X86EMU_intrFuncs _X86EMU_intrTab[256];
/*----------------------------- Implementation ----------------------------*/
#if defined(__alpha__) || defined(__alpha)
/* to cope with broken egcs-1.1.2 :-(((( */
#define ALPHA_UALOADS
/*
 * inline functions to do unaligned accesses
 * from linux/include/asm-alpha/unaligned.h
 */
/*
 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
 * packed structures to talk about such things with.
 */
#if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
struct __una_u64 { unsigned long x __attribute__((packed)); };
struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };
#endif
/* 64-bit unaligned load (packed-struct on newer GCC, ldq_u/extq asm otherwise). */
static __inline__ unsigned long ldq_u(unsigned long * r11)
{
#if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
    return ptr->x;
#else
    unsigned long r1,r2;
    __asm__("ldq_u %0,%3\n\t"
            "ldq_u %1,%4\n\t"
            "extql %0,%2,%0\n\t"
            "extqh %1,%2,%1"
            :"=&r" (r1), "=&r" (r2)
            :"r" (r11),
            "m" (*r11),
            "m" (*(const unsigned long *)(7+(char *) r11)));
    return r1 | r2;
#endif
}
/* 32-bit unaligned load. */
static __inline__ unsigned long ldl_u(unsigned int * r11)
{
#if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
    return ptr->x;
#else
    unsigned long r1,r2;
    __asm__("ldq_u %0,%3\n\t"
            "ldq_u %1,%4\n\t"
            "extll %0,%2,%0\n\t"
            "extlh %1,%2,%1"
            :"=&r" (r1), "=&r" (r2)
            :"r" (r11),
            "m" (*r11),
            "m" (*(const unsigned long *)(3+(char *) r11)));
    return r1 | r2;
#endif
}
/* 16-bit unaligned load. */
static __inline__ unsigned long ldw_u(unsigned short * r11)
{
#if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
    return ptr->x;
#else
    unsigned long r1,r2;
    __asm__("ldq_u %0,%3\n\t"
            "ldq_u %1,%4\n\t"
            "extwl %0,%2,%0\n\t"
            "extwh %1,%2,%1"
            :"=&r" (r1), "=&r" (r2)
            :"r" (r11),
            "m" (*r11),
            "m" (*(const unsigned long *)(1+(char *) r11)));
    return r1 | r2;
#endif
}
/*
 * Elemental unaligned stores
 */
/* 64-bit unaligned store. */
static __inline__ void stq_u(unsigned long r5, unsigned long * r11)
{
#if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u64 *ptr = (struct __una_u64 *) r11;
    ptr->x = r5;
#else
    unsigned long r1,r2,r3,r4;
    __asm__("ldq_u %3,%1\n\t"
            "ldq_u %2,%0\n\t"
            "insqh %6,%7,%5\n\t"
            "insql %6,%7,%4\n\t"
            "mskqh %3,%7,%3\n\t"
            "mskql %2,%7,%2\n\t"
            "bis %3,%5,%3\n\t"
            "bis %2,%4,%2\n\t"
            "stq_u %3,%1\n\t"
            "stq_u %2,%0"
            :"=m" (*r11),
            "=m" (*(unsigned long *)(7+(char *) r11)),
            "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
            :"r" (r5), "r" (r11));
#endif
}
/* 32-bit unaligned store. */
static __inline__ void stl_u(unsigned long r5, unsigned int * r11)
{
#if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u32 *ptr = (struct __una_u32 *) r11;
    ptr->x = r5;
#else
    unsigned long r1,r2,r3,r4;
    __asm__("ldq_u %3,%1\n\t"
            "ldq_u %2,%0\n\t"
            "inslh %6,%7,%5\n\t"
            "insll %6,%7,%4\n\t"
            "msklh %3,%7,%3\n\t"
            "mskll %2,%7,%2\n\t"
            "bis %3,%5,%3\n\t"
            "bis %2,%4,%2\n\t"
            "stq_u %3,%1\n\t"
            "stq_u %2,%0"
            :"=m" (*r11),
            "=m" (*(unsigned long *)(3+(char *) r11)),
            "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
            :"r" (r5), "r" (r11));
#endif
}
/* 16-bit unaligned store. */
static __inline__ void stw_u(unsigned long r5, unsigned short * r11)
{
#if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u16 *ptr = (struct __una_u16 *) r11;
    ptr->x = r5;
#else
    unsigned long r1,r2,r3,r4;
    __asm__("ldq_u %3,%1\n\t"
            "ldq_u %2,%0\n\t"
            "inswh %6,%7,%5\n\t"
            "inswl %6,%7,%4\n\t"
            "mskwh %3,%7,%3\n\t"
            "mskwl %2,%7,%2\n\t"
            "bis %3,%5,%3\n\t"
            "bis %2,%4,%2\n\t"
            "stq_u %3,%1\n\t"
            "stq_u %2,%0"
            :"=m" (*r11),
            "=m" (*(unsigned long *)(1+(char *) r11)),
            "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
            :"r" (r5), "r" (r11));
#endif
}
#elif defined(__GNUC__) && ((__GNUC__ < 3)) && \
    (defined (__ia64__) || defined (ia64__))
#define IA64_UALOADS
/*
 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
 * packed structures to talk about such things with.
 */
struct __una_u64 { unsigned long x __attribute__((packed)); };
struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };
/* 64-bit unaligned load via a packed struct. */
static __inline__ unsigned long
__uldq (const unsigned long * r11)
{
    const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
    return ptr->x;
}
/* 32-bit unaligned load. */
static __inline__ unsigned long
uldl (const unsigned int * r11)
{
    const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
    return ptr->x;
}
/* 16-bit unaligned load. */
static __inline__ unsigned long
uldw (const unsigned short * r11)
{
    const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
    return ptr->x;
}
/* 64-bit unaligned store. */
static __inline__ void
ustq (unsigned long r5, unsigned long * r11)
{
    struct __una_u64 *ptr = (struct __una_u64 *) r11;
    ptr->x = r5;
}
/* 32-bit unaligned store. */
static __inline__ void
ustl (unsigned long r5, unsigned int * r11)
{
    struct __una_u32 *ptr = (struct __una_u32 *) r11;
    ptr->x = r5;
}
/* 16-bit unaligned store. */
static __inline__ void
ustw (unsigned long r5, unsigned short * r11)
{
    struct __una_u16 *ptr = (struct __una_u16 *) r11;
    ptr->x = r5;
}
#endif
/****************************************************************************
PARAMETERS:
addr - Emulator memory address to read
RETURNS:
Byte value read from emulator memory.
REMARKS:
Reads a byte value from the emulator memory.
****************************************************************************/
/* Read one byte of emulator memory at addr; halts the emulator on an
 * out-of-range address. */
u8 X86API rdb(
    u32 addr)
{
    u8 byte;

    /* Range-check against the emulator memory window. */
    if (addr > M.mem_size - 1) {
        DB(NV_PRINTF(LEVEL_INFO, "mem_read: address %#x out of range!\n", addr);)
        HALT_SYS();
    }
    byte = *(u8*)(M.mem_base + addr);
    DB( if (DEBUG_MEM_TRACE())
        NV_PRINTF(LEVEL_INFO, "%#08x 1 -> %#x\n", addr, byte);)
    return byte;
}
/****************************************************************************
PARAMETERS:
addr - Emulator memory address to read
RETURNS:
Word value read from emulator memory.
REMARKS:
Reads a word value from the emulator memory.
****************************************************************************/
/* Read a 16-bit word of emulator memory at addr; halts the emulator on
 * an out-of-range address. */
u16 X86API rdw(
u32 addr)
{
u16 val = 0;
/* A word needs 2 bytes; refuse reads that would run past the end. */
if (addr > M.mem_size - 2) {
DB(NV_PRINTF(LEVEL_INFO, "mem_read: address %#x out of range!\n", addr);)
HALT_SYS();
}
#ifdef __BIG_ENDIAN__
/* On big-endian hosts an odd (unaligned) address is assembled byte by
 * byte in little-endian order, matching x86 memory layout. */
if (addr & 0x1) {
val = (*(u8*)(M.mem_base + addr) |
(*(u8*)(M.mem_base + addr + 1) << 8));
}
else
#endif
/* NOTE: when __BIG_ENDIAN__ is defined, the 'else' above governs
 * exactly one of the statements selected below by the preprocessor. */
#if defined(ALPHA_UALOADS)
val = ldw_u((u16*)(M.mem_base + addr));
#elif defined(IA64_UALOADS)
val = uldw((u16*)(M.mem_base + addr));
#else
val = *(u16*)(M.mem_base + addr);
#endif
DB( if (DEBUG_MEM_TRACE())
NV_PRINTF(LEVEL_INFO, "%#08x 2 -> %#x\n", addr, val);)
return val;
}
/****************************************************************************
PARAMETERS:
addr - Emulator memory address to read
RETURNS:
Long value read from emulator memory.
REMARKS:
Reads a long value from the emulator memory.
****************************************************************************/
/* Read a 32-bit long of emulator memory at addr; halts the emulator on
 * an out-of-range address. */
u32 X86API rdl(
u32 addr)
{
u32 val = 0;
/* A long needs 4 bytes; refuse reads that would run past the end. */
if (addr > M.mem_size - 4) {
DB(NV_PRINTF(LEVEL_INFO, "mem_read: address %#x out of range!\n", addr);)
HALT_SYS();
}
#ifdef __BIG_ENDIAN__
/* On big-endian hosts a non-4-aligned address is assembled byte by
 * byte in little-endian order, matching x86 memory layout. */
if (addr & 0x3) {
val = (*(u8*)(M.mem_base + addr + 0) |
(*(u8*)(M.mem_base + addr + 1) << 8) |
(*(u8*)(M.mem_base + addr + 2) << 16) |
(*(u8*)(M.mem_base + addr + 3) << 24));
}
else
#endif
/* NOTE: when __BIG_ENDIAN__ is defined, the 'else' above governs
 * exactly one of the statements selected below by the preprocessor. */
#if defined(ALPHA_UALOADS)
val = ldl_u((u32*)(M.mem_base + addr));
#elif defined(IA64_UALOADS)
val = uldl((u32*)(M.mem_base + addr));
#else
val = *(u32*)(M.mem_base + addr);
#endif
DB( if (DEBUG_MEM_TRACE())
NV_PRINTF(LEVEL_INFO, "%#08x 4 -> %#x\n", addr, val);)
return val;
}
/****************************************************************************
PARAMETERS:
addr - Emulator memory address to read
val - Value to store
REMARKS:
Writes a byte value to emulator memory.
****************************************************************************/
/* Write one byte to emulator memory at addr; halts the emulator on an
 * out-of-range address. */
void X86API wrb(
    u32 addr,
    u8 val)
{
    DB( if (DEBUG_MEM_TRACE())
        NV_PRINTF(LEVEL_INFO, "%#08x 1 <- %#x\n", addr, val);)
    /* Byte stores need no alignment handling; range-check and write. */
    if (addr > M.mem_size - 1) {
        DB(NV_PRINTF(LEVEL_INFO, "mem_write: address %#x out of range!\n", addr);)
        HALT_SYS();
    }
    ((u8*)(M.mem_base + addr))[0] = val;
}
/****************************************************************************
PARAMETERS:
addr - Emulator memory address to read
val - Value to store
REMARKS:
Writes a word value to emulator memory.
****************************************************************************/
/* Write a 16-bit word to emulator memory at addr; halts the emulator on
 * an out-of-range address. */
void X86API wrw(
u32 addr,
u16 val)
{
DB( if (DEBUG_MEM_TRACE())
NV_PRINTF(LEVEL_INFO, "%#08x 2 <- %#x\n", addr, val);)
/* A word needs 2 bytes; refuse writes that would run past the end. */
if (addr > M.mem_size - 2) {
DB(NV_PRINTF(LEVEL_INFO, "mem_write: address %#x out of range!\n", addr);)
HALT_SYS();
}
#ifdef __BIG_ENDIAN__
/* On big-endian hosts an odd (unaligned) address is stored byte by
 * byte in little-endian order, matching x86 memory layout. */
if (addr & 0x1) {
*(u8*)(M.mem_base + addr + 0) = (val >> 0) & 0xff;
*(u8*)(M.mem_base + addr + 1) = (val >> 8) & 0xff;
}
else
#endif
/* NOTE: when __BIG_ENDIAN__ is defined, the 'else' above governs
 * exactly one of the statements selected below by the preprocessor. */
#if defined(ALPHA_UALOADS)
stw_u(val,(u16*)(M.mem_base + addr));
#elif defined(IA64_UALOADS)
ustw(val,(u16*)(M.mem_base + addr));
#else
*(u16*)(M.mem_base + addr) = val;
#endif
}
/****************************************************************************
PARAMETERS:
addr - Emulator memory address to read
val - Value to store
REMARKS:
Writes a long value to emulator memory.
****************************************************************************/
/* Write a 32-bit long to emulator memory at addr; halts the emulator on
 * an out-of-range address.  Mirrors rdl(): big-endian hosts store an
 * unaligned long byte by byte in little-endian (x86) order; Alpha/IA64
 * use their unaligned-store helpers; everyone else stores directly. */
void X86API wrl(
    u32 addr,
    u32 val)
{
    DB( if (DEBUG_MEM_TRACE())
        NV_PRINTF(LEVEL_INFO, "%#08x 4 <- %#x\n", addr, val);)
    /* A long needs 4 bytes; refuse writes that would run past the end. */
    if (addr > M.mem_size - 4) {
        DB(NV_PRINTF(LEVEL_INFO, "mem_write: address %#x out of range!\n", addr);)
        HALT_SYS();
    }
#ifdef __BIG_ENDIAN__
    /* Fix: test 32-bit alignment with 0x3 (was 0x1, which missed
     * addresses that are 2 mod 4 and fell through to a misaligned
     * direct store).  This matches rdl() above. */
    if (addr & 0x3) {
        *(u8*)(M.mem_base + addr + 0) = (val >> 0) & 0xff;
        *(u8*)(M.mem_base + addr + 1) = (val >> 8) & 0xff;
        *(u8*)(M.mem_base + addr + 2) = (val >> 16) & 0xff;
        *(u8*)(M.mem_base + addr + 3) = (val >> 24) & 0xff;
    }
    else
#endif
/* When __BIG_ENDIAN__ is defined, the 'else' above governs exactly one
 * of the statements selected below by the preprocessor. */
#if defined(ALPHA_UALOADS)
    stl_u(val,(u32*)(M.mem_base + addr));
#elif defined(IA64_UALOADS)
    ustl(val,(u32*)(M.mem_base + addr));
#else
    *(u32*)(M.mem_base + addr) = val;
#endif
}
/****************************************************************************
PARAMETERS:
addr - PIO address to read
RETURN:
0
REMARKS:
Default PIO byte read function. Doesn't perform real inb.
****************************************************************************/
/* Stub byte-input handler: trace in debug builds, never touch real
 * hardware, always return 0. */
static u8 X86API p_inb(
X86EMU_pioAddr addr)
{
DB( if (DEBUG_IO_TRACE())
NV_PRINTF(LEVEL_INFO, "inb %#04x \n", addr);)
return 0;
}
/****************************************************************************
PARAMETERS:
addr - PIO address to read
RETURN:
0
REMARKS:
Default PIO word read function. Doesn't perform real inw.
****************************************************************************/
/* Stub word-input handler: trace in debug builds, never touch real
 * hardware, always return 0. */
static u16 X86API p_inw(
X86EMU_pioAddr addr)
{
DB( if (DEBUG_IO_TRACE())
NV_PRINTF(LEVEL_INFO, "inw %#04x \n", addr);)
return 0;
}
/****************************************************************************
PARAMETERS:
addr - PIO address to read
RETURN:
0
REMARKS:
Default PIO long read function. Doesn't perform real inl.
****************************************************************************/
/* Stub long-input handler: trace in debug builds, never touch real
 * hardware, always return 0. */
static u32 X86API p_inl(
X86EMU_pioAddr addr)
{
DB( if (DEBUG_IO_TRACE())
NV_PRINTF(LEVEL_INFO, "inl %#04x \n", addr);)
return 0;
}
/****************************************************************************
PARAMETERS:
addr - PIO address to write
val - Value to store
REMARKS:
Default PIO byte write function. Doesn't perform real outb.
****************************************************************************/
/* Stub byte-output handler: trace in debug builds, discard the value;
 * no real outb is performed. */
static void X86API p_outb(
    X86EMU_pioAddr addr,
    u8 val)
{
    DB( if (DEBUG_IO_TRACE())
        NV_PRINTF(LEVEL_INFO, "outb %#02x -> %#04x \n", val, addr);)
}
/****************************************************************************
PARAMETERS:
addr - PIO address to write
val - Value to store
REMARKS:
Default PIO word write function. Doesn't perform real outw.
****************************************************************************/
/* Stub word-output handler: trace in debug builds, discard the value;
 * no real outw is performed. */
static void X86API p_outw(
    X86EMU_pioAddr addr,
    u16 val)
{
    DB( if (DEBUG_IO_TRACE())
        NV_PRINTF(LEVEL_INFO, "outw %#04x -> %#04x \n", val, addr);)
}
/****************************************************************************
PARAMETERS:
addr - PIO address to write
val - Value to store
REMARKS:
Default PIO long write function. Doesn't perform real outl.
****************************************************************************/
/* Stub long-output handler: trace in debug builds, discard the value;
 * no real outl is performed. */
static void X86API p_outl(
    X86EMU_pioAddr addr,
    u32 val)
{
    DB( if (DEBUG_IO_TRACE())
        NV_PRINTF(LEVEL_INFO, "outl %#08x -> %#04x \n", val, addr);)
}
/*------------------------- Global Variables ------------------------------*/
/* Active accessor function pointers.  They start at the built-in
 * defaults defined above and are replaced wholesale by
 * X86EMU_setupMemFuncs() / X86EMU_setupPioFuncs(). */
u8 (X86APIP sys_rdb)(u32 addr) = rdb;
u16 (X86APIP sys_rdw)(u32 addr) = rdw;
u32 (X86APIP sys_rdl)(u32 addr) = rdl;
void (X86APIP sys_wrb)(u32 addr,u8 val) = wrb;
void (X86APIP sys_wrw)(u32 addr,u16 val) = wrw;
void (X86APIP sys_wrl)(u32 addr,u32 val) = wrl;
u8 (X86APIP sys_inb)(X86EMU_pioAddr addr) = p_inb;
u16 (X86APIP sys_inw)(X86EMU_pioAddr addr) = p_inw;
u32 (X86APIP sys_inl)(X86EMU_pioAddr addr) = p_inl;
void (X86APIP sys_outb)(X86EMU_pioAddr addr, u8 val) = p_outb;
void (X86APIP sys_outw)(X86EMU_pioAddr addr, u16 val) = p_outw;
void (X86APIP sys_outl)(X86EMU_pioAddr addr, u32 val) = p_outl;
/*----------------------------- Setup -------------------------------------*/
/****************************************************************************
PARAMETERS:
funcs - New memory function pointers to make active
REMARKS:
This function is used to set the pointers to functions which access
memory space, allowing the user application to override these functions
and hook them out as necessary for their application.
****************************************************************************/
/* Install the caller's memory accessors.  funcs must be non-NULL with
 * every member populated — no NULL checks are performed here. */
void X86EMU_setupMemFuncs(
X86EMU_memFuncs *funcs)
{
sys_rdb = funcs->rdb;
sys_rdw = funcs->rdw;
sys_rdl = funcs->rdl;
sys_wrb = funcs->wrb;
sys_wrw = funcs->wrw;
sys_wrl = funcs->wrl;
}
/****************************************************************************
PARAMETERS:
funcs - New programmed I/O function pointers to make active
REMARKS:
This function is used to set the pointers to functions which access
I/O space, allowing the user application to override these functions
and hook them out as necessary for their application.
****************************************************************************/
/* Install the caller's programmed-I/O accessors.  funcs must be
 * non-NULL with every member populated — no NULL checks are performed
 * here. */
void X86EMU_setupPioFuncs(
X86EMU_pioFuncs *funcs)
{
sys_inb = funcs->inb;
sys_inw = funcs->inw;
sys_inl = funcs->inl;
sys_outb = funcs->outb;
sys_outw = funcs->outw;
sys_outl = funcs->outl;
}
/****************************************************************************
PARAMETERS:
funcs - New interrupt vector table to make active
REMARKS:
This function is used to set the pointers to functions which handle
interrupt processing in the emulator, allowing the user application to
hook interrupts as necessary for their application. Any interrupts that
are not hooked by the user application are reflected and handled internally
in the emulator via the interrupt vector table. This allows the application
to get control when the code being emulated executes specific software
interrupts.
****************************************************************************/
/* Install the application's interrupt handler table.  All 256 vectors
 * are (re)assigned in a single pass: entry i comes from funcs[] when a
 * table is supplied, NULL otherwise (a NULL entry means the emulator
 * reflects the interrupt through the emulated vector table itself). */
void X86EMU_setupIntrFuncs(
    X86EMU_intrFuncs funcs[])
{
    int vec;

    for (vec = 0; vec < 256; vec++)
        _X86EMU_intrTab[vec] = funcs ? funcs[vec] : NULL;
}
/****************************************************************************
PARAMETERS:
int - New software interrupt to prepare for
REMARKS:
This function is used to set up the emulator state to execute a software
interrupt. This can be used by the user application code to allow an
interrupt to be hooked, examined and then reflected back to the emulator
so that the code in the emulator will continue processing the software
interrupt as per normal. This essentially allows system code to actively
hook and handle certain software interrupts as necessary.
****************************************************************************/
/* Arrange the emulator state exactly as an x86 CPU does on INT num:
 * push FLAGS, clear IF and TF, push CS:IP, then load CS:IP from
 * real-mode interrupt vector 'num' (4 bytes per vector: offset at
 * num*4, segment at num*4+2).  Push order is part of the contract —
 * the emulated IRET will pop IP, CS, then FLAGS. */
void X86EMU_prepareForInt(
int num)
{
push_word((u16)M.x86.R_FLG);
CLEAR_FLAG(F_IF);
CLEAR_FLAG(F_TF);
push_word(M.x86.R_CS);
M.x86.R_CS = mem_access_word(num * 4 + 2);
push_word(M.x86.R_IP);
M.x86.R_IP = mem_access_word(num * 4);
/* NOTE(review): intr appears to latch a pending-interrupt request;
 * cleared here so emulation resumes at the handler — confirm against
 * the x86emu core's intr handling. */
M.x86.intr = 0;
}