535.43.02

Andy Ritger
2023-05-30 10:11:36 -07:00
parent 6dd092ddb7
commit eb5c7665a1
1403 changed files with 295367 additions and 86235 deletions

View File

@@ -79,17 +79,17 @@ typedef struct
NvU32 index;
} LOCK_TRACE_INFO;
#define INSERT_LOCK_TRACE(plti, ra, t, d16, d32, ti, irql, pr, ts) \
{ \
(plti)->entries[(plti)->index].callerRA = (NvUPtr)ra; \
(plti)->entries[(plti)->index].type = t; \
(plti)->entries[(plti)->index].data16.value = d16; \
(plti)->entries[(plti)->index].data32.value = d32; \
(plti)->entries[(plti)->index].threadId = ti; \
(plti)->entries[(plti)->index].timestamp = ts; \
(plti)->entries[(plti)->index].bHighIrql = irql; \
(plti)->entries[(plti)->index].priority = pr; \
(plti)->index = ((plti)->index + 1) % MAX_TRACE_LOCK_CALLS; \
#define INSERT_LOCK_TRACE(plti, ra, t, d16, d32, ti, irql, pr, ts) \
{ \
(plti)->entries[(plti)->index].callerRA = (NvUPtr)ra; \
(plti)->entries[(plti)->index].type = t; \
(plti)->entries[(plti)->index].data16.value = d16; \
(plti)->entries[(plti)->index].data32.value = d32; \
(plti)->entries[(plti)->index].threadId = ti; \
(plti)->entries[(plti)->index].timestamp = ts; \
(plti)->entries[(plti)->index].bHighIrql = irql; \
(plti)->entries[(plti)->index].priority = pr; \
(plti)->index = ((plti)->index + 1) % MAX_TRACE_LOCK_CALLS; \
}
//
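A hedged usage sketch of the trace macro above (every name below is illustrative, not from this diff): each invocation fills one entry and advances the circular index modulo MAX_TRACE_LOCK_CALLS, so the newest MAX_TRACE_LOCK_CALLS events are always retained.

    // Record one lock event (argument names are hypothetical):
    LOCK_TRACE_INFO *plti = &traceInfo;
    INSERT_LOCK_TRACE(plti,
                      callerRA,     // ra: caller return address
                      traceType,    // t: event type code
                      data16,       // d16: event-specific payload
                      data32,       // d32: event-specific payload
                      threadId,     // ti: acting thread
                      bHighIrql,    // irql: raised-IRQL flag
                      priority,     // pr: lock priority
                      timestamp);   // ts: capture time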

View File

@@ -81,11 +81,7 @@ typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; // FW declare thread state
#define NV_ROUNDUP(a,b) ((NV_CEIL(a,b))*(b))
#define NV_ROUND_TO_QUANTA(a, quanta) (((quanta) == 0) ? (a): ((((a) + ((quanta) >> 1)) / (quanta)) * (quanta)))
#define NV_FLOOR_TO_QUANTA(a, quanta) (((a) / (quanta)) * (quanta))
#ifndef NV_SIZEOF32
#define NV_SIZEOF32(x) (sizeof(x))
#endif
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
#define NV_ARRAY_ELEMENTS32(x) ((NV_SIZEOF32(x)/NV_SIZEOF32((x)[0])))
#define NV_BYTESWAP16(a) ((((a) & 0xff00)>>8) | \
(((a) & 0x00ff)<<8))
#define NV_BYTESWAP32(a) ((((a) & 0xff000000)>>24) | \
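Worked arithmetic for the quanta helpers above (illustrative values; NV_CEIL is assumed to be the usual (a + b - 1) / b round-up macro):

    // NV_ROUND_TO_QUANTA(100, 64) -> ((100 + 32) / 64) * 64 = 128  (round to nearest)
    // NV_FLOOR_TO_QUANTA(100, 64) -> (100 / 64) * 64        = 64   (round down)
    // NV_ROUNDUP(100, 64)         -> NV_CEIL(100, 64) * 64  = 128  (round up)
    // NV_ROUND_TO_QUANTA(a, 0)    -> (a)                           (zero quanta passes through)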

View File

@@ -118,12 +118,12 @@ void osFlushLog(void);
#if !NVCPU_IS_RISCV64
#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \
do \
{ \
NV_PRINTF(LEVEL_ERROR, "bp @ " NV_FILE_FMT ":%d\n", NV_FILE, __LINE__);\
osFlushLog(); \
DBG_ROUTINE(); \
#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \
do \
{ \
NV_PRINTF(LEVEL_ERROR, "bp @ " NV_FILE_FMT ":%d\n", NV_FILE, __LINE__); \
osFlushLog(); \
DBG_ROUTINE(); \
} while (0)
#else // !NVCPU_IS_RISCV64
@@ -247,48 +247,48 @@ void nvDbgDumpBufferBytes(void *pBuffer, NvU32 length);
// and return NOT_FULL_POWER. See bug 1679965.
//
//
#define API_GPU_FULL_POWER_SANITY_CHECK(pGpu, bGpuAccess, bAllowWithoutSysmemAccess) \
if ((!gpuIsGpuFullPower(pGpu)) && \
(!(pGpu)->getProperty((pGpu), \
PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \
{ \
DBG_BREAKPOINT(); \
if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
} \
else if (gpuIsSurpriseRemovalSupported(pGpu) && \
(pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
} \
} \
if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
#define API_GPU_FULL_POWER_SANITY_CHECK(pGpu, bGpuAccess, bAllowWithoutSysmemAccess) \
if ((!gpuIsGpuFullPower(pGpu)) && \
(!(pGpu)->getProperty((pGpu), \
PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \
{ \
DBG_BREAKPOINT(); \
if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
} \
else if (gpuIsSurpriseRemovalSupported(pGpu) && \
(pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
} \
} \
if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
}
#define API_GPU_FULL_POWER_SANITY_CHECK_OR_GOTO(pGpu, bGpuAccess, bAllowWithoutSysmemAccess, status, tag) \
if ((!gpuIsGpuFullPower(pGpu)) && \
(!(pGpu)->getProperty((pGpu), \
PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \
{ \
DBG_BREAKPOINT(); \
if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \
{ \
status = NV_ERR_GPU_NOT_FULL_POWER; \
goto tag; \
} \
else if (gpuIsSurpriseRemovalSupported(pGpu) && \
(pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \
{ \
status = NV_ERR_GPU_NOT_FULL_POWER; \
goto tag; \
} \
} \
if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
if ((!gpuIsGpuFullPower(pGpu)) && \
(!(pGpu)->getProperty((pGpu), \
PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \
{ \
DBG_BREAKPOINT(); \
if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \
{ \
status = NV_ERR_GPU_NOT_FULL_POWER; \
goto tag; \
} \
else if (gpuIsSurpriseRemovalSupported(pGpu) && \
(pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \
{ \
status = NV_ERR_GPU_NOT_FULL_POWER; \
goto tag; \
} \
} \
if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \
{ \
return NV_ERR_GPU_NOT_FULL_POWER; \
}
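A minimal usage sketch of the _OR_GOTO variant (function body and label name are hypothetical): failures are routed to a cleanup label instead of returning directly, which matters when locks or references must be released.

    static NV_STATUS hypotheticalGpuControl(OBJGPU *pGpu)
    {
        NV_STATUS status = NV_OK;

        // Requires GPU register access (bGpuAccess = NV_TRUE) and sysmem access
        // (bAllowWithoutSysmemAccess = NV_FALSE); on failure jumps to 'done'
        // with status set to NV_ERR_GPU_NOT_FULL_POWER.
        API_GPU_FULL_POWER_SANITY_CHECK_OR_GOTO(pGpu, NV_TRUE, NV_FALSE, status, done);

        // ... work that touches GPU registers ...

    done:
        return status;
    }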

View File

@@ -149,7 +149,7 @@ typedef struct THREAD_STATE_DB
//
// The normal power transition requirement for Windows is 4 seconds.
// Use longer time to let OS fire timeout and ask recovery.
// Use longer time to let OS fire timeout and ask recovery.
//
#define TIMEOUT_WDDM_POWER_TRANSITION_INTERVAL_MS 9800
@@ -207,7 +207,7 @@ NV_STATUS threadStateInitTimeout(OBJGPU *pGpu, NvU32 timeoutUs, NvU32 flags);
NV_STATUS threadStateCheckTimeout(OBJGPU *pGpu, NvU64 *pElapsedTimeUs);
NV_STATUS threadStateResetTimeout(OBJGPU *pGpu);
void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum);
void threadStateYieldCpuIfNecessary(OBJGPU *pGpu);
void threadStateYieldCpuIfNecessary(OBJGPU *pGpu, NvBool bQuiet);
void threadStateSetTimeoutOverride(THREAD_STATE_NODE *, NvU64);
NV_STATUS threadStateEnqueueCallbackOnFree(THREAD_STATE_NODE *pThreadNode,

View File

@@ -39,7 +39,7 @@
* @return NV_OK if the conversion is successful.
*/
static NV_INLINE
NV_STATUS ceIndexFromType(OBJGPU *pGpu, NvHandle hClient, RM_ENGINE_TYPE rmEngineType, NvU32 *ceIdx)
NV_STATUS ceIndexFromType(OBJGPU *pGpu, Device *pDevice, RM_ENGINE_TYPE rmEngineType, NvU32 *ceIdx)
{
NV_STATUS status = NV_OK;
RM_ENGINE_TYPE localRmEngType = rmEngineType;
@@ -56,7 +56,7 @@ NV_STATUS ceIndexFromType(OBJGPU *pGpu, NvHandle hClient, RM_ENGINE_TYPE rmEngin
KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
MIG_INSTANCE_REF ref;
status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref);
status = kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager, pDevice, &ref);
if (status != NV_OK)
return status;
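An illustrative call under the new Device-based signature (the engine type constant and surrounding variables are assumptions, not part of this diff):

    NvU32 ceIdx;
    NV_STATUS status = ceIndexFromType(pGpu, pDevice, RM_ENGINE_TYPE_COPY0, &ceIdx);
    if (status != NV_OK)
        return status; // e.g. the MIG instance lookup failed for this device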

View File

@@ -0,0 +1,224 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef CCSL_H
#define CCSL_H
#include "nvstatus.h"
#include "nvmisc.h"
#include "kernel/gpu/conf_compute/conf_compute.h"
typedef struct ccslContext_t *pCcslContext;
/*
* Initializes a context by providing client and channel information.
*
* ccslContext [in / out]
* hClient [in]
* hChannel [in]
*/
NV_STATUS
ccslContextInitViaChannel
(
pCcslContext *ppCtx,
NvHandle hClient,
NvHandle hChannel
);
/*
* Initializes a context by providing key ID information.
*
* ConfidentialCompute [in]
* ccslContext [in / out]
* globalKeyId [in]
*/
NV_STATUS
ccslContextInitViaKeyId
(
ConfidentialCompute *pConfCompute,
pCcslContext *ppCtx,
NvU32 globalKeyId
);
/*
* Clears the context and erases sensitive material such as keys.
*
* ccslContext [in / out]
*/
void
ccslContextClear
(
pCcslContext ctx
);
/* To be called before the library client triggers a Device-side encryption.
 * Attempts to increment the library's Device-side message counter and returns an error if it would overflow.
 *
 * ccslContext [in]
 * decryptIv [in]
 *
 * Returns NV_ERR_INSUFFICIENT_RESOURCES if the next Device-side encryption would overflow the message counter.
 * Returns NV_OK otherwise.
*/
NV_STATUS
ccslLogDeviceEncryption
(
pCcslContext ctx,
NvU8 *decryptIv
);
/* Requests the next IV to be used in encryption. Storing it explicitly enables the caller
 * to perform encryption out of order using ccslEncryptWithIv.
*
* ccslContext [in / out]
* encryptIv [out]
*
* Returns NV_ERR_INSUFFICIENT_RESOURCES if the next encryption will overflow.
* Returns NV_OK otherwise.
*/
NV_STATUS
ccslAcquireEncryptionIv
(
pCcslContext ctx,
NvU8 *encryptIv
);
/* Rotate the IV for the given direction.
*
* ccslContext [in / out]
* direction [in]
*/
NV_STATUS
ccslRotateIv
(
pCcslContext ctx,
NvU8 direction
);
/*
* Encrypts and signs data using the provided IV.
*
* ccslContext [in]
* bufferSize [in] - Size of buffer to be encrypted in units of bytes.
* inputBuffer [in] - Address of plaintext input buffer. For performance it should be 16-byte aligned.
* encryptIv [in/out] - IV to use for encryption. The IV is "dirtied" by this operation.
* outputBuffer [in/out] - Address of ciphertext output buffer.
* authTagBuffer [in/out] - Address of authentication tag. In APM it is 32 bytes. In HCC it is 16 bytes.
*
* Returns NV_OK.
*/
NV_STATUS
ccslEncryptWithIv
(
pCcslContext ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *encryptIv,
NvU8 *outputBuffer,
NvU8 *authTagBuffer
);
/*
* If the message counter will not overflow, encrypts and signs the data.
*
* ccslContext [in]
* bufferSize [in] - Size of buffer to be encrypted in units of bytes.
* inputBuffer [in] - Address of plaintext input buffer. For performance it should be 16-byte aligned.
* outputBuffer [in/out] - Address of ciphertext output buffer.
* authTagBuffer [in/out] - Address of authentication tag. In APM it is 32 bytes. In HCC it is 16 bytes.
*
* Returns NV_ERR_INSUFFICIENT_RESOURCES if message counter will overflow.
* Returns NV_OK otherwise.
*/
NV_STATUS
ccslEncrypt
(
pCcslContext ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 *authTagBuffer
);
/*
* First verifies the authentication tag. If verification passes, the data is decrypted.
*
* ccslContext [in]
* bufferSize [in] - Size of buffer to be decrypted in units of bytes.
* inputBuffer [in] - Address of ciphertext input buffer. For performance it should be 16-byte aligned.
* decryptIv [in] - IV to use for decryption.
* outputBuffer [in/out] - Address of plaintext output buffer.
* authTagBuffer [in] - Address of authentication tag. In APM it is 32 bytes. In HCC it is 16 bytes.
*
* Returns NV_ERR_INVALID_DATA if verification of the authentication tag fails.
* Returns NV_OK otherwise.
*/
NV_STATUS
ccslDecrypt
(
pCcslContext ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU8 *outputBuffer,
NvU8 const *authTagBuffer
);
/*
* Sign the plaintext message.
*
* ccslContext [in]
* bufferSize [in] - Size of buffer to be signed in units of bytes.
* inputBuffer [in] - Address of input buffer. For performance it should be 16-byte aligned.
* authTagBuffer [in/out] - Address of authentication tag. In HCC it is 32 bytes.
*
* Returns NV_OK.
*/
NV_STATUS
ccslSign
(
pCcslContext ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagBuffer
);
#define CCSL_DIR_HOST_TO_DEVICE 0
#define CCSL_DIR_DEVICE_TO_HOST 1
/*
* Returns the number of messages that can be encrypted by the CPU (CCSL_DIR_HOST_TO_DEVICE)
* or by the GPU (CCSL_DIR_DEVICE_TO_HOST) before the message counter overflows.
*
* ccslContext [in]
* direction [in] - Either CCSL_DIR_HOST_TO_DEVICE or CCSL_DIR_DEVICE_TO_HOST.
* messageNum [out] - Number of messages that can be encrypted before overflow.
*/
NV_STATUS
ccslQueryMessagePool
(
pCcslContext ctx,
NvU8 direction,
NvU64 *messageNum
);
#endif // CCSL_H
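A minimal end-to-end sketch of the CPU-side encrypt path using only the functions declared above (handles and buffer sizes are illustrative; the 32-byte tag buffer covers the larger APM case):

    pCcslContext ctx = NULL;
    NvU8 input[256], output[256];
    NvU8 authTag[32];

    NV_STATUS status = ccslContextInitViaChannel(&ctx, hClient, hChannel);
    if (status != NV_OK)
        return status;

    // Fails with NV_ERR_INSUFFICIENT_RESOURCES rather than letting the
    // message counter overflow.
    status = ccslEncrypt(ctx, sizeof(input), input, output, authTag);

    ccslContextClear(ctx); // erases key material
    return status;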

View File

@@ -0,0 +1,3 @@
#include "g_conf_compute_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_conf_compute_api_nvoc.h"

View File

@@ -0,0 +1,96 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef CONF_COMPUTE_KEYSTORE_H
#define CONF_COMPUTE_KEYSTORE_H
#include "nvtypes.h"
#include "nvstatus.h"
#include "cc_drv.h"
#include "kernel/gpu/fifo/kernel_channel.h"
// Named values for ccKeyStoreDeposit()'s slotNumber parameter.
#define KEYSTORE_SLOT_GSP 0
#define KEYSTORE_SLOT_SEC2 1
// LCE index can range from 0 to 7 inclusive.
#define KEYSTORE_SLOT_LCE_UMD(index) (2 + (index) * 3)
#define KEYSTORE_SLOT_LCE_KMD(index) (3 + (index) * 3)
#define KEYSTORE_SLOT_LCE_MGPU(index) (4 + (index) * 3)
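Worked slot numbers implied by the macros above: each LCE consumes three consecutive slots after GSP (0) and SEC2 (1).

    // KEYSTORE_SLOT_LCE_UMD(0)  -> 2    KEYSTORE_SLOT_LCE_UMD(1)  -> 5
    // KEYSTORE_SLOT_LCE_KMD(0)  -> 3    KEYSTORE_SLOT_LCE_KMD(1)  -> 6
    // KEYSTORE_SLOT_LCE_MGPU(0) -> 4    KEYSTORE_SLOT_LCE_MGPU(1) -> 7
    // ...
    // KEYSTORE_SLOT_LCE_MGPU(7) -> 25   (highest slot for the 0..7 LCE range)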
/*!
* @brief Initialize the keystore.
*
* @param[in] pGpu Pointer to GPU object.
*
* @return NV_ERR_INVALID_STATE if system is not in Confidential Compute mode.
*/
NV_STATUS
ccKeyStoreInit (OBJGPU *pGpu);
/*!
* @brief Deposits a KMB into a keystore slot.
*
* @param[in] slotNumber Slot number into which the KMB will be deposited.
* @param[in] keyMaterialBundle Pair of key / IV / IV mask tuples. IV will be set to 0 by function.
*
* @return NV_ERR_INVALID_INDEX if slotNumber is illegal, NV_OK otherwise.
*/
NV_STATUS
ccKeyStoreDeposit
(
NvU32 slotNumber,
CC_KMB keyMaterialBundle
);
/*!
 * @brief Retrieves a KMB based on the channel. The keystore uses channel
 *        information such as engine type and the associated privilege mode
 *        to determine which KMB to fetch.
 *        The IV's channel counter for the given direction is pre-incremented.
 *
 * @param[in]  pKernelChannel
 * @param[in]  rotateOperation   Either:
 *                               ROTATE_IV_ENCRYPT
 *                               ROTATE_IV_DECRYPT
 *                               ROTATE_IV_ALL_VALID
 * @param[out] keyMaterialBundle The KMB for the given channel.
 *
 * @return NV_ERR_INSUFFICIENT_RESOURCES if channel counter overflow would occur.
 *         NV_ERR_GENERIC for other errors.
 *         NV_OK otherwise.
 */
NV_STATUS
ccKeyStoreRetrieveViaChannel
(
KernelChannel *pKernelChannel,
ROTATE_IV_TYPE rotateOperation,
PCC_KMB keyMaterialBundle
);
/*!
* @brief Clears the contents of the keystore.
*/
void
ccKeyStoreClear (void);
#endif // CONF_COMPUTE_KEYSTORE_H
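A retrieval sketch (hypothetical caller; PCC_KMB is assumed to be a pointer to CC_KMB):

    CC_KMB kmb;
    NV_STATUS status = ccKeyStoreRetrieveViaChannel(pKernelChannel,
                                                    ROTATE_IV_ENCRYPT, &kmb);
    if (status == NV_ERR_INSUFFICIENT_RESOURCES)
    {
        // The channel counter would overflow; the KMB cannot be used as-is.
    }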

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-202 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -58,4 +58,9 @@
*/
#define FLCN_CTX_ENG_BUFFER_SIZE_HW 4096
/*!
* Number of register reads needed for reset signal propagation
*/
#define FLCN_RESET_PROPAGATION_DELAY_COUNT 10
#endif // FALCON_COMMON_H

View File

@@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef GPU_CHILD_CLASS_DEFS_H
#define GPU_CHILD_CLASS_DEFS_H
/*!
* @file
* @details Provides the class definitions for every GPU child class without the
* need to include every individual header.
*/
#include "core/prelude.h"
#define GPU_CHILD_CLASS_DEFS_GPU_CHILD(className, accessorName, numInstances, bConstructEarly, gpuField) \
extern const struct NVOC_CLASS_DEF NV_CONCATENATE(__nvoc_class_def_, className);
#define GPU_CHILD \
GPU_CHILD_CLASS_DEFS_GPU_CHILD
#include "gpu/gpu_child_list.h"
// Sub-classes of GPU children
// Pmu sub-classes
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Pmu10;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Pmu20;
#endif // GPU_CHILD_CLASS_DEFS_H

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -105,12 +105,12 @@
#if GPU_CHILD_MODULE(KERNEL_DISPLAY)
GPU_CHILD_SINGLE_INST( KernelDisplay, GPU_GET_KERNEL_DISPLAY, 1, NV_FALSE, pKernelDisplay )
#endif
#if GPU_CHILD_MODULE(DISP)
GPU_CHILD_SINGLE_INST( OBJDISP, GPU_GET_DISP, 1, NV_FALSE, pDisp )
#endif
#if GPU_CHILD_MODULE(TMR)
GPU_CHILD_SINGLE_INST( OBJTMR, GPU_GET_TIMER, 1, NV_TRUE, pTmr )
#endif
#if GPU_CHILD_MODULE(DISP)
GPU_CHILD_SINGLE_INST( OBJDISP, GPU_GET_DISP, 1, NV_FALSE, pDisp )
#endif
#if GPU_CHILD_MODULE(BUS)
GPU_CHILD_SINGLE_INST( OBJBUS, GPU_GET_BUS, 1, NV_FALSE, pBus )
#endif
@@ -297,8 +297,11 @@
#if GPU_CHILD_MODULE(OFA)
GPU_CHILD_SINGLE_INST( OBJOFA, GPU_GET_OFA, 1, NV_FALSE, pOfa )
#endif
#if RMCFG_MODULE_CONF_COMPUTE && GPU_CHILD_MODULE(CONF_COMPUTE)
GPU_CHILD_SINGLE_INST( ConfidentialCompute, GPU_GET_CONF_COMPUTE, 1, NV_TRUE, pConfCompute )
#endif
#if RMCFG_MODULE_KERNEL_CCU && GPU_CHILD_MODULE(KERNEL_CCU)
GPU_CHILD_SINGLE_INST( KernelCcu, GPU_GET_KERNEL_CCU, 1, NV_FALSE, pKernelCcu )
GPU_CHILD_SINGLE_INST( KernelCcu, GPU_GET_KERNEL_CCU, 1, NV_FALSE, pKernelCcu )
#endif
// Undefine the entry macros to simplify call sites

View File

@@ -99,6 +99,7 @@ typedef enum
#define RM_ENGINE_TYPE_NVENC_SIZE 3
#define RM_ENGINE_TYPE_NVJPEG_SIZE 8
#define RM_ENGINE_TYPE_NVDEC_SIZE 8
#define RM_ENGINE_TYPE_OFA_SIZE 1
#define RM_ENGINE_TYPE_GR_SIZE 8
// Indexed engines
@@ -118,6 +119,15 @@ typedef enum
#define RM_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= RM_ENGINE_TYPE_NVJPEG0) && ((i) < RM_ENGINE_TYPE_NVJPEG(RM_ENGINE_TYPE_NVJPEG_SIZE)))
#define RM_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - RM_ENGINE_TYPE_NVJPEG0)
#define RM_ENGINE_TYPE_OFA(i) (RM_ENGINE_TYPE_OFA+(i))
#define RM_ENGINE_TYPE_IS_OFA(i) (((i) >= RM_ENGINE_TYPE_OFA) && ((i) < RM_ENGINE_TYPE_OFA(RM_ENGINE_TYPE_OFA_SIZE)))
#define RM_ENGINE_TYPE_OFA_IDX(i) ((i) - RM_ENGINE_TYPE_OFA)
#define RM_ENGINE_TYPE_IS_VIDEO(i) (RM_ENGINE_TYPE_IS_NVENC(i) | \
RM_ENGINE_TYPE_IS_NVDEC(i) | \
RM_ENGINE_TYPE_IS_NVJPEG(i) | \
RM_ENGINE_TYPE_IS_OFA(i))
#define RM_ENGINE_TYPE_GR(i) (RM_ENGINE_TYPE_GR0 + (i))
#define RM_ENGINE_TYPE_IS_GR(i) (((i) >= RM_ENGINE_TYPE_GR0) && ((i) < RM_ENGINE_TYPE_GR(RM_ENGINE_TYPE_GR_SIZE)))
#define RM_ENGINE_TYPE_GR_IDX(i) ((i) - RM_ENGINE_TYPE_GR0)
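Worked index math from the defines above (the enum base values are not shown in this hunk, so treat the expansions as illustrative):

    // RM_ENGINE_TYPE_OFA(0)      -> RM_ENGINE_TYPE_OFA + 0, the only OFA engine (size 1)
    // RM_ENGINE_TYPE_OFA_IDX(x)  -> 0 for that single engine
    // RM_ENGINE_TYPE_IS_VIDEO(x) -> nonzero when x is any NVENC, NVDEC, NVJPEG,
    //                               or OFA engine (a bitwise | of the four predicates)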

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -33,28 +33,31 @@
#define GPU_FABRIC_PROBE_DEFAULT_PROBE_SLOWDOWN_THRESHOLD 10
typedef struct GPU_FABRIC_PROBE_INFO GPU_FABRIC_PROBE_INFO;
typedef struct GPU_FABRIC_PROBE_INFO_KERNEL GPU_FABRIC_PROBE_INFO_KERNEL;
NV_STATUS gpuFabricProbeStart(OBJGPU *pGpu,
GPU_FABRIC_PROBE_INFO **ppGpuFabricProbeInfo);
void gpuFabricProbeStop(GPU_FABRIC_PROBE_INFO *pGpuFabricProbeInfo);
GPU_FABRIC_PROBE_INFO_KERNEL **ppGpuFabricProbeInfoKernel);
void gpuFabricProbeStop(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
void gpuFabricProbeSuspend(GPU_FABRIC_PROBE_INFO *pGpuFabricProbeInfo);
NV_STATUS gpuFabricProbeResume(GPU_FABRIC_PROBE_INFO *pGpuFabricProbeInfo);
void gpuFabricProbeSuspend(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
NV_STATUS gpuFabricProbeResume(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
NV_STATUS gpuFabricProbeGetGpuFabricHandle(GPU_FABRIC_PROBE_INFO *pInfo, NvU64 *pHandle);
NV_STATUS gpuFabricProbeGetGfId(GPU_FABRIC_PROBE_INFO *pInfo, NvU32 *pGfId);
NV_STATUS gpuFabricProbeGetfmCaps(GPU_FABRIC_PROBE_INFO *pInfo, NvU64 *pFmCaps);
NV_STATUS gpuFabricProbeGetClusterUuid(GPU_FABRIC_PROBE_INFO *pInfo, NvUuid *pClusterUuid);
NV_STATUS gpuFabricProbeGetFabricPartitionId(GPU_FABRIC_PROBE_INFO *pInfo, NvU16 *pFabricPartitionId);
NV_STATUS gpuFabricProbeGetGpaAddress(GPU_FABRIC_PROBE_INFO *pInfo, NvU64 *pGpaAddress);
NV_STATUS gpuFabricProbeGetGpaAddressRange(GPU_FABRIC_PROBE_INFO *pInfo, NvU64 *pGpaAddressRange);
NV_STATUS gpuFabricProbeGetFlaAddress(GPU_FABRIC_PROBE_INFO *pInfo, NvU64 *pFlaAddress);
NV_STATUS gpuFabricProbeGetFlaAddressRange(GPU_FABRIC_PROBE_INFO *pInfo, NvU64 *pFlaAddressRange);
NV_STATUS gpuFabricProbeGetNumProbeReqs(GPU_FABRIC_PROBE_INFO *pInfo, NvU64 *numProbes);
NV_STATUS gpuFabricProbeGetGpuFabricHandle(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pHandle);
NV_STATUS gpuFabricProbeGetGfId(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU32 *pGfId);
NV_STATUS gpuFabricProbeGetfmCaps(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pFmCaps);
NV_STATUS gpuFabricProbeGetClusterUuid(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvUuid *pClusterUuid);
NV_STATUS gpuFabricProbeGetFabricPartitionId(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU16 *pFabricPartitionId);
NV_STATUS gpuFabricProbeGetGpaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pGpaAddress);
NV_STATUS gpuFabricProbeGetGpaAddressRange(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pGpaAddressRange);
NV_STATUS gpuFabricProbeGetFlaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pFlaAddress);
NV_STATUS gpuFabricProbeGetFlaAddressRange(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pFlaAddressRange);
NV_STATUS gpuFabricProbeGetNumProbeReqs(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *numProbes);
NvBool gpuFabricProbeIsReceived(GPU_FABRIC_PROBE_INFO *pGpuFabricProbeInfo);
NvBool gpuFabricProbeIsSuccess(GPU_FABRIC_PROBE_INFO *pGpuFabricProbeInfo);
NV_STATUS gpuFabricProbeGetFmStatus(GPU_FABRIC_PROBE_INFO *pGpuFabricProbeInfo);
NvBool gpuFabricProbeIsReceived(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
NvBool gpuFabricProbeIsSuccess(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
NV_STATUS gpuFabricProbeGetFmStatus(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
NvBool gpuFabricProbeIsSupported(OBJGPU *pGpu);
NV_STATUS gpuFabricProbeSetBwMode(NvU8 mode);
NV_STATUS gpuFabricProbeGetlinkMaskToBeReduced(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel,
NvU32 *linkMaskToBeReduced);
#endif // GPU_FABRIC_PROBE_H

View File

@@ -139,6 +139,10 @@ static NV_INLINE NvU32 timeoutApplyScale(TIMEOUT_DATA *pTD, NvU32 timeout)
#define gpuScaleTimeout(g,a) timeoutApplyScale(&(g)->timeoutData, a)
#define gpuTimeoutCondWait(g,a,b,t) timeoutCondWait(&(g)->timeoutData, t, a, b, __LINE__)
#define GPU_ENG_RESET_TIMEOUT_VALUE(g, t) (t)
//
// On the SCSIM simulation platform, both the CPU and the GPU are simulated, and
// register writes/reads themselves take more time. This helper macro accounts for
// that with an increased timeout value.
//
#define GPU_ENG_RESET_TIMEOUT_VALUE(g, t) ((gpuIsSelfHosted(g) && IS_SIMULATION(g)) ? 1000 : (t))
#endif // _GPU_TIMEOUT_H_

View File

@@ -34,7 +34,7 @@
*
* Not all buffer types are supported on every GPU.
*/
#define GR_GLOBALCTX_BUFFER_DEF(x) \
#define GR_GLOBALCTX_BUFFER_DEF(x) \
NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_BUNDLE_CB, 0x00000000) \
NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_PAGEPOOL, 0x00000001) \
NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB, 0x00000002) \
@@ -50,7 +50,7 @@ NV_ENUM_DEF(GR_GLOBALCTX_BUFFER, GR_GLOBALCTX_BUFFER_DEF)
#define GR_GLOBALCTX_BUFFER_COUNT NV_ENUM_SIZE(GR_GLOBALCTX_BUFFER)
#define GR_CTX_BUFFER_DEF(x) \
#define GR_CTX_BUFFER_DEF(x) \
NV_ENUM_ENTRY(x, GR_CTX_BUFFER_MAIN, 0x00000000) \
NV_ENUM_ENTRY(x, GR_CTX_BUFFER_ZCULL, 0x00000001) \
NV_ENUM_ENTRY(x, GR_CTX_BUFFER_PM, 0x00000002) \

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,8 +25,55 @@
#define GSP_FW_HEAP_H
// Static defines for the GSP FW WPR Heap
#define GSP_FW_HEAP_SIZE_LIBOS2 (64u << 20)
#define GSP_FW_HEAP_SIZE_LIBOS3_BAREMETAL (84u << 20)
#define GSP_FW_HEAP_SIZE_LIBOS3_VGPU (549u << 20)
#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20)
//
// Calibrated by observing RM init heap usage - the amount of GSP-RM heap memory
// used during GSP-RM boot and Kernel RM initialization, up to and including the
// first client subdevice allocation.
//
#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+
//
// Calibrated by observing RM init heap usage on GPUs with various FB sizes.
// This seems to fit the data fairly well, but is likely inaccurate (differences
// in heap usage are more likely to correlate with GPU architecture than FB size).
// TODO: this requires more detailed profiling and tuning.
//
#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
//
// This number is calibrated by profiling the WPR heap usage of a single
// client channel allocation. In practice, channel allocations on newer
// architectures are around 44KB-46KB (depending on internal memory
// tracking overhead configured in GSP-RM).
//
// Note: Turing supports 4096 channels vs 2048 on Ampere+, but the heap is
// sized to support only 2048 channels - otherwise the Turing heap balloons
// to 176MB+ on already small-FB parts.
//
// Note: The 2048 channel limit is per-runlist. GSP-RM currently uses only
// a single ChID space across all engines, because per-runlist channel RAM is
// disabled by default in most configurations. If/once per-runlist ChID spaces
// are used on GSP-RM, this will likely no longer be sufficient/relevant as
// the memory that could be needed for client channel allocations will far
// exceed the amount we can carve out for GSP-RM's heap (max ChID will no longer
// be the limiter).
//
#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
#define GSP_FW_HEAP_SIZE_VGPU_DEFAULT (549 << 20) // Default for all VGPU configs
// Min/max bounds for heap size override by regkey
#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB (256u)
#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u)
#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB (276u)
#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_VGPU_MIN_MB (549u)
#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_VGPU_MAX_MB (1024u)
#endif // GSP_FW_HEAP_H
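The defines above suggest a sizing formula of roughly LibOS size + RM init base + per-GB scaling + client-channel carve-out, clamped to the regkey bounds. A hedged sketch of that arithmetic (the real computation lives in GSP boot code that is not part of this diff; the function name is hypothetical):

    static NvU64 hypotheticalGspHeapSizeLibos3Baremetal(NvU64 fbSizeGb)
    {
        NvU64 size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3           // LibOS 3 overhead
                   + GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100       // RM init (Hopper+)
                   + GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fbSizeGb
                   + GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE;       // 2048 channels * 48KB

        // Clamp to the regkey override bounds for bare-metal LibOS 3.
        NvU64 minSize = (NvU64)GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB << 20;
        NvU64 maxSize = (NvU64)GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB << 20;
        return NV_MIN(NV_MAX(size, minSize), maxSize);
    }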

View File

@@ -53,6 +53,12 @@ typedef struct
MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
GSP_SR_INIT_ARGUMENTS srInitArguments;
NvU32 gpuInstance;
struct
{
NvU64 pa;
NvU64 size;
} profilerArgs;
} GSP_ARGUMENTS_CACHED;
#endif // GSP_INIT_ARGS_H

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -75,7 +75,7 @@ typedef struct GspStaticConfigInfo_t
GspSMInfo SM_info;
NvBool poisonFuseEnabled;
NvU64 fb_length;
NvU32 fbio_mask;
NvU32 fb_bus_width;
@@ -100,6 +100,7 @@ typedef struct GspStaticConfigInfo_t
NvBool bIsMobile;
NvBool bIsGc6Rtd3Allowed;
NvBool bIsGcOffRtd3Allowed;
NvBool bIsGcoffLegacyAllowed;
NvU64 bar1PdeBase;
NvU64 bar2PdeBase;
@@ -127,7 +128,10 @@ typedef struct GspStaticConfigInfo_t
// Subdevice handle for internal RMAPI control.
NvHandle hInternalSubdevice;
NvBool bSelfHostedMode;
NvBool bAtsSupported;
NvBool bIsGpuUefi;
} GspStaticConfigInfo;
// Pushed from CPU-RM to GSP-RM
@@ -147,6 +151,7 @@ typedef struct GspSystemInfo
NvU64 clPdbProperties;
NvU32 Chipset;
NvBool bGpuBehindBridge;
NvBool bMnocAvailable;
NvBool bUpstreamL0sUnsupported;
NvBool bUpstreamL1Unsupported;
NvBool bUpstreamL1PorSupported;
@@ -155,6 +160,9 @@ typedef struct GspSystemInfo
BUSINFO FHBBusInfo;
BUSINFO chipsetIDInfo;
ACPI_METHOD_DATA acpiMethodData;
NvU32 hypervisorType;
NvBool bIsPassthru;
NvU64 sysTimerOffsetNs;
} GspSystemInfo;

View File

@@ -65,6 +65,7 @@ typedef struct _message_queue_info
msgqHandle hQueue; // Do not allow requests when hQueue is null.
NvU32 txSeqNum; // Next sequence number for tx.
NvU32 rxSeqNum; // Next sequence number for rx.
NvU32 txBufferFull;
NvU32 queueIdx; // QueueIndex used to identify which task the message is supposed to be sent to.
} MESSAGE_QUEUE_INFO;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -61,6 +61,7 @@
#define MC_ENGINE_IDX_CE7 22
#define MC_ENGINE_IDX_CE8 23
#define MC_ENGINE_IDX_CE9 24
#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9
#define MC_ENGINE_IDX_VIC 35
#define MC_ENGINE_IDX_ISOHUB 36
#define MC_ENGINE_IDX_VGPU 37
@@ -136,7 +137,8 @@
#define MC_ENGINE_IDX_DISP_GSP 163
#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
#define MC_ENGINE_IDX_MAX 166 // This must be kept as the max bit if
#define MC_ENGINE_IDX_PXUC 166
#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if
// we need to add more engines
#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF
@@ -159,6 +161,9 @@
// Index ESCHED reference
#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x))
#define MC_ENGINE_IDX_IS_CE(x) \
((MC_ENGINE_IDX_CE(0) <= (x)) && ((x) <= MC_ENGINE_IDX_CE_MAX))
MAKE_BITVECTOR(MC_ENGINE_BITVECTOR, MC_ENGINE_IDX_MAX);
typedef MC_ENGINE_BITVECTOR *PMC_ENGINE_BITVECTOR;
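Example values implied by the defines above:

    // MC_ENGINE_IDX_IS_CE(MC_ENGINE_IDX_CE9) is true (24 is the CE range's top),
    // MC_ENGINE_IDX_IS_CE(MC_ENGINE_IDX_VIC) is false (35 > MC_ENGINE_IDX_CE_MAX),
    // and the bitvector is now sized by the new MC_ENGINE_IDX_MAX of 167.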

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef INTR_COMMON_H
#define INTR_COMMON_H 1
/*! Common defines used by both Intr and OBJINTRABLE */
/*! Kinds of interrupts that a unit can have. */
typedef enum {
/*!
* Legacy concept of "stalling" interrupts.
*
* These may have a RETRIGGER mechanism.
*/
INTR_KIND_INTERRUPT,
/*! Notification "non-stalling" interrupts. */
INTR_KIND_NOTIFICATION,
INTR_KIND_COUNT
} INTR_KIND;
#endif /* ifndef INTR_COMMON_H */

View File

@@ -0,0 +1,92 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef KERN_GPU_POWER_H
#define KERN_GPU_POWER_H
#include "ctrl/ctrl2080/ctrl2080power.h" // NV2080_CTRL_GC6_FLAVOR_ID_MAX
#include "diagnostics/profiler.h"
typedef enum
{
GPU_GC6_STATE_POWERED_ON = 0 ,
GPU_GC6_STATE_EXITED = GPU_GC6_STATE_POWERED_ON ,
GPU_GC6_STATE_ENTERING ,
GPU_GC6_STATE_ENTERING_FAILED ,
GPU_GC6_STATE_ENTERED ,
GPU_GC6_STATE_EXITING ,
GPU_GC6_STATE_EXITING_FAILED ,
} GPU_GC6_STATE;
// TODO-SC use mask for the bool variables
typedef struct
{
GPU_GC6_STATE currentState;
NvU32 executedStepMask; // step mask executed during entry sequence
NvU32 stepMask[NV2080_CTRL_GC6_FLAVOR_ID_MAX]; // step mask cache
} _GPU_GC6_STATE;
// GPU event mask operation
#define GC6_REFCOUNT_MASK_SET(pGpu, refCountBit) \
do \
{ \
if (pGpu != NULL) \
{ \
((pGpu->gc6State.refCountMask) |= (NVBIT(refCountBit))); \
} \
} while(0)
#define GC6_REFCOUNT_MASK_CLEAR(pGpu, refCountBit) \
do \
{ \
if (pGpu != NULL) \
{ \
((pGpu->gc6State.refCountMask) &= ~(NVBIT(refCountBit))); \
} \
} while(0)
#define GC6_REFCOUNT_MASK_GET_FROM_EVENT(event) (((event) / 2))
// GC6 related defines
#define GC6_FB_CLAMP_TIMEOUT_MS 10
// Macros for GPU_GC6_STATE
#define IS_GPU_GC6_STATE_POWERED_ON(obj) (obj->gc6State.currentState == GPU_GC6_STATE_POWERED_ON)
#define IS_GPU_GC6_STATE_EXITED(obj) (obj->gc6State.currentState == GPU_GC6_STATE_EXITED)
#define IS_GPU_GC6_STATE_ENTERING(obj) (obj->gc6State.currentState == GPU_GC6_STATE_ENTERING)
#define IS_GPU_GC6_STATE_ENTERED(obj) (obj->gc6State.currentState == GPU_GC6_STATE_ENTERED)
#define IS_GPU_GC6_STATE_EXITING(obj) (obj->gc6State.currentState == GPU_GC6_STATE_EXITING)
#define SET_GPU_GC6_STATE(obj, state) (obj->gc6State.currentState = state)
#define SET_GPU_GC6_STATE_AND_LOG(obj, state) \
do { \
SET_GPU_GC6_STATE(obj, state); \
RMTRACE_GPU(_GC6_STATE, obj->gpuId, state, 0, 0, 0, 0, 0, 0); \
} while(0)
// Macro to check if a given GC6 step ID is set
#define GPU_IS_GC6_STEP_ID_SET(stepId, stepMask) \
((NVBIT(NV2080_CTRL_GC6_STEP_ID_##stepId) & (stepMask)) != 0)
#endif // KERN_GPU_POWER_H
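A hypothetical GC6 entry flow using the state macros above (pGpu assumed valid; step bookkeeping elided):

    if (IS_GPU_GC6_STATE_POWERED_ON(pGpu))
    {
        SET_GPU_GC6_STATE_AND_LOG(pGpu, GPU_GC6_STATE_ENTERING);

        // ... execute entry steps, recording each in executedStepMask ...

        SET_GPU_GC6_STATE_AND_LOG(pGpu, GPU_GC6_STATE_ENTERED);
    }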

View File

@@ -0,0 +1,122 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_ce_utils_nvoc.h"
#ifndef CE_UTILS_H
#define CE_UTILS_H
#include "gpu/gpu_resource.h" // GpuResource
#include "class/cl0050.h"
#include "ctrl/ctrl0050.h"
#include "kernel/gpu/mem_mgr/channel_utils.h"
typedef struct
{
MEMORY_DESCRIPTOR *pMemDesc;
NvU64 offset;
NvU64 length;
NvU32 pattern;
NvU64 flags;
NvU64 submittedWorkId; // Payload to poll for async completion
} CEUTILS_MEMSET_PARAMS;
typedef struct
{
MEMORY_DESCRIPTOR *pSrcMemDesc;
MEMORY_DESCRIPTOR *pDstMemDesc;
NvU64 dstOffset;
NvU64 srcOffset;
NvU64 length;
NvU64 flags;
NvU64 submittedWorkId; // Payload to poll for async completion
} CEUTILS_MEMCOPY_PARAMS;
NVOC_PREFIX(ceutils) class CeUtils : Object
{
public:
NV_STATUS ceutilsConstruct(CeUtils *pCeUtils, OBJGPU *pGpu, NV0050_ALLOCATION_PARAMETERS *pAllocParams);
void ceutilsDestruct(CeUtils *pCeUtils);
NV_STATUS ceutilsInitialize(CeUtils *pCeUtils, OBJGPU *pGpu, NV0050_ALLOCATION_PARAMETERS *pAllocParams);
void ceutilsDeinit(CeUtils *pCeUtils);
void ceutilsRegisterGPUInstance(CeUtils *pCeUtils, KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance);
NV_STATUS ceutilsMemset(CeUtils *pCeUtils, CEUTILS_MEMSET_PARAMS *pParams);
NV_STATUS ceutilsMemcopy(CeUtils *pCeUtils, CEUTILS_MEMCOPY_PARAMS *pParams);
NvU64 ceutilsUpdateProgress(CeUtils *pCeUtils);
void ceutilsServiceInterrupts(CeUtils *pCeUtils);
//
// Internal states
//
NvHandle hClient;
NvHandle hDevice;
NvHandle hSubdevice;
OBJCHANNEL *pChannel;
KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance;
OBJGPU *pGpu;
KernelCE *pKCe;
NvBool bUseVasForCeCopy;
NvU32 hTdCopyClass;
NvU64 lastSubmittedPayload;
NvU64 lastCompletedPayload;
};
#if defined(DEBUG) || defined (DEVELOP)
NVOC_PREFIX(ceutilsapi) class CeUtilsApi : GpuResource
{
public:
NV_STATUS ceutilsapiConstruct(CeUtilsApi *pCeUtilsApi, CALL_CONTEXT *pCallContext,
RS_RES_ALLOC_PARAMS_INTERNAL *pParams)
: GpuResource(pCallContext, pParams);
void ceutilsapiDestruct(CeUtilsApi *pCeUtilsApi);
//
// Below APIs are only provided for SRT testing, thus only available for debug or
// develop driver builds
//
//
RMCTRL_EXPORT(NV0050_CTRL_CMD_MEMSET, RMCTRL_FLAGS(PRIVILEGED, API_LOCK_READONLY))
NV_STATUS ceutilsapiCtrlCmdMemset(CeUtilsApi *pCeUtilsApi, NV0050_CTRL_MEMSET_PARAMS *pParams);
RMCTRL_EXPORT(NV0050_CTRL_CMD_MEMCOPY, RMCTRL_FLAGS(PRIVILEGED, API_LOCK_READONLY))
NV_STATUS ceutilsapiCtrlCmdMemcopy(CeUtilsApi *pCeUtilsApi, NV0050_CTRL_MEMCOPY_PARAMS *pParams);
RMCTRL_EXPORT(NV0050_CTRL_CMD_CHECK_PROGRESS, RMCTRL_FLAGS(PRIVILEGED, API_LOCK_READONLY))
NV_STATUS ceutilsapiCtrlCmdCheckProgress(CeUtilsApi *pCeUtilsApi, NV0050_CTRL_CHECK_PROGRESS_PARAMS *pParams);
CeUtils *pCeUtils;
};
#endif
#endif // CE_UTILS_H
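A sketch of driving an asynchronous memset through CeUtils (pCeUtils and pMemDesc assumed to exist; memdescGetSize is the usual descriptor-size accessor):

    CEUTILS_MEMSET_PARAMS params = {0};
    params.pMemDesc = pMemDesc;
    params.offset   = 0;
    params.length   = memdescGetSize(pMemDesc); // full-surface clear (assumption)
    params.pattern  = 0;

    NV_STATUS status = ceutilsMemset(pCeUtils, &params);
    if (status == NV_OK)
    {
        // Poll for completion of the submitted work using the returned payload.
        status = channelWaitForFinishPayload(pCeUtils->pChannel, params.submittedWorkId);
    }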

View File

@@ -0,0 +1,36 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CE_UTILS_SIZES_H
#define _CE_UTILS_SIZES_H
#define CE_MAX_BYTES_PER_LINE 0xffffffffULL
#define CE_NUM_COPY_BLOCKS 4096
#define CE_CHANNEL_SEMAPHORE_SIZE 8
#define CE_GPFIFO_SIZE (NV906F_GP_ENTRY__SIZE * CE_NUM_COPY_BLOCKS)
#define CE_CHANNEL_NOTIFIER_SIZE (sizeof(NvNotification) * \
NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1)
#define CE_METHOD_SIZE_PER_BLOCK 0x64
#define FAST_SCRUBBER_METHOD_SIZE_PER_BLOCK 0x78
#endif // _CE_UTILS_SIZES_H
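Worked size from the defines above (assuming the usual 8-byte GP entry from cl906f.h):

    // CE_GPFIFO_SIZE = NV906F_GP_ENTRY__SIZE * CE_NUM_COPY_BLOCKS = 8 * 4096 = 32KB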

View File

@@ -0,0 +1,157 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CHANNEL_UTILS_H_
#define _CHANNEL_UTILS_H_
#include "core/core.h"
#include "gpu/gpu.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "gpu/ce/kernel_ce.h"
#include "gpu/bus/kern_bus.h"
#include "core/prelude.h"
#include "rmapi/rs_utils.h"
#include "nvos.h"
#include "class/cl906f.h"
#include "class/cl906fsw.h"
#include "class/clb0b5.h" // MAXWELL_DMA_COPY_A
#include "class/clc0b5.h" // PASCAL_DMA_COPY_A
#include "class/clc1b5.h" // PASCAL_DMA_COPY_B
#include "class/clc3b5.h" // VOLTA_DMA_COPY_A
#include "class/clc5b5.h" // TURING_DMA_COPY_A
#include "class/clc6b5.h" // AMPERE_DMA_COPY_A
#include "class/clc7b5.h" // AMPERE_DMA_COPY_B
#include "class/clc8b5.h" // HOPPER_DMA_COPY_A
#include "class/clc86f.h" // HOPPER_CHANNEL_GPFIFO_A
#include "nvctassert.h"
#include "vgpu/vgpu_guest_pma_scrubber.h"
#define RM_SUBCHANNEL 0x0
#define NV_PUSH_METHOD(OpType, SubCh, Method, Count) \
(DRF_DEF(906F, _DMA, _SEC_OP, OpType) | \
DRF_NUM(906F, _DMA, _METHOD_ADDRESS, (Method) >> 2) | \
DRF_NUM(906F, _DMA, _METHOD_SUBCHANNEL, (SubCh)) | \
DRF_NUM(906F, _DMA, _METHOD_COUNT, (Count)))
#define _NV_ASSERT_CONTIGUOUS_METHOD(a1, a2) NV_ASSERT((a2) - (a1) == 4)
#define NV_PUSH_DATA(Data) MEM_WR32(pPtr++, (Data))
#define _NV_PUSH_INC_1U(SubCh, a1, d1, Count) \
do \
{ \
NV_PUSH_DATA(NV_PUSH_METHOD(_INC_METHOD, SubCh, a1, Count)); \
NV_PUSH_DATA(d1); \
} while (0)
#define NV_PUSH_INC_1U(SubCh, a1, d1) \
do \
{ \
_NV_PUSH_INC_1U (SubCh, a1, d1, 1); \
} while (0)
#define NV_PUSH_INC_2U(SubCh, a1, d1, a2, d2) \
do \
{ \
_NV_ASSERT_CONTIGUOUS_METHOD(a1, a2); \
_NV_PUSH_INC_1U(SubCh, a1, d1, 2); \
NV_PUSH_DATA(d2); \
} while (0)
#define NV_PUSH_INC_3U(SubCh, a1, d1, a2, d2, a3, d3) \
do \
{ \
_NV_ASSERT_CONTIGUOUS_METHOD(a1, a2); \
_NV_ASSERT_CONTIGUOUS_METHOD(a2, a3); \
_NV_PUSH_INC_1U(SubCh, a1, d1, 3); \
NV_PUSH_DATA(d2); \
NV_PUSH_DATA(d3); \
} while (0)
#define NV_PUSH_INC_4U(SubCh, a1, d1, a2, d2, a3, d3, a4, d4) \
do \
{ \
_NV_ASSERT_CONTIGUOUS_METHOD(a1, a2); \
_NV_ASSERT_CONTIGUOUS_METHOD(a2, a3); \
_NV_ASSERT_CONTIGUOUS_METHOD(a3, a4); \
_NV_PUSH_INC_1U(SubCh, a1, d1, 4); \
NV_PUSH_DATA(d2); \
NV_PUSH_DATA(d3); \
NV_PUSH_DATA(d4); \
} while (0)
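Illustrative emission of a two-method sequence with the macros above (the HOPPER_DMA_COPY_A method names are taken from clc8b5.h and are contiguous, so the contiguity assert holds; pPtr is the NvU32 * cursor that NV_PUSH_DATA advances):

    NvU32 *pPtr = pPbSpace; // assumed pointer into mapped pushbuffer space
    NV_PUSH_INC_2U(RM_SUBCHANNEL,
                   NVC8B5_OFFSET_IN_UPPER, NvU64_HI32(srcAddr),
                   NVC8B5_OFFSET_IN_LOWER, NvU64_LO32(srcAddr));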
#define READ_CHANNEL_PAYLOAD_SEMA(channel) MEM_RD32((NvU8*)channel->pbCpuVA + \
channel->finishPayloadOffset)
#define READ_CHANNEL_PB_SEMA(channel) MEM_RD32((NvU8*)channel->pbCpuVA + \
channel->semaOffset)
#define WRITE_CHANNEL_PB_SEMA(channel, val) MEM_WR32((NvU8*)channel->pbCpuVA + \
channel->semaOffset, val);
#define WRITE_CHANNEL_PAYLOAD_SEMA(channel,val) MEM_WR32((NvU8*)channel->pbCpuVA + \
channel->finishPayloadOffset, val);
//
// This struct contains parameters needed to send a pushbuffer for a CE
// operation. This interface only supports contiguous operations.
//
typedef struct
{
NvBool bCeMemcopy; // Whether this is a CE memcopy;
// If set to false, this will be a memset operation
NvU64 dstAddr; // Physical address of the destination buffer
NvU64 srcAddr; // Physical address of the source buffer; only valid for memcopy
NvU32 size;
NvU32 pattern; // Fixed pattern to memset to. Only valid for memset
NvU32 payload; // Payload value used to release semaphore
NvU64 clientSemaAddr;
NV_ADDRESS_SPACE dstAddressSpace;
NV_ADDRESS_SPACE srcAddressSpace;
NvU32 dstCpuCacheAttrib;
NvU32 srcCpuCacheAttrib;
} CHANNEL_PB_INFO;
NV_STATUS channelSetupIDs(OBJCHANNEL *pChannel, OBJGPU *pGpu, NvBool bUseVasForCeCopy, NvBool bMIGInUse);
void channelSetupChannelBufferSizes(OBJCHANNEL *pChannel);
// Needed for pushbuffer management
NV_STATUS channelWaitForFreeEntry(OBJCHANNEL *pChannel, NvU32 *pPutIndex);
NV_STATUS channelFillGpFifo(OBJCHANNEL *pChannel, NvU32 putIndex, NvU32 methodsLength);
NvU32 channelFillPb(OBJCHANNEL *pChannel, NvU32 putIndex, NvBool bPipelined,
NvBool bInsertFinishPayload, CHANNEL_PB_INFO *pChannelPbInfo);
NvU32 channelFillPbFastScrub(OBJCHANNEL *pChannel, NvU32 putIndex, NvBool bPipelined,
NvBool bInsertFinishPayload, CHANNEL_PB_INFO *pChannelPbInfo);
// Needed for work tracking
NV_STATUS channelWaitForFinishPayload(OBJCHANNEL *pChannel, NvU64 targetPayload);
NvU64 channelGetFinishPayload(OBJCHANNEL *pChannel);
#endif // _CHANNEL_UTILS_H_

View File

@@ -125,7 +125,7 @@ typedef struct PMA_ALLOC_INFO
{
NvBool bContig;
NvU32 pageCount;
NvU32 pageSize;
NvU64 pageSize;
NvU32 refCount;
NvU64 allocSize;
NvU32 flags;

View File

@@ -34,16 +34,18 @@
#include "nvctassert.h"
#include "vgpu/vgpu_guest_pma_scrubber.h"
#if !defined(SRT_BUILD)
#include "gpu/mem_mgr/ce_utils.h"
#endif
struct OBJGPU;
struct Heap;
struct OBJCHANNEL;
#define RM_SUBCHANNEL 0x0
#define MEMSET_PATTERN 0x00000000
#define SCRUBBER_NUM_PAYLOAD_SEMAPHORES (2)
#define SCRUBBER_SEMAPHORE_SIZE_INBYTES (4)
#define SCRUBBER_CHANNEL_SEMAPHORE_SIZE (SCRUBBER_SEMAPHORE_SIZE_INBYTES *\
#define SCRUBBER_CHANNEL_SEMAPHORE_SIZE (SCRUBBER_SEMAPHORE_SIZE_INBYTES * \
SCRUBBER_NUM_PAYLOAD_SEMAPHORES)
#define SCRUBBER_CHANNEL_NOTIFIER_SIZE (sizeof(NvNotification) * NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1)
@@ -52,67 +54,6 @@ struct OBJCHANNEL;
#define SCRUB_MAX_BYTES_PER_LINE 0xffffffffULL
#define MAX_SCRUB_ITEMS 4096 // 4K scrub items
#define READ_SCRUBBER_PAYLOAD_SEMA(channel) MEM_RD32((NvU8*)channel->pbCpuVA +\
channel->finishPayloadOffset)
#define READ_SCRUBBER_PB_SEMA(channel) MEM_RD32((NvU8*)channel->pbCpuVA +\
channel->semaOffset)
#define WRITE_SCRUBBER_PB_SEMA(channel, val) MEM_WR32((NvU8*)channel->pbCpuVA +\
channel->semaOffset, val);
#define WRITE_SCRUBBER_PAYLOAD_SEMA(channel,val) MEM_WR32((NvU8*)channel->pbCpuVA +\
channel->finishPayloadOffset, val);
// Use Incrementing Methods to save the PB Space
#define _NV_ASSERT_CONTIGUOUS_METHODS(a1, a2) NV_ASSERT((a2) - (a1) == 4)
#define NV_PUSH_METHOD(OpType, SubCh, Method, Count) \
(DRF_DEF(906F, _DMA, _SEC_OP, OpType) |\
DRF_NUM(906F, _DMA, _METHOD_ADDRESS, (Method) >> 2) |\
DRF_NUM(906F, _DMA, _METHOD_SUBCHANNEL, (SubCh)) |\
DRF_NUM(906F, _DMA, _METHOD_COUNT, (Count)))
#define NV_PUSH_DATA(Data) MEM_WR32(pPtr++, (Data))
#define _NV_PUSH_INC_1U(SubCh, a1,d1, Count) \
do{ \
NV_PUSH_DATA(NV_PUSH_METHOD(_INC_METHOD, SubCh, a1, Count));\
NV_PUSH_DATA(d1); \
} while(0)
#define NV_PUSH_INC_1U(SubCh, a1,d1) \
do{ \
_NV_PUSH_INC_1U (SubCh, a1,d1, 1);\
} while(0)
#define NV_PUSH_INC_2U(SubCh, a1,d1, a2,d2) \
do{ \
_NV_ASSERT_CONTIGUOUS_METHODS(a1, a2);\
_NV_PUSH_INC_1U(SubCh, a1,d1, 2); \
NV_PUSH_DATA(d2); \
} while(0)
#define NV_PUSH_INC_3U(SubCh, a1,d1, a2,d2, a3,d3) \
do{ \
_NV_ASSERT_CONTIGUOUS_METHODS(a1,a2);\
_NV_ASSERT_CONTIGUOUS_METHODS(a2,a3);\
_NV_PUSH_INC_1U(SubCh, a1,d1, 3); \
NV_PUSH_DATA(d2); \
NV_PUSH_DATA(d3); \
} while(0)
#define NV_PUSH_INC_4U(SubCh, a1,d1, a2,d2, a3,d3, a4,d4) \
do{ \
_NV_ASSERT_CONTIGUOUS_METHODS(a1,a2);\
_NV_ASSERT_CONTIGUOUS_METHODS(a2,a3);\
_NV_ASSERT_CONTIGUOUS_METHODS(a3,a4);\
_NV_PUSH_INC_1U(SubCh, a1,d1, 4); \
NV_PUSH_DATA(d2); \
NV_PUSH_DATA(d3); \
NV_PUSH_DATA(d4); \
} while(0)
// Structure to store the details of a scrubbing work item
typedef struct SCRUB_NODE {
// The 64 bit ID assigned to each work
@@ -144,8 +85,10 @@ typedef struct OBJMEMSCRUB {
NvLength scrubListSize;
// Pre-allocated Free Scrub List
PSCRUB_NODE pScrubList;
// Scrubber Channel
struct OBJCHANNEL *pChannel;
#if !defined(SRT_BUILD)
// Scrubber uses ceUtils to manage CE channel
CeUtils ceUtilsObject;
#endif
struct OBJGPU *pGpu;
VGPU_GUEST_PMA_SCRUB_BUFFER_RING vgpuScrubBuffRing;
NvBool bVgpuScrubberEnabled;

View File

@@ -26,12 +26,12 @@
#include "core/prelude.h"
#define CLEAR_HAL_ATTR(a) \
#define CLEAR_HAL_ATTR(a) \
a = (a &~(DRF_NUM(OS32, _ATTR, _COMPR, 0x3) | \
DRF_NUM(OS32, _ATTR, _TILED, 0x3) | \
DRF_NUM(OS32, _ATTR, _ZCULL, 0x3)));
#define CLEAR_HAL_ATTR2(a) \
#define CLEAR_HAL_ATTR2(a) \
a = (a & ~(DRF_SHIFTMASK(NVOS32_ATTR2_ZBC) | \
DRF_SHIFTMASK(NVOS32_ATTR2_GPU_CACHEABLE)));

View File

@@ -127,12 +127,12 @@ void pmaAddrtreeSetEvictingFrames(void *pMap, NvU64 frameEvictionsInProcess);
*/
NV_STATUS pmaAddrtreeScanContiguous(
void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd,
NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment,
NvU64 numPages, NvU64 *freelist, NvU64 pageSize, NvU64 alignment,
NvU64 *pagesAllocated, NvBool bSkipEvict, NvBool bReverseAlloc);
NV_STATUS pmaAddrtreeScanDiscontiguous(
void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd,
NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment,
NvU64 numPages, NvU64 *freelist, NvU64 pageSize, NvU64 alignment,
NvU64 *pagesAllocated, NvBool bSkipEvict, NvBool bReverseAlloc);
void pmaAddrtreePrintTree(void *pMap, const char* str);
@@ -154,7 +154,7 @@ void pmaAddrtreePrintTree(void *pMap, const char* str);
void pmaAddrtreeChangeState(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState);
void pmaAddrtreeChangeStateAttrib(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState, NvBool writeAttrib);
void pmaAddrtreeChangeStateAttribEx(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState,PMA_PAGESTATUS newStateMask);
void pmaAddrtreeChangePageStateAttrib(void * pMap, NvU64 startFrame, NvU32 pageSize,
void pmaAddrtreeChangePageStateAttrib(void * pMap, NvU64 startFrame, NvU64 pageSize,
PMA_PAGESTATUS newState, NvBool writeAttrib);
/*!

View File

@@ -90,7 +90,7 @@ typedef NvU32 PMA_PAGESTATUS;
#define ATTRIB_PERSISTENT NVBIT(MAP_IDX_PERSISTENT)
#define ATTRIB_NUMA_REUSE NVBIT(MAP_IDX_NUMA_REUSE)
#define ATTRIB_BLACKLIST NVBIT(MAP_IDX_BLACKLIST)
#define ATTRIB_MASK (ATTRIB_EVICTING | ATTRIB_SCRUBBING \
#define ATTRIB_MASK (ATTRIB_EVICTING | ATTRIB_SCRUBBING \
| ATTRIB_PERSISTENT | ATTRIB_NUMA_REUSE \
| ATTRIB_BLACKLIST)
@@ -114,6 +114,9 @@ typedef struct _PMA_STATS
NvU64 numFreeFrames; // PMA-wide free 64KB frame count
NvU64 numFree2mbPages; // PMA-wide free 2MB pages count
#if !defined(NVWATCH)
NvU64 num2mbPagesProtected; // PMA-wide total number of 2MB pages in protected memory
NvU64 numFreeFramesProtected; // PMA-wide free 64KB frame count in protected memory
NvU64 numFree2mbPagesProtected; // PMA-wide free 2MB pages count in protected memory
#endif // !defined(NVWATCH)
} PMA_STATS;

View File

@@ -59,7 +59,7 @@ extern "C" {
* is used for allocations coming from the Linux kernel.
* The perf implication is under further study. See bug #1999793.
*/
NV_STATUS pmaNumaAllocate(PMA *pPma, NvLength allocationCount, NvU32 pageSize,
NV_STATUS pmaNumaAllocate(PMA *pPma, NvLength allocationCount, NvU64 pageSize,
PMA_ALLOCATION_OPTIONS *allocationOptions, NvU64 *pPages);
/*!

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -73,6 +73,7 @@ typedef struct SCRUB_NODE SCRUB_NODE;
#define PMA_INIT_INTERNAL NVBIT(3) // Used after heap is removed
#define PMA_INIT_FORCE_PERSISTENCE NVBIT(4)
#define PMA_INIT_ADDRTREE NVBIT(5)
#define PMA_INIT_NUMA_AUTO_ONLINE NVBIT(6)
// These flags are used for querying PMA's config and/or state.
#define PMA_QUERY_SCRUB_ENABLED NVBIT(0)
@@ -166,7 +167,7 @@ typedef enum
/*!
* @brief Callbacks to UVM for eviction
*/
typedef NV_STATUS (*pmaEvictPagesCb_t)(void *ctxPtr, NvU32 pageSize, NvU64 *pPages,
typedef NV_STATUS (*pmaEvictPagesCb_t)(void *ctxPtr, NvU64 pageSize, NvU64 *pPages,
NvU32 count, NvU64 physBegin, NvU64 physEnd,
MEMORY_PROTECTION prot);
typedef NV_STATUS (*pmaEvictRangeCb_t)(void *ctxPtr, NvU64 physBegin, NvU64 physEnd,
@@ -180,13 +181,13 @@ typedef void (*pmaMapDestroy_t)(void *pMap);
typedef void (*pmaMapChangeState_t)(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState);
typedef void (*pmaMapChangeStateAttrib_t)(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState, NvBool writeAttrib);
typedef void (*pmaMapChangeStateAttribEx_t)(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState, PMA_PAGESTATUS newStateMask);
typedef void (*pmaMapChangePageStateAttrib_t)(void *pMap, NvU64 startFrame, NvU32 pageSize, PMA_PAGESTATUS newState, NvBool writeAttrib);
typedef void (*pmaMapChangePageStateAttrib_t)(void *pMap, NvU64 startFrame, NvU64 pageSize, PMA_PAGESTATUS newState, NvBool writeAttrib);
typedef PMA_PAGESTATUS (*pmaMapRead_t)(void *pMap, NvU64 frameNum, NvBool readAttrib);
typedef NV_STATUS (*pmaMapScanContiguous_t)(void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd,
NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment,
NvU64 numPages, NvU64 *freelist, NvU64 pageSize, NvU64 alignment,
NvU64 *pagesAllocated, NvBool bSkipEvict, NvBool bReverseAlloc);
typedef NV_STATUS (*pmaMapScanDiscontiguous_t)(void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd,
NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment,
NvU64 numPages, NvU64 *freelist, NvU64 pageSize, NvU64 alignment,
NvU64 *pagesAllocated, NvBool bSkipEvict, NvBool bReverseAlloc);
typedef void (*pmaMapGetSize_t)(void *pMap, NvU64 *pBytesTotal);
typedef void (*pmaMapGetLargestFree_t)(void *pMap, NvU64 *pLargestFree);
@@ -251,6 +252,7 @@ struct _PMA
NvU64 coherentCpuFbBase; // Used to calculate FB offset from bus address
NvU64 coherentCpuFbSize; // Used for error checking only
NvU32 numaReclaimSkipThreshold; // percent value below which __GFP_RECLAIM will not be used.
NvBool bNumaAutoOnline; // If NUMA memory is auto-onlined
// Blacklist related states
PMA_BLACKLIST_CHUNK *pBlacklistChunks; // Tracking for blacklist pages
@@ -433,12 +435,12 @@ NV_STATUS pmaRegisterRegion(PMA *pPma, NvU32 id, NvBool bAsyncEccScrub,
* code, because it is not very informative.
*
*/
NV_STATUS pmaAllocatePages(PMA *pPma, NvLength pageCount, NvU32 pageSize,
NV_STATUS pmaAllocatePages(PMA *pPma, NvLength pageCount, NvU64 pageSize,
PMA_ALLOCATION_OPTIONS *pAllocationOptions, NvU64 *pPages);
// allocate on multiple GPU, thus pmaCount
NV_STATUS pmaAllocatePagesBroadcast(PMA **pPma, NvU32 pmaCount, NvLength allocationCount,
NvU32 pageSize, PMA_ALLOCATION_OPTIONS *pAllocationOptions, NvU64 *pPages);
NvU64 pageSize, PMA_ALLOCATION_OPTIONS *pAllocationOptions, NvU64 *pPages);
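These allocation entry points now take the page size as NvU64 instead of NvU32, matching the widened eviction-callback and scan-function signatures above. A hedged usage sketch; pPma and the zero-initialized options are placeholders, and RM_PAGE_SIZE_HUGE comes from the rm_page_size.h header added later in this commit:

NvU64 pages[4];
PMA_ALLOCATION_OPTIONS options = {0};   // real callers fill in allocation flags
NV_STATUS status;

// Request four 2MB pages; the page size argument is now 64-bit.
status = pmaAllocatePages(pPma, 4, (NvU64)RM_PAGE_SIZE_HUGE, &options, pages);
if (status == NV_OK)
    status = pmaPinPages(pPma, pages, 4, (NvU64)RM_PAGE_SIZE_HUGE);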
/*!
@@ -472,7 +474,7 @@ NV_STATUS pmaAllocatePagesBroadcast(PMA **pPma, NvU32 pmaCount, NvLength allocat
* TODO some error for rollback
*
*/
NV_STATUS pmaPinPages(PMA *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize);
NV_STATUS pmaPinPages(PMA *pPma, NvU64 *pPages, NvLength pageCount, NvU64 pageSize);
/*!
@@ -498,7 +500,7 @@ NV_STATUS pmaPinPages(PMA *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSi
* TODO some error for rollback
*
*/
NV_STATUS pmaUnpinPages(PMA *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize);
NV_STATUS pmaUnpinPages(PMA *pPma, NvU64 *pPages, NvLength pageCount, NvU64 pageSize);
/*!
@@ -815,7 +817,7 @@ void pmaNumaOfflined(PMA *pPma);
* @return
* void
*/
void pmaGetClientBlacklistedPages(PMA *pPma, NvU64 *pChunks, NvU32 *pPageSize, NvU32 *pNumChunks);
void pmaGetClientBlacklistedPages(PMA *pPma, NvU64 *pChunks, NvU64 *pPageSize, NvU32 *pNumChunks);
/*!
* @brief Returns the PMA blacklist size in bytes for
@@ -865,6 +867,54 @@ void pmaPrintMapState(PMA *pPma);
*/
NV_STATUS pmaAddToBlacklistTracking(PMA *pPma, NvU64 physBase);
/*!
* @brief Returns total protected video memory.
*
* @param[in] pPma PMA pointer
* @param[in] pBytesTotal Pointer that will return the total protected FB memory size.
*
* @return
* void
*/
void pmaGetTotalProtectedMemory(PMA *pPma, NvU64 *pBytesTotal);
/*!
* @brief Returns total unprotected video memory.
*
* @param[in] pPma PMA pointer
* @param[in] pBytesTotal Pointer that will return the total unprotected FB memory size.
*
* @return
* void
*/
void pmaGetTotalUnprotectedMemory(PMA *pPma, NvU64 *pBytesTotal);
/*!
* @brief Returns information about the total free protected FB memory.
* In confidential compute use cases, memory will be split into
* protected and unprotected regions.
*
* @param[in] pPma PMA pointer
* @param[in] pBytesFree Pointer that will return the free protected memory size.
*
* @return
* void
*/
void pmaGetFreeProtectedMemory(PMA *pPma, NvU64 *pBytesFree);
/*!
* @brief Returns information about the total free unprotected FB memory.
* In confidential compute use cases, memory will be split into
* protected and unprotected regions.
*
* @param[in] pPma PMA pointer
* @param[in] pBytesFree Pointer that will return the free unprotected memory size.
*
* @return
* void
*/
void pmaGetFreeUnprotectedMemory(PMA *pPma, NvU64 *pBytesFree);
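Taken together, the four queries let a caller account for both partitions; a minimal sketch, assuming an initialized PMA instance:

NvU64 totalProtected, freeProtected, totalUnprotected, freeUnprotected;

pmaGetTotalProtectedMemory(pPma, &totalProtected);
pmaGetFreeProtectedMemory(pPma, &freeProtected);
pmaGetTotalUnprotectedMemory(pPma, &totalUnprotected);
pmaGetFreeUnprotectedMemory(pPma, &freeUnprotected);

// Used protected memory follows by subtraction.
NvU64 usedProtected = totalProtected - freeProtected;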
#ifdef __cplusplus
}
#endif

View File

@@ -45,11 +45,11 @@ NvBool pmaStateCheck(PMA *pPma);
NV_STATUS _pmaEvictContiguous(PMA *pPma, void *pMap, NvU64 evictStart, NvU64 evictEnd,
MEMORY_PROTECTION prot);
NV_STATUS _pmaEvictPages(PMA *pPma, void *pMap, NvU64 *evictPages, NvU64 evictPageCount,
NvU64 *allocPages, NvU64 allocPageCount, NvU32 pageSize,
NvU64 *allocPages, NvU64 allocPageCount, NvU64 pageSize,
NvU64 physBegin, NvU64 physEnd, MEMORY_PROTECTION prot);
void _pmaClearScrubBit(PMA *pPma, SCRUB_NODE *pPmaScrubList, NvU64 count);
NV_STATUS _pmaCheckScrubbedPages(PMA *pPma, NvU64 chunkSize, NvU64 *pPages, NvU32 pageCount);
NV_STATUS _pmaPredictOutOfMemory(PMA *pPma, NvLength allocationCount, NvU32 pageSize,
NV_STATUS _pmaPredictOutOfMemory(PMA *pPma, NvLength allocationCount, NvU64 pageSize,
PMA_ALLOCATION_OPTIONS *allocationOptions);
NV_STATUS pmaSelector(PMA *pPma, PMA_ALLOCATION_OPTIONS *allocationOptions, NvS32 *regionList);
void _pmaReallocBlacklistPages (PMA *pPma, NvU32 regId, NvU64 rangeBegin, NvU64 rangeSize);

View File

@@ -131,7 +131,7 @@ void pmaRegmapChangeStateAttrib(void *pMap, NvU64 frameNum,
*
* @return void
*/
void pmaRegmapChangePageStateAttrib(void * pMap, NvU64 frameNumStart, NvU32 pageSize,
void pmaRegmapChangePageStateAttrib(void * pMap, NvU64 frameNumStart, NvU64 pageSize,
PMA_PAGESTATUS newState, NvBool writeAttrib);
/*!
@@ -188,7 +188,7 @@ PMA_PAGESTATUS pmaRegmapRead(void *pMap, NvU64 frameNum, NvBool readAttrib);
*/
NV_STATUS pmaRegmapScanContiguous(
void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd,
NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment,
NvU64 numPages, NvU64 *freelist, NvU64 pageSize, NvU64 alignment,
NvU64 *pagesAllocated, NvBool bSkipEvict, NvBool bReverseAlloc);
/*!
@@ -215,7 +215,7 @@ NV_STATUS pmaRegmapScanContiguous(
*/
NV_STATUS pmaRegmapScanDiscontiguous(
void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd,
NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment,
NvU64 numPages, NvU64 *freelist, NvU64 pageSize, NvU64 alignment,
NvU64 *pagesAllocated, NvBool bSkipEvict, NvBool bReverseAlloc);
/*!

View File

@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef RM_PAGE_SIZE_H
#define RM_PAGE_SIZE_H
//---------------------------------------------------------------------------
//
// Memory page defines.
//
// These correspond to the granularity understood by the hardware
// for address mapping; the system page size can be larger.
//
//---------------------------------------------------------------------------
#define RM_PAGE_SIZE_INVALID 0
#define RM_PAGE_SIZE 4096
#define RM_PAGE_SIZE_64K (64 * 1024)
#define RM_PAGE_SIZE_128K (128 * 1024)
#define RM_PAGE_MASK 0x0FFF
#define RM_PAGE_SHIFT 12
#define RM_PAGE_SHIFT_64K 16
#define RM_PAGE_SHIFT_128K 17
#define RM_PAGE_SHIFT_2M 21
#define RM_PAGE_SIZE_2M (1 << RM_PAGE_SHIFT_2M)
// Huge page size is 2 MB
#define RM_PAGE_SHIFT_HUGE RM_PAGE_SHIFT_2M
#define RM_PAGE_SIZE_HUGE (1ULL << RM_PAGE_SHIFT_HUGE)
#define RM_PAGE_MASK_HUGE ((1ULL << RM_PAGE_SHIFT_HUGE) - 1)
// 512MB page size
#define RM_PAGE_SHIFT_512M 29
#define RM_PAGE_SIZE_512M (1ULL << RM_PAGE_SHIFT_512M)
#define RM_PAGE_MASK_512M (RM_PAGE_SIZE_512M - 1)
//---------------------------------------------------------------------------
//
// Memory page attributes.
//
// These attributes are used by software for page size mapping;
// Big pages can be of 64/128KB[Fermi/Kepler/Pascal]
// Huge page is 2 MB[Pascal+]
// 512MB page is Ampere+
// Default page attribute lets driver decide the optimal page size
//
//---------------------------------------------------------------------------
typedef enum
{
RM_ATTR_PAGE_SIZE_DEFAULT,
RM_ATTR_PAGE_SIZE_4KB,
RM_ATTR_PAGE_SIZE_BIG,
RM_ATTR_PAGE_SIZE_HUGE,
RM_ATTR_PAGE_SIZE_512MB,
RM_ATTR_PAGE_SIZE_INVALID
}
RM_ATTR_PAGE_SIZE;
#endif // RM_PAGE_SIZE_H
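The shift/mask pairs are intended for the usual alignment arithmetic; two hypothetical helpers illustrating the pattern (the names are not part of this header):

// Round an address down to a 2MB boundary using the huge-page mask.
static inline NvU64 rmPageAlignDown2M(NvU64 addr)
{
    return addr & ~((NvU64)RM_PAGE_MASK_HUGE);
}

// Count the 4KB pages needed to cover a byte size, rounding up.
static inline NvU64 rmPageCount4K(NvU64 sizeBytes)
{
    return (sizeBytes + RM_PAGE_MASK) >> RM_PAGE_SHIFT;
}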

View File

@@ -0,0 +1,3 @@
#include "g_sem_surf_nvoc.h"

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -33,60 +33,14 @@
#include "nvtypes.h"
#include "nvgputypes.h"
#include "nvstatus.h"
#include "resserv/rs_client.h"
#include "gpu/mem_mgr/rm_page_size.h"
typedef struct OBJGPU OBJGPU;
typedef struct ChannelDescendant ChannelDescendant;
typedef struct ContextDma ContextDma;
typedef struct Memory Memory;
typedef struct EVENTNOTIFICATION EVENTNOTIFICATION;
//---------------------------------------------------------------------------
//
// Memory page defines.
//
// These correspond to the granularity understood by the hardware
// for address mapping; the system page size can be larger.
//
//---------------------------------------------------------------------------
#define RM_PAGE_SIZE_INVALID 0
#define RM_PAGE_SIZE 4096
#define RM_PAGE_SIZE_64K (64 * 1024)
#define RM_PAGE_SIZE_128K (128 * 1024)
#define RM_PAGE_MASK 0x0FFF
#define RM_PAGE_SHIFT 12
#define RM_PAGE_SHIFT_64K 16
#define RM_PAGE_SHIFT_128K 17
// Huge page size is 2 MB
#define RM_PAGE_SHIFT_HUGE 21
#define RM_PAGE_SIZE_HUGE (1 << RM_PAGE_SHIFT_HUGE)
#define RM_PAGE_MASK_HUGE ((1 << RM_PAGE_SHIFT_HUGE) - 1)
// 512MB page size
#define RM_PAGE_SHIFT_512M 29
#define RM_PAGE_SIZE_512M (1 << RM_PAGE_SHIFT_512M)
#define RM_PAGE_MASK_512M (RM_PAGE_SIZE_512M - 1)
//---------------------------------------------------------------------------
//
// Memory page attributes.
//
// These attributes are used by software for page size mapping;
// Big pages can be of 64/128KB[Fermi/Kepler/Pascal]
// Huge page is 2 MB[Pascal+]
// 512MB page is Ampere+
// Default page attribute lets driver decide the optimal page size
//
//---------------------------------------------------------------------------
typedef enum
{
RM_ATTR_PAGE_SIZE_DEFAULT = 0x0,
RM_ATTR_PAGE_SIZE_4KB = 0x1,
RM_ATTR_PAGE_SIZE_BIG = 0x2,
RM_ATTR_PAGE_SIZE_HUGE = 0x3,
RM_ATTR_PAGE_SIZE_512MB = 0x4,
RM_ATTR_PAGE_SIZE_INVALID = 0x5
}
RM_ATTR_PAGE_SIZE;
//---------------------------------------------------------------------------
//
@@ -133,8 +87,8 @@ void notifyFillNOTIFICATION(OBJGPU *pGpu,
NV_STATUS CompletionStatus,
NvBool TimeSupplied,
NvU64 Time);
NV_STATUS notifyFillNotifierGPUVA (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32);
NV_STATUS notifyFillNotifierGPUVATimestamp (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32, NvU64);
NV_STATUS notifyFillNotifierGPUVA (OBJGPU*, RsClient*, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32);
NV_STATUS notifyFillNotifierGPUVATimestamp (OBJGPU*, RsClient*, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32, NvU64);
NV_STATUS notifyFillNotifierMemory (OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32);
NV_STATUS notifyFillNotifierMemoryTimestamp(OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32, NvU64);
void notifyFillNvNotification(OBJGPU *pGpu,
@@ -145,8 +99,8 @@ void notifyFillNvNotification(OBJGPU *pGpu,
NvBool TimeSupplied,
NvU64 Time);
NV_STATUS semaphoreFillGPUVA (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV32, NvBool);
NV_STATUS semaphoreFillGPUVATimestamp(OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV32, NvBool, NvU64);
NV_STATUS semaphoreFillGPUVA (OBJGPU*, RsClient*, NvHandle, NvU64, NvV32, NvV32, NvBool);
NV_STATUS semaphoreFillGPUVATimestamp(OBJGPU*, RsClient*, NvHandle, NvU64, NvV32, NvV32, NvBool, NvU64);
RM_ATTR_PAGE_SIZE dmaNvos32ToPageSizeAttr(NvU32 attr, NvU32 attr2);

View File

@@ -67,7 +67,7 @@ typedef struct
NvU32 flags;
/*!
* Specifies number of clients who requested
* Specifies number of clients who requested
* CUDA boost. This is used only in case of the CUDA clients.
*/
NvU32 refCount;

View File

@@ -50,7 +50,7 @@ struct KERNEL_PERF_GPU_BOOST_SYNC
*/
NvBool bHystersisEnable;
/*!
/*!
* SLI GPU Boost feature is enabled.
*/
NvBool bSliGpuBoostSyncEnable;

View File

@@ -0,0 +1,3 @@
#include "g_sec2_context_nvoc.h"

View File

@@ -0,0 +1,74 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LIBSPDM_INCLUDES_H
#define LIBSPDM_INCLUDES_H
//
// Dedicated header file to centralize all libspdm-related includes and defines.
// This allows us to minimize the number of headers (and sources) that have a direct
// libspdm dependency, and allows shared macros & types for dependent sources.
//
/* ------------------------ Includes --------------------------------------- */
#include "internal/libspdm_common_lib.h"
#include "internal/libspdm_secured_message_lib.h"
#include "library/spdm_requester_lib.h"
/* ------------------------ Macros and Defines ----------------------------- */
//
// As libspdm has its own RETURN_STATUS define, we need to ensure we do not
// accidentally compare it against NV_STATUS. Use this macro for consistent
// libspdm error handling.
//
#define CHECK_SPDM_STATUS(expr) do { \
libspdm_return_t __spdmStatus; \
__spdmStatus = (expr); \
if (LIBSPDM_STATUS_IS_ERROR(__spdmStatus)) \
{ \
NV_PRINTF(LEVEL_INFO, "SPDM failed with status 0x%0x\n", \
__spdmStatus); \
status = NV_ERR_GENERIC; \
goto ErrorExit; \
} \
} while (NV_FALSE)
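The macro expands to a goto, so it assumes a local NV_STATUS named status and an ErrorExit label in the enclosing function. A hedged sketch of the expected calling pattern; the wrapper function is hypothetical, while libspdm_init_connection is a standard libspdm requester entry point:

static NV_STATUS spdmEstablishSession(void *pLibspdmContext)
{
    NV_STATUS status = NV_OK;

    // Unwinds to ErrorExit with status = NV_ERR_GENERIC on any libspdm error.
    CHECK_SPDM_STATUS(libspdm_init_connection(pLibspdmContext, false));

ErrorExit:
    return status;
}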
// Check for any critical issues caused by data size mismatches.
ct_assert(sizeof(NvU8) == sizeof(uint8_t));
ct_assert(sizeof(NvU16) == sizeof(uint16_t));
ct_assert(sizeof(NvU32) == sizeof(uint32_t));
ct_assert(sizeof(NvU64) == sizeof(uint64_t));
typedef struct _SPDM_ALGO_CHECK_ENTRY
{
libspdm_data_type_t dataType;
uint32_t expectedAlgo;
} SPDM_ALGO_CHECK_ENTRY, *PSPDM_ALGO_CHECK_ENTRY;
//
// Check for assert in libspdm code, indicating a fatal condition.
// Returns false if assert was hit.
//
bool nvspdm_check_and_clear_libspdm_assert(void);
#endif // LIBSPDM_INCLUDES_H

View File

@@ -0,0 +1,3 @@
#include "g_spdm_nvoc.h"

View File

@@ -51,24 +51,24 @@
NV_STATUS subdeviceCtrlCmdRcSetCleanErrorHistory(Subdevice *pSubdevice);
RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO,
RMCTRL_FLAGS(NON_PRIVILEGED))
RMCTRL_FLAGS(NON_PRIVILEGED, GPU_LOCK_DEVICE_ONLY, API_LOCK_READONLY))
NV_STATUS subdeviceCtrlCmdRcGetWatchdogInfo(Subdevice *pSubdevice,
NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS *pWatchdogInfoParams);
RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG,
RMCTRL_FLAGS(NON_PRIVILEGED))
RMCTRL_FLAGS(NON_PRIVILEGED, GPU_LOCK_DEVICE_ONLY))
NV_STATUS subdeviceCtrlCmdRcDisableWatchdog(Subdevice *pSubdevice);
RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG,
RMCTRL_FLAGS(NON_PRIVILEGED))
RMCTRL_FLAGS(NON_PRIVILEGED, GPU_LOCK_DEVICE_ONLY))
NV_STATUS subdeviceCtrlCmdRcSoftDisableWatchdog(Subdevice *pSubdevice);
RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG,
RMCTRL_FLAGS(NON_PRIVILEGED))
RMCTRL_FLAGS(NON_PRIVILEGED, GPU_LOCK_DEVICE_ONLY))
NV_STATUS subdeviceCtrlCmdRcEnableWatchdog(Subdevice *pSubdevice);
RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS,
RMCTRL_FLAGS(NON_PRIVILEGED))
RMCTRL_FLAGS(NON_PRIVILEGED, GPU_LOCK_DEVICE_ONLY))
NV_STATUS subdeviceCtrlCmdRcReleaseWatchdogRequests(Subdevice *pSubdevice);
RMCTRL_EXPORT(NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT,

View File

@@ -0,0 +1,3 @@
#include "g_egm_mem_nvoc.h"

View File

@@ -46,13 +46,13 @@
typedef enum
{
POOL_CONFIG_GMMU_FMT_1 = 0, // configure pool for client page tables with version = GMMU_FMT_VERSION_1
POOL_CONFIG_GMMU_FMT_2 = 1, // configure pool for client page tables with version = GMMU_FMT_VERSION_2
POOL_CONFIG_CTXBUF_512M = 2, // configure pool for RM internal allocations like ctx buffers with 512MB page size
POOL_CONFIG_CTXBUF_2M = 3, // configure pool for RM internal allocations like ctx buffers with 2MB page size
POOL_CONFIG_CTXBUF_64K = 4, // configure pool for RM internal allocations like ctx buffers with 64KB page size
POOL_CONFIG_CTXBUF_4K = 5, // configure pool for RM internal allocations like ctx buffers with 4KB page size
POOL_CONFIG_MAX_SUPPORTED = 6
POOL_CONFIG_GMMU_FMT_1, // configure pool for client page tables with version = GMMU_FMT_VERSION_1
POOL_CONFIG_GMMU_FMT_2, // configure pool for client page tables with version = GMMU_FMT_VERSION_2
POOL_CONFIG_CTXBUF_512M, // configure pool for RM internal allocations like ctx buffers with 512MB page size
POOL_CONFIG_CTXBUF_2M, // configure pool for RM internal allocations like ctx buffers with 2MB page size
POOL_CONFIG_CTXBUF_64K, // configure pool for RM internal allocations like ctx buffers with 64KB page size
POOL_CONFIG_CTXBUF_4K, // configure pool for RM internal allocations like ctx buffers with 4KB page size
POOL_CONFIG_MAX_SUPPORTED
}POOL_CONFIG_MODE;
/* ------------------------------------ Datatypes ---------------------------------- */

View File

@@ -53,11 +53,6 @@ OSSimEscapeWriteBuffer stubOsSimEscapeWriteBuffer;
OSSimEscapeRead stubOsSimEscapeRead;
OSSimEscapeReadBuffer stubOsSimEscapeReadBuffer;
OSSetSurfaceName stubOsSetSurfaceName;
OSCallACPI_BCL stubOsCallACPI_BCL;
OSCallACPI_ON stubOsCallACPI_ON;
OSCallACPI_OFF stubOsCallACPI_OFF;
OSCallACPI_NVHG_GPUON stubOsCallWMI_NVHG_GPUON;
OSCallACPI_NVHG_GPUOFF stubOsCallWMI_NVHG_GPUOFF;
OSCallACPI_NVHG_GPUSTA stubOsCallWMI_NVHG_GPUSTA;
OSCallACPI_NVHG_MXDS stubOsCallWMI_NVHG_MXDS;
OSCallACPI_NVHG_MXMX stubOsCallWMI_NVHG_MXMX;
@@ -66,15 +61,11 @@ OSCallACPI_NVHG_DCS stubOsCallWMI_NVHG_DCS;
OSCheckCallback stubOsCheckCallback;
OSRCCallback stubOsRCCallback;
OSCallACPI_NBPS stubOsCallACPI_NBPS;
OSCallACPI_NBSL stubOsCallACPI_NBSL;
OSCallACPI_OPTM_GPUON stubOsCallWMI_OPTM_GPUON;
OSSetupVBlank stubOsSetupVBlank;
OSObjectEventNotification stubOsObjectEventNotification;
OSPageArrayGetPhysAddr stubOsPageArrayGetPhysAddr;
OSInternalReserveFreeCallback stubOsInternalReserveFreeCallback;
OSInternalReserveAllocCallback stubOsInternalReserveAllocCallback;
OSGetUefiVariable stubOsGetUefiVariable;
OSCallACPI_LRST stubOsCallACPI_LRST;
#endif // OS_STUB_H

View File

@@ -47,9 +47,9 @@ typedef enum _NBSI_TBL_SOURCES // keep in sync with nvapi.spec
#define NBSI_TBL_SOURCE_MAX 6 // number of NBSI_TBL_SOURCES entries (not including BEST FIT)
#define NBSI_TBL_SOURCE_ALL (NBSI_TBL_SOURCE_REGISTRY | \
NBSI_TBL_SOURCE_VBIOS | \
NBSI_TBL_SOURCE_SBIOS | \
NBSI_TBL_SOURCE_ACPI | \
NBSI_TBL_SOURCE_VBIOS | \
NBSI_TBL_SOURCE_SBIOS | \
NBSI_TBL_SOURCE_ACPI | \
NBSI_TBL_SOURCE_UEFI)
#define NBSI_TBL_SOURCE_NONE 0

View File

@@ -31,7 +31,7 @@
#define PCI_EXP_ROM_SIGNATURE 0xaa55
#define PCI_EXP_ROM_SIGNATURE_NV 0x4e56 // "VN" in word format
#define PCI_EXP_ROM_SIGNATURE_NV2 0xbb77
#define IS_VALID_PCI_ROM_SIG(sig) ((sig == PCI_EXP_ROM_SIGNATURE) || \
#define IS_VALID_PCI_ROM_SIG(sig) ((sig == PCI_EXP_ROM_SIGNATURE) || \
(sig == PCI_EXP_ROM_SIGNATURE_NV) || \
(sig == PCI_EXP_ROM_SIGNATURE_NV2))
@@ -68,7 +68,7 @@ typedef union _PCI_EXP_ROM {
#define PCI_DATA_STRUCT_SIGNATURE 0x52494350 // "PCIR" in dword format
#define PCI_DATA_STRUCT_SIGNATURE_NV 0x5344504E // "NPDS" in dword format
#define PCI_DATA_STRUCT_SIGNATURE_NV2 0x53494752 // "RGIS" in dword format
#define IS_VALID_PCI_DATA_SIG(sig) ((sig == PCI_DATA_STRUCT_SIGNATURE) || \
#define IS_VALID_PCI_DATA_SIG(sig) ((sig == PCI_DATA_STRUCT_SIGNATURE) || \
(sig == PCI_DATA_STRUCT_SIGNATURE_NV) || \
(sig == PCI_DATA_STRUCT_SIGNATURE_NV2))

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -54,6 +54,42 @@
#define NV_PB_PFM_REQ_HNDLR_PCTRL_BIT_31 31:31
#define NV_PB_PFM_REQ_HNDLR_PCTRL_BIT_31_ZERO (0)
/*
* NV0000_CTRL_PFM_REQ_HNDLR_EDPP_LIMIT_INFO
*
* GPU EDPpeak Limit information for platform
*
* ulVersion
* (Major(16 bits):Minor(16 bits), current v1.0)
* Little endian format 0x00, 0x00, 0x01, 0x00
* limitLast
* last requested platform limit
* limitMin
* Minimum allowed limit value on EDPp policy on both AC and DC
* limitRated
* Rated/default allowed limit value on EDPp policy on AC
* limitMax
* Maximum allowed limit value on EDPp policy on AC
* limitCurr
* Current resultant limit effective on EDPp policy on AC and DC
* limitBattRated
* Default/rated allowed limit on EDPp policy on DC
* limitBattMax
* Maximum allowed limit on EDPp policy on DC
* rsvd
* Reserved
*/
typedef struct NV0000_CTRL_PFM_REQ_HNDLR_EDPP_LIMIT_INFO_V1 {
NvU32 ulVersion;
NvU32 limitLast;
NvU32 limitMin;
NvU32 limitRated;
NvU32 limitMax;
NvU32 limitCurr;
NvU32 limitBattRated;
NvU32 limitBattMax;
NvU32 rsvd;
} NV0000_CTRL_PFM_REQ_HNDLR_EDPP_LIMIT_INFO_V1, *PNV0000_CTRL_PFM_REQ_HNDLR_EDPP_LIMIT_INFO_V1;
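A hedged initialization sketch; the version value encodes Major 1/Minor 0 per the comment above and matches the NV0000_CTRL_PFM_REQ_HNDLR_EDPP_VERSION_V10 constant introduced further down:

NV0000_CTRL_PFM_REQ_HNDLR_EDPP_LIMIT_INFO_V1 edppInfo = {0};

edppInfo.ulVersion = 0x00010000U;  // v1.0: major in upper 16 bits, minor in lower
// The limit* fields would be populated from the platform's EDPp policy.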
/*
* NV0000_CTRL_PFM_REQ_HNDLR_PSHAREDATA
@@ -181,6 +217,8 @@ typedef struct _NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_PARAMS_EX
#define NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_CMD_GETPPM (GPS_FUNC_GETPPM)
#define NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_CMD_SETPPM (GPS_FUNC_SETPPM)
#define NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_CMD_PSHAREPARAMS (GPS_FUNC_PSHAREPARAMS)
#define NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_CMD_SETEDPPLIMITINFO (GPS_FUNC_SETEDPPLIMITINFO)
#define NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_CMD_GETEDPPLIMIT (GPS_FUNC_GETEDPPLIMIT)
// PFM_REQ_HNDLR_SUPPORT output
#define NV0000_CTRL_PFM_REQ_HNDLR_SUPPORTED_SUPPORT_AVAIL 0:0
@@ -193,6 +231,8 @@ typedef struct _NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_PARAMS_EX
#define NV0000_CTRL_PFM_REQ_HNDLR_SUPPORTED_GETPPM_AVAIL 40:40
#define NV0000_CTRL_PFM_REQ_HNDLR_SUPPORTED_SETPPM_AVAIL 41:41
#define NV0000_CTRL_PFM_REQ_HNDLR_SUPPORTED_PSHAREPARAMS_AVAIL 42:42
#define NV0000_CTRL_PFM_REQ_HNDLR_SUPPORTED_INFOEDPPLIMIT_AVAIL 43:43
#define NV0000_CTRL_PFM_REQ_HNDLR_SUPPORTED_SETEDPPLIMIT_AVAIL 44:44
// PFM_REQ_HNDLR_PCONTROL
#define NV0000_CTRL_PFM_REQ_HNDLR_PCONTROL_REQ_TYPE 3:0
@@ -221,7 +261,12 @@ typedef struct _NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_PARAMS_EX
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_USER_CONFIG_TGP_MODE 22:22 // output only
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_USER_CONFIG_TGP_MODE_DISABLE (0)
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_USER_CONFIG_TGP_MODE_ENABLE (1)
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_PLATFORM_GETEDPPEAKLIMIT_SET 25:25 // output only
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_PLATFORM_GETEDPPEAKLIMIT_SET_FALSE (0U)
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_PLATFORM_GETEDPPEAKLIMIT_SET_TRUE (1U)
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_PLATFORM_SETEDPPEAKLIMITINFO_SET 26:26 // output only
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_PLATFORM_SETEDPPEAKLIMITINFO_SET_FALSE (0U)
#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARESTATUS_PLATFORM_SETEDPPEAKLIMITINFO_SET_TRUE (1U)
// Shared by GETPPL, SETPPL
#define NV0000_CTRL_PFM_REQ_HNDLR_PPL_ARGS_COUNT (3)
@@ -250,6 +295,9 @@ typedef struct _NV0000_CTRL_PFM_REQ_HNDLR_CALL_ACPI_PARAMS_EX
#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_ARGS_INDEX 7:0 // output
#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_ARGS_AVAILABLE_MASK 15:8 // output
// Shared by INFOEDPPLIMIT and SETEDPPLIMIT
#define NV0000_CTRL_PFM_REQ_HNDLR_EDPP_VERSION_V10 (0x10000U) // input & output
//
// PFM_REQ_HNDLR_PSHARE_PARAMS
// status bits

View File

@@ -99,20 +99,20 @@ typedef struct
#define RMCTRL_CLEAR_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) &= ~(0?cap##field))
// macros to AND/OR caps between two tables
#define RMCTRL_AND_CAP(finaltbl,tmptbl,tmp,cap,field) \
#define RMCTRL_AND_CAP(finaltbl,tmptbl,tmp,cap,field) \
tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] & tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
#define RMCTRL_OR_CAP(finaltbl,tmptbl,tmp,cap,field) \
#define RMCTRL_OR_CAP(finaltbl,tmptbl,tmp,cap,field) \
tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] | tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
// Checks whether the command ID is a NULL command.
// We allow NVXXXX_CTRL_CMD_NULL (0x00000000) as well as the
// per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 )
#define RMCTRL_IS_NULL_CMD(cmd) ((cmd == NVXXXX_CTRL_CMD_NULL) || \
#define RMCTRL_IS_NULL_CMD(cmd) ((cmd == NVXXXX_CTRL_CMD_NULL) || \
(FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && \
FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd)))
@@ -141,11 +141,8 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
#define _RMCTRL_PREP_ACCESS_ARG(x) | NVBIT(NV_CONCATENATE(RS_ACCESS_, x))
#define ACCESS_RIGHTS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_ACCESS_ARG, __VA_ARGS__))
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \
( \
(ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && \
!RMCFG_FEATURE_PHYSICAL_RM \
)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \
(ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL)
//
// 'FLAGS' Attribute

View File

@@ -163,7 +163,7 @@ NV_STATUS intermapDelDmaMapping (RsClient *, VirtualMemory *
void intermapFreeDmaMapping (PCLI_DMA_MAPPING_INFO);
CLI_DMA_MAPPING_INFO *intermapGetDmaMapping (VirtualMemory *pVirtualMemory, NvU64 dmaOffset, NvU32 gpuMask);
NvBool CliGetDmaMappingInfo (NvHandle, NvHandle, NvHandle, NvU64, NvU32, PCLI_DMA_MAPPING_INFO*);
NvBool CliGetDmaMappingInfo (RsClient *, NvHandle, NvHandle, NvU64, NvU32, PCLI_DMA_MAPPING_INFO*);
void CliGetDmaMappingIterator (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR, PNODE pDmaMappingList);
void CliGetDmaMappingNext (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,6 +39,7 @@
typedef struct gpuSession *gpuSessionHandle;
typedef struct gpuDevice *gpuDeviceHandle;
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
typedef struct gpuTsg *gpuTsgHandle;
typedef struct gpuChannel *gpuChannelHandle;
typedef struct gpuObject *gpuObjectHandle;
@@ -97,7 +98,11 @@ NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
NvLength pageCount,
NvU64 pageSize);
NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace,
const gpuTsgAllocParams *params,
gpuTsgHandle *tsgHandle);
NV_STATUS nvGpuOpsChannelAllocate(const gpuTsgHandle tsgHandle,
const gpuChannelAllocParams *params,
gpuChannelHandle *channelHandle,
gpuChannelInfo *channelInfo);
@@ -105,6 +110,8 @@ NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);
void nvGpuOpsTsgDestroy(struct gpuTsg *tsg);
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
@@ -196,7 +203,7 @@ NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
void **pPma,
const UvmPmaStatistics **pPmaPubStats);
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo, NvU32 accessCntrIndex);
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo);
@@ -272,4 +279,40 @@ NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);
// Interface used for CCSL
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslLogDeviceEncryption(struct ccslContext_t *ctx,
NvU8 *decryptIv);
NV_STATUS nvGpuOpsCcslAcquireEncryptionIv(struct ccslContext_t *ctx,
NvU8 *encryptIv);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslEncryptWithIv(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *encryptIv,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU8 *outputBuffer,
NvU8 const *authTagBuffer);
NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 *messageNum);
#endif /* _NV_GPU_OPS_H_*/
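A hedged sketch of the encrypt path implied by this interface; channel is assumed to be an existing gpuChannelHandle, and the 16-byte auth tag size is illustrative rather than taken from this header:

struct ccslContext_t *pCtx = NULL;
NvU8 plaintext[256], ciphertext[256];
NvU8 authTag[16];   // tag size assumed for illustration

NV_STATUS status = nvGpuOpsCcslContextInit(&pCtx, channel);
if (status == NV_OK)
{
    status = nvGpuOpsCcslEncrypt(pCtx, sizeof(plaintext), plaintext,
                                 ciphertext, authTag);
    (void)nvGpuOpsCcslContextClear(pCtx);
}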

View File

@@ -77,12 +77,12 @@ typedef struct API_STATE RMAPI_PARAM_COPY;
// this initialization, there is no need to make it return a status and
// duplicate error checking.
//
#define RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, theUserParams, numElems, sizeOfElem) \
do { \
RMAPI_PARAM_COPY_SET_MSG_TAG((paramCopy), __FUNCTION__); \
(paramCopy).ppKernelParams = (void **) &(pKernelParams); \
(paramCopy).pUserParams = (theUserParams); \
(paramCopy).flags = RMAPI_PARAM_COPY_FLAGS_NONE; \
#define RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, theUserParams, numElems, sizeOfElem) \
do { \
RMAPI_PARAM_COPY_SET_MSG_TAG((paramCopy), __FUNCTION__); \
(paramCopy).ppKernelParams = (void **) &(pKernelParams); \
(paramCopy).pUserParams = (theUserParams); \
(paramCopy).flags = RMAPI_PARAM_COPY_FLAGS_NONE; \
(paramCopy).bSizeValid = portSafeMulU32((numElems), (sizeOfElem), &(paramCopy).paramsSize); \
} while(0)
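The trailing bSizeValid assignment is the overflow check: portSafeMulU32 reports whether numElems * sizeOfElem fits in 32 bits. Callers are expected to test it right after the macro runs; a hedged sketch with hypothetical names:

RMAPI_PARAM_COPY paramCopy;
NvU32 *pKernelList = NULL;   // hypothetical kernel-side copy of a user array

RMAPI_PARAM_COPY_INIT(paramCopy, pKernelList, pUserList, listCount, sizeof(NvU32));
if (!paramCopy.bSizeValid)
    return NV_ERR_INVALID_ARGUMENT;   // listCount * sizeof(NvU32) overflowed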

View File

@@ -114,10 +114,16 @@ RM_API *rmapiGetInterface(RMAPI_TYPE rmapiType);
// Flags for RM_API::Alloc
#define RMAPI_ALLOC_FLAGS_NONE 0
#define RMAPI_ALLOC_FLAGS_SKIP_RPC NVBIT(0)
#define RMAPI_ALLOC_FLAGS_SERIALIZED NVBIT(1)
// Flags for RM_API::Free
#define RMAPI_FREE_FLAGS_NONE 0
// Flags for RM_API RPC's
#define RMAPI_RPC_FLAGS_NONE 0
#define RMAPI_RPC_FLAGS_COPYOUT_ON_ERROR NVBIT(0)
#define RMAPI_RPC_FLAGS_SERIALIZED NVBIT(1)
/**
* Interface for performing operations through the RM API exposed to client
* drivers. Interface provides consistent view to the RM API while abstracting
@@ -129,16 +135,16 @@ struct _RM_API
{
// Allocate a resource with default security attributes and local pointers (no NvP64)
NV_STATUS (*Alloc)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle *phObject, NvU32 hClass, void *pAllocParams);
NvHandle *phObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize);
// Allocate a resource with default security attributes and local pointers (no NvP64)
// and client assigned handle
NV_STATUS (*AllocWithHandle)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle hObject, NvU32 hClass, void *pAllocParams);
NvHandle hObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize);
// Allocate a resource
NV_STATUS (*AllocWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams,
NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams, NvU32 paramsSize,
NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo);
// Free a resource with default security attributes
@@ -303,7 +309,7 @@ rmapiEpilogue
RM_API_CONTEXT *pContext
);
void
void
rmapiInitLockInfo
(
RM_API *pRmApi,
@@ -315,7 +321,7 @@ rmapiInitLockInfo
// RM locking modules: 24-bit group bitmask, 8-bit subgroup id
//
// Lock acquires are tagged with a RM_LOCK_MODULE_* in order to partition
// the acquires into groups, which allows read-only locks to be
// the acquires into groups, which allows read-only locks to be
// enabled / disabled on a per-group basis (via apiLockMask and gpuLockMask
// in OBJSYS.)
//

View File

@@ -46,11 +46,11 @@
MAKE_LIST(ClientHandlesList, NvHandle);
#define serverutilGetDerived(pRmClient, hResource, ppBaseRes, type) \
(clientGetResource(staticCast((pRmClient), RsClient), \
(hResource), \
classId(type), \
(ppBaseRes)) != NV_OK) \
? NULL \
(clientGetResource(staticCast((pRmClient), RsClient), \
(hResource), \
classId(type), \
(ppBaseRes)) != NV_OK) \
? NULL \
: dynamicCast(*(ppBaseRes), type)
/**
@@ -119,9 +119,8 @@ NV_STATUS serverutilGenResourceHandle(NvHandle, NvHandle*);
* Get a client pointer from a client handle without taking any locks.
*
* @param[in] hClient The client to acquire
* @param[out] ppClient Pointer to the RmClient
*/
NV_STATUS serverutilGetClientUnderLock(NvHandle hClient, RmClient **ppClient);
RmClient *serverutilGetClientUnderLock(NvHandle hClient);
/**
* Get a client pointer from a client handle and lock it.

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,8 +36,20 @@
#define VGPU_UUID_SIZE NVA081_VM_UUID_SIZE
#define VGPU_MAX_GFID 64
#define VGPU_SIGNATURE_SIZE NVA081_VGPU_SIGNATURE_SIZE
#define VGPU_MAX_PLUGIN_CHANNELS 5
#define MAX_VGPU_DEVICES_PER_PGPU NVA081_MAX_VGPU_PER_PGPU
#define SET_GUEST_ID_ACTION_SET 0
#define SET_GUEST_ID_ACTION_UNSET 1
typedef struct
{
NvU8 action;
NvU32 vmPid;
VM_ID_TYPE vmIdType;
VM_ID guestVmId;
} SET_GUEST_ID_PARAMS;
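A hedged sketch of filling in the new structure; vmPid and vmIdType are caller-supplied, and the VM_ID_TYPE enumerants are defined outside this hunk:

SET_GUEST_ID_PARAMS params = {0};

params.action   = SET_GUEST_ID_ACTION_SET;
params.vmPid    = vmPid;      // PID of the guest VM process
params.vmIdType = vmIdType;   // one of the VM_ID_TYPE values (not shown here)
// params.guestVmId would carry the matching VM_ID payload.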
/* This structure represents the vGPU type's attributes */
typedef struct
{