535.54.03

This commit is contained in:
Andy Ritger
2023-06-14 12:37:59 -07:00
parent eb5c7665a1
commit 26458140be
120 changed files with 83370 additions and 81507 deletions

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NV_IOCTL_NVLOG)
#define NV_IOCTL_NVLOG
#include <nvtypes.h>
#include "ctrl/ctrl0000/ctrl0000nvd.h"
//
// Parameter block for the NV_ESC_RM_NVLOG_CTRL escape (ioctl).
// 'ctrl' selects one of the NV0000_CTRL_CMD_NVD_GET_NVLOG* control commands
// and the matching member of 'params' carries that command's parameters;
// 'status' returns the NV_STATUS result of the dispatched control call.
// NOTE(review): this struct crosses the user/kernel boundary, so its layout
// must not change — comments only.
//
typedef struct
{
NvU32 ctrl; // in
NvU32 status; // out
union // in/out
{
NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS getNvlogInfo;
NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS getNvlogBufferInfo;
NV0000_CTRL_NVD_GET_NVLOG_PARAMS getNvlog;
} params;
} NV_NVLOG_CTRL_PARAMS;
#endif

View File

@@ -510,6 +510,12 @@ struct nv_file_private_t
nv_file_private_t *ctl_nvfp;
void *ctl_nvfp_priv;
NvU32 register_or_refcount;
//
// True if a client or an event was ever allocated on this fd.
// If false, RMAPI cleanup is skipped.
//
NvBool bCleanupRmapi;
};
// Forward define the gpu ops structures
@@ -959,6 +965,8 @@ NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU
void NV_API_CALL rm_power_source_change_event (nvidia_stack_t *, NvU32);
void NV_API_CALL rm_request_dnotifier_state (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *);
NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64);

View File

@@ -50,5 +50,6 @@
#define NV_ESC_RM_EXPORT_OBJECT_TO_FD 0x5C
#define NV_ESC_RM_IMPORT_OBJECT_FROM_FD 0x5D
#define NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 0x5E
#define NV_ESC_RM_NVLOG_CTRL 0x5F
#endif // NV_ESCAPE_H_INCLUDED

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -130,6 +130,8 @@ NV_STATUS RmInitX86EmuState(OBJGPU *);
void RmFreeX86EmuState(OBJGPU *);
NV_STATUS RmPowerSourceChangeEvent(nv_state_t *, NvU32);
void RmRequestDNotifierState(nv_state_t *);
const NvU8 *RmGetGpuUuidRaw(nv_state_t *);
NV_STATUS nv_vbios_call(OBJGPU *, NvU32 *, NvU32 *);

View File

@@ -2177,6 +2177,7 @@ RmPowerManagementInternal(
//
RmPowerSourceChangeEvent(nv, !ac_plugged);
}
RmRequestDNotifierState(nv);
}
break;

View File

@@ -46,6 +46,10 @@
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
#include "rmapi/client_resource.h"
#include "nvlog/nvlog.h"
#include <nv-ioctl-nvlog.h>
#include <ctrl/ctrl00fd.h>
#define NV_CTL_DEVICE_ONLY(nv) \
@@ -839,6 +843,40 @@ NV_STATUS RmIoctl(
break;
}
case NV_ESC_RM_NVLOG_CTRL:
{
NV_NVLOG_CTRL_PARAMS *pParams = data;
NV_CTL_DEVICE_ONLY(nv);
if (!osIsAdministrator())
{
rmStatus = NV_ERR_INSUFFICIENT_PERMISSIONS;
pParams->status = rmStatus;
goto done;
}
switch (pParams->ctrl)
{
// Do not use NVOC _DISPATCH here as it dereferences NULL RmClientResource*
case NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO:
rmStatus = cliresCtrlCmdNvdGetNvlogInfo_IMPL(NULL, &pParams->params.getNvlogInfo);
break;
case NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO:
rmStatus = cliresCtrlCmdNvdGetNvlogBufferInfo_IMPL(NULL, &pParams->params.getNvlogBufferInfo);
break;
case NV0000_CTRL_CMD_NVD_GET_NVLOG:
rmStatus = cliresCtrlCmdNvdGetNvlog_IMPL(NULL, &pParams->params.getNvlog);
break;
default:
rmStatus = NV_ERR_NOT_SUPPORTED;
break;
}
pParams->status = rmStatus;
goto done;
}
case NV_ESC_REGISTER_FD:
{
nv_ioctl_register_fd_t *params = data;

View File

@@ -5415,6 +5415,14 @@ osDmabufIsSupported(void)
return os_dma_buf_enabled;
}
/*
 * Mark the file-private tracking structure as having allocated an RM
 * client, so that rm_cleanup_file_private() performs RMAPI cleanup for
 * this fd. A NULL pOsInfo is silently ignored.
 */
void osAllocatedRmClient(void *pOsInfo)
{
    nv_file_private_t *pNvfp = (nv_file_private_t *)pOsInfo;

    if (pNvfp == NULL)
        return;

    pNvfp->bCleanupRmapi = NV_TRUE;
}
NV_STATUS
osGetEgmInfo
(

View File

@@ -31,6 +31,7 @@
#include <class/cl0000.h>
#include <rmosxfac.h> // Declares RmInitRm().
#include "gpu/gpu.h"
#include "gps.h"
#include <osfuncs.h>
#include <platform/chipset/chipset.h>
@@ -86,6 +87,13 @@
#include "gpu/bus/kern_bus.h"
//
// If timer callback comes when PM resume is in progress, then it can't be
// serviced. The timer needs to be rescheduled in this case. This time controls
// the duration of rescheduling.
//
#define TIMER_RESCHED_TIME_DURING_PM_RESUME_NS (100 * 1000 * 1000)
//
// Helper function which can be called before doing any RM control
// This function:
@@ -499,6 +507,8 @@ done:
new_event->active = NV_TRUE;
new_event->refcount = 0;
nvfp->bCleanupRmapi = NV_TRUE;
NV_PRINTF(LEVEL_INFO, "allocated OS event:\n");
NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent);
NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd);
@@ -1158,12 +1168,47 @@ NV_STATUS RmPowerSourceChangeEvent(
&params, sizeof(params));
}
/*!
 * @brief Request the latest D-Notifier status from the SBIOS.
 *
 * Covers scenarios (fresh boot, system suspend/resume) where RM was not
 * available to receive Dx notifiers: once RM is ready, invoke the
 * GPS_FUNC_REQUESTDXSTATE subfunction of the GPS 2.x ACPI _DSM so the
 * SBIOS resends the current Dx state.
 *
 * @param[in] pNv nv_state_t pointer.
 */
void RmRequestDNotifierState(
    nv_state_t *pNv
)
{
    OBJGPU    *pGpu       = NV_GET_NV_PRIV_PGPU(pNv);
    NvU32      dxFuncMask = 0;
    NvU16      dataSize   = sizeof(dxFuncMask);
    NV_STATUS  dsmStatus;

    dsmStatus = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_GPS_2X,
                               GPS_FUNC_REQUESTDXSTATE, &dxFuncMask,
                               &dataSize);
    if (dsmStatus == NV_OK)
        return;

    //
    // The GPS_FUNC_REQUESTDXSTATE subfunction may legitimately fail when
    // the SBIOS/EC does not implement it, so only log at info level.
    //
    NV_PRINTF(LEVEL_INFO,
              "%s: Failed to request Dx event update, status 0x%x\n",
              __FUNCTION__, dsmStatus);
}
/*!
* @brief Deal with D-notifier events to apply a performance
* level based on the requested auxiliary power-state.
* Read confluence page "D-Notifiers on Linux" for more details.
*
* @param[in] pGpu OBJGPU pointer.
* @param[in] pNv nv_state_t pointer.
* @param[in] event_type NvU32 Event type.
*/
static void RmHandleDNotifierEvent(
@@ -2551,6 +2596,16 @@ void NV_API_CALL rm_cleanup_file_private(
OBJSYS *pSys = SYS_GET_INSTANCE();
NV_ENTER_RM_RUNTIME(sp,fp);
//
// Skip cleaning up this fd if:
// - no RMAPI clients and events were ever allocated on this fd
// - no RMAPI object handles were exported on this fd
// Access nvfp->handles without locking as fd cleanup is synchronised by the kernel
//
if (!nvfp->bCleanupRmapi && nvfp->handles == NULL)
goto done;
pRmApi = rmapiGetInterface(RMAPI_EXTERNAL);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
threadStateSetTimeoutOverride(&threadState, 10 * 1000);
@@ -2600,6 +2655,7 @@ void NV_API_CALL rm_cleanup_file_private(
rmapiEpilogue(pRmApi, &rmApiContext);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
done:
if (nvfp->ctl_nvfp != NULL)
{
nv_put_file_private(nvfp->ctl_nvfp_priv);
@@ -3018,14 +3074,16 @@ static NV_STATUS RmRunNanoTimerCallback(
if ((status = rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK)
{
TMR_EVENT *pEvent = (TMR_EVENT *)pTmrEvent;
NvU64 timeNs = pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH) ?
TIMER_RESCHED_TIME_DURING_PM_RESUME_NS :
osGetTickResolution();
//
// We failed to acquire the lock - depending on what's holding it,
// the lock could be held for a while, so try again soon, but not too
// soon to prevent the owner from making forward progress indefinitely.
//
return osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->pOSTmrCBdata,
osGetTickResolution());
return osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->pOSTmrCBdata, timeNs);
}
threadStateInitISRAndDeferredIntHandler(&threadState, pGpu,
@@ -3062,7 +3120,7 @@ NV_STATUS NV_API_CALL rm_run_nano_timer_callback
if (pGpu == NULL)
return NV_ERR_GENERIC;
if (!FULL_GPU_SANITY_CHECK(pGpu))
if (!FULL_GPU_SANITY_FOR_PM_RESUME(pGpu))
{
return NV_ERR_GENERIC;
}
@@ -4059,6 +4117,48 @@ void NV_API_CALL rm_power_source_change_event(
NV_EXIT_RM_RUNTIME(sp,fp);
}
/*!
 * @brief OS-interface entry point: request the latest D-Notifier state
 * from the SBIOS for the given GPU.
 *
 * Acts only when the mobile-configuration flag is set on the device's
 * private state. Acquires the RM API lock, then the per-subdevice GPU
 * group lock, before forwarding to RmRequestDNotifierState(); the locks
 * are released in the reverse order.
 *
 * @param[in] sp   nv_stack_t pointer for the RM runtime.
 * @param[in] pNv  nv_state_t pointer for the GPU.
 */
void NV_API_CALL rm_request_dnotifier_state(
nv_stack_t *sp,
nv_state_t *pNv
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(pNv);
// D-Notifier requests are only meaningful on mobile configurations.
if (nvp->b_mobile_config_enabled)
{
THREAD_STATE_NODE threadState;
void *fp;
GPU_MASK gpuMask;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_ACPI)) == NV_OK)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
// LOCK: acquire per device lock
if ((pGpu != NULL) &&
((rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE,
GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_ACPI,
&gpuMask)) == NV_OK))
{
RmRequestDNotifierState(pNv);
// UNLOCK: release per device lock
rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE);
}
// UNLOCK: release API lock
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
}
}
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages(
nvidia_stack_t *sp,
nv_dma_device_t *peer,

View File

@@ -72,6 +72,7 @@ osCreateMemFromOsDescriptor
void *pPrivate;
pClient = serverutilGetClientUnderLock(hClient);
if ((pDescriptor == NvP64_NULL) ||
(*pLimit == 0) ||
(pClient == NULL))
@@ -362,6 +363,23 @@ osCheckGpuBarsOverlapAddrRange
return NV_OK;
}
/*
 * WAR for bug 4040336: when the WAR is enabled for this GPU and 'addr'
 * falls in the CPU PA window whose upper 32 bits are 0x7fff, drop the
 * upper 32 bits so the resulting bus address fits below 4GB.
 * Returns the (possibly adjusted) address.
 */
static NvU64
_doWarBug4040336
(
    OBJGPU *pGpu,
    NvU64 addr
)
{
    // Upper-32-bit mask identifying the affected CPU PA window.
    const NvU64 highMask = 0xffffffff00000000ULL;

    if (!gpuIsWarBug4040336Enabled(pGpu))
    {
        return addr;
    }

    return ((addr & highMask) == 0x7fff00000000ULL) ? (addr & 0xffffffffULL)
                                                    : addr;
}
static NV_STATUS
osCreateOsDescriptorFromIoMemory
(
@@ -440,6 +458,14 @@ osCreateOsDescriptorFromIoMemory
return rmStatus;
}
//
// BF3's PCIe MMIO bus address at 0x800000000000(CPU PA 0x7fff00000000) is
// too high for Ampere to address. As a result, BF3's bus address is
// moved to < 4GB. Now, the CPU PA and the bus address are no longer 1:1
// and needs to be adjusted.
//
*base = _doWarBug4040336(pGpu, *base);
rmStatus = memdescCreate(ppMemDesc, pGpu, (*pLimit + 1), 0,
NV_MEMORY_CONTIGUOUS, ADDR_SYSMEM,
NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE);

View File

@@ -869,30 +869,6 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *sp,
return rmStatus;
}
/*
 * Thin OS-interface wrapper: enter the RM runtime, acquire an encryption
 * IV for the given CCSL context via nvGpuOpsCcslAcquireEncryptionIv(),
 * and return its status.
 */
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_acquire_encryption_iv(nvidia_stack_t *sp,
                                                            struct ccslContext_t *ctx,
                                                            NvU8 *encryptIv)
{
    void *fp;
    NV_STATUS status;

    NV_ENTER_RM_RUNTIME(sp,fp);
    status = nvGpuOpsCcslAcquireEncryptionIv(ctx, encryptIv);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return status;
}
/*
 * Thin OS-interface wrapper: enter the RM runtime, log a device-side
 * encryption for the given CCSL context via
 * nvGpuOpsCcslLogDeviceEncryption(), and return its status.
 */
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *sp,
                                                            struct ccslContext_t *ctx,
                                                            NvU8 *decryptIv)
{
    void *fp;
    NV_STATUS status;

    NV_ENTER_RM_RUNTIME(sp,fp);
    status = nvGpuOpsCcslLogDeviceEncryption(ctx, decryptIv);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return status;
}
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *sp,
struct ccslContext_t *ctx,
NvU8 direction)
@@ -942,12 +918,15 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *sp,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
NvU8 const *authTagData)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, outputBuffer, authTagData);
rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, outputBuffer,
addAuthData, addAuthDataSize, authTagData);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
@@ -979,3 +958,17 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *sp,
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
/*
 * Thin OS-interface wrapper: enter the RM runtime, advance the CCSL
 * context's IV for the given direction by 'increment' via
 * nvGpuOpsIncrementIv() (optionally returning it in 'iv'), and return
 * the resulting status.
 */
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *sp,
                                                   struct ccslContext_t *ctx,
                                                   NvU8 direction,
                                                   NvU64 increment,
                                                   NvU8 *iv)
{
    void *fp;
    NV_STATUS status;

    NV_ENTER_RM_RUNTIME(sp,fp);
    status = nvGpuOpsIncrementIv(ctx, direction, increment, iv);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return status;
}