515.43.04

This commit is contained in:
Andy Ritger
2022-05-09 13:18:59 -07:00
commit 1739a20efc
2519 changed files with 1060036 additions and 0 deletions

View File

@@ -0,0 +1,38 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _ALLOC_SIZE_H_
#define _ALLOC_SIZE_H_
#include "nvstatus.h"
/*
* rmapiGetClassAllocParamSize()
*
* Returns class size in number of bytes. Returns zero
* if the specified class has no optional allocation parameters
*
*/
NV_STATUS rmapiGetClassAllocParamSize(NvU32 *pAllocParamSizeBytes, NvP64 pUserParams, NvBool *pBAllowNull, NvU32 hClass);
#endif // _ALLOC_SIZE_H_

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "g_binary_api_nvoc.h"
#ifndef BINARY_API_H
#define BINARY_API_H
#include "core/core.h"
#include "rmapi/resource.h"
#include "gpu/gpu_resource.h"
#include "resserv/rs_resource.h"
#include "rmapi/control.h"
// NVOC-declared resource class: BinaryApi derives from GpuResource and is
// reached through the generated "binapi"-prefixed entry points (see
// g_binary_api_nvoc.h, included above).
NVOC_PREFIX(binapi) class BinaryApi : GpuResource
{
public:
// Constructor: chains to the GpuResource constructor with the same call
// context and allocation parameters (NVOC constructor-chaining syntax).
NV_STATUS binapiConstruct(BinaryApi *pResource,
CALL_CONTEXT *pCallContext,
RS_RES_ALLOC_PARAMS_INTERNAL *pParams) :
GpuResource(pCallContext, pParams);
// Control-command dispatch for this class; virtual so subclasses
// (e.g. BinaryApiPrivileged below) can override it.
virtual NV_STATUS binapiControl(BinaryApi *pResource, CALL_CONTEXT *pCallContext,
RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
};
// Privileged variant of BinaryApi; overrides the control entry point.
// NOTE(review): the privilege enforcement itself is presumably in the
// binapiprivControl implementation -- not visible in this header.
NVOC_PREFIX(binapipriv) class BinaryApiPrivileged : BinaryApi
{
public:
// Constructor: chains to the BinaryApi constructor.
NV_STATUS binapiprivConstruct(BinaryApiPrivileged *pResource, CALL_CONTEXT *pCallContext,
RS_RES_ALLOC_PARAMS_INTERNAL *pParams) :
BinaryApi(pCallContext, pParams);
virtual NV_STATUS binapiprivControl(BinaryApiPrivileged *pResource, CALL_CONTEXT *pCallContext,
RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
};
#endif

View File

@@ -0,0 +1,3 @@
#include "g_client_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_client_resource_nvoc.h"

View File

@@ -0,0 +1,159 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CONTROL_H_
#define _CONTROL_H_
#include "core/core.h"
#include "resserv/rs_resource.h"
#include "resserv/resserv.h"
#include "utils/nvmacro.h"
#include "rmapi/param_copy.h"
struct NVOC_EXPORTED_METHOD_DEF;
typedef RS_RES_CONTROL_PARAMS_INTERNAL RmCtrlParams;
//
// RmCtrlExecuteCookie
//
// This typedef describes the data used by the rmctrl cmd execution
// path. The data is filled at the beginning of rmControlCmdExecute()
// and used as necessary in the other stages.
//
struct RS_CONTROL_COOKIE
{
// Rmctrl Command ID
NvU32 cmd;
// Rmctrl Flags
NvU32 ctrlFlags;
// Required Access Rights for this command
const RS_ACCESS_MASK rightsRequired;
// Cleanup flags: set when the corresponding copy below was actually
// allocated, so the teardown path knows what to release.
NvBool bFreeParamCopy; ///< Indicates that param copies should be cleaned up
NvBool bFreeEmbeddedCopy; ///< Indicates embedded param copies should be cleaned up
// Copy-in/copy-out bookkeeping for the top-level user parameter block.
RMAPI_PARAM_COPY paramCopy;
RMAPI_PARAM_COPY embeddedParamCopies[4]; // Up to 4 embedded pointers per one RmControl identified
};
typedef RS_CONTROL_COOKIE RmCtrlExecuteCookie;
// values for RmCtrlDeferredCmd.pending
#define RMCTRL_DEFERRED_FREE 0 // buffer is free
#define RMCTRL_DEFERRED_ACQUIRED 1 // buffer is acquired to fill in data
#define RMCTRL_DEFERRED_READY 2 // buffer is acquired and data has been copied.
#define RMCTRL_DEFERRED_MAX_PARAM_SIZE 128 // 128 bytes internal buffer for rmctrl param
// A deferred rmctrl request: the parameters are snapshotted into the
// internal 'paramBuffer' and the command is executed later via
// rmControl_Deferred().
typedef struct
{
// One of the RMCTRL_DEFERRED_* states above; volatile because it is
// presumably written and polled from different contexts -- TODO confirm
// the exact producer/consumer pairing.
NvS32 volatile pending;
NvU32 cpuInst; // instance the deferred command is queued against -- NOTE(review): confirm semantics
RmCtrlParams rmCtrlDeferredParams;
NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE]; // buffer to hold rmCtrlDeferredParams.pParams
} RmCtrlDeferredCmd;
// catch commands misdirected to non-existent engines
// NOTE(review): expands to an unbraced 'if ... return' -- safe only when the
// invocation is not directly followed by an 'else'; it cannot use the usual
// do { } while (0) wrapper because it must be able to 'return'.
#define VERIFY_OBJ_PTR(p) if (p == NULL) return NV_ERR_INVALID_ARGUMENT
// macros to get/set/clear cap bits
//
// Each cap 'field' token appears to expand to an 'index : mask' pair, so
// (1?cap##field) evaluates the ternary to the byte index into the cap table
// and (0?cap##field) evaluates to the bit mask within that byte -- TODO
// confirm against the cap field definitions.  The '? 0/0 :' arm deliberately
// divides by zero to break compilation/evaluation when the index is out of
// the table's range (>= cap##_TBL_SIZE).
#define RMCTRL_GET_CAP(tbl,cap,field) (((NvU8)tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field))
#define RMCTRL_SET_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field))
#define RMCTRL_CLEAR_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) &= ~(0?cap##field))
// macros to AND/OR caps between two tables
// RMCTRL_AND_CAP: for one cap bit, finaltbl[field] &= tmptbl[field],
// using caller-supplied 'tmp' as scratch for the combined value.
#define RMCTRL_AND_CAP(finaltbl,tmptbl,tmp,cap,field) \
tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] & tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
// RMCTRL_OR_CAP: for one cap bit, finaltbl[field] |= tmptbl[field].
#define RMCTRL_OR_CAP(finaltbl,tmptbl,tmp,cap,field) \
tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] | tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
// Whether the command ID is a NULL command?
// We allow NVXXXX_CTRL_CMD_NULL (0x00000000) as well as the
// per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 )
// NOTE(review): 'cmd' is used unparenthesized in the first comparison below,
// so callers should pass a simple expression.
#define RMCTRL_IS_NULL_CMD(cmd) ((cmd == NVXXXX_CTRL_CMD_NULL) || \
(FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && \
FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd)))
// top-level internal RM Control interface
NV_STATUS rmControl_Deferred(RmCtrlDeferredCmd *pRmCtrlDeferredCmd);
// Helper functions for handling embedded parameter copies
NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams);
NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams);
#define RM_CLIENT_PTR_ACCESS_CHECK_READ NVBIT(0)
#define RM_CLIENT_PTR_ACCESS_CHECK_WRITE NVBIT(1)
//
// For NVOC Exported functions
//
// RMCTRL_FLAGS(A, B, C) is expanded to
// 0 | RMCTRL_FLAGS_A | RMCTRL_FLAGS_B | RMCTRL_FLAGS_C
//
// ACCESS_RIGHTS(A, B, C) is expanded to
// 0 | NVBIT(RS_ACCESS_A) | NVBIT(RS_ACCESS_B) | NVBIT(RS_ACCESS_C)
//
#define RMCTRL_EXPORT(cmdId, ...) [[nvoc::export(cmdId, __VA_ARGS__)]]
#define _RMCTRL_PREP_FLAG_ARG(x) | NV_CONCATENATE(RMCTRL_FLAGS_, x)
#define RMCTRL_FLAGS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_FLAG_ARG, __VA_ARGS__))
#define _RMCTRL_PREP_ACCESS_ARG(x) | NVBIT(NV_CONCATENATE(RS_ACCESS_, x))
#define ACCESS_RIGHTS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_ACCESS_ARG, __VA_ARGS__))
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \
( \
(ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && \
!RMCFG_FEATURE_PHYSICAL_RM \
)
// per-rmcontrol flags values
#define RMCTRL_FLAGS_NONE 0x000000000
#define RMCTRL_FLAGS_NO_STATIC 0x000000000 // internal to chip-config. TODO -- delete
#define RMCTRL_FLAGS_ONLY_IF_CMD_DEFINED 0x000000000 // internal to chip-config. TODO -- delete
#define RMCTRL_FLAGS_KERNEL_PRIVILEGED 0x000000000
#define RMCTRL_FLAGS_NO_GPUS_LOCK 0x000000001
#define RMCTRL_FLAGS_NO_GPUS_ACCESS 0x000000002
#define RMCTRL_FLAGS_PRIVILEGED 0x000000004
#define RMCTRL_FLAGS_HACK_USED_ON_MULTIPLE_CLASSES 0x000000008
#define RMCTRL_FLAGS_NON_PRIVILEGED 0x000000010
#define RMCTRL_FLAGS_BIG_PAYLOAD 0x000000020
#define RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY 0x000000040
#define RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED 0x000000100 // for Resserv Access Rights migration
#define RMCTRL_FLAGS_ROUTE_TO_PHYSICAL 0x000000200
#define RMCTRL_FLAGS_INTERNAL 0x000000400
#define RMCTRL_FLAGS_API_LOCK_READONLY 0x000000800
#define RMCTRL_FLAGS_GPU_LOCK_READONLY 0x000001000
#define RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST 0x000002000
#define RMCTRL_FLAGS_CACHEABLE 0x000004000
#define RMCTRL_FLAGS_COPYOUT_ON_ERROR 0x000008000
#define RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS 0x000010000
#endif // _CONTROL_H_

View File

@@ -0,0 +1,3 @@
#include "g_event_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_event_buffer_nvoc.h"

View File

@@ -0,0 +1,130 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXPORTS_H
#define _EXPORTS_H
#include "core/core.h"
//
// !! Deprecated. Do not use these exported API functions. Instead use the
// User or Kernel ones below depending on if they are called from Kernel or
// User space.
//
// A User export is to be used for code paths originating from user space and
// MUST pass only user client handles and user-mode pointers. On most OSes, RM
// will sanity check the use of handles and pointers against incorrect or
// malicious use.
//
// A Kernel export is to be used for code paths originating from kernel space
// and MUST pass only kernel client handles and kernel-mode pointers. By default
// RM will skip any validation checks when a Kernel export is called. The onus
// is on the caller that only valid handles and pointers are passed.
// TBD. RM may enable the checks on debug builds or when a regkey is set.
//
// For more information refer to the Kernel_Client_Data_Validation wiki page
//
// WARNING!! RM has validation checks for handles and pointers. An incorrect use
// of export can cause RM failing the API.
//
void Nv01AllocMemory (NVOS02_PARAMETERS*);
void Nv01AllocObject (NVOS05_PARAMETERS*);
void Nv04Alloc (NVOS21_PARAMETERS*);
void Nv04AllocWithAccess (NVOS64_PARAMETERS*);
void Nv01Free (NVOS00_PARAMETERS*);
void Nv04Control (NVOS54_PARAMETERS*);
void Nv04VidHeapControl (NVOS32_PARAMETERS*);
void Nv04IdleChannels (NVOS30_PARAMETERS*);
void Nv04MapMemory (NVOS33_PARAMETERS*);
void Nv04UnmapMemory (NVOS34_PARAMETERS*);
void Nv04UpdateContextDma (NVOS37_PARAMETERS*);
void Nv04I2CAccess (NVOS_I2C_ACCESS_PARAMS*);
void Nv04AllocContextDma (NVOS39_PARAMETERS*);
void Nv04BindContextDma (NVOS49_PARAMETERS*);
void Nv04MapMemoryDma (NVOS46_PARAMETERS*);
void Nv04UnmapMemoryDma (NVOS47_PARAMETERS*);
void Nv04DupObject (NVOS55_PARAMETERS*);
void Nv04Share (NVOS57_PARAMETERS*);
void Nv04AddVblankCallback (NVOS61_PARAMETERS*);
// exported "User" API functions
void Nv01AllocMemoryUser (NVOS02_PARAMETERS*);
void Nv01AllocObjectUser (NVOS05_PARAMETERS*);
void Nv04AllocUser (NVOS21_PARAMETERS*);
void Nv04AllocWithAccessUser (NVOS64_PARAMETERS*);
void Nv01FreeUser (NVOS00_PARAMETERS*);
void Nv04ControlUser (NVOS54_PARAMETERS*);
void Nv04VidHeapControlUser (NVOS32_PARAMETERS*);
void Nv04IdleChannelsUser (NVOS30_PARAMETERS*);
void Nv04MapMemoryUser (NVOS33_PARAMETERS*);
void Nv04UnmapMemoryUser (NVOS34_PARAMETERS*);
void Nv04UpdateContextDmaUser (NVOS37_PARAMETERS*);
void Nv04I2CAccessUser (NVOS_I2C_ACCESS_PARAMS*);
void Nv04AllocContextDmaUser (NVOS39_PARAMETERS*);
void Nv04BindContextDmaUser (NVOS49_PARAMETERS*);
void Nv04MapMemoryDmaUser (NVOS46_PARAMETERS*);
void Nv04UnmapMemoryDmaUser (NVOS47_PARAMETERS*);
void Nv04DupObjectUser (NVOS55_PARAMETERS*);
void Nv04ShareUser (NVOS57_PARAMETERS*);
void Nv04AddVblankCallbackUser (NVOS61_PARAMETERS*);
// exported "Kernel" API functions
void Nv01AllocMemoryKernel (NVOS02_PARAMETERS*);
void Nv01AllocObjectKernel (NVOS05_PARAMETERS*);
void Nv04AllocKernel (NVOS21_PARAMETERS*);
void Nv04AllocWithAccessKernel (NVOS64_PARAMETERS*);
void Nv01FreeKernel (NVOS00_PARAMETERS*);
void Nv04ControlKernel (NVOS54_PARAMETERS*);
void Nv04VidHeapControlKernel (NVOS32_PARAMETERS*);
void Nv04IdleChannelsKernel (NVOS30_PARAMETERS*);
void Nv04MapMemoryKernel (NVOS33_PARAMETERS*);
void Nv04UnmapMemoryKernel (NVOS34_PARAMETERS*);
void Nv04UpdateContextDmaKernel (NVOS37_PARAMETERS*);
void Nv04I2CAccessKernel (NVOS_I2C_ACCESS_PARAMS*);
void Nv04AllocContextDmaKernel (NVOS39_PARAMETERS*);
void Nv04BindContextDmaKernel (NVOS49_PARAMETERS*);
void Nv04MapMemoryDmaKernel (NVOS46_PARAMETERS*);
void Nv04UnmapMemoryDmaKernel (NVOS47_PARAMETERS*);
void Nv04DupObjectKernel (NVOS55_PARAMETERS*);
void Nv04ShareKernel (NVOS57_PARAMETERS*);
void Nv04AddVblankCallbackKernel (NVOS61_PARAMETERS*);
// exported "WithSecInfo" API functions
void Nv01AllocMemoryWithSecInfo (NVOS02_PARAMETERS*, API_SECURITY_INFO);
void Nv01AllocObjectWithSecInfo (NVOS05_PARAMETERS*, API_SECURITY_INFO);
void Nv04AllocWithSecInfo (NVOS21_PARAMETERS*, API_SECURITY_INFO);
void Nv04AllocWithAccessSecInfo (NVOS64_PARAMETERS*, API_SECURITY_INFO);
void Nv01FreeWithSecInfo (NVOS00_PARAMETERS*, API_SECURITY_INFO);
void Nv04ControlWithSecInfo (NVOS54_PARAMETERS*, API_SECURITY_INFO);
void Nv04VidHeapControlWithSecInfo (NVOS32_PARAMETERS*, API_SECURITY_INFO);
void Nv04IdleChannelsWithSecInfo (NVOS30_PARAMETERS*, API_SECURITY_INFO);
void Nv04MapMemoryWithSecInfo (NVOS33_PARAMETERS*, API_SECURITY_INFO);
void Nv04UnmapMemoryWithSecInfo (NVOS34_PARAMETERS*, API_SECURITY_INFO);
void Nv04I2CAccessWithSecInfo (NVOS_I2C_ACCESS_PARAMS*, API_SECURITY_INFO);
void Nv04AllocContextDmaWithSecInfo (NVOS39_PARAMETERS*, API_SECURITY_INFO);
void Nv04BindContextDmaWithSecInfo (NVOS49_PARAMETERS*, API_SECURITY_INFO);
void Nv04MapMemoryDmaWithSecInfo (NVOS46_PARAMETERS*, API_SECURITY_INFO);
void Nv04UnmapMemoryDmaWithSecInfo (NVOS47_PARAMETERS*, API_SECURITY_INFO);
void Nv04DupObjectWithSecInfo (NVOS55_PARAMETERS*, API_SECURITY_INFO);
void Nv04ShareWithSecInfo (NVOS57_PARAMETERS*, API_SECURITY_INFO);
#endif // _EXPORTS_H

View File

@@ -0,0 +1,175 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _MAPPING_LIST_H_
#define _MAPPING_LIST_H_
#include <nvlimits.h>
#include "containers/btree.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "os/os.h"
#include "rmapi/resource.h"
struct P2PApi;
typedef struct VirtualMemory VirtualMemory;
typedef struct Memory Memory;
// ****************************************************************************
// Type definitions
// ****************************************************************************
// dma information definitions
typedef struct _def_client_dma_mapping_info CLI_DMA_MAPPING_INFO, *PCLI_DMA_MAPPING_INFO;
typedef struct _def_client_dma_mapping_info_iterator CLI_DMA_MAPPING_INFO_ITERATOR, *PCLI_DMA_MAPPING_INFO_ITERATOR;
// mapping information definitions
typedef struct _def_client_dma_alloc_map_info CLI_DMA_ALLOC_MAP_INFO;
//
// DMA memory mapping XXX keep around since needed by mapping.c
// We need to figure out what to do with this
// RS-TODO gradually remove this with inter-mapping cleanup
//
struct _def_client_dma_mapping_info
{
NvHandle hDevice; // device under which the mapping was created
NvU64 DmaOffset; // DMA offset of the mapping -- presumably the GPU VA; confirm
void* KernelVAddr[NV_MAX_SUBDEVICES]; // Kernel's virtual address, if required
void* KernelPriv; // Token required to unmap the kernel mapping
NvU64 FbAperture[NV_MAX_SUBDEVICES]; // GPU aperture addresses, if required
NvU64 FbApertureLen[NV_MAX_SUBDEVICES]; // GPU aperture mapped lengths
MEMORY_DESCRIPTOR *pMemDesc; // Subregion to be mapped
NvU32 Flags; // mapping flags -- NOTE(review): confirm which flag space (NVOS46?)
struct P2PApi *pP2PInfo; // peer-to-peer state when this is a P2P mapping
NvU32 gpuMask; // mask of GPUs this mapping covers -- TODO confirm
ADDRESS_TRANSLATION addressTranslation;
MEMORY_DESCRIPTOR *pBar1P2PVirtMemDesc; // The peer GPU mapped BAR1 region
MEMORY_DESCRIPTOR *pBar1P2PPhysMemDesc; // The peer GPU vidmem sub region
// Intrusive doubly-linked list links to sibling mappings.
PCLI_DMA_MAPPING_INFO Next;
PCLI_DMA_MAPPING_INFO Prev;
};
//
// iterator object to enum CLI_DMA_MAPPING_INFO from 'pDmaMappingList'
//
struct _def_client_dma_mapping_info_iterator
{
// Two-level walk: outer level over the per-hDevice lists, inner level
// over the per-DmaOffset mappings of the current device.
PNODE pDmaMappingList; // list of hDevices
PNODE pCurrentList; // current hDevice list entry, is list of pDmaMappings
PNODE pNextDmaMapping; // next pDmaMapping while iterating over the DmaOffsets
};
//
// DMA allocMapping
//
struct _def_client_dma_alloc_map_info
{
CLI_DMA_MAPPING_INFO *pDmaMappingInfo; // the DMA mapping itself
struct VirtualMemory *pVirtualMemory; // virtual allocation side of the mapping -- NOTE(review): confirm
struct Memory *pMemory; // physical memory object side of the mapping
};
// ****************************************************************************
// Function definitions
// ****************************************************************************
// Client Memory Mappings
//
// CliUpdateMemoryMappingInfo - Fill in RsCpuMapping fields for system memory mappings
//
//
// Populate an RsCpuMapping for a system-memory CPU mapping.  Records the
// CPU-visible address/length/flags plus the owning process, and marks the
// GPU-aperture fields as unused (-1 appears to act as a "not set" sentinel).
//
// Returns NV_ERR_INVALID_ARGUMENT when pCpuMapping is NULL, NV_OK otherwise.
//
static inline NV_STATUS
CliUpdateMemoryMappingInfo
(
    RsCpuMapping *pCpuMapping,
    NvBool        bKernel,
    NvP64         cpuAddress,
    NvP64         priv,
    NvU64         cpuMapLength,
    NvU32         flags
)
{
    // Guard: nothing to fill in without a mapping descriptor.
    if (pCpuMapping == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    // Public, CPU-side view of the mapping.
    pCpuMapping->pLinearAddress = cpuAddress;
    pCpuMapping->length         = cpuMapLength;
    pCpuMapping->flags          = flags;
    pCpuMapping->processId      = osGetCurrentProcess();

    // Private bookkeeping: kernel/user origin and the unmap token.
    pCpuMapping->pPrivate->bKernel = bKernel;
    pCpuMapping->pPrivate->pPriv   = priv;

    // No GPU aperture window for a system-memory mapping.
    pCpuMapping->pPrivate->gpuAddress   = -1;
    pCpuMapping->pPrivate->gpuMapLength = -1;

    return NV_OK;
}
// ****************************************************************************
// Device Memory Mappings
// ****************************************************************************
//
// CliUpdateDeviceMemoryMapping - Fill in RsCpuMapping fields for device memory mappings
//
//
// Populate an RsCpuMapping for a device-memory CPU mapping.  Identical to
// CliUpdateMemoryMappingInfo except that the caller supplies real GPU
// aperture address/length values instead of the -1 "unused" sentinels.
// (Note the parameter order here is priv-before-cpuAddress.)
//
// Returns NV_ERR_INVALID_ARGUMENT when pCpuMapping is NULL, NV_OK otherwise.
//
static inline NV_STATUS
CliUpdateDeviceMemoryMapping
(
    RsCpuMapping *pCpuMapping,
    NvBool        bKernel,
    NvP64         priv,
    NvP64         cpuAddress,
    NvU64         cpuMapLength,
    NvU64         gpuAddress,
    NvU64         gpuMapLength,
    NvU32         flags
)
{
    // Guard: nothing to fill in without a mapping descriptor.
    if (pCpuMapping == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    // Public, CPU-side view of the mapping.
    pCpuMapping->pLinearAddress = cpuAddress;
    pCpuMapping->length         = cpuMapLength;
    pCpuMapping->flags          = flags;
    pCpuMapping->processId      = osGetCurrentProcess();

    // Private bookkeeping: kernel/user origin and the unmap token.
    pCpuMapping->pPrivate->bKernel = bKernel;
    pCpuMapping->pPrivate->pPriv   = priv;

    // GPU aperture window backing this device-memory mapping.
    pCpuMapping->pPrivate->gpuAddress   = gpuAddress;
    pCpuMapping->pPrivate->gpuMapLength = gpuMapLength;

    return NV_OK;
}
RsCpuMapping *CliFindMappingInClient (NvHandle, NvHandle, NvP64);
// DMA Mappings
NV_STATUS intermapCreateDmaMapping (RsClient *, RsResourceRef *, NvHandle, NvHandle, PCLI_DMA_MAPPING_INFO *, NvU32);
NV_STATUS intermapRegisterDmaMapping (RsClient *, NvHandle, NvHandle, PCLI_DMA_MAPPING_INFO, NvU64, NvU32);
NV_STATUS intermapDelDmaMapping (RsClient *, NvHandle, NvHandle, NvU64, NvU32, NvBool*);
void intermapFreeDmaMapping (PCLI_DMA_MAPPING_INFO);
NvBool CliGetDmaMappingInfo (NvHandle, NvHandle, NvHandle, NvU64, NvU32, PCLI_DMA_MAPPING_INFO*);
void CliGetDmaMappingIterator (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR, PNODE pDmaMappingList);
void CliGetDmaMappingNext (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR);
// Unmap all DMA mappings between a memory resource and any DynamicMemory
NV_STATUS intermapUnmapDeviceMemoryDma (RsClient *, RsResourceRef *, NvHandle);
#endif

View File

@@ -0,0 +1,272 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* nv_gpu_ops.h
*
* This file defines the interface between the common RM layer
* and the OS specific platform layers. (Currently supported
* are Linux and KMD)
*
*/
#ifndef _NV_GPU_OPS_H_
#define _NV_GPU_OPS_H_
#include "nvgputypes.h"
#include "nv_uvm_types.h"
typedef struct gpuSession *gpuSessionHandle;
typedef struct gpuDevice *gpuDeviceHandle;
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
typedef struct gpuChannel *gpuChannelHandle;
typedef struct gpuObject *gpuObjectHandle;
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
const gpuInfo *pGpuInfo,
const NvProcessorUuid *gpuGuid,
struct gpuDevice **device,
NvBool bCreateSmcPartition);
NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
NvU64 vaBase,
NvU64 vaSize,
gpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
gpuDeviceHandle device2,
getP2PCapsParams *p2pCaps);
void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
NvLength pageCount,
NvU32 pageSize,
gpuPmaAllocationOptions *pPmaAllocOptions,
NvU64 *pPages);
void nvGpuOpsPmaFreePages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU32 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU32 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU32 pageSize);
NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
const gpuChannelAllocParams *params,
gpuChannelHandle *channelHandle,
gpuChannelInfo *channelInfo);
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
NvU64 pointer);
NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
NvU64 memory, NvLength length,
void **cpuPtr, NvU32 pageSize);
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
void* cpuPtr);
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
gpuCaps *caps);
NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
gpuCesCaps *caps);
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuAddressSpace *dstVaSpace,
NvU64 *dstAddress);
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
NvHandle hClient,
NvHandle hPhysMemory,
NvHandle *hDupMemory,
gpuMemoryInfo *pGpuMemoryInfo);
NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
NvHandle hSubDevice, NvU8 *gpuGuid,
unsigned guidLength);
NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
const NvU8 *gpuUuid,
NvHandle *hClient,
NvHandle *hDevice,
NvHandle *hSubDevice);
NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
NvHandle hPhysHandle);
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);
NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
const gpuClientInfo *pGpuClientInfo,
gpuInfo *pGpuInfo);
NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
NvU32 *pSubdeviceId);
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);
NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);
NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid);
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);
NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);
NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);
NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
NvHandle hUserClient,
NvHandle hUserVASpace,
struct gpuAddressSpace **vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
void **pPma,
const UvmPmaStatistics **pPmaPubStats);
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
gpuAccessCntrInfo *pAccessCntrInfo,
NvBool bOwnInterrupts);
// --- Access counter control --------------------------------------------------
NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
                                   gpuAccessCntrInfo *pAccessCntrInfo,
                                   gpuAccessCntrConfig *pAccessCntrConfig);
NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
// --- Peer-to-peer object lifecycle -------------------------------------------
// Creates a P2P object between device1 and device2; the returned hP2pObject is
// released with nvGpuOpsP2pObjectDestroy().
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
                                  struct gpuDevice *device2,
                                  NvHandle *hP2pObject);
NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
                                   NvHandle hP2pObject);
// Queries mapping/PTE information for the range [offset, offset + size) of a
// previously duped memory object; results are returned via
// pGpuExternalMappingInfo.
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
                                       NvHandle hDupedMemory,
                                       NvU64 offset,
                                       NvU64 size,
                                       gpuExternalMappingInfo *pGpuExternalMappingInfo);
// --- Channel retainment ------------------------------------------------------
// Retains a channel for inspection; pair with nvGpuOpsReleaseChannel().
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
                                NvHandle hClient,
                                NvHandle hChannel,
                                gpuRetainedChannel **retainedChannel,
                                gpuChannelInstanceInfo *channelInstanceInfo);
void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);
NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
                                       gpuChannelResourceBindParams *channelResourceBindParams);
void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);
NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
                                         NvP64 resourceDescriptor,
                                         NvU64 offset,
                                         NvU64 size,
                                         gpuExternalMappingInfo *pGpuExternalMappingInfo);
// Hands a non-replayable fault packet to RM for processing.
NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
                                           const void *pFaultPacket);
// Private interface used for windows only
// Interface used for SR-IOV heavy
// --- Paging channel interface ------------------------------------------------
NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
                                        const gpuPagingChannelAllocParams *params,
                                        gpuPagingChannelHandle *channelHandle,
                                        gpuPagingChannelInfo *channelinfo);
void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);
NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
                                    NvU64 srcAddress,
                                    struct gpuDevice *device,
                                    NvU64 *dstAddress);
void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
                                 NvU64 srcAddress,
                                 struct gpuDevice *device);
NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
                                          char *methodStream,
                                          NvU32 methodStreamSize);
#endif /* _NV_GPU_OPS_H_*/

View File

@@ -0,0 +1,99 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _PARAM_COPY_H_
#define _PARAM_COPY_H_
//
// RMAPI_PARAM_COPY - a mechanism for getting user params in and out of resman.
//
// The struct RMAPI_PARAM_COPY keeps track of current API params for eventual
// copyout and free as needed.
//
#include <core/core.h>
struct API_STATE
{
NvP64 pUserParams; // ptr to params in client's addr space
void **ppKernelParams; // ptr to current 'pKernelParams'
NvU32 paramsSize; // # bytes
NvU32 flags; // RMAPI_PARAM_COPY_FLAGS_* (see below)
NvBool bSizeValid; // NV_FALSE if the numElems * sizeOfElem computation in RMAPI_PARAM_COPY_INIT overflowed
const char *msgTag; // tag used in diagnostics; NULL when NV_PRINTF_STRINGS_ALLOWED is 0
};
typedef struct API_STATE RMAPI_PARAM_COPY;
#define RMAPI_PARAM_COPY_FLAGS_NONE 0x00000000
#define RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE NVBIT(0)
#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN NVBIT(1)
#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT NVBIT(2)
#define RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER NVBIT(3)
//
// Only set this if the paramsSize member of RMAPI_PARAM_COPY has been validated for
// correctness before calling rmapiParamsAcquire(). There is a default cap on the
// largest size allowed in order to avoid huge memory allocations triggering
// out of memory scenarios if the user passes in a bogus size.
//
#define RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK NVBIT(4)
//
// 1MB is the largest size allowed for an embedded pointer accessed through
// this mechanism unless RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK is specified
// and the size is validated before calling rmapiParamsAcquire().
//
#define RMAPI_PARAM_COPY_MAX_PARAMS_SIZE (1*1024*1024)
#if NV_PRINTF_STRINGS_ALLOWED
#define RMAPI_PARAM_COPY_MSG_TAG(x) x
#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = theMsgTag
#else
#define RMAPI_PARAM_COPY_MSG_TAG(x) ((const char *) 0)
#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = ((const char *) 0)
#endif
//
// Initializes the RMAPI_PARAM_COPY structure. Sets bSizeValid to false if calculating
// the size caused an overflow. This makes the rmapiParamsAcquire() call fail with
// NV_ERR_INVALID_ARGUMENT. Since rmapiParamsAcquire() always directly follows
// this initialization, there is no need to make it return a status and
// duplicate error checking.
//
#define RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, theUserParams, numElems, sizeOfElem) \
do { \
RMAPI_PARAM_COPY_SET_MSG_TAG((paramCopy), __FUNCTION__); \
(paramCopy).ppKernelParams = (void **) &(pKernelParams); \
(paramCopy).pUserParams = (theUserParams); \
(paramCopy).flags = RMAPI_PARAM_COPY_FLAGS_NONE; \
(paramCopy).bSizeValid = portSafeMulU32((numElems), (sizeOfElem), &(paramCopy).paramsSize); \
} while(0)
// Routines for alloc/copyin/copyout/free sequences
NV_STATUS rmapiParamsAcquire(RMAPI_PARAM_COPY *, NvBool);
NV_STATUS rmapiParamsRelease(RMAPI_PARAM_COPY *);
NV_STATUS rmapiParamsCopyOut(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool);
NV_STATUS rmapiParamsCopyIn(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool);
// Init copy_param structure
NV_STATUS rmapiParamsCopyInit(RMAPI_PARAM_COPY *, NvU32 hClass);
#endif // _PARAM_COPY_H_

View File

@@ -0,0 +1,3 @@
#include "g_resource_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_resource_fwd_decls_nvoc.h"

View File

@@ -0,0 +1,405 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMAPI_H_
#define _RMAPI_H_
#include "core/core.h"
#include "nvsecurityinfo.h"
//
// Forward declarations
//
typedef struct _RM_API RM_API;
typedef struct RsServer RsServer;
typedef struct OBJGPU OBJGPU;
typedef struct RsResource RsResource;
typedef struct RsCpuMapping RsCpuMapping;
typedef struct CALL_CONTEXT CALL_CONTEXT;
typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR;
typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL;
typedef struct RS_LOCK_INFO RS_LOCK_INFO;
typedef NvU32 NV_ADDRESS_SPACE;
// Global resource server instance (defined in another translation unit).
extern RsServer g_resServ;
/**
 * Initialize RMAPI module.
 *
 * Must be called once and only once before any RMAPI functions can be called
 */
NV_STATUS rmapiInitialize(void);
/**
 * Shutdown RMAPI module
 *
 * Must be called once and only once when a driver is shutting down and no more
 * RMAPI functions will be called.
 */
void rmapiShutdown(void);
// Flags for rmapiLockAcquire
#define RMAPI_LOCK_FLAGS_NONE (0x00000000) // default no flags
#define RMAPI_LOCK_FLAGS_COND_ACQUIRE NVBIT(0) // conditional acquire; if lock is
// already held then return error
#define RMAPI_LOCK_FLAGS_READ NVBIT(1) // Acquire API lock for READ
#define RMAPI_LOCK_FLAGS_WRITE (0x00000000) // Acquire API lock for WRITE - Default
/**
 * Acquire the RM API Lock
 *
 * The API lock is a sleeping mutex that is used to serialize access to RM APIs
 * by (passive-level) RM clients.
 *
 * The API lock is not used to protect state accessed by DPC and ISRs. For DPC
 * and ISRs that GPU lock is used instead. For state controlled by clients, this
 * often requires taking both API and GPU locks in API paths
 *
 * @param[in] flags RMAPI_LOCK_FLAGS_*
 * @param[in] module RM_LOCK_MODULES_*
 */
NV_STATUS rmapiLockAcquire(NvU32 flags, NvU32 module);
/**
 * Release RM API Lock
 */
void rmapiLockRelease(void);
/**
 * Check if current thread owns the API lock
 */
NvBool rmapiLockIsOwner(void);
/**
 * Type of RM API client interface
 */
typedef enum
{
RMAPI_EXTERNAL, // For clients external from RM TLS, locks, etc -- no default security attributes
RMAPI_EXTERNAL_KERNEL, // For clients external from TLS and locks but which still need default security attributes
RMAPI_MODS_LOCK_BYPASS, // Hack for MODS - skip RM locks but initialize TLS (bug 1808386)
RMAPI_API_LOCK_INTERNAL, // For clients that already have the TLS & API lock held -- security is RM internal
RMAPI_GPU_LOCK_INTERNAL, // For clients that have TLS, API lock, and GPU lock -- security is RM internal
RMAPI_STUBS, // All functions just return NV_ERR_NOT_SUPPORTED
RMAPI_TYPE_MAX
} RMAPI_TYPE;
/**
 * Query interface that can be used to perform operations through the
 * client-level RM API
 */
RM_API *rmapiGetInterface(RMAPI_TYPE rmapiType);
// Flags for RM_API::Alloc
#define RMAPI_ALLOC_FLAGS_NONE 0
#define RMAPI_ALLOC_FLAGS_SKIP_RPC NVBIT(0)
// Flags for RM_API::Free
#define RMAPI_FREE_FLAGS_NONE 0
/**
 * Interface for performing operations through the RM API exposed to client
 * drivers. Interface provides consistent view to the RM API while abstracting
 * the individuals callers from specifying security attributes and/or from
 * locking needs. For example, this interface can be used either before or after
 * the API or GPU locks.
 */
struct _RM_API
{
// Allocate a resource with default security attributes and local pointers (no NvP64)
NV_STATUS (*Alloc)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle *phObject, NvU32 hClass, void *pAllocParams);
// Allocate a resource with default security attributes and local pointers (no NvP64)
// and client assigned handle
NV_STATUS (*AllocWithHandle)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle hObject, NvU32 hClass, void *pAllocParams);
// Allocate a resource
NV_STATUS (*AllocWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams,
NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo);
// Free a resource with default security attributes
NV_STATUS (*Free)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject);
// Free a resource
NV_STATUS (*FreeWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject,
NvU32 flags, API_SECURITY_INFO *pSecInfo);
// Free a list of clients with default security attributes
NV_STATUS (*FreeClientList)(struct _RM_API *pRmApi, NvHandle *phClientList, NvU32 numClients);
// Free a list of clients
NV_STATUS (*FreeClientListWithSecInfo)(struct _RM_API *pRmApi, NvHandle *phClientList,
NvU32 numClients, API_SECURITY_INFO *pSecInfo);
// Invoke a control with default security attributes and local pointers (no NvP64)
NV_STATUS (*Control)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd,
void *pParams, NvU32 paramsSize);
// Invoke a control
NV_STATUS (*ControlWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd,
NvP64 pParams, NvU32 paramsSize, NvU32 flags, API_SECURITY_INFO *pSecInfo);
// Prefetch a control parameters into the control call cache (0000, 0080 and 2080 classes only)
NV_STATUS (*ControlPrefetch)(struct _RM_API *pRmApi, NvU32 cmd);
// Dup an object with default security attributes
NV_STATUS (*DupObject)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject,
NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags);
// Dup an object
NV_STATUS (*DupObjectWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle *phObject, NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags,
API_SECURITY_INFO *pSecInfo);
// Share an object with default security attributes
NV_STATUS (*Share)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject,
RS_SHARE_POLICY *pSharePolicy);
// Share an object
NV_STATUS (*ShareWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject,
RS_SHARE_POLICY *pSharePolicy, API_SECURITY_INFO *pSecInfo);
// Map memory with default security attributes and local pointers (no NvP64). Provides
// RM internal implementation for NvRmMapMemory().
NV_STATUS (*MapToCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory,
NvU64 offset, NvU64 length, void **ppCpuVirtAddr, NvU32 flags);
// Map memory. Provides RM internal implementation for NvRmMapMemory().
NV_STATUS (*MapToCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory,
NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags, API_SECURITY_INFO *pSecInfo);
// Unmap memory with default security attributes and local pointers (no NvP64)
NV_STATUS (*UnmapFromCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, void *pLinearAddress,
NvU32 flags, NvU32 ProcessId);
// Unmap memory
NV_STATUS (*UnmapFromCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory,
NvP64 pLinearAddress, NvU32 flags, NvU32 ProcessId, API_SECURITY_INFO *pSecInfo);
// Map dma memory with default security attributes. Provides RM internal implementation for NvRmMapMemoryDma().
NV_STATUS (*Map)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset);
// Map dma memory. Provides RM internal implementation for NvRmMapMemoryDma().
NV_STATUS (*MapWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset, API_SECURITY_INFO *pSecInfo);
// Unmap dma memory with default security attributes
NV_STATUS (*Unmap)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
NvU32 flags, NvU64 dmaOffset);
// Unmap dma memory
NV_STATUS (*UnmapWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
NvU32 flags, NvU64 dmaOffset, API_SECURITY_INFO *pSecInfo);
// Default security attributes for this interface, valid when bHasDefaultSecInfo
// is set (presumably applied by the non-WithSecInfo entry points -- confirm).
API_SECURITY_INFO defaultSecInfo;
NvBool bHasDefaultSecInfo;
// Internal-interface markers; see RMAPI_TYPE for which interface variants
// assume TLS and/or locks are already managed by the caller.
NvBool bTlsInternal;
NvBool bApiLockInternal;
NvBool bRmSemaInternal;
NvBool bGpuLockInternal;
void *pPrivateContext; // implementation-private state for this interface
};
// Called before any RM resource is freed
NV_STATUS rmapiFreeResourcePrologue(RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams);
// Mark for deletion the client resources given a GPU mask
void rmapiSetDelPendingClientResourcesFromGpuMask(NvU32 gpuMask);
// Delete the marked client resources
void rmapiDelPendingClients(void);
void rmapiDelPendingDevices(NvU32 gpuMask);
void rmapiReportLeakedDevices(NvU32 gpuMask);
//
// Given a value, retrieves an array of client handles corresponding to clients
// with matching pOSInfo fields. The array is allocated dynamically, and is
// expected to be freed by the caller.
//
NV_STATUS rmapiGetClientHandlesFromOSInfo(void*, NvHandle**, NvU32*);
//
// Base mapping routines for use by RsResource subclasses
//
NV_STATUS rmapiMapGpuCommon(RsResource *, CALL_CONTEXT *, RsCpuMapping *, OBJGPU *, NvU32, NvU32);
NV_STATUS rmapiValidateKernelMapping(RS_PRIV_LEVEL privLevel, NvU32 flags, NvBool *pbKernel);
NV_STATUS rmapiGetEffectiveAddrSpace(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags, NV_ADDRESS_SPACE *pAddrSpace);
/**
 * Deprecated RM API interfaces. Use RM_API instead.
 */
NV_STATUS RmUnmapMemoryDma(NvHandle, NvHandle, NvHandle, NvHandle, MEMORY_DESCRIPTOR*, NvU32, NvU64);
NV_STATUS RmConfigGetEx (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool);
NV_STATUS RmConfigSetEx (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool);
/**
 * Control cache API.
 * Every function except rmapiControlCacheInit and rmapiControlCacheFree is thread safe.
 */
void rmapiControlCacheInit(void);
NvBool rmapiControlIsCacheable(NvU32 flags, NvBool isGSPClient);
// Looks up cached params for (hClient, hObject, cmd); presumably NULL on a
// cache miss -- confirm against the implementation.
void* rmapiControlCacheGet(NvHandle hClient, NvHandle hObject, NvU32 cmd);
NV_STATUS rmapiControlCacheSet(NvHandle hClient, NvHandle hObject, NvU32 cmd,
void* params, NvU32 paramsSize);
void rmapiControlCacheFree(void);
void rmapiControlCacheFreeClient(NvHandle hClient);
void rmapiControlCacheFreeObject(NvHandle hClient, NvHandle hObject);
typedef struct _RM_API_CONTEXT {
NvU32 gpuMask;
} RM_API_CONTEXT;
//
// Handler to do stuff that is required before invoking a RM API
//
NV_STATUS
rmapiPrologue
(
RM_API *pRmApi,
RM_API_CONTEXT *pContext
);
//
// Handler to do stuff that is required after invoking a RM API
//
void
rmapiEpilogue
(
RM_API *pRmApi,
RM_API_CONTEXT *pContext
);
// Populate pLockInfo with the lock state appropriate for (pRmApi, hClient).
void
rmapiInitLockInfo
(
RM_API *pRmApi,
NvHandle hClient,
RS_LOCK_INFO *pLockInfo
);
//
// RM locking modules: 24-bit group bitmask, 8-bit subgroup id
//
// Lock acquires are tagged with a RM_LOCK_MODULE_* in order to partition
// the acquires into groups, which allows read-only locks to be
// enabled / disabled on a per-group basis (via apiLockMask and gpuLockMask
// in OBJSYS.)
//
// The groups are further partitioned into subgroups, which
// are used for lock profiling data collection.
//
// Pack a (group, subgroup) pair: group in bits [31:8], subgroup in bits [7:0].
#define RM_LOCK_MODULE_VAL(grp, subgrp) ((((grp) & 0xffffff) << 8) | ((subgrp) & 0xff))
// Extract the 24-bit group from a packed module value.
#define RM_LOCK_MODULE_GRP(val) (((val) >> 8) & 0xffffff)
// Grp SubGrp
#define RM_LOCK_MODULES_NONE RM_LOCK_MODULE_VAL(0x000000, 0x00)
#define RM_LOCK_MODULES_WORKITEM RM_LOCK_MODULE_VAL(0x000001, 0x00)
#define RM_LOCK_MODULES_CLIENT RM_LOCK_MODULE_VAL(0x000002, 0x00)
#define RM_LOCK_MODULES_GPU_OPS RM_LOCK_MODULE_VAL(0x000004, 0x00)
#define RM_LOCK_MODULES_OSAPI RM_LOCK_MODULE_VAL(0x000010, 0x00)
#define RM_LOCK_MODULES_STATE_CONFIG RM_LOCK_MODULE_VAL(0x000010, 0x01)
#define RM_LOCK_MODULES_EVENT RM_LOCK_MODULE_VAL(0x000010, 0x02)
#define RM_LOCK_MODULES_VBIOS RM_LOCK_MODULE_VAL(0x000010, 0x03)
#define RM_LOCK_MODULES_MEM RM_LOCK_MODULE_VAL(0x000020, 0x00)
#define RM_LOCK_MODULES_MEM_FLA RM_LOCK_MODULE_VAL(0x000020, 0x01)
#define RM_LOCK_MODULES_MEM_PMA RM_LOCK_MODULE_VAL(0x000020, 0x02)
#define RM_LOCK_MODULES_POWER RM_LOCK_MODULE_VAL(0x000040, 0x00)
#define RM_LOCK_MODULES_ACPI RM_LOCK_MODULE_VAL(0x000040, 0x01)
#define RM_LOCK_MODULES_DYN_POWER RM_LOCK_MODULE_VAL(0x000040, 0x02)
#define RM_LOCK_MODULES_HYPERVISOR RM_LOCK_MODULE_VAL(0x000080, 0x00)
#define RM_LOCK_MODULES_VGPU RM_LOCK_MODULE_VAL(0x000080, 0x01)
#define RM_LOCK_MODULES_RPC RM_LOCK_MODULE_VAL(0x000080, 0x02)
#define RM_LOCK_MODULES_DIAG RM_LOCK_MODULE_VAL(0x000100, 0x00)
#define RM_LOCK_MODULES_RC RM_LOCK_MODULE_VAL(0x000100, 0x01)
#define RM_LOCK_MODULES_SLI RM_LOCK_MODULE_VAL(0x000200, 0x00)
#define RM_LOCK_MODULES_P2P RM_LOCK_MODULE_VAL(0x000200, 0x01)
#define RM_LOCK_MODULES_NVLINK RM_LOCK_MODULE_VAL(0x000200, 0x02)
#define RM_LOCK_MODULES_HOTPLUG RM_LOCK_MODULE_VAL(0x000400, 0x00)
#define RM_LOCK_MODULES_DISP RM_LOCK_MODULE_VAL(0x000400, 0x01)
#define RM_LOCK_MODULES_KERNEL_RM_EVENTS RM_LOCK_MODULE_VAL(0x000400, 0x02)
#define RM_LOCK_MODULES_GPU RM_LOCK_MODULE_VAL(0x000800, 0x00)
#define RM_LOCK_MODULES_GR RM_LOCK_MODULE_VAL(0x000800, 0x01)
#define RM_LOCK_MODULES_FB RM_LOCK_MODULE_VAL(0x000800, 0x02)
#define RM_LOCK_MODULES_FIFO RM_LOCK_MODULE_VAL(0x000800, 0x03)
#define RM_LOCK_MODULES_TMR RM_LOCK_MODULE_VAL(0x000800, 0x04)
#define RM_LOCK_MODULES_I2C RM_LOCK_MODULE_VAL(0x001000, 0x00)
#define RM_LOCK_MODULES_GPS RM_LOCK_MODULE_VAL(0x001000, 0x01)
#define RM_LOCK_MODULES_SEC2 RM_LOCK_MODULE_VAL(0x001000, 0x02)
#define RM_LOCK_MODULES_THERM RM_LOCK_MODULE_VAL(0x001000, 0x03)
#define RM_LOCK_MODULES_INFOROM RM_LOCK_MODULE_VAL(0x001000, 0x04)
#define RM_LOCK_MODULES_ISR RM_LOCK_MODULE_VAL(0x002000, 0x00)
#define RM_LOCK_MODULES_DPC RM_LOCK_MODULE_VAL(0x002000, 0x01)
#define RM_LOCK_MODULES_INIT RM_LOCK_MODULE_VAL(0x004000, 0x00)
#define RM_LOCK_MODULES_STATE_LOAD RM_LOCK_MODULE_VAL(0x004000, 0x01)
#define RM_LOCK_MODULES_STATE_UNLOAD RM_LOCK_MODULE_VAL(0x008000, 0x00)
#define RM_LOCK_MODULES_DESTROY RM_LOCK_MODULE_VAL(0x008000, 0x01)
//
// ResServ lock flag translation
//
#define RM_LOCK_FLAGS_NONE 0
#define RM_LOCK_FLAGS_NO_API_LOCK RS_LOCK_FLAGS_NO_TOP_LOCK
#define RM_LOCK_FLAGS_NO_CLIENT_LOCK RS_LOCK_FLAGS_NO_CLIENT_LOCK
#define RM_LOCK_FLAGS_NO_GPUS_LOCK RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1
#define RM_LOCK_FLAGS_GPU_GROUP_LOCK RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2
#define RM_LOCK_FLAGS_RM_SEMA RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3
//
// ResServ lock state translation
//
#define RM_LOCK_STATES_NONE 0
#define RM_LOCK_STATES_API_LOCK_ACQUIRED RS_LOCK_STATE_TOP_LOCK_ACQUIRED
#define RM_LOCK_STATES_GPUS_LOCK_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED
#define RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED
#define RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK
#define RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED
#define RM_LOCK_STATES_RM_SEMA_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED
//
// ResServ lock release translation
//
#define RM_LOCK_RELEASE_API_LOCK RS_LOCK_RELEASE_TOP_LOCK
#define RM_LOCK_RELEASE_CLIENT_LOCK RS_LOCK_RELEASE_CLIENT_LOCK
#define RM_LOCK_RELEASE_GPUS_LOCK RS_LOCK_RELEASE_CUSTOM_LOCK_1
#define RM_LOCK_RELEASE_GPU_GROUP_LOCK RS_LOCK_RELEASE_CUSTOM_LOCK_2
#define RM_LOCK_RELEASE_RM_SEMA RS_LOCK_RELEASE_CUSTOM_LOCK_3
#endif // _RMAPI_H_

View File

@@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef RMAPI_UTILS_H
#define RMAPI_UTILS_H
#include "rmapi/rmapi.h"
//
// Alloc a client, device and subdevice handle for a gpu
//
// On success the three out-params receive the newly allocated handles.
//
NV_STATUS
rmapiutilAllocClientAndDeviceHandles
(
RM_API *pRmApi,
OBJGPU *pGpu,
NvHandle *phClient,
NvHandle *phDevice,
NvHandle *phSubDevice
);
//
// Free client, device and subdevice handles
//
// Counterpart of rmapiutilAllocClientAndDeviceHandles().
//
void
rmapiutilFreeClientAndDeviceHandles
(
RM_API *pRmApi,
NvHandle *phClient,
NvHandle *phDevice,
NvHandle *phSubDevice
);
//
// Return NV_TRUE if the given external class ID is an INTERNAL_ONLY class
//
NvBool rmapiutilIsExternalClassIdInternalOnly(NvU32 externalClassId);
#endif /* RMAPI_UTILS_H */

View File

@@ -0,0 +1,188 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RS_UTILS_H_
#define _RS_UTILS_H_
/**
 * @defgroup RsUtilities
 *
 * Provides convenience utilities for resserv. Utility functions provide
 * abstractions that take handles as inputs -- helpful for legacy code that
 * passes hClient or hResource handles and not underlying objects. Desire
 * is for pClient and RsResourceRef types to be used for new code instead of
 * passing handles around and this utility module phased out.
 *
 * @{
 */
#include "resserv/rs_server.h"
#include "resserv/rs_client.h"
#include "resserv/rs_resource.h"
#include "rmapi/client.h"
#include "containers/list.h"
// List of NvHandle client handles; used by serverutilGetClientHandlesFromPid().
MAKE_LIST(ClientHandlesList, NvHandle);
//
// Look up hResource under pRmClient and downcast it to 'type'.
//
// Evaluates to NULL when the handle cannot be resolved to a resource of the
// requested class; otherwise evaluates to the dynamicCast'ed pointer. Note
// that ppBaseRes is written by clientGetResource() and then re-read, so the
// argument must be a side-effect-free lvalue.
//
// The expansion is wrapped in one outer pair of parentheses so the macro is
// safe inside larger expressions (e.g. '!serverutilGetDerived(...)'); without
// it the ternary would bind incorrectly against surrounding operators.
//
#define serverutilGetDerived(pRmClient, hResource, ppBaseRes, type)  \
    ((clientGetResource(staticCast((pRmClient), RsClient),           \
                        (hResource),                                 \
                        classId(type),                               \
                        (ppBaseRes)) != NV_OK)                       \
         ? NULL                                                      \
         : dynamicCast(*(ppBaseRes), type))
/**
 * Get the reference to a resource
 * @param[in] hClient Client handle
 * @param[in] hObject The resource to lookup
 * @param[out] ppResourceRef The reference to the resource
 */
NV_STATUS serverutilGetResourceRef(NvHandle hClient, NvHandle hObject,
RsResourceRef **ppResourceRef);
/**
 * Get the reference to a resource (with a type check)
 * @param[in] hClient Client handle
 * @param[in] hObject The resource to lookup
 * @param[in] internalClassId Required internal class of the resource
 * @param[out] ppResourceRef The reference to the resource
 */
NV_STATUS serverutilGetResourceRefWithType(NvHandle hClient, NvHandle hObject,
NvU32 internalClassId, RsResourceRef **ppResourceRef);
/**
 * Get the reference to a resource (with a type and parent check)
 * @param[in] hClient Client handle
 * @param[in] hParent Expected parent of the resource
 * @param[in] hObject The resource to lookup
 * @param[in] internalClassId Required internal class of the resource
 * @param[out] ppResourceRef The reference to the resource
 */
NV_STATUS serverutilGetResourceRefWithParent(NvHandle hClient, NvHandle hParent, NvHandle hObject,
NvU32 internalClassId, RsResourceRef **ppResourceRef);
/**
 * Find the first child object of given type
 */
RsResourceRef *serverutilFindChildRefByType(NvHandle hClient, NvHandle hParent, NvU32 internalClassId, NvBool bExactMatch);
/**
 * Get an iterator to the elements in the client's resource map
 *
 * See clientRefIter for documentation on hScopedObject and iterType
 */
RS_ITERATOR serverutilRefIter(NvHandle hClient, NvHandle hScopedObject, NvU32 internalClassId, RS_ITER_TYPE iterType, NvBool bExactMatch);
/**
 * Get an iterator to the elements in the server's shared object map
 */
RS_SHARE_ITERATOR serverutilShareIter(NvU32 internalClassId);
/**
 * Advance an iterator obtained from serverutilShareIter
 */
NvBool serverutilShareIterNext(RS_SHARE_ITERATOR* pIt);
/**
 * Validate that a given resource handle is well-formed and does not already
 * exist under a given client.
 */
NvBool serverutilValidateNewResourceHandle(NvHandle, NvHandle);
/**
 * Generate an unused handle for a resource. The handle will be generated in the white-listed range that was
 * specified when the client was allocated.
 */
NV_STATUS serverutilGenResourceHandle(NvHandle, NvHandle*);
/**
 * Get a client pointer from a client handle without taking any locks.
 *
 * NOTE(review): the 'UnderLock' suffix suggests the caller is expected to
 * already hold the required lock(s) -- confirm against the implementation.
 *
 * @param[in] hClient The client to acquire
 * @param[out] ppClient Pointer to the RmClient
 */
NV_STATUS serverutilGetClientUnderLock(NvHandle hClient, RmClient **ppClient);
/**
 * Get a client pointer from a client handle and lock it.
 *
 * @param[in] hClient The client to acquire
 * @param[in] access LOCK_ACCESS_*
 * @param[out] ppClient Pointer to the RmClient
 */
NV_STATUS serverutilAcquireClient(NvHandle hClient, LOCK_ACCESS_TYPE access, RmClient **ppClient);
/**
 * Unlock a client
 *
 * @param[in] access LOCK_ACCESS_*
 * @param[in] pClient Pointer to the RmClient
 */
void serverutilReleaseClient(LOCK_ACCESS_TYPE access, RmClient *pClient);
/**
 * Get the first valid client pointer in resource server without taking any locks.
 */
RmClient **serverutilGetFirstClientUnderLock(void);
/**
 * Get the next valid client pointer in resource server without taking any locks.
 *
 * @param[in] pClient Pointer returned by a previous call to
 * serverutilGetFirstClientUnderLock or
 * serverutilGetNextClientUnderLock
 */
RmClient **serverutilGetNextClientUnderLock(RmClient **pClient);
/*!
 * @brief Retrieve all hClients allocated for the given (ProcID, SubProcessID)
 *
 * This function iterates through all the clients in the resource server and finds
 * hClients allocated for the given (ProcID, SubProcessID) and returns them to
 * the caller.
 *
 * @param[in] procID Process ID
 * @param[in] subProcessID SubProcess ID
 * @param[out] pClientList List in which the client handles are returned
 *
 * @return NV_STATUS
 */
NV_STATUS serverutilGetClientHandlesFromPid(NvU32 procID, NvU32 subProcessID, ClientHandlesList *pClientList);
/**
 * This is a filtering function intended to be used with refFindCpuMappingWithFilter.
 * This filter will only match user mappings belonging to the current process.
 *
 * @param[in] ppMapping The mapping that is being filtered
 */
NvBool serverutilMappingFilterCurrentUserProc(RsCpuMapping *ppMapping);
/**
 * This is a filtering function intended to be used with refFindCpuMappingWithFilter.
 * This filter will only match kernel mappings.
 *
 * @param[in] ppMapping The mapping that is being filtered
 */
NvBool serverutilMappingFilterKernel(RsCpuMapping *ppMapping);
#endif