515.43.04

commit 1739a20efc by Andy Ritger, 2022-05-09 13:18:59 -07:00
2519 changed files with 1060036 additions and 0 deletions


@@ -0,0 +1,65 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef EFI_CONSOLE_H
#define EFI_CONSOLE_H
#include "gpu/disp/kern_disp_max.h"
struct OBJGPU;
typedef struct
{
    NvBool isDispStateSave;
    NvU32  activeDisplayId[OBJ_MAX_HEADS];

    struct
    {
        NvU32 displayId;

        struct {
            NvU32 index;
            NvU32 subLinkMask;
        } sorXBar;

        struct {
            NvU32 linkBw;
            NvU32 laneCount;
            NvU32 linkCtl;
        } displayPort;
    } activeDfpState[OBJ_MAX_DFPS];

    NvU32 numDfps;

    struct
    {
        NvU32  coreChannelClass;
        NvU32  cacheSize;
        NvU32 *pCache;
    } display;
} nv_efi_t;
void RmSaveEFIDisplayState (OBJGPU *pGpu);
void RmRestoreEFIDisplayState (OBJGPU *pGpu);
#endif // EFI_CONSOLE_H


@@ -0,0 +1,94 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CAPS_H_
#define _NV_CAPS_H_
#include <nv-kernel-interface-api.h>
/*
 * Opaque OS-specific struct; on Linux, it has a 'struct proc_dir_entry'
 * member.
*/
typedef struct nv_cap nv_cap_t;
/*
* Creates directory named "capabilities" under the provided path.
*
* @param[in] path Absolute path
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_init(const char *path);
/*
* Creates capability directory entry
*
* @param[in] parent_cap Parent capability directory
* @param[in] name Capability directory's name
* @param[in] mode Capability directory's access mode
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
* Creates capability file entry
*
* @param[in] parent_cap Parent capability directory
* @param[in] name Capability file's name
* @param[in] mode Capability file's access mode
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
* Destroys capability entry
*
* @param[in] cap Capability entry
*/
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap);
/*
* Validates and duplicates the provided file descriptor
*
* @param[in] cap Capability entry
* @param[in] fd File descriptor to be validated
*
* Returns duplicate fd upon success. Otherwise, returns -1.
*/
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd);
/*
* Closes file descriptor
*
* This function should be used to close duplicate file descriptors
* returned by nv_cap_validate_and_dup_fd.
*
 * @param[in] fd File descriptor to be closed
*
*/
void NV_API_CALL nv_cap_close_fd(int fd);
#endif /* _NV_CAPS_H_ */
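To make the flow of this interface concrete, here is a rough sketch of how a kernel-side client might build a small capability hierarchy and validate a file descriptor received from user space. The header name, proc path, entry names, and surrounding functions are hypothetical; only the nv_cap_* calls come from the declarations above.

#include "nv-caps.h"   /* assumed header name for the declarations above */

/* Hypothetical module-local capability entries. */
static nv_cap_t *example_root_cap;
static nv_cap_t *example_dir_cap;
static nv_cap_t *example_file_cap;

static int example_caps_setup(void)
{
    /* Creates ".../capabilities" under the (hypothetical) proc path. */
    example_root_cap = nv_cap_init("/driver/nvidia-example");
    if (example_root_cap == NULL)
        return -1;

    example_dir_cap = nv_cap_create_dir_entry(example_root_cap, "mig", 0555);
    if (example_dir_cap == NULL)
        return -1;

    example_file_cap = nv_cap_create_file_entry(example_dir_cap, "config", 0400);
    return (example_file_cap != NULL) ? 0 : -1;
}

/* Validate an fd handed in by a client, then release the duplicate. */
static int example_caps_check(int user_fd)
{
    int dup_fd = nv_cap_validate_and_dup_fd(example_file_cap, user_fd);

    if (dup_fd < 0)
        return -1;

    nv_cap_close_fd(dup_fd);
    return 0;
}

static void example_caps_teardown(void)
{
    nv_cap_destroy_entry(example_file_cap);
    nv_cap_destroy_entry(example_dir_cap);
    nv_cap_destroy_entry(example_root_cap);
}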


@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_GPU_INFO_H_
#define _NV_GPU_INFO_H_
typedef struct {
    NvU32 gpu_id;

    struct {
        NvU32 domain;
        NvU8  bus, slot, function;
    } pci_info;

    /*
     * opaque OS-specific pointer; on Linux, this is a pointer to the
     * 'struct device' for the GPU.
     */
    void *os_device_ptr;
} nv_gpu_info_t;
#define NV_MAX_GPUS 32
#endif /* _NV_GPU_INFO_H_ */


@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_NUMBERS_H
#define NV_IOCTL_NUMBERS_H
/* NOTE: using an ioctl() number > 55 will overflow! */
#define NV_IOCTL_MAGIC 'F'
#define NV_IOCTL_BASE 200
#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0)
#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1)
#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6)
#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7)
#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9)
#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10)
#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11)
#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12)
#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
#endif


@@ -0,0 +1,145 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_H
#define NV_IOCTL_H
#include <nv-ioctl-numbers.h>
#include <nvtypes.h>
typedef struct {
    NvU32 domain;       /* PCI domain number   */
    NvU8  bus;          /* PCI bus number      */
    NvU8  slot;         /* PCI slot number     */
    NvU8  function;     /* PCI function number */
    NvU16 vendor_id;    /* PCI vendor ID       */
    NvU16 device_id;    /* PCI device ID       */
} nv_pci_info_t;
/*
* ioctl()'s with parameter structures too large for the
* _IOC cmd layout use the nv_ioctl_xfer_t structure
* and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual
* size and user argument pointer into the RM, which
* will then copy it to/from kernel space in separate steps.
*/
typedef struct nv_ioctl_xfer
{
    NvU32 cmd;
    NvU32 size;
    NvP64 ptr NV_ALIGN_BYTES(8);
} nv_ioctl_xfer_t;
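As a sketch of the transfer mechanism described in the comment above nv_ioctl_xfer_t, a user-space client whose parameter structure is too large for the _IOC size field would wrap it roughly as follows. The header name, the _IOWR request encoding, and the pointer conversion are assumptions for illustration; nv_ioctl_xfer_t, NV_IOCTL_MAGIC, and NV_ESC_IOCTL_XFER_CMD come from these headers.

#include <string.h>
#include <sys/ioctl.h>
#include "nv-ioctl.h"   /* assumed header name for the definitions above */

/* Issue an escape whose parameter block is too large to encode directly. */
static int example_xfer_ioctl(int nvctl_fd, NvU32 real_cmd,
                              void *params, NvU32 params_size)
{
    nv_ioctl_xfer_t xfer;

    memset(&xfer, 0, sizeof(xfer));
    xfer.cmd  = real_cmd;                 /* the oversized escape number */
    xfer.size = params_size;              /* actual parameter block size */
    xfer.ptr  = (NvP64)(NvUPtr)params;    /* user pointer; RM copies to/from it */

    /* Request encoding is an assumption; the RM keys off NV_ESC_IOCTL_XFER_CMD. */
    return ioctl(nvctl_fd,
                 _IOWR(NV_IOCTL_MAGIC, NV_ESC_IOCTL_XFER_CMD, nv_ioctl_xfer_t),
                 &xfer);
}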
typedef struct nv_ioctl_card_info
{
    NvBool        valid;
    nv_pci_info_t pci_info;        /* PCI config information */
    NvU32         gpu_id;
    NvU16         interrupt_line;
    NvU64         reg_address NV_ALIGN_BYTES(8);
    NvU64         reg_size    NV_ALIGN_BYTES(8);
    NvU64         fb_address  NV_ALIGN_BYTES(8);
    NvU64         fb_size     NV_ALIGN_BYTES(8);
    NvU32         minor_number;
    NvU8          dev_name[10];    /* device names such as vmgfx[0-32] for vmkernel */
} nv_ioctl_card_info_t;
/* alloc event */
typedef struct nv_ioctl_alloc_os_event
{
    NvHandle hClient;
    NvHandle hDevice;
    NvU32    fd;
    NvU32    Status;
} nv_ioctl_alloc_os_event_t;

/* free event */
typedef struct nv_ioctl_free_os_event
{
    NvHandle hClient;
    NvHandle hDevice;
    NvU32    fd;
    NvU32    Status;
} nv_ioctl_free_os_event_t;
/* status code */
typedef struct nv_ioctl_status_code
{
    NvU32 domain;
    NvU8  bus;
    NvU8  slot;
    NvU32 status;
} nv_ioctl_status_code_t;
/* check version string */
#define NV_RM_API_VERSION_STRING_LENGTH 64
typedef struct nv_ioctl_rm_api_version
{
    NvU32 cmd;
    NvU32 reply;
    char  versionString[NV_RM_API_VERSION_STRING_LENGTH];
} nv_ioctl_rm_api_version_t;
#define NV_RM_API_VERSION_CMD_STRICT 0
#define NV_RM_API_VERSION_CMD_RELAXED '1'
#define NV_RM_API_VERSION_CMD_OVERRIDE '2'
#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0
#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1
typedef struct nv_ioctl_query_device_intr
{
    NvU32 intrStatus NV_ALIGN_BYTES(4);
    NvU32 status;
} nv_ioctl_query_device_intr;
/* system parameters that the kernel driver may use for configuration */
typedef struct nv_ioctl_sys_params
{
    NvU64 memblock_size NV_ALIGN_BYTES(8);
} nv_ioctl_sys_params_t;
typedef struct nv_ioctl_register_fd
{
    int ctl_fd;
} nv_ioctl_register_fd_t;
#define NV_DMABUF_EXPORT_MAX_HANDLES 128
typedef struct nv_ioctl_export_to_dma_buf_fd
{
    int      fd;
    NvHandle hClient;
    NvU32    totalObjects;
    NvU32    numObjects;
    NvU32    index;
    NvU64    totalSize NV_ALIGN_BYTES(8);
    NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
    NvU64    offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
    NvU64    sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
    NvU32    status;
} nv_ioctl_export_to_dma_buf_fd_t;
#endif


@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_KERNEL_RMAPI_OPS_H_
#define _NV_KERNEL_RMAPI_OPS_H_
/*
* Define the RMAPI provided to kernel-level RM clients.
*
* Kernel-level RM clients should populate nvidia_kernel_rmapi_ops_t
* by assigning nvidia_kernel_rmapi_ops_t::op and the corresponding
* parameter structure in nvidia_kernel_rmapi_ops_t's params union.
* Then, pass a pointer to the nvidia_kernel_rmapi_ops_t to
* rm_kernel_rmapi_op().
*/
#include "nvtypes.h"
#include "nvos.h"
typedef struct {
    NvU32 op; /* One of the NV0[14]_XXXX operations listed below. */

    union {
        NVOS00_PARAMETERS  free;              /* NV01_FREE                */
        NVOS02_PARAMETERS  allocMemory64;     /* NV01_ALLOC_MEMORY        */
        NVOS21_PARAMETERS  alloc;             /* NV04_ALLOC               */
        NVOS32_PARAMETERS *pVidHeapControl;   /* NV04_VID_HEAP_CONTROL    */
        NVOS33_PARAMETERS  mapMemory;         /* NV04_MAP_MEMORY          */
        NVOS34_PARAMETERS  unmapMemory;       /* NV04_UNMAP_MEMORY        */
        NVOS39_PARAMETERS  allocContextDma2;  /* NV04_ALLOC_CONTEXT_DMA   */
        NVOS46_PARAMETERS  mapMemoryDma;      /* NV04_MAP_MEMORY_DMA      */
        NVOS47_PARAMETERS  unmapMemoryDma;    /* NV04_UNMAP_MEMORY_DMA    */
        NVOS49_PARAMETERS  bindContextDma;    /* NV04_BIND_CONTEXT_DMA    */
        NVOS54_PARAMETERS  control;           /* NV04_CONTROL             */
        NVOS55_PARAMETERS  dupObject;         /* NV04_DUP_OBJECT          */
        NVOS57_PARAMETERS  share;             /* NV04_SHARE               */
        NVOS61_PARAMETERS  addVblankCallback; /* NV04_ADD_VBLANK_CALLBACK */
    } params;
} nvidia_kernel_rmapi_ops_t;
#endif /* _NV_KERNEL_RMAPI_OPS_H_ */
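A minimal sketch of the calling convention described above: a kernel-level client fills in op and the matching member of the params union, then hands the structure to rm_kernel_rmapi_op(). The header name, the NVOS00_PARAMETERS field names, the NV01_FREE constant, and the rm_kernel_rmapi_op() signature shown here are assumptions taken from nvos.h/nv.h for illustration; treat this as a sketch, not the driver's code.

#include "nv-kernel-rmapi-ops.h"   /* assumed header name for the type above */

/* Free an RM object on behalf of a kernel client; handles are placeholders. */
static NvU32 example_free_object(nvidia_stack_t *sp,
                                 NvHandle hClient,
                                 NvHandle hParent,
                                 NvHandle hObject)
{
    nvidia_kernel_rmapi_ops_t ops = { 0 };

    ops.op = NV01_FREE;                       /* operation selector       */
    ops.params.free.hRoot         = hClient;  /* client owning the object */
    ops.params.free.hObjectParent = hParent;  /* parent of the object     */
    ops.params.free.hObjectOld    = hObject;  /* object to be freed       */

    /* Signature assumed: driver stack pointer plus the ops structure. */
    rm_kernel_rmapi_op(sp, &ops);

    return ops.params.free.status;
}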


@@ -0,0 +1,63 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2007-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_NB_REGS_H_
#define _NV_NB_REGS_H_
typedef struct
{
    NvU32 subsystem_vendor_id;
    NvU32 subsystem_device_id;
    NvU16 gpu_device_id;
} nv_nb_id_t;

typedef struct
{
    NvU32 vendor_id;
    const char *name;
    NvU32 data;
} nv_nb_reg_t;
/*
 * nb_id_table contains the OEM vendor ID, the subsystem ID and the
 * GPU device ID of the notebooks for which we need to enable
 * vendor-specific registry keys. nb_reg_table contains the vendor-
 * specific registry key values. The initVendorSpecificRegistry()
 * function compares the present notebook's OEM subsystem ID and
 * GPU device ID with the values present in nb_id_table. If a match
 * is found, initVendorSpecificRegistry() extracts the vendor
 * ID and sets any associated registry key listed in nb_reg_table.
 */
static nv_nb_id_t nb_id_table[] = {
    { PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x1284 }, // Acer GT 630
    { PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x124b }, // Acer GT 640
    { 0, 0, 0 }
};

static nv_nb_reg_t nb_reg_table[] = {
    { PCI_VENDOR_ID_PC_PARTNER, "RmOverrideSupportChipsetAspm", 2 },
    { 0, NULL, 0 }
};
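A simplified sketch of the lookup that the comment above describes. The helper example_write_registry_dword() is a hypothetical stand-in for the driver's registry-write path, and the ID arguments would come from the probed GPU's PCI configuration; only the tables and types come from this header.

/* Hypothetical stand-in for the driver's registry-write routine. */
static void example_write_registry_dword(const char *name, NvU32 data);

static void example_apply_vendor_registry(NvU32 subsystem_vendor_id,
                                          NvU32 subsystem_device_id,
                                          NvU16 gpu_device_id)
{
    NvU32 i, j;

    for (i = 0; nb_id_table[i].subsystem_vendor_id != 0; i++)
    {
        if ((nb_id_table[i].subsystem_vendor_id != subsystem_vendor_id) ||
            (nb_id_table[i].subsystem_device_id != subsystem_device_id) ||
            (nb_id_table[i].gpu_device_id != gpu_device_id))
        {
            continue;
        }

        /* Matched this notebook: set every key registered for its vendor. */
        for (j = 0; nb_reg_table[j].name != NULL; j++)
        {
            if (nb_reg_table[j].vendor_id == subsystem_vendor_id)
            {
                example_write_registry_dword(nb_reg_table[j].name,
                                             nb_reg_table[j].data);
            }
        }
    }
}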
#endif //_NV_NB_REGS_H_


@@ -0,0 +1,373 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PRIV_H_
#define _NV_PRIV_H_
#include <nv.h>
#include <os/os.h>
#include <ctrl/ctrl402c.h>
#include <gpu/disp/kern_disp_max.h>
#include <efi-console.h>
#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d))
#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d))
#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d))
#define NV_PRIV_REG_RD08(b,o) ((b)->Reg008[(o)/1])
#define NV_PRIV_REG_RD16(b,o) ((b)->Reg016[(o)/2])
#define NV_PRIV_REG_RD32(b,o) ((b)->Reg032[(o)/4])
#define NV_NUM_CR_REGS 0x99
struct OBJGPU;
#define NV_BIT_PLANE_SIZE 64 * 1024
#define NV_NUM_VGA_BIT_PLANES 4
/*
* device state during Power Management
*/
typedef struct nv_pm_state_s
{
    NvU32  IntrEn;
    NvBool InHibernate;
} nv_pm_state_t;
/*
* data structure for the UNIX workqueues
*/
typedef struct nv_work_item_s
{
    NvU32 flags;
    NvU32 gpuInstance;
    union
    {
        OSWorkItemFunction *pGpuFunction;
        OSSystemWorkItemFunction *pSystemFunction;
    } func;
    void *pData;
} nv_work_item_t;
#define NV_WORK_ITEM_FLAGS_NONE 0x0
#define NV_WORK_ITEM_FLAGS_REQUIRES_GPU 0x1
#define NV_WORK_ITEM_FLAGS_DONT_FREE_DATA 0x2
/*
* pseudo-registry data structure
*/
typedef enum
{
    NV_REGISTRY_ENTRY_TYPE_UNKNOWN = 0,
    NV_REGISTRY_ENTRY_TYPE_DWORD,
    NV_REGISTRY_ENTRY_TYPE_BINARY,
    NV_REGISTRY_ENTRY_TYPE_STRING
} nv_reg_type_t;
typedef struct nv_reg_entry_s
{
    char *regParmStr;
    NvU32 type;
    NvU32 data;   // used when type == NV_REGISTRY_ENTRY_TYPE_DWORD
    NvU8 *pdata;  // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
    NvU32 len;    // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
    struct nv_reg_entry_s *next;
} nv_reg_entry_t;
#define INVALID_DISP_ID 0xFFFFFFFF
#define MAX_DISP_ID_PER_ADAPTER 0x2
typedef struct nv_i2c_adapter_entry_s
{
    void  *pOsAdapter;
    NvU32  port;
    NvU32  displayId[MAX_DISP_ID_PER_ADAPTER];
} nv_i2c_adapter_entry_t;
#define NV_INIT_FLAG_HAL 0x0001
#define NV_INIT_FLAG_HAL_COMPONENTS 0x0002
#define NV_INIT_FLAG_GPU_STATE 0x0004
#define NV_INIT_FLAG_GPU_STATE_LOAD 0x0008
#define NV_INIT_FLAG_FIFO_WATCHDOG 0x0010
#define NV_INIT_FLAG_CORE_LOGIC 0x0020
#define NV_INIT_FLAG_HIRES 0x0040
#define NV_INIT_FLAG_DISP_STATE_SAVED 0x0080
#define NV_INIT_FLAG_GPUMGR_ATTACH 0x0100
#define NV_INIT_FLAG_PUBLIC_I2C 0x0400
#define NV_INIT_FLAG_SCALABILITY 0x0800
#define NV_INIT_FLAG_DMA 0x1000
#define MAX_I2C_ADAPTERS NV402C_CTRL_NUM_I2C_PORTS
/*
* GPU dynamic power state machine.
*
 * The GPU is in exactly one of these states at a time. Only certain state
* transitions are valid, as documented by the DAGs below.
*
* When in "instant idle" or COARSE mode:
*
* +----------------------+
* v |
* +---------+ +----------------+ +--------+
* | UNKNOWN | --> | IDLE_INDICATED | --> | IN_USE |
* +---------+ +----------------+ +--------+
*
* The transition from UNKNOWN to IDLE_INDICATED happens in
* rm_init_dynamic_power_management().
*
* Thereafter, transitions from IDLE_INDICATED to IN_USE happen when
* os_ref_dynamic_power() is called and the refcount transitions from 0 to 1;
* transitions from IN_USE to IDLE_INDICATED happen when
* os_unref_dynamic_power() is called and the refcount transitions from 1 to 0.
* Note that only calls to os_(un)ref_dynamic_power() with the mode == COARSE
* are considered in this mode; calls with mode == FINE are ignored. Since
* COARSE calls are placed only in rm_init_adapter/rm_shutdown_adapter, the GPU
* effectively stays in the IN_USE state any time any client has initialized
* it.
*
*
* When in "deferred idle" or FINE mode:
*
* +----------------------------------------------------------------+
* | |
* | |
* | +-------------------------------------------+----------------------+
* | | | v
* | +---------+ +----------------+ +--------------+ +----------------+ +--------+
* | | UNKNOWN | --> | IDLE_INDICATED | --> | | --> | IDLE_SUSTAINED | --> | IN_USE | -+
* | +---------+ +----------------+ | | +----------------+ +--------+ |
* | ^ | | | ^ |
* +--------------------+ | IDLE_INSTANT | ------+----------------------+ |
* | | | |
* | | | |
* | | <-----+ |
* +--------------+ |
* ^ |
* +-----------------------------------------------------+
*
* As before, the transition from UNKNOWN to IDLE_INDICATED happens in
* rm_init_dynamic_power_management(). This is not ideal: it means the GPU may
* be powered down immediately upon loading the RM module, even if
* rm_init_adapter() is going to be called soon thereafter. However, we can't
* rely on deferred idle callbacks yet, since those currently rely on core RM
* being initialized.
*
* At the beginning of rm_init_adapter(), the GPU transitions to the IN_USE
* state; during the rm_init_adapter() sequence,
* RmInitDeferredDynamicPowerManagement() will be called which will schedule
* timer callbacks and set the "deferred_idle_enabled" boolean.
*
* While in "deferred idle" mode, one of the callbacks
* timerCallbackForIdlePreConditions(), timerCallbackToIndicateIdle(), or
* RmIndicateIdle() should be scheduled when in the states:
* - IN_USE
* - IDLE_INSTANT
* - IDLE_SUSTAINED
* Note that since we may transition from IN_USE to IDLE_INSTANT rapidly (e.g.,
* for a series of RM calls), we don't attempt to schedule the callbacks and
* cancel them on each of these transitions. The
* timerCallbackForIdlePreConditions() callback will simply exit early if in
* the IN_USE state.
*
* As before, the GPU will remain in the IN_USE state until
* os_unref_dynamic_power() is called and the count transitions from 1 to 0
* (calls with mode == FINE are honored, in this mode, and these transitions
* can happen frequently). When the refcount reaches 0, rather than going
* directly to the IDLE_INDICATED state, it transitions to the IDLE_INSTANT
* state.
*
* Then, when the next timerCallbackForIdlePreConditions() callback executes,
* if all preconditions are met, the state will transition to IDLE_SUSTAINED.
*
 * If, when in the IDLE_SUSTAINED state, os_ref_dynamic_power() is called, the
 * GPU will transition back to the IN_USE state; when it next becomes idle, it
 * returns to the IDLE_INSTANT state rather than IDLE_SUSTAINED. This ensures
 * that there is a suitable delay between any activity that requires bumping
 * the refcount and indicating idleness.
*
* If the timerCallbackForIdlePreConditions() callback executes again and the
* GPU is still in the IDLE_SUSTAINED state, userspace mappings will be revoked
* and the timerCallbackToIndicateIdle() callback will be scheduled.
*
* If, before the timerCallbackToIndicateIdle() callback executes, either
* os_ref_dynamic_power() is called or a mapping which has been revoked is
* accessed (which triggers the RmForceGpuNotIdle() callback), the GPU will
* transition back to the IN_USE or IDLE_INSTANT state, respectively.
*
* Then, when the timerCallbackToIndicateIdle() callback executes, if all
* mappings are still revoked, and the GPU is still in the IDLE_SUSTAINED
* state, and all GPU idleness preconditions remain satisfied, the
* RmIndicateIdle() work item will be enqueued. (Else, the GPU will transition
* back to the IDLE_INSTANT state and the callback for preconditions is
* scheduled again.)
*
* Finally, once the RmIndicateIdle() work item is called, if all of the same
* conditions still hold, the state will transition to IDLE_INDICATED. No
* callbacks will be scheduled from here; the callbacks for preconditions
* should be re-scheduled when transitioning out of the IDLE_INDICATED state.
*
* Once in the IDLE_INDICATED state, the kernel is free to call the RM to
* perform the GC6 entry sequence then turn off power to the GPU (although it
* may not, if the audio function is being used for example).
*
* There are two paths to exit the IDLE_INDICATED state:
* (a) If os_ref_dynamic_power() is called, in which case it transitions
* directly to the IN_USE state;
* (b) If RmForceGpuNotIdle() is called, in which case it transitions back to
* the IDLE_INSTANT state.
*/
typedef enum
{
    NV_DYNAMIC_POWER_STATE_UNKNOWN = 0,
    NV_DYNAMIC_POWER_STATE_IN_USE,
    NV_DYNAMIC_POWER_STATE_IDLE_INSTANT,
    NV_DYNAMIC_POWER_STATE_IDLE_SUSTAINED,
    NV_DYNAMIC_POWER_STATE_IDLE_INDICATED,
} nv_dynamic_power_state_t;
typedef struct nv_dynamic_power_s
{
    /*
     * mode is read without the mutex -- it should be read-only outside of
     * rm_init_dynamic_power_management, which is called during probe only.
     */
    nv_dynamic_power_mode_t mode;

    /*
     * Whether to indicate idle immediately when the refcount reaches 0, or
     * only go to the IDLE_INSTANT state, and expect timer callbacks to
     * transition through IDLE_SUSTAINED -> IDLE_INDICATED.
     */
    NvBool deferred_idle_enabled;
    nv_dynamic_power_state_t state;
    NvS32 refcount;

    /*
     * A word on lock ordering. These locks must be taken in the order:
     *
     *     RM API lock > this dynamic_power mutex > RM GPUs lock
     *
     * Skipping any of those locks is fine (if they aren't required to protect
     * whatever state is being accessed or modified), so long as the order is
     * not violated.
     */
    PORT_MUTEX *mutex;

    /*
     * Callback handles for deferred dynamic power management.
     */
    NvP64 idle_precondition_check_event;
    NvP64 indicate_idle_event;
    NvBool idle_precondition_check_callback_scheduled;

    /*
     * Callback handle for kernel-initiated GC6 entry/exit.
     * These will be protected by the GPU lock.
     */
    NvP64 remove_idle_holdoff;
    NvBool b_idle_holdoff;

    /*
     * Flag set if the platform does not support fine-grained dynamic power
     * management.
     */
    NvBool b_fine_not_supported;

    /*
     * Counter to track clients disallowing GCOFF.
     */
    NvU32 clients_gcoff_disallow_refcount;

    /*
     * Maximum FB allocation size which can be saved in system memory
     * while doing GCOFF-based dynamic PM.
     */
    NvU64 gcoff_max_fb_size;

    /*
     * NVreg_DynamicPowerManagement regkey value set by the user.
     */
    NvU32 dynamic_power_regkey;
} nv_dynamic_power_t;
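A condensed sketch of the refcount-driven transitions that the state-machine comment above describes, using the enum and structure just defined. Locking, timer-callback scheduling, and kernel notification are omitted; the helper names are illustrative, not the driver's.

/* Illustrative only: real code also takes nvdp->mutex and schedules callbacks. */
static void example_ref_dynamic_power(nv_dynamic_power_t *nvdp)
{
    if (++nvdp->refcount == 1)
    {
        /* Any transition out of an idle state lands in IN_USE. */
        nvdp->state = NV_DYNAMIC_POWER_STATE_IN_USE;
    }
}

static void example_unref_dynamic_power(nv_dynamic_power_t *nvdp)
{
    if (--nvdp->refcount == 0)
    {
        if (nvdp->deferred_idle_enabled)
        {
            /*
             * Deferred idle: the precondition/indicate-idle callbacks walk
             * IDLE_INSTANT -> IDLE_SUSTAINED -> IDLE_INDICATED later on.
             */
            nvdp->state = NV_DYNAMIC_POWER_STATE_IDLE_INSTANT;
        }
        else
        {
            /* Instant idle: indicate idleness as soon as the refcount drops. */
            nvdp->state = NV_DYNAMIC_POWER_STATE_IDLE_INDICATED;
        }
    }
}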
typedef struct
{
    OBJGPU *pGpu;
    NvU32 pmc_boot_0;

    nv_efi_t efi;

    NvU8 scr_vga_active[OBJ_MAX_HEADS];
    NvU8 scr_dcb_index_lo[OBJ_MAX_HEADS];
    NvU8 scr_dcb_index_hi[OBJ_MAX_HEADS];
    NvU8 font_bitplanes[NV_NUM_VGA_BIT_PLANES][NV_BIT_PLANE_SIZE];

    NvU32 flags;
    NvU32 status;

    nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];

    void *pVbiosCopy;
    NvU32 vbiosSize;

    nv_pm_state_t pm_state;
    nv_reg_entry_t *pRegistry;
    nv_dynamic_power_t dynamic_power;

    /* Flag to check if the GPU needs 4K page isolation. */
    NvBool b_4k_page_isolation_required;

    /* Flag to check if the GPU mobile config is enabled. */
    NvBool b_mobile_config_enabled;

    /* Flag to check if S0ix-based power management is enabled. */
    NvBool s0ix_pm_enabled;

    /*
     * Maximum FB allocation size which can be saved in system memory
     * during system suspend with S0ix-based power management.
     */
    NvU64 s0ix_gcoff_max_fb_size;

    NvU32 pmc_boot_42;
} nv_priv_t;
#define NV_SET_NV_PRIV(nv,p) ((nv)->priv = (p))
#define NV_GET_NV_PRIV(nv) ((nv) ? (nv)->priv : NULL)
/*
* Make sure that your stack has taken API Lock before using this macro.
*/
#define NV_GET_NV_PRIV_PGPU(nv) \
(NV_GET_NV_PRIV(nv) ? ((nv_priv_t *)NV_GET_NV_PRIV(nv))->pGpu : NULL)
#endif // _NV_PRIV_H_


@@ -0,0 +1,920 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RM_REG_H_
#define _RM_REG_H_
#include "nvtypes.h"
/*
* use NV_REG_STRING to stringify a registry key when using that registry key
*/
#define __NV_REG_STRING(regkey) #regkey
#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
/*
* use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition
* of registry keys in the kernel module source code.
*/
#define __NV_REG_VAR(regkey) NVreg_##regkey
#if defined(NV_MODULE_PARAMETER)
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
    static NvU32 __NV_REG_VAR(regkey) = (default_value); \
    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
    NvU32 __NV_REG_VAR(regkey) = (default_value); \
    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
    static NvU32 __NV_REG_VAR(regkey) = (default_value)
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
    NvU32 __NV_REG_VAR(regkey) = (default_value)
#endif
#if defined(NV_MODULE_STRING_PARAMETER)
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
    char *__NV_REG_VAR(regkey) = (default_value); \
    NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
    char *__NV_REG_VAR(regkey) = (default_value)
#endif
#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
    { NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
/*
 * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
* the regkey and the name of the module parameter. When using this macro, the
* name of the parameter is passed to the extra "parameter" argument, and it is
* this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
*/
#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
    { NV_REG_STRING(regkey), &__NV_REG_VAR(parameter) }
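For illustration, here is how a hypothetical regkey named ExampleFeature would be wired up with these macros, along with the expansions they produce; the real keys defined by this file appear further below.

/* Hypothetical key, default value 0, exposed as the NVreg_ExampleFeature
 * module parameter when NV_MODULE_PARAMETER is defined: */
NV_DEFINE_REG_ENTRY(ExampleFeature, 0);
/* expands (without the module-parameter declaration) to:
 *     static NvU32 NVreg_ExampleFeature = (0);
 */

/* Its entry, as it would appear in the nv_parms[] table below:
 *     NV_DEFINE_PARAMS_TABLE_ENTRY(ExampleFeature),
 * which expands to:
 *     { "ExampleFeature", &NVreg_ExampleFeature },
 */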
/*
*----------------- registry key definitions--------------------------
*/
/*
* Option: ModifyDeviceFiles
*
* Description:
*
* When this option is enabled, the NVIDIA driver will verify the validity
* of the NVIDIA device files in /dev and attempt to dynamically modify
* and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
* driver to touch the device files, you can use this registry key.
*
 * This module parameter is only honored by the NVIDIA GPU driver and the
 * NVIDIA capability driver. Furthermore, the NVIDIA capability driver
 * provides a modifiable /proc file entry (DeviceFileModify=0/1) to alter the
 * behavior of this module parameter per device file.
*
* Possible Values:
* 0 = disable dynamic device file management
* 1 = enable dynamic device file management (default)
*/
#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
/*
* Option: DeviceFileUID
*
* Description:
*
* This registry key specifies the UID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default UID is 0 ('root').
*/
#define __NV_DEVICE_FILE_UID DeviceFileUID
#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
/*
* Option: DeviceFileGID
*
* Description:
*
* This registry key specifies the GID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default GID is 0 ('root').
*/
#define __NV_DEVICE_FILE_GID DeviceFileGID
#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
/*
* Option: DeviceFileMode
*
* Description:
*
* This registry key specifies the device file mode assigned to the NVIDIA
* device files created and/or modified by the NVIDIA driver when dynamic
* device file management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default mode is 0666 (octal, rw-rw-rw-).
*/
#define __NV_DEVICE_FILE_MODE DeviceFileMode
#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
/*
* Option: ResmanDebugLevel
*
* Default value: ~0
*/
#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
/*
* Option: RmLogonRC
*
* Default value: 1
*/
#define __NV_RM_LOGON_RC RmLogonRC
#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
/*
* Option: InitializeSystemMemoryAllocations
*
* Description:
*
* The NVIDIA Linux driver normally clears system memory it allocates
* for use with GPUs or within the driver stack. This is to ensure
* that potentially sensitive data is not rendered accessible by
* arbitrary user applications.
*
* Owners of single-user systems or similar trusted configurations may
* choose to disable the aforementioned clears using this option and
* potentially improve performance.
*
* Possible values:
*
* 1 = zero out system memory allocations (default)
* 0 = do not perform memory clears
*/
#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
InitializeSystemMemoryAllocations
#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
/*
* Option: RegistryDwords
*
* Description:
*
* This option accepts a semicolon-separated list of key=value pairs. Each
* key name is checked against the table of static options; if a match is
* found, the static option value is overridden, but invalid options remain
* invalid. Pairs that do not match an entry in the static option table
* are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwords="<key=value>;<key=value>;..."
*/
#define __NV_REGISTRY_DWORDS RegistryDwords
#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
/*
* Option: RegistryDwordsPerDevice
*
* Description:
*
 * This option allows registry keys to be specified per GPU device, providing
 * control of the registry at per-GPU granularity. It accepts a semicolon-
 * separated list of key=value pairs. The first key=value pair MUST be
 * "pci=DDDD:BB:DD.F;", where DDDD is the domain, BB is the bus ID, DD is the
 * device slot number and F is the function. This PCI BDF identifies the GPU
 * to which the registry keys that follow are assigned.
 * If no GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is
 * found, all the registry keys that follow are skipped, until the next valid
 * PCI identifier "pci=DDDD:BB:DD.F;" is found. The following are the valid
 * formats for the value of the "pci" string:
 * 1) bus:slot : Domain and function default to 0.
 * 2) domain:bus:slot : Function defaults to 0.
 * 3) domain:bus:slot.func : Complete PCI dev id string.
 *
 * For each of the registry keys that follow, the key name is checked against
 * the table of static options; if a match is found, the static option value
 * is overridden, but invalid options remain invalid. Pairs that do not match
 * an entry in the static option table are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
* pci=DDDD:BB:DD.F;<key=value>;..;"
*/
#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
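As a concrete, hypothetical example, the per-device form could be set from a modprobe configuration file as follows, applying the RmProfilingAdminOnly key (defined later in this file) only to a GPU at PCI address 0000:65:00.0:

options nvidia NVreg_RegistryDwordsPerDevice="pci=0000:65:00.0;RmProfilingAdminOnly=1"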
#define __NV_RM_MSG RmMsg
#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
/*
* Option: UsePageAttributeTable
*
* Description:
*
* Enable/disable use of the page attribute table (PAT) available in
* modern x86/x86-64 processors to set the effective memory type of memory
* mappings to write-combining (WC).
*
 * If this option is enabled, an x86 processor with PAT support is present,
 * and the host system's Linux kernel did not configure one of the PAT
 * entries to indicate the WC memory type, the driver will change the second
 * entry in the PAT from its default (write-through (WT)) to WC at module
 * load time. If the kernel did update one of the PAT entries, the driver
 * will not modify the PAT.
*
* In both cases, the driver will honor attempts to map memory with the WC
* memory type by selecting the appropriate PAT entry using the correct
* set of PTE flags.
*
* Possible values:
*
* ~0 = use the NVIDIA driver's default logic (default)
* 1 = enable use of the PAT for WC mappings.
* 0 = disable use of the PAT for WC mappings.
*/
#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
/*
* Option: EnableMSI
*
* Description:
*
 * When this option is enabled and the host kernel supports MSI, the NVIDIA
 * driver will enable the PCI-E MSI capability of GPUs that support this
 * feature, instead of using wired PCI-E interrupts.
*
* Possible Values:
*
* 0 = disable MSI interrupt
* 1 = enable MSI interrupt (default)
*
*/
#define __NV_ENABLE_MSI EnableMSI
#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
/*
* Option: RegisterForACPIEvents
*
* Description:
*
* When this option is enabled, the NVIDIA driver will register with the
* ACPI subsystem to receive notification of ACPI events.
*
* Possible values:
*
* 1 - register for ACPI events (default)
* 0 - do not register for ACPI events
*/
#define __NV_REGISTER_FOR_ACPI_EVENTS RegisterForACPIEvents
#define NV_REG_REGISTER_FOR_ACPI_EVENTS NV_REG_STRING(__NV_REGISTER_FOR_ACPI_EVENTS)
/*
* Option: EnablePCIeGen3
*
* Description:
*
* Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
* when configured on SandyBridge E desktop platforms, NVIDIA feels that
* delivering a reliable, high-quality experience is not currently possible in
* PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
* NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
* option to enable PCIe Gen3 support.
*
* This is completely unsupported!
*
* Possible Values:
*
* 0: disable PCIe Gen3 support (default)
* 1: enable PCIe Gen3 support
*/
#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
/*
* Option: MemoryPoolSize
*
* Description:
*
* When set to a non-zero value, this option specifies the size of the
* memory pool, given as a multiple of 1 GB, created on VMware ESXi to
* satisfy any system memory allocations requested by the NVIDIA kernel
* module.
*/
#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
/*
* Option: KMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for kmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
/*
* Option: VMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for vmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
/*
* Option: IgnoreMMIOCheck
*
* Description:
*
 * When this option is enabled, the NVIDIA kernel module will ignore the
 * MMIO limit check during device probe on the VMware ESXi kernel. This is
 * typically necessary when the VMware ESXi MMIO limit differs between a
 * base version and its updates. Customers using updates can set this regkey
 * to avoid probe failures.
*/
#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
/*
* Option: TCEBypassMode
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will attempt to setup
* all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
* the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
* necessary for CUDA applications in which large system memory mappings may
* exceed the default TCE remapping capacity when operated in non-bypass mode.
*
* This option has no effect on non-POWER platforms.
*
* Possible Values:
*
* 0: system default TCE mode on all GPUs
* 1: enable TCE bypass mode on all GPUs
* 2: disable TCE bypass mode on all GPUs
*/
#define __NV_TCE_BYPASS_MODE TCEBypassMode
#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
#define NV_TCE_BYPASS_MODE_DEFAULT 0
#define NV_TCE_BYPASS_MODE_ENABLE 1
#define NV_TCE_BYPASS_MODE_DISABLE 2
/*
* Option: pci
*
* Description:
*
 * On Unix platforms, per-GPU registry keys can be specified as:
 * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<per-gpu registry keys>",
 * where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
 * The "pci" key identifies that what follows is a PCI BDF identifier, to
 * which the subsequent registry keys are applied.
*
* This define is not used on non-UNIX platforms.
*
* Possible Formats for value:
*
* 1) bus:slot : Domain and function defaults to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI BDF identifier string.
*/
#define __NV_PCI_DEVICE_BDF pci
#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
/*
* Option: EnableStreamMemOPs
*
* Description:
*
 * When this option is enabled, the CUDA driver will enable support for
 * CUDA Stream Memory Operations in user-mode applications; these are
 * currently disabled by default due to limited support in developer tools.
*
* Note: this is treated as a hint. MemOPs may still be left disabled by CUDA
* driver for other reasons.
*
* Possible Values:
*
* 0 = disable feature (default)
* 1 = enable feature
*/
#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
/*
* Option: EnableUserNUMAManagement
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will require the
* user-mode NVIDIA Persistence daemon to manage the onlining and offlining
* of its NUMA device memory.
*
* This option has no effect on platforms that do not support onlining
* device memory to a NUMA node (this feature is only supported on certain
* POWER9 systems).
*
* Possible Values:
*
* 0: disable user-mode NUMA management
* 1: enable user-mode NUMA management (default)
*/
#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
/*
* Option: GpuBlacklist
*
* Description:
*
* This option accepts a list of blacklisted GPUs, separated by commas, that
* cannot be attached or used. Each blacklisted GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs. This regkey is deprecated and will be removed in the future. Use
* NV_REG_EXCLUDED_GPUS instead.
*/
#define __NV_GPU_BLACKLIST GpuBlacklist
#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
/*
* Option: ExcludedGpus
*
* Description:
*
* This option accepts a list of excluded GPUs, separated by commas, that
* cannot be attached or used. Each excluded GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs.
*/
#define __NV_EXCLUDED_GPUS ExcludedGpus
#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
/*
* Option: NvLinkDisable
*
* Description:
*
 * When this option is enabled, the NVIDIA kernel module will not attempt to
 * initialize or train NVLink connections for any GPUs. A system reboot is
 * required for changes to take effect.
*
* This option has no effect if no GPUs support NVLink.
*
* Possible Values:
*
* 0: Do not disable NVLink (default)
* 1: Disable NVLink
*/
#define __NV_NVLINK_DISABLE NvLinkDisable
#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
/*
* Option: RestrictProfilingToAdminUsers
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will prevent users
* without administrative access (i.e., the CAP_SYS_ADMIN capability) from
* using GPU performance counters.
*
* Possible Values:
*
* 0: Do not restrict GPU counters (default)
* 1: Restrict GPU counters to system administrators only
*/
#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
/*
* Option: TemporaryFilePath
*
* Description:
*
* When specified, this option changes the location in which the
* NVIDIA kernel module will create unnamed temporary files (e.g. to
 * save the contents of video memory in). The indicated path must
 * be a directory. By default, temporary files are created in /tmp.
*/
#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
/*
* Option: PreserveVideoMemoryAllocations
*
* If enabled, this option prompts the NVIDIA kernel module to save and
* restore all video memory allocations across system power management
* cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
* only select allocations are preserved.
*
* Possible Values:
*
* 0: Preserve only select video memory allocations (default)
* 1: Preserve all video memory allocations
*/
#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
/*
* Option: EnableS0ixPowerManagement
*
* When this option is enabled, the NVIDIA driver will use S0ix-based
* power management for system suspend/resume, if both the platform and
* the GPU support S0ix.
*
* During system suspend, if S0ix is enabled and
* video memory usage is above the threshold configured by
* 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
* in self-refresh mode while the rest of the GPU is powered down.
*
* Otherwise, the driver will copy video memory contents to system memory
* and power off the video memory along with the GPU.
*
* Possible Values:
*
* 0: Disable S0ix based power management (default)
* 1: Enable S0ix based power management
*/
#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
/*
* Option: S0ixPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use during
* S0ix-based system power management.
*
* When S0ix is enabled and the system is suspended, the driver will
* compare the amount of video memory in use with this threshold,
* to decide whether to keep video memory in self-refresh or copy video
* memory content to system memory.
*
* See the 'EnableS0ixPowerManagement' option.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* Default value for this option is 256MB.
*
*/
#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
S0ixPowerManagementVideoMemoryThreshold
#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: DynamicPowerManagement
*
* This option controls how aggressively the NVIDIA kernel module will manage
* GPU power through kernel interfaces.
*
* Possible Values:
*
 * 0: Never allow the GPU to be powered down.
* 1: Power down the GPU when it is not initialized.
* 2: Power down the GPU after it has been inactive for some time.
* 3: (Default) Power down the GPU after a period of inactivity (i.e.,
* mode 2) on Ampere or later notebooks. Otherwise, do not power down
* the GPU.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
/*
* Option: DynamicPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use
* when selecting the dynamic power management scheme.
*
* When the driver detects that the GPU is idle, it will compare the amount
* of video memory in use with this threshold.
*
* If the current video memory usage is less than the threshold, the
* driver may preserve video memory contents in system memory and power off
* the video memory along with the GPU itself, if supported. Otherwise,
* the video memory will be kept in self-refresh mode while powering down
* the rest of the GPU, if supported.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* If the requested value is greater than 200MB (the default), then it
* will be capped to 200MB.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
DynamicPowerManagementVideoMemoryThreshold
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: RegisterPCIDriver
*
* Description:
*
 * When this option is enabled, the NVIDIA driver will register with the
 * PCI subsystem.
*
* Possible values:
*
* 1 - register as PCI driver (default)
* 0 - do not register as PCI driver
*/
#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
/*
* Option: EnablePCIERelaxedOrderingMode
*
* Description:
*
* When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
* be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
* every device to set the relaxed ordering bit to 1 in all outbound MWr
* transaction-layer packets. This is equivalent to setting the regkey to
* FORCE_ENABLE as a non-per-device registry key.
*
* Possible values:
* 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
* 1 - Enable PCIe TLP relaxed ordering bit-setting
*/
#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableGpuFirmware
*
* Description:
*
* When this option is enabled, the NVIDIA driver will enable use of GPU
* firmware.
*
* Possible mode values:
* 0 - Do not enable GPU firmware
* 1 - Enable GPU firmware
* 2 - (Default) Use the default enablement policy for GPU firmware
*
* Setting this to anything other than 2 will alter driver firmware-
* enablement policies, possibly disabling GPU firmware where it would
* have otherwise been enabled by default.
*
 * If this key is set globally for the system, the driver may still attempt
 * to apply some policies to maintain uniform firmware modes across all
 * GPUs. This may result in the driver failing initialization on some GPUs
 * to maintain such a policy.
 *
 * If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
 * will attempt to honor whatever configuration is specified without applying
 * additional policies. This may also result in failed GPU initializations if
 * the configuration is not possible (for example if the firmware is missing
 * from the filesystem, or the GPU is not capable).
*
* Policy bits:
*
* POLICY_ALLOW_FALLBACK:
* As the normal behavior is to fail GPU initialization if this registry
* entry is set in such a way that results in an invalid configuration, if
* instead the user would like the driver to automatically try to fallback
* to initializing the failing GPU with firmware disabled, then this bit can
* be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
* Note that this can result in a mixed mode configuration (ex: GPU0 has
* firmware enabled, but GPU1 does not).
*
*/
#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
/*
* Option: EnableGpuFirmwareLogs
*
* When this option is enabled, the NVIDIA driver will send GPU firmware logs
* to the system log, when possible.
*
* Possible values:
* 0 - Do not send GPU firmware logs to the system log
* 1 - Enable sending of GPU firmware logs to the system log
* 2 - (Default) Enable sending of GPU firmware logs to the system log for
* the debug kernel driver build only
*/
#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
/*
* Option: EnableDbgBreakpoint
*
* When this option is set to a non-zero value, and the kernel is configured
* appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
* INT3 on x86_64), assumed to be caught by an attached debugger.
*
* When this option is set to the value zero (the default), assertions within
* resman will print to the system log, but no CPU breakpoint will be triggered.
*/
#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
/*
* Option: OpenRmEnableUnsupportedGpus
*
 * Open nvidia.ko support for features beyond what is used on Data Center GPUs
 * is still fairly immature, so for now users are required to opt into use of
 * open nvidia.ko with a special registry key when not on a Data Center GPU.
*/
#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS)
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
*---------registry key parameter declarations--------------
*/
NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
NV_DEFINE_REG_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS, 1);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
/*
*----------------registry database definition----------------------
*/
/*
* You can enable any of the registry options disabled by default by
* editing their respective entries in the table below. The last field
* determines if the option is considered valid - in order for the
* changes to take effect, you need to recompile and reload the NVIDIA
* kernel module.
*/
nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY,
__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS),
{NULL, NULL}
};
#elif defined(NVRM)
extern nv_parm_t nv_parms[];
#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */
#endif /* _RM_REG_H_ */

View File

@@ -0,0 +1,49 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
#define _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
#include <nvos.h>
/*
* This is a wrapper for NVOS02_PARAMETERS with file descriptor
*/
typedef struct
{
NVOS02_PARAMETERS params;
int fd;
} nv_ioctl_nvos02_parameters_with_fd;
/*
* This is a wrapper for NVOS33_PARAMETERS with file descriptor
*/
typedef struct
{
NVOS33_PARAMETERS params;
int fd;
} nv_ioctl_nvos33_parameters_with_fd;
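/*
 * Illustrative sketch (not part of this header): the embedded fd is the file
 * descriptor the kernel later ties to the mmap context when it handles
 * NV_ESC_RM_MAP_MEMORY (see RmIoctl).  The helper and handle values below are
 * hypothetical and the block is compiled out.
 */
#if 0
static void example_fill_map_memory_args(nv_ioctl_nvos33_parameters_with_fd *p,
                                         NvHandle hClient, NvHandle hDevice,
                                         NvHandle hMemory, int map_fd)
{
    p->params.hClient = hClient;
    p->params.hDevice = hDevice;
    p->params.hMemory = hMemory;
    p->fd             = map_fd;   /* fd on which the mapping will be created */
}
#endif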
#endif // _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_

File diff suppressed because it is too large

View File

@@ -0,0 +1,54 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_ESCAPE_H_INCLUDED
#define NV_ESCAPE_H_INCLUDED
#define NV_ESC_RM_ALLOC_MEMORY 0x27
#define NV_ESC_RM_ALLOC_OBJECT 0x28
#define NV_ESC_RM_FREE 0x29
#define NV_ESC_RM_CONTROL 0x2A
#define NV_ESC_RM_ALLOC 0x2B
#define NV_ESC_RM_CONFIG_GET 0x32
#define NV_ESC_RM_CONFIG_SET 0x33
#define NV_ESC_RM_DUP_OBJECT 0x34
#define NV_ESC_RM_SHARE 0x35
#define NV_ESC_RM_CONFIG_GET_EX 0x37
#define NV_ESC_RM_CONFIG_SET_EX 0x38
#define NV_ESC_RM_I2C_ACCESS 0x39
#define NV_ESC_RM_IDLE_CHANNELS 0x41
#define NV_ESC_RM_VID_HEAP_CONTROL 0x4A
#define NV_ESC_RM_ACCESS_REGISTRY 0x4D
#define NV_ESC_RM_MAP_MEMORY 0x4E
#define NV_ESC_RM_UNMAP_MEMORY 0x4F
#define NV_ESC_RM_GET_EVENT_DATA 0x52
#define NV_ESC_RM_ALLOC_CONTEXT_DMA2 0x54
#define NV_ESC_RM_ADD_VBLANK_CALLBACK 0x56
#define NV_ESC_RM_MAP_MEMORY_DMA 0x57
#define NV_ESC_RM_UNMAP_MEMORY_DMA 0x58
#define NV_ESC_RM_BIND_CONTEXT_DMA 0x59
#define NV_ESC_RM_EXPORT_OBJECT_TO_FD 0x5C
#define NV_ESC_RM_IMPORT_OBJECT_FROM_FD 0x5D
#define NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 0x5E
#endif // NV_ESCAPE_H_INCLUDED

View File

@@ -0,0 +1,234 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Os interface definitions needed by os-interface.c
*/
#ifndef OS_INTERFACE_H
#define OS_INTERFACE_H
/******************* Operating System Interface Routines *******************\
* *
* Operating system wrapper functions used to abstract the OS. *
* *
\***************************************************************************/
#include <nvtypes.h>
#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <nv-caps.h>
typedef struct
{
NvU32 os_major_version;
NvU32 os_minor_version;
NvU32 os_build_number;
const char * os_build_version_str;
const char * os_build_date_plus_str;
} os_version_info;
/* Each OS defines its own version of this opaque type */
struct os_work_queue;
/* Each OS defines its own version of this opaque type */
typedef struct os_wait_queue os_wait_queue;
/*
* ---------------------------------------------------------------------------
*
* Function prototypes for OS interface.
*
* ---------------------------------------------------------------------------
*/
NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_current_tick (void);
NvU64 NV_API_CALL os_get_current_tick_hr (void);
NvU64 NV_API_CALL os_get_tick_resolution (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_allow_priority_override (void);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvU32 os_sev_status;
extern NvBool os_sev_enabled;
extern NvBool os_dma_buf_enabled;
/*
* ---------------------------------------------------------------------------
*
* Debug macros.
*
* ---------------------------------------------------------------------------
*/
#define NV_DBG_INFO 0x0
#define NV_DBG_SETUP 0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_WARNINGS 0x3
#define NV_DBG_ERRORS 0x4
void NV_API_CALL out_string(const char *str);
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__)
#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status)
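/*
 * Illustrative usage (not part of this header): both macros prefix the message
 * with the GPU's PCI identifier, and the second also appends the NV_STATUS
 * value.  The caller, device pointer, and status below are hypothetical and
 * the block is compiled out.
 */
#if 0
static void example_report_failure(nv_state_t *nv, NV_STATUS status)
{
    NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "failed to map registers\n");
    NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, status, "register window setup failed");
}
#endif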
/*
* Fields for os_lock_user_pages flags parameter
*/
#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001
#endif /* OS_INTERFACE_H */

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OS_CUSTOM_H_
#define _OS_CUSTOM_H_
/*!
* @file os_custom.h
* @brief OS module specific definitions for this OS
*/
#include <os-interface.h>
#include <osfuncs.h>
// File modes, added for NVIDIA capabilities.
#define OS_RUSR 00400 // read permission, owner
#define OS_WUSR 00200 // write permission, owner
#define OS_XUSR 00100 // execute/search permission, owner
#define OS_RWXU (OS_RUSR | OS_WUSR | OS_XUSR) // read, write, execute/search, owner
#define OS_RGRP 00040 // read permission, group
#define OS_WGRP 00020 // write permission, group
#define OS_XGRP 00010 // execute/search permission, group
#define OS_RWXG (OS_RGRP | OS_WGRP | OS_XGRP) // read, write, execute/search, group
#define OS_ROTH 00004 // read permission, other
#define OS_WOTH 00002 // write permission, other
#define OS_XOTH 00001 // execute/search permission, other
#define OS_RWXO (OS_ROTH | OS_WOTH | OS_XOTH) // read, write, execute/search, other
#define OS_RUGO (OS_RUSR | OS_RGRP | OS_ROTH)
#define OS_WUGO (OS_WUSR | OS_WGRP | OS_WOTH)
#define OS_XUGO (OS_XUSR | OS_XGRP | OS_XOTH)
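/*
 * Illustrative sketch (not part of this header): these bits follow the usual
 * octal permission layout and are meant for the capability entries created
 * through os_nv_cap_create_file_entry() (declared in os-interface.h).  The
 * parent handle and entry name below are hypothetical and the block is
 * compiled out.
 */
#if 0
static nv_cap_t *example_create_cap_file(nv_cap_t *parent)
{
    /* 0644: owner read/write, group/other read */
    return os_nv_cap_create_file_entry(parent, "example-cap", OS_RUGO | OS_WUSR);
}
#endif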
// Trigger for collecting GPU state for later extraction.
NV_STATUS RmLogGpuCrash(OBJGPU *);
// This is callback function in the miniport.
// The argument is a device extension, and must be cast as such to be useful.
typedef void (*MINIPORT_CALLBACK)(void*);
NV_STATUS osPackageRegistry(OBJGPU *pGpu, PACKED_REGISTRY_TABLE *, NvU32 *);
#endif // _OS_CUSTOM_H_

View File

@@ -0,0 +1,192 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OSAPI_H_
#define _OSAPI_H_
#include "core/system.h"
#include "gpu/gpu.h"
#include <os-interface.h> // NV_DBG_ERRORS
#include <rmapi/rmapi.h>
#include <core/thread_state.h>
#if defined(__use_altstack__)
#if defined(QA_BUILD)
//---------------------------------------------------------------------------
//
// 32 bit debug marker values.
//
//---------------------------------------------------------------------------
#define NV_MARKER1 (NvU32)(('M' << 24) | ('R' << 16) | ('V' << 8) | 'N')
#define NV_MARKER2 (NvU32)(('N' << 24) | ('V' << 16) | ('R' << 8) | 'M')
//
// The two macros below implement a simple alternate stack usage sanity
// check for QA_BUILD RM builds. NV_ALTSTACK_WRITE_MARKERS() fills
// altstacks with NV_MARKER1, which enables NV_ALTSTACK_CHECK_MARKERS()
// to determine the stack usage fairly reliably by looking for the
// first clobbered marker. If more than 7/8 of the alternate stack were
// used, NV_ALTSTACK_CHECK_MARKERS() prints an error and asserts.
//
#define NV_ALTSTACK_WRITE_MARKERS(sp) \
{ \
NvU32 i, *stack = (void *)(sp)->stack; \
for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \
stack[i] = NV_MARKER1; \
}
#define NV_ALTSTACK_CHECK_MARKERS(sp) \
{ \
NvU32 i, *stack = (void *)(sp)->stack; \
for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \
{ \
if (stack[i] != NV_MARKER1) \
break; \
} \
if ((i * sizeof(NvU32)) < ((sp)->size / 8)) \
{ \
nv_printf(NV_DBG_ERRORS, "NVRM: altstack: used %d of %d bytes!\n", \
((sp)->size - (i * sizeof(NvU32))), (sp)->size); \
NV_ASSERT_PRECOMP((i * sizeof(NvU32)) >= ((sp)->size / 8)); \
} \
}
#else
#define NV_ALTSTACK_WRITE_MARKERS(sp)
#define NV_ALTSTACK_CHECK_MARKERS(sp)
#endif
#if defined(NVCPU_X86_64)
#define NV_ENTER_RM_RUNTIME(sp,fp) \
{ \
NV_ALTSTACK_WRITE_MARKERS(sp); \
__asm__ __volatile__ ("movq %%rbp,%0" : "=r" (fp)); /* save %rbp */ \
__asm__ __volatile__ ("movq %0,%%rbp" :: "r" ((sp)->top)); \
}
#define NV_EXIT_RM_RUNTIME(sp,fp) \
{ \
register void *__rbp __asm__ ("rbp"); \
if (__rbp != (sp)->top) \
{ \
nv_printf(NV_DBG_ERRORS, "NVRM: detected corrupted runtime stack!\n"); \
NV_ASSERT_PRECOMP(__rbp == (sp)->top); \
} \
NV_ALTSTACK_CHECK_MARKERS(sp); \
__asm__ __volatile__ ("movq %0,%%rbp" :: "r" (fp)); /* restore %rbp */ \
}
#else
#error "gcc \"altstacks\" support is not implemented on this platform!"
#endif
#else
#define NV_ENTER_RM_RUNTIME(sp,fp) { (void)sp; (void)fp; }
#define NV_EXIT_RM_RUNTIME(sp,fp)
#endif
void RmShutdownRm (void);
NvBool RmInitPrivateState (nv_state_t *);
void RmFreePrivateState (nv_state_t *);
NvBool RmInitAdapter (nv_state_t *);
NvBool RmPartiallyInitAdapter (nv_state_t *);
void RmShutdownAdapter (nv_state_t *);
void RmDisableAdapter (nv_state_t *);
void RmPartiallyDisableAdapter(nv_state_t *);
NV_STATUS RmGetAdapterStatus (nv_state_t *, NvU32 *);
NV_STATUS RmExcludeAdapter (nv_state_t *);
NvBool RmGpuHasIOSpaceEnabled (nv_state_t *);
void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *);
NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
NV_STATUS RmAllocOsEvent (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS RmFreeOsEvent (NvHandle, NvU32);
void RmI2cAddGpuPorts(nv_state_t *);
NV_STATUS RmInitX86EmuState(OBJGPU *);
void RmFreeX86EmuState(OBJGPU *);
NV_STATUS RmSystemEvent(nv_state_t *, NvU32, NvU32);
const NvU8 *RmGetGpuUuidRaw(nv_state_t *);
NV_STATUS nv_vbios_call(OBJGPU *, NvU32 *, NvU32 *);
int amd_adv_spec_cache_feature(OBJOS *);
int amd_msr_c0011022_incompatible(OBJOS *);
NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *);
NV_STATUS rm_alloc_os_event (NvHandle, nv_file_private_t *, NvU32);
NV_STATUS rm_free_os_event (NvHandle, NvU32);
NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *);
void rm_client_free_os_events (NvHandle);
NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32);
NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *);
NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *);
// registry management
NV_STATUS RmInitRegistry (void);
NV_STATUS RmDestroyRegistry (nv_state_t *);
NV_STATUS RmWriteRegistryDword (nv_state_t *, const char *, NvU32 );
NV_STATUS RmReadRegistryDword (nv_state_t *, const char *, NvU32 *);
NV_STATUS RmWriteRegistryString (nv_state_t *, const char *, const char *, NvU32);
NV_STATUS RmReadRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32 *);
NV_STATUS RmWriteRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32);
NV_STATUS RmReadRegistryString (nv_state_t *, const char *, NvU8 *, NvU32 *);
NV_STATUS RmPackageRegistry (nv_state_t *, PACKED_REGISTRY_TABLE *, NvU32 *);
NvBool RmIsNvifFunctionSupported(NvU32, NvU32);
void RmInitAcpiMethods (OBJOS *, OBJSYS *, OBJGPU *);
void RmUnInitAcpiMethods (OBJSYS *);
void RmInflateOsToRmPageArray (RmPhysAddr *, NvU64);
void RmDeflateRmToOsPageArray (RmPhysAddr *, NvU64);
void RmInitS0ixPowerManagement (nv_state_t *);
void RmInitDeferredDynamicPowerManagement (nv_state_t *);
void RmDestroyDeferredDynamicPowerManagement(nv_state_t *);
NV_STATUS os_ref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);
void os_unref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t);
void RmHandleDisplayChange (nvidia_stack_t *, nv_state_t *);
void RmUpdateGc6ConsoleRefCount (nv_state_t *, NvBool);
NvBool rm_get_uefi_console_status (nv_state_t *);
NvU64 rm_get_uefi_console_size (nv_state_t *, NvU64 *);
RM_API *RmUnixRmApiPrologue (nv_state_t *, THREAD_STATE_NODE *, NvU32 module);
void RmUnixRmApiEpilogue (nv_state_t *, THREAD_STATE_NODE *);
static inline NvBool rm_is_system_notebook(void)
{
return (nv_is_chassis_notebook() || nv_acpi_is_battery_present());
}
#endif // _OSAPI_H_

View File

@@ -0,0 +1,55 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef OSFUNCS_H
#define OSFUNCS_H
/**************** Resource Manager Defines and Structures ******************\
* *
* Declarations for the Operating System Specific Functions. *
* *
\***************************************************************************/
#include <os/os.h>
OSQueueWorkItem osQueueWorkItem;
OSQueueWorkItemWithFlags osQueueWorkItemWithFlags;
OSQueueSystemWorkItem osQueueSystemWorkItem;
OSDbgBreakpointEnabled osDbgBreakpointEnabled;
void* osGetStereoDongleInterface(void);
OSCallACPI_DSM osCallACPI_DSM;
OSCallACPI_DDC osCallACPI_DDC;
OSCallACPI_NVHG_ROM osCallACPI_NVHG_ROM;
OSCallACPI_DOD osCallACPI_DOD;
OSCallACPI_MXDS osCallACPI_MXDS;
OSCallACPI_MXDM osCallACPI_MXDM;
#if defined(NVCPU_X86_64)
OSnv_rdcr4 nv_rdcr4;
NvU64 nv_rdcr3(OBJOS *);
OSnv_cpuid nv_cpuid;
#endif
#endif // OSFUNCS_H

View File

@@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMOBJEXPORTIMPORT_H_
#define _RMOBJEXPORTIMPORT_H_
#include "nvstatus.h"
typedef NvHandle RmObjExportHandle;
NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance);
void RmFreeObjExportHandle(RmObjExportHandle hObject);
NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
NvHandle *phDstObject, RmObjExportHandle hSrcObject,
NvU8 *pObjectType);
NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance);
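/*
 * Illustrative flow (not part of this header): an object owned by one RM
 * client can be exported to an opaque handle, imported under another
 * client/parent, and the export handle freed when it is no longer needed.
 * All handles below are hypothetical and the block is compiled out.
 */
#if 0
static NV_STATUS example_share_object(NvHandle hSrcClient, NvHandle hSrcObject,
                                      NvHandle hDstClient, NvHandle hDstParent)
{
    RmObjExportHandle hExport = 0;
    NvU32             deviceInstance;
    NvHandle          hDstObject = 0;
    NvU8              objectType;
    NV_STATUS         status;

    status = RmExportObject(hSrcClient, hSrcObject, &hExport, &deviceInstance);
    if (status != NV_OK)
        return status;

    status = RmImportObject(hDstClient, hDstParent, &hDstObject, hExport,
                            &objectType);

    RmFreeObjExportHandle(hExport);
    return status;
}
#endif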
#endif // _RMOBJEXPORTIMPORT_H_

View File

@@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <os/os.h>
/* inline assembler routines for UNIX platforms */
#if defined(NVCPU_X86_64)
NvS32 nv_cpuid(
OBJOS *pOS,
NvS32 op,
NvS32 subop,
NvU32 *eax,
NvU32 *ebx,
NvU32 *ecx,
NvU32 *edx
)
{
asm volatile (" cpuid \n"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "a" (op),
"c" (subop)
: "cc");
return 1;
}
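/*
 * Illustrative usage (not part of this change): CPUID leaf 0 returns the
 * processor vendor string in EBX, EDX and ECX.  The caller below is
 * hypothetical and the block is compiled out; the OBJOS argument is unused by
 * nv_cpuid() itself.
 */
#if 0
static void example_read_cpu_vendor(OBJOS *pOS, char vendor[13])
{
    NvU32 eax, ebx, ecx, edx;

    nv_cpuid(pOS, 0, 0, &eax, &ebx, &ecx, &edx);

    os_mem_copy(&vendor[0], &ebx, 4);
    os_mem_copy(&vendor[4], &edx, 4);
    os_mem_copy(&vendor[8], &ecx, 4);
    vendor[12] = '\0';
}
#endif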
#endif

View File

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <os/os.h>
/* inline assembler routines for UNIX platforms */
#if defined(NVCPU_X86_64)
NvU32 nv_rdcr4(OBJOS *pOS)
{
NvU64 val;
asm volatile ("movq %%cr4,%0" : "=r" (val));
return (NvU32)val;
}
NvU64 nv_rdcr3(OBJOS *pOS)
{
NvU64 val;
asm volatile ("movq %%cr3,%0" : "=r" (val));
return val;
}
#endif

View File

@@ -0,0 +1,820 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//***************************** Module Header **********************************
//
// This code is linked into the resource manager proper. It receives the
// ioctl from the resource manager's customer, unbundles the args and
// calls the correct resman routines.
//
//******************************************************************************
#include <core/prelude.h>
#include <core/locks.h>
#include <nv.h>
#include <nv_escape.h>
#include <osapi.h>
#include <rmapi/exports.h>
#include <nv-unix-nvos-params-wrappers.h>
#include <nvos.h>
#include <class/cl0000.h> // NV01_ROOT
#include <class/cl0001.h> // NV01_ROOT_NON_PRIV
#include <class/cl0005.h> // NV01_EVENT
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
#define NV_CTL_DEVICE_ONLY(nv) \
{ \
if (((nv)->flags & NV_FLAG_CONTROL) == 0) \
{ \
rmStatus = NV_ERR_INVALID_ARGUMENT; \
goto done; \
} \
}
#define NV_ACTUAL_DEVICE_ONLY(nv) \
{ \
if (((nv)->flags & NV_FLAG_CONTROL) != 0) \
{ \
rmStatus = NV_ERR_INVALID_ARGUMENT; \
goto done; \
} \
}
// only return errors through pApi->status
static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
{
NV_STATUS rmStatus;
NvBool writable;
NvU32 flags = 0;
NvU64 allocSize, pageCount, *pPteArray = NULL;
void *pDescriptor, *pPageArray = NULL;
pDescriptor = NvP64_VALUE(pApi->data.AllocOsDesc.descriptor);
if (((NvUPtr)pDescriptor & ~os_page_mask) != 0)
{
rmStatus = NV_ERR_NOT_SUPPORTED;
goto done;
}
// Check to prevent an NvU64 overflow
if ((pApi->data.AllocOsDesc.limit + 1) == 0)
{
rmStatus = NV_ERR_INVALID_LIMIT;
goto done;
}
allocSize = (pApi->data.AllocOsDesc.limit + 1);
pageCount = (1 + ((allocSize - 1) / os_page_size));
writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2);
flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags);
rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags);
if (rmStatus == NV_OK)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
}
else if (rmStatus == NV_ERR_INVALID_ADDRESS)
{
rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount,
&pPteArray, &pPageArray);
if (rmStatus == NV_OK)
{
if (pPageArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
}
else if (pPteArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY;
}
else
{
NV_ASSERT_FAILED("unknown memory import type");
rmStatus = NV_ERR_NOT_SUPPORTED;
}
}
}
if (rmStatus != NV_OK)
goto done;
Nv04VidHeapControlWithSecInfo(pApi, secInfo);
if (pApi->status != NV_OK)
{
switch (pApi->data.AllocOsDesc.descriptorType)
{
default:
break;
case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY:
os_unlock_user_pages(pageCount, pPageArray);
break;
}
}
done:
if (rmStatus != NV_OK)
pApi->status = rmStatus;
}
// only return errors through pApi->status
static void RmAllocOsDescriptor(NVOS02_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
{
NV_STATUS rmStatus = NV_OK;
NvU32 flags, attr, attr2;
NVOS32_PARAMETERS *pVidHeapParams;
if (!FLD_TEST_DRF(OS02, _FLAGS, _LOCATION, _PCI, pApi->flags) ||
!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, pApi->flags))
{
rmStatus = NV_ERR_INVALID_FLAGS;
goto done;
}
attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI);
if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, pApi->flags) ||
FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, pApi->flags))
{
attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr);
}
else if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, pApi->flags))
attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr);
else {
rmStatus = NV_ERR_INVALID_FLAGS;
goto done;
}
if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pApi->flags))
attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr);
else
attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr);
if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, pApi->flags))
attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES);
else
attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO);
pVidHeapParams = portMemAllocNonPaged(sizeof(NVOS32_PARAMETERS));
if (pVidHeapParams == NULL)
{
rmStatus = NV_ERR_NO_MEMORY;
goto done;
}
portMemSet(pVidHeapParams, 0, sizeof(NVOS32_PARAMETERS));
pVidHeapParams->hRoot = pApi->hRoot;
pVidHeapParams->hObjectParent = pApi->hObjectParent;
pVidHeapParams->function = NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR;
flags = (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED |
NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED);
if (DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags))
attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2);
// currently CPU-RO memory implies GPU-RO as well
if (DRF_VAL(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, pApi->flags) ||
DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags))
attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2);
pVidHeapParams->data.AllocOsDesc.hMemory = pApi->hObjectNew;
pVidHeapParams->data.AllocOsDesc.flags = flags;
pVidHeapParams->data.AllocOsDesc.attr = attr;
pVidHeapParams->data.AllocOsDesc.attr2 = attr2;
pVidHeapParams->data.AllocOsDesc.descriptor = pApi->pMemory;
pVidHeapParams->data.AllocOsDesc.limit = pApi->limit;
pVidHeapParams->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS;
RmCreateOsDescriptor(pVidHeapParams, secInfo);
pApi->status = pVidHeapParams->status;
portMemFree(pVidHeapParams);
done:
if (rmStatus != NV_OK)
pApi->status = rmStatus;
}
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hRoot) == NV_OFFSETOF(NVOS64_PARAMETERS, hRoot));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectParent) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectParent));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectNew) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectNew));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hClass) == NV_OFFSETOF(NVOS64_PARAMETERS, hClass));
ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, pAllocParms) == NV_OFFSETOF(NVOS64_PARAMETERS, pAllocParms));
NV_STATUS RmIoctl(
nv_state_t *nv,
nv_file_private_t *nvfp,
NvU32 cmd,
void *data,
NvU32 dataSize
)
{
NV_STATUS rmStatus = NV_ERR_GENERIC;
API_SECURITY_INFO secInfo = { };
secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER;
secInfo.paramLocation = PARAM_LOCATION_USER;
secInfo.pProcessToken = NULL;
secInfo.clientOSInfo = nvfp->ctl_nvfp;
if (secInfo.clientOSInfo == NULL)
secInfo.clientOSInfo = nvfp;
switch (cmd)
{
case NV_ESC_RM_ALLOC_MEMORY:
{
nv_ioctl_nvos02_parameters_with_fd *pApi;
NVOS02_PARAMETERS *pParms;
pApi = data;
pParms = &pApi->params;
NV_ACTUAL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_nvos02_parameters_with_fd))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (pParms->hClass == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR)
RmAllocOsDescriptor(pParms, secInfo);
else
{
NvU32 flags = pParms->flags;
Nv01AllocMemoryWithSecInfo(pParms, secInfo);
//
// If the system memory is going to be mapped immediately,
// create the mmap context for it now.
//
if ((pParms->hClass == NV01_MEMORY_SYSTEM) &&
(!FLD_TEST_DRF(OS02, _FLAGS, _ALLOC, _NONE, flags)) &&
(!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) &&
(pParms->status == NV_OK))
{
if (rm_create_mmap_context(nv, pParms->hRoot,
pParms->hObjectParent, pParms->hObjectNew,
pParms->pMemory, pParms->limit + 1, 0,
pApi->fd) != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
"could not create mmap context for %p\n",
NvP64_VALUE(pParms->pMemory));
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
}
}
break;
}
case NV_ESC_RM_ALLOC_OBJECT:
{
NVOS05_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS05_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv01AllocObjectWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_ALLOC:
{
NVOS21_PARAMETERS *pApi = data;
NVOS64_PARAMETERS *pApiAccess = data;
NvBool bAccessApi = (dataSize == sizeof(NVOS64_PARAMETERS));
if ((dataSize != sizeof(NVOS21_PARAMETERS)) &&
(dataSize != sizeof(NVOS64_PARAMETERS)))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
switch (pApi->hClass)
{
case NV01_ROOT:
case NV01_ROOT_CLIENT:
case NV01_ROOT_NON_PRIV:
{
NV_CTL_DEVICE_ONLY(nv);
// Force userspace client allocations to be the _CLIENT class.
pApi->hClass = NV01_ROOT_CLIENT;
break;
}
case NV01_EVENT:
case NV01_EVENT_OS_EVENT:
case NV01_EVENT_KERNEL_CALLBACK:
case NV01_EVENT_KERNEL_CALLBACK_EX:
{
break;
}
default:
{
NV_CTL_DEVICE_ONLY(nv);
break;
}
}
if (!bAccessApi)
{
Nv04AllocWithSecInfo(pApi, secInfo);
}
else
{
Nv04AllocWithAccessSecInfo(pApiAccess, secInfo);
}
break;
}
case NV_ESC_RM_FREE:
{
NVOS00_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS00_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv01FreeWithSecInfo(pApi, secInfo);
if (pApi->status == NV_OK &&
pApi->hObjectOld == pApi->hRoot)
{
rm_client_free_os_events(pApi->hRoot);
}
break;
}
case NV_ESC_RM_VID_HEAP_CONTROL:
{
NVOS32_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS32_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (pApi->function == NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR)
RmCreateOsDescriptor(pApi, secInfo);
else
Nv04VidHeapControlWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_I2C_ACCESS:
{
NVOS_I2C_ACCESS_PARAMS *pApi = data;
NV_ACTUAL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS_I2C_ACCESS_PARAMS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04I2CAccessWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_IDLE_CHANNELS:
{
NVOS30_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS30_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04IdleChannelsWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_MAP_MEMORY:
{
nv_ioctl_nvos33_parameters_with_fd *pApi;
NVOS33_PARAMETERS *pParms;
pApi = data;
pParms = &pApi->params;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_nvos33_parameters_with_fd))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04MapMemoryWithSecInfo(pParms, secInfo);
if (pParms->status == NV_OK)
{
pParms->status = rm_create_mmap_context(nv, pParms->hClient,
pParms->hDevice, pParms->hMemory,
pParms->pLinearAddress, pParms->length,
pParms->offset, pApi->fd);
if (pParms->status != NV_OK)
{
NVOS34_PARAMETERS params;
portMemSet(&params, 0, sizeof(NVOS34_PARAMETERS));
params.hClient = pParms->hClient;
params.hDevice = pParms->hDevice;
params.hMemory = pParms->hMemory;
params.pLinearAddress = pParms->pLinearAddress;
params.flags = pParms->flags;
Nv04UnmapMemoryWithSecInfo(&params, secInfo);
}
}
break;
}
case NV_ESC_RM_UNMAP_MEMORY:
{
NVOS34_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS34_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04UnmapMemoryWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_ACCESS_REGISTRY:
{
NVOS38_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS38_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->status = rm_access_registry(pApi->hClient,
pApi->hObject,
pApi->AccessType,
pApi->pDevNode,
pApi->DevNodeLength,
pApi->pParmStr,
pApi->ParmStrLength,
pApi->pBinaryData,
&pApi->BinaryDataLength,
&pApi->Data,
&pApi->Entry);
break;
}
case NV_ESC_RM_ALLOC_CONTEXT_DMA2:
{
NVOS39_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS39_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04AllocContextDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_BIND_CONTEXT_DMA:
{
NVOS49_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS49_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04BindContextDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_MAP_MEMORY_DMA:
{
NVOS46_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS46_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04MapMemoryDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_UNMAP_MEMORY_DMA:
{
NVOS47_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS47_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04UnmapMemoryDmaWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_DUP_OBJECT:
{
NVOS55_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS55_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04DupObjectWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_SHARE:
{
NVOS57_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS57_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04ShareWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_ALLOC_OS_EVENT:
{
nv_ioctl_alloc_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_alloc_os_event(pApi->hClient,
nvfp,
pApi->fd);
break;
}
case NV_ESC_FREE_OS_EVENT:
{
nv_ioctl_free_os_event_t *pApi = data;
if (dataSize != sizeof(nv_ioctl_free_os_event_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd);
break;
}
case NV_ESC_RM_GET_EVENT_DATA:
{
NVOS41_PARAMETERS *pApi = data;
if (dataSize != sizeof(NVOS41_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pApi->status = rm_get_event_data(nvfp,
pApi->pEvent,
&pApi->MoreEvents);
break;
}
case NV_ESC_STATUS_CODE:
{
nv_state_t *pNv;
nv_ioctl_status_code_t *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(nv_ioctl_status_code_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pNv = nv_get_adapter_state(pApi->domain, pApi->bus, pApi->slot);
if (pNv == NULL)
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
rmStatus = rm_get_adapter_status(pNv, &pApi->status);
if (rmStatus != NV_OK)
goto done;
break;
}
case NV_ESC_RM_CONTROL:
{
NVOS54_PARAMETERS *pApi = data;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS54_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
Nv04ControlWithSecInfo(pApi, secInfo);
break;
}
case NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO:
{
NVOS56_PARAMETERS *pApi = data;
void *pOldCpuAddress;
void *pNewCpuAddress;
NV_CTL_DEVICE_ONLY(nv);
if (dataSize != sizeof(NVOS56_PARAMETERS))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pOldCpuAddress = NvP64_VALUE(pApi->pOldCpuAddress);
pNewCpuAddress = NvP64_VALUE(pApi->pNewCpuAddress);
pApi->status = rm_update_device_mapping_info(pApi->hClient,
pApi->hDevice,
pApi->hMemory,
pOldCpuAddress,
pNewCpuAddress);
break;
}
case NV_ESC_REGISTER_FD:
{
nv_ioctl_register_fd_t *params = data;
void *priv = NULL;
nv_file_private_t *ctl_nvfp;
if (dataSize != sizeof(nv_ioctl_register_fd_t))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
if (rmStatus != NV_OK)
goto done;
// If there is already a ctl fd registered on this nvfp, fail.
if (nvfp->ctl_nvfp != NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_STATE;
goto done;
}
//
// Note that this call is valid for both "actual" devices and ctrl
// devices. In particular, NV_ESC_ALLOC_OS_EVENT can be used with
// both types of devices.
// But, the ctl_fd passed in should always correspond to a control FD.
//
ctl_nvfp = nv_get_file_private(params->ctl_fd,
NV_TRUE, /* require ctl fd */
&priv);
if (ctl_nvfp == NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// Disallow self-referential links, and disallow links to FDs that
// themselves have a link.
if ((ctl_nvfp == nvfp) || (ctl_nvfp->ctl_nvfp != NULL))
{
nv_put_file_private(priv);
// UNLOCK: release API lock
rmApiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
//
// nvfp->ctl_nvfp is read outside the lock, so set it atomically.
// Note that once set, this can never be removed until the fd
// associated with nvfp is closed. We hold on to 'priv' until the
// fd is closed, too, to ensure that the fd associated with
// ctl_nvfp remains valid.
//
portAtomicSetSize(&nvfp->ctl_nvfp, ctl_nvfp);
nvfp->ctl_nvfp_priv = priv;
// UNLOCK: release API lock
rmApiLockRelease();
// NOTE: nv_put_file_private(priv) is not called here. It MUST be
// called during cleanup of this nvfp.
rmStatus = NV_OK;
break;
}
default:
{
NV_PRINTF(LEVEL_ERROR, "unknown NVRM ioctl command: 0x%x\n", cmd);
goto done;
}
}
rmStatus = NV_OK;
done:
return rmStatus;
}

View File

@@ -0,0 +1,256 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <rmconfig.h>
#include <gpu/subdevice/subdevice.h>
#include <ctrl/ctrl0080/ctrl0080unix.h>
#include <ctrl/ctrl2080/ctrl2080unix.h>
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
return NV_OK;
}
void NV_API_CALL rm_init_dynamic_power_management(
nvidia_stack_t *sp,
nv_state_t *nv,
NvBool bPr3AcpiMethodPresent
)
{
}
void NV_API_CALL rm_cleanup_dynamic_power_management(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
}
NV_STATUS NV_API_CALL rm_ref_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
return NV_OK;
}
void NV_API_CALL rm_unref_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
}
NV_STATUS NV_API_CALL rm_transition_dynamic_power(
nvidia_stack_t *sp,
nv_state_t *nv,
NvBool bEnter
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_power_management(
nvidia_stack_t *sp,
nv_state_t *pNv,
nv_pm_action_t pmAction
)
{
return NV_OK;
}
const char* NV_API_CALL rm_get_vidmem_power_status(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return "?";
}
const char* NV_API_CALL rm_get_dynamic_power_management_status(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return "?";
}
const char* NV_API_CALL rm_get_gpu_gcx_support(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvBool bGcxTypeGC6
)
{
return "?";
}
NV_STATUS
subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS
subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS
subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams
)
{
return NV_OK;
}
void
RmUpdateGc6ConsoleRefCount
(
nv_state_t *nv,
NvBool bIncrease
)
{
}
void
RmInitS0ixPowerManagement
(
nv_state_t *nv
)
{
}
void
RmInitDeferredDynamicPowerManagement
(
nv_state_t *nv
)
{
}
void
RmDestroyDeferredDynamicPowerManagement
(
nv_state_t *nv
)
{
}
void RmHandleDisplayChange
(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
}
NV_STATUS
os_ref_dynamic_power
(
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
return NV_OK;
}
void
os_unref_dynamic_power
(
nv_state_t *nv,
nv_dynamic_power_mode_t mode
)
{
}
NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *limitRated,
NvU32 *limitCurr
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS
deviceCtrlCmdOsUnixVTSwitch_IMPL
(
Device *pDevice,
NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_save_low_res_mode(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
return NV_OK;
}
NV_STATUS RmInitX86EmuState(OBJGPU *pGpu)
{
return NV_OK;
}
void RmFreeX86EmuState(OBJGPU *pGpu)
{
}

View File

@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvtypes.h>
#include <os-interface.h>
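/*
 * Minimal libc-style memory helpers backed by the OS interface layer. Note
 * that the size argument is narrowed from NvUPtr to NvU32 to match the
 * os_mem_set()/os_mem_copy() signatures.
 */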
void* memset(void* s, int c, NvUPtr n)
{
return os_mem_set(s, (NvU8)c, (NvU32)n);
}
void* memcpy(void* dest, const void* src, NvUPtr n)
{
return os_mem_copy(dest, src, (NvU32)n);
}

View File

@@ -0,0 +1,150 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvstatus.h"
#include "os/os.h"
#include "nv.h"
#include "nv-hypervisor.h"
HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
{
return OS_HYPERVISOR_UNKNOWN;
}
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU32 *numVgpuTypes,
NvU32 **vgpuTypeIds,
NvBool isVirtfn
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU8 cmd,
NvU32 domain,
NvU8 bus,
NvU8 slot,
NvU8 function,
NvBool isMdevAttached,
void *vf_pci_info
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU32 vgpuTypeId,
char *buffer,
int type_info,
NvU8 devfn
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_create_request(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU32 vgpuTypeId,
NvU16 *vgpuId,
NvU32 gpuPciBdf
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_update_request(
nvidia_stack_t *sp ,
const NvU8 *pMdevUuid,
VGPU_DEVICE_STATE deviceState,
NvU64 *offsets,
NvU64 *sizes,
const char *configParams
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
nvidia_stack_t *sp ,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 **offsets,
NvU64 **sizes,
NvU32 *numAreas
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_gpu_bind_event(
nvidia_stack_t *sp
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_start(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
void *waitQueue,
NvS32 *returnStatus,
NvU8 *vmName,
NvU32 qemuPid
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_delete(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
NvU16 vgpuId
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 *size,
NvU32 regionIndex,
void *pVgpuVfioRef
)
{
return NV_ERR_NOT_SUPPORTED;
}
void initVGXSpecificRegistry(OBJGPU *pGpu)
{}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,676 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include "kernel/gpu/nvlink/kernel_nvlink.h"
#include "osapi.h"
#if defined(INCLUDE_NVLINK_LIB)
#include "nvlink.h"
//
// The functions in this file are a workaround for a significant design flaw
// where RM callbacks for the nvlink library are built with the altstack
// enabled, but the nvlink library is not built with altstack support. Whenever
// the library calls a callback, the stack switching needs to be accounted for
// or else we will observe corruption of data structures in the nvlink library
// as data is pushed onto what the callback thinks is the stack. See bug
// 1710300.
//
// This bug has also exposed other problems, such as the complete lack of
// locking awareness by these callbacks (e.g., assumption that the RMAPI and
// GPU locks are always held on entry, which is not a legitimate assumption).
// For now, we ignore that just to unblock testing.
//
extern NvlStatus knvlinkCoreAddLinkCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreRemoveLinkCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreLockLinkCallback(struct nvlink_link *);
extern void knvlinkCoreUnlockLinkCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreQueueLinkChangeCallback(struct nvlink_link_change *);
extern NvlStatus knvlinkCoreSetDlLinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreGetDlLinkModeCallback(struct nvlink_link *, NvU64 *);
extern NvlStatus knvlinkCoreSetTlLinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreGetTlLinkModeCallback(struct nvlink_link *, NvU64 *);
extern NvlStatus knvlinkCoreGetTxSublinkModeCallback(struct nvlink_link *, NvU64 *, NvU32 *);
extern NvlStatus knvlinkCoreSetTxSublinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreGetRxSublinkModeCallback(struct nvlink_link *, NvU64 *, NvU32 *);
extern NvlStatus knvlinkCoreSetRxSublinkModeCallback(struct nvlink_link *, NvU64, NvU32);
extern NvlStatus knvlinkCoreSetRxSublinkDetectCallback(struct nvlink_link *, NvU32);
extern NvlStatus knvlinkCoreGetRxSublinkDetectCallback(struct nvlink_link *);
extern NvlStatus knvlinkCoreReadDiscoveryTokenCallback(struct nvlink_link *, NvU64 *);
extern NvlStatus knvlinkCoreWriteDiscoveryTokenCallback(struct nvlink_link *, NvU64);
extern void knvlinkCoreTrainingCompleteCallback(struct nvlink_link *);
extern void knvlinkCoreGetUphyLoadCallback(struct nvlink_link *, NvBool*);
/*!
* @brief Helper to allocate an alternate stack from within core RM.
*
* This needs to be an NV_API_CALL (built to use the original stack instead
* of the altstack) since it is called before we switch to using the altstack.
*/
static NV_STATUS NV_API_CALL osNvlinkAllocAltStack(nvidia_stack_t **pSp)
{
NV_STATUS status = NV_OK;
nvidia_stack_t *sp = NULL;
#if defined(NVCPU_X86_64) && defined(__use_altstack__)
status = os_alloc_mem((void **)&sp, sizeof(nvidia_stack_t));
if (status == NV_OK)
{
sp->size = sizeof(sp->stack);
sp->top = sp->stack + sp->size;
}
#endif
*pSp = sp;
return status;
}
/*!
* @brief Helper to free an alternate stack from within core RM.
*
* This needs to be an NV_API_CALL (built to use the original stack instead
* of the altstack) since it is called after we've switched back to using the
* original stack.
*/
static void NV_API_CALL osNvlinkFreeAltStack(nvidia_stack_t *sp)
{
#if defined(NVCPU_X86_64) && defined(__use_altstack__)
os_free_mem(sp);
#endif
}
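//
// Each registered link carries its own altstack: the add_link callback
// allocates it and caches it in the link's pOsInfo, every later callback for
// that link reuses it, and the remove_link callback frees it again.
//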
static NvlStatus NV_API_CALL rm_nvlink_ops_add_link
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp;
if (NV_OK != osNvlinkAllocAltStack(&sp))
{
return NVL_ERR_GENERIC;
}
NV_ENTER_RM_RUNTIME(sp, fp);
status = knvlinkCoreAddLinkCallback(link);
NV_EXIT_RM_RUNTIME(sp, fp);
if (status == NVL_SUCCESS)
{
pLink->pOsInfo = sp;
}
else
{
osNvlinkFreeAltStack(sp);
}
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_remove_link
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
pLink->pOsInfo = NULL;
NV_ENTER_RM_RUNTIME(sp, fp);
status = knvlinkCoreRemoveLinkCallback(link);
NV_EXIT_RM_RUNTIME(sp, fp);
osNvlinkFreeAltStack(sp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_lock_link
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreLockLinkCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static void NV_API_CALL rm_nvlink_ops_unlock_link
(
struct nvlink_link *link
)
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
knvlinkCoreUnlockLinkCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_queue_link_change
(
struct nvlink_link_change *link_change
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link_change->master->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreQueueLinkChangeCallback(link_change);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_dl_link_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetDlLinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_dl_link_mode
(
struct nvlink_link *link,
NvU64 *mode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetDlLinkModeCallback(link, mode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_tl_link_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetTlLinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_tl_link_mode
(
struct nvlink_link *link,
NvU64 *mode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetTlLinkModeCallback(link, mode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_tx_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetTxSublinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_tx_mode
(
struct nvlink_link *link,
NvU64 *mode,
NvU32 *subMode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetTxSublinkModeCallback(link, mode, subMode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_mode
(
struct nvlink_link *link,
NvU64 mode,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetRxSublinkModeCallback(link, mode, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_mode
(
struct nvlink_link *link,
NvU64 *mode,
NvU32 *subMode
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetRxSublinkModeCallback(link, mode, subMode);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_detect
(
struct nvlink_link *link,
NvU32 flags
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreSetRxSublinkDetectCallback(link, flags);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_detect
(
struct nvlink_link *link
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreGetRxSublinkDetectCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static void NV_API_CALL rm_nvlink_get_uphy_load
(
struct nvlink_link *link,
NvBool *bUnlocked
)
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
knvlinkCoreGetUphyLoadCallback(link, bUnlocked);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
}
static NvlStatus NV_API_CALL rm_nvlink_ops_read_link_discovery_token
(
struct nvlink_link *link,
NvU64 *token
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreReadDiscoveryTokenCallback(link, token);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static NvlStatus NV_API_CALL rm_nvlink_ops_write_link_discovery_token
(
struct nvlink_link *link,
NvU64 token
)
{
void *fp;
NvlStatus status;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
status = knvlinkCoreWriteDiscoveryTokenCallback(link, token);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
return status;
}
static void NV_API_CALL rm_nvlink_ops_training_complete
(
struct nvlink_link *link
)
{
void *fp;
THREAD_STATE_NODE threadState = {0};
KNVLINK_RM_LINK *pLink = link->link_info;
nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;
NV_ENTER_RM_RUNTIME(sp, fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
knvlinkCoreTrainingCompleteCallback(link);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp, fp);
}
#endif /* defined(INCLUDE_NVLINK_LIB) */
const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void)
{
#if defined(INCLUDE_NVLINK_LIB)
static const struct nvlink_link_handlers rm_nvlink_link_ops =
{
.add = rm_nvlink_ops_add_link,
.remove = rm_nvlink_ops_remove_link,
.lock = rm_nvlink_ops_lock_link,
.unlock = rm_nvlink_ops_unlock_link,
.queue_link_change = rm_nvlink_ops_queue_link_change,
.set_dl_link_mode = rm_nvlink_ops_set_dl_link_mode,
.get_dl_link_mode = rm_nvlink_ops_get_dl_link_mode,
.set_tl_link_mode = rm_nvlink_ops_set_tl_link_mode,
.get_tl_link_mode = rm_nvlink_ops_get_tl_link_mode,
.set_tx_mode = rm_nvlink_ops_set_link_tx_mode,
.get_tx_mode = rm_nvlink_ops_get_link_tx_mode,
.set_rx_mode = rm_nvlink_ops_set_link_rx_mode,
.get_rx_mode = rm_nvlink_ops_get_link_rx_mode,
.set_rx_detect = rm_nvlink_ops_set_link_rx_detect,
.get_rx_detect = rm_nvlink_ops_get_link_rx_detect,
.write_discovery_token = rm_nvlink_ops_write_link_discovery_token,
.read_discovery_token = rm_nvlink_ops_read_link_discovery_token,
.training_complete = rm_nvlink_ops_training_complete,
.get_uphy_load = rm_nvlink_get_uphy_load,
};
return &rm_nvlink_link_ops;
#else
return NULL;
#endif
}
/*
 * @brief Verification-only function to get the chiplib overrides for link connection
* state for all NVLINKs.
*
* If chiplib overrides exist, each link can either be enabled (1) or disabled (0)
*
* @param[in] pGpu GPU object pointer
* @param[in] maxLinks Size of pLinkConnection array
 * @param[out] pLinkConnection Array of link connection values to be populated by MODS
*
* @return NV_OK or NV_ERR_NOT_SUPPORTED (no overrides available)
*/
NV_STATUS
osGetForcedNVLinkConnection
(
OBJGPU *pGpu,
NvU32 maxLinks,
NvU32 *pLinkConnection
)
{
int i, ret;
NV_STATUS status;
char path[64];
OBJSYS *pSys;
OBJOS *pOS;
NV_ASSERT_OR_RETURN((pLinkConnection != NULL), NV_ERR_INVALID_POINTER);
NV_ASSERT_OR_RETURN((maxLinks > 0), NV_ERR_NOT_SUPPORTED);
NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT);
pSys = SYS_GET_INSTANCE();
pOS = SYS_GET_OS(pSys);
if (pOS == NULL || pOS->osSimEscapeRead == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s: escape reads not supported on platform\n",
__FUNCTION__);
return NV_ERR_NOT_SUPPORTED;
}
for (i = 0; i < maxLinks; i++)
{
ret = os_snprintf(path, sizeof(path), "CPU_MODEL|CM_ATS_ADDRESS|NVLink%u", i);
NV_ASSERT((ret > 0) && (ret < (sizeof(path) - 1)));
status = pOS->osSimEscapeRead(pGpu, path, 0, 4, &pLinkConnection[i]);
if (status == NV_OK)
{
NV_PRINTF(LEVEL_INFO, "%s: %s=0x%X\n", __FUNCTION__,
path, pLinkConnection[i]);
}
else
{
NV_PRINTF(LEVEL_INFO, "%s: osSimEscapeRead for '%s' failed (%u)\n",
__FUNCTION__, path, status);
return NV_ERR_NOT_SUPPORTED;
}
}
return NV_OK;
}
/*
* @brief Get Platform suggested NVLink linerate
*
* NVLink will use this function to get the platform suggested linerate
* if available in FRU or device tree.
*
* @param[in] pGpu GPU object pointer
 * @param[out] lineRate Suggested line rate
*
* @return NV_OK or NV_ERR_NOT_SUPPORTED (platform linerate data not available)
*/
NV_STATUS
osGetPlatformNvlinkLinerate
(
OBJGPU *pGpu,
NvU32 *lineRate
)
{
#if defined(NVCPU_PPC64LE)
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
if (!pKernelNvlink)
return NV_ERR_INVALID_ARGUMENT;
return nv_get_nvlink_line_rate(nv, lineRate);
#else
//TODO : FRU based method to be filled out by Bug 200285656
//*lineRate = 0;
//return NV_OK;
return NV_ERR_NOT_SUPPORTED;
#endif
}
void
osSetNVLinkSysmemLinkState
(
OBJGPU *pGpu,
NvBool enabled
)
{
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
NV_ASSERT(enabled);
if (enabled)
nv_dma_enable_nvlink(nv->dma_dev);
}

View File

@@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/***************************** HW State Routines ***************************\
* *
* Fills in os specific function pointers for the Unix OS object. *
* *
\***************************************************************************/
#include <osfuncs.h>
#include <os/os.h>
static void initOSSpecificFunctionPointers(OBJOS *);
static void initMiscOSFunctionPointers(OBJOS *);
static void initUnixOSFunctionPointers(OBJOS *);
static void initOSSpecificProperties(OBJOS *);
void
osInitObjOS(OBJOS *pOS)
{
initOSSpecificFunctionPointers(pOS);
initOSSpecificProperties(pOS);
}
static void
initOSSpecificFunctionPointers(OBJOS *pOS)
{
initMiscOSFunctionPointers(pOS);
initUnixOSFunctionPointers(pOS);
}
static void
initMiscOSFunctionPointers(OBJOS *pOS)
{
pOS->osQueueWorkItem = osQueueWorkItem;
pOS->osQueueWorkItemWithFlags = osQueueWorkItemWithFlags;
pOS->osQueueSystemWorkItem = osQueueSystemWorkItem;
}
static void
initUnixOSFunctionPointers(OBJOS *pOS)
{
#if defined(NVCPU_X86_64)
pOS->osNv_rdcr4 = nv_rdcr4;
pOS->osNv_cpuid = nv_cpuid;
#endif
pOS->osCallACPI_DSM = osCallACPI_DSM;
pOS->osCallACPI_DDC = osCallACPI_DDC;
pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM;
pOS->osCallACPI_DOD = osCallACPI_DOD;
pOS->osCallACPI_MXDM = osCallACPI_MXDM;
pOS->osCallACPI_MXDS = osCallACPI_MXDS;
pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled;
}
static void
initOSSpecificProperties
(
OBJOS *pOS
)
{
pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE);
pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE);
pOS->setProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET, NV_TRUE);
}

View File

@@ -0,0 +1,705 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <nv-priv.h>
#include <nvos.h>
#if defined(DEBUG_REGISTRY)
#define DBG_REG_PRINTF(a, ...) \
NV_PRINTF(LEVEL_INFO, a, ##__VA_ARGS__)
#else
#define DBG_REG_PRINTF(a, ...)
#endif
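// ASCII-only case-insensitive string comparison; returns 0 when the strings
// match, with a strcasecmp()-style sign convention otherwise.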
static NvS32 stringCaseCompare(
const char *string1,
const char *string2
)
{
NvU8 c1, c2;
do
{
c1 = *string1, c2 = *string2;
if (c1 >= 'A' && c1 <= 'Z')
c1 += ('a' - 'A');
if (c2 >= 'A' && c2 <= 'Z')
c2 += ('a' - 'A');
string1++, string2++;
}
while ((c1 == c2) && (c1 != '\0'));
return (c1 - c2);
}
static nv_reg_entry_t *the_registry = NULL;
static nv_reg_entry_t* regCreateNewRegistryKey(
nv_state_t *nv,
const char *regParmStr
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *new_reg = NULL;
char *new_ParmStr = NULL;
NvU32 parm_size;
if (regParmStr == NULL)
{
DBG_BREAKPOINT();
return NULL;
}
new_reg = portMemAllocNonPaged(sizeof(nv_reg_entry_t));
if (NULL == new_reg)
{
NV_PRINTF(LEVEL_ERROR, "failed to grow registry\n");
return NULL;
}
portMemSet(new_reg, 0, sizeof(nv_reg_entry_t));
if (regParmStr != NULL)
{
parm_size = (portStringLength(regParmStr) + 1);
new_ParmStr = portMemAllocNonPaged(parm_size);
if (NULL == new_ParmStr)
{
NV_PRINTF(LEVEL_ERROR, "failed to allocate registry param string\n");
portMemFree(new_reg);
return NULL;
}
NV_ASSERT(parm_size <= NVOS38_MAX_REGISTRY_STRING_LENGTH);
if (portMemCopy(new_ParmStr, parm_size, regParmStr, parm_size) == NULL)
{
NV_PRINTF(LEVEL_ERROR, "failed to copy registry param string\n");
portMemFree(new_ParmStr);
portMemFree(new_reg);
return NULL;
}
}
new_reg->regParmStr = new_ParmStr;
new_reg->type = NV_REGISTRY_ENTRY_TYPE_UNKNOWN;
if (nvp != NULL)
{
new_reg->next = nvp->pRegistry;
nvp->pRegistry = new_reg;
DBG_REG_PRINTF("local registry now at 0x%p\n", nvp->pRegistry);
}
else
{
new_reg->next = the_registry;
the_registry = new_reg;
DBG_REG_PRINTF("global registry now at 0x%p\n", the_registry);
}
return new_reg;
}
static NV_STATUS regFreeEntry(nv_reg_entry_t *tmp)
{
portMemFree(tmp->regParmStr);
tmp->regParmStr = NULL;
{
portMemFree(tmp->pdata);
tmp->pdata = NULL;
tmp->len = 0;
}
portMemFree(tmp);
return NV_OK;
}
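//
// Look up an entry by name and type, searching the per-GPU registry first
// (when per-GPU state exists) and falling back to the global registry. On a
// match, *bGlobalEntry (if non-NULL) reports which list the entry came from.
//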
static nv_reg_entry_t* regFindRegistryEntry(
nv_state_t *nv,
const char *regParmStr,
NvU32 type,
NvBool *bGlobalEntry
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *tmp;
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
if (nvp != NULL)
{
tmp = nvp->pRegistry;
DBG_REG_PRINTF(" local registry at 0x%p\n", tmp);
while ((tmp != NULL) && (tmp->regParmStr != NULL))
{
DBG_REG_PRINTF(" Testing against %s\n",
tmp->regParmStr);
if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) &&
(type == tmp->type))
{
DBG_REG_PRINTF(" found a match!\n");
if (bGlobalEntry)
*bGlobalEntry = NV_FALSE;
return tmp;
}
tmp = tmp->next;
}
}
tmp = the_registry;
DBG_REG_PRINTF(" global registry at 0x%p\n", tmp);
while ((tmp != NULL) && (tmp->regParmStr != NULL))
{
DBG_REG_PRINTF(" Testing against %s\n",
tmp->regParmStr);
if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) &&
(type == tmp->type))
{
DBG_REG_PRINTF(" found a match!\n");
if (bGlobalEntry)
*bGlobalEntry = NV_TRUE;
return tmp;
}
tmp = tmp->next;
}
DBG_REG_PRINTF(" no match\n");
return NULL;
}
NV_STATUS RmWriteRegistryDword(
nv_state_t *nv,
const char *regParmStr,
NvU32 Data
)
{
nv_reg_entry_t *tmp;
NvBool bGlobalEntry;
if (regParmStr == NULL)
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s -> 0x%x\n", __FUNCTION__, regParmStr, Data);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_DWORD, &bGlobalEntry);
// Overwrite the existing entry in place only if its scope matches the
// request: a global entry for a global (nv == NULL) write, or a per-GPU
// entry for a per-GPU write.
if (tmp != NULL &&
((nv == NULL && bGlobalEntry) ||
(nv != NULL && !bGlobalEntry)))
{
tmp->data = Data;
if (stringCaseCompare(regParmStr, "ResmanDebugLevel") == 0)
{
os_dbg_set_level(Data);
}
return NV_OK;
}
tmp = regCreateNewRegistryKey(nv, regParmStr);
if (tmp == NULL)
return NV_ERR_GENERIC;
tmp->type = NV_REGISTRY_ENTRY_TYPE_DWORD;
tmp->data = Data;
return NV_OK;
}
NV_STATUS RmReadRegistryDword(
nv_state_t *nv,
const char *regParmStr,
NvU32 *Data
)
{
nv_reg_entry_t *tmp;
if ((regParmStr == NULL) || (Data == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_DWORD, NULL);
if (tmp == NULL)
{
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_BINARY, NULL);
if ((tmp != NULL) && (tmp->len >= sizeof(NvU32)))
{
*Data = *(NvU32 *)tmp->pdata;
}
else
{
DBG_REG_PRINTF(" not found\n");
return NV_ERR_GENERIC;
}
}
else
{
*Data = tmp->data;
}
DBG_REG_PRINTF(" found in the_registry: 0x%x\n", *Data);
return NV_OK;
}
NV_STATUS RmReadRegistryBinary(
nv_state_t *nv,
const char *regParmStr,
NvU8 *Data,
NvU32 *cbLen
)
{
nv_reg_entry_t *tmp;
NV_STATUS status;
if ((regParmStr == NULL) || (Data == NULL) || (cbLen == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_BINARY, NULL);
if (tmp == NULL)
{
DBG_REG_PRINTF(" not found\n");
return NV_ERR_GENERIC;
}
DBG_REG_PRINTF(" found\n");
if (*cbLen >= tmp->len)
{
portMemCopy((NvU8 *)Data, *cbLen, (NvU8 *)tmp->pdata, tmp->len);
*cbLen = tmp->len;
status = NV_OK;
}
else
{
NV_PRINTF(LEVEL_ERROR,
"buffer (length: %u) is too small (data length: %u)\n",
*cbLen, tmp->len);
status = NV_ERR_GENERIC;
}
return status;
}
NV_STATUS RmWriteRegistryBinary(
nv_state_t *nv,
const char *regParmStr,
NvU8 *Data,
NvU32 cbLen
)
{
nv_reg_entry_t *tmp;
NvBool bGlobalEntry;
if ((regParmStr == NULL) || (Data == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_BINARY, &bGlobalEntry);
// Reuse the existing entry (discarding its old data) only if its scope
// matches the request: a global entry for a global (nv == NULL) write, or a
// per-GPU entry for a per-GPU write. Otherwise create a new entry.
if (tmp != NULL &&
((nv == NULL && bGlobalEntry) ||
(nv != NULL && !bGlobalEntry)))
{
if (tmp->pdata != NULL)
{
portMemFree(tmp->pdata);
tmp->pdata = NULL;
tmp->len = 0;
}
}
else
{
tmp = regCreateNewRegistryKey(nv, regParmStr);
if (tmp == NULL)
{
NV_PRINTF(LEVEL_ERROR, "failed to create binary registry entry\n");
return NV_ERR_GENERIC;
}
}
tmp->pdata = portMemAllocNonPaged(cbLen);
if (NULL == tmp->pdata)
{
NV_PRINTF(LEVEL_ERROR, "failed to write binary registry entry\n");
return NV_ERR_GENERIC;
}
tmp->type = NV_REGISTRY_ENTRY_TYPE_BINARY;
tmp->len = cbLen;
portMemCopy((NvU8 *)tmp->pdata, tmp->len, (NvU8 *)Data, cbLen);
return NV_OK;
}
NV_STATUS RmWriteRegistryString(
nv_state_t *nv,
const char *regParmStr,
const char *buffer,
NvU32 bufferLength
)
{
nv_reg_entry_t *tmp;
NvBool bGlobalEntry;
if ((regParmStr == NULL) || (buffer == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_STRING, &bGlobalEntry);
// Reuse the existing entry (discarding its old data) only if its scope
// matches the request: a global entry for a global (nv == NULL) write, or a
// per-GPU entry for a per-GPU write. Otherwise create a new entry.
if (tmp != NULL &&
((nv == NULL && bGlobalEntry) ||
(nv != NULL && !bGlobalEntry)))
{
if (tmp->pdata != NULL)
{
portMemFree(tmp->pdata);
tmp->len = 0;
tmp->pdata = NULL;
}
}
else
{
tmp = regCreateNewRegistryKey(nv, regParmStr);
if (tmp == NULL)
{
NV_PRINTF(LEVEL_ERROR,
"failed to allocate a string registry entry!\n");
return NV_ERR_INSUFFICIENT_RESOURCES;
}
}
tmp->pdata = portMemAllocNonPaged(bufferLength);
if (tmp->pdata == NULL)
{
NV_PRINTF(LEVEL_ERROR, "failed to write a string registry entry!\n");
return NV_ERR_NO_MEMORY;
}
tmp->type = NV_REGISTRY_ENTRY_TYPE_STRING;
tmp->len = bufferLength;
portMemCopy((void *)tmp->pdata, tmp->len, buffer, (bufferLength - 1));
tmp->pdata[bufferLength-1] = '\0';
return NV_OK;
}
NV_STATUS RmReadRegistryString(
nv_state_t *nv,
const char *regParmStr,
NvU8 *buffer,
NvU32 *pBufferLength
)
{
NvU32 bufferLength;
nv_reg_entry_t *tmp;
if ((regParmStr == NULL) || (buffer == NULL) || (pBufferLength == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr);
bufferLength = *pBufferLength;
*pBufferLength = 0;
*buffer = '\0';
tmp = regFindRegistryEntry(nv, regParmStr,
NV_REGISTRY_ENTRY_TYPE_STRING, NULL);
if (tmp == NULL)
{
return NV_ERR_GENERIC;
}
if (bufferLength >= tmp->len)
{
portMemCopy((void *)buffer, bufferLength, (void *)tmp->pdata, tmp->len);
*pBufferLength = tmp->len;
}
else
{
NV_PRINTF(LEVEL_ERROR,
"buffer (length: %u) is too small (data length: %u)\n",
bufferLength, tmp->len);
return NV_ERR_BUFFER_TOO_SMALL;
}
return NV_OK;
}
NV_STATUS RmInitRegistry(void)
{
NV_STATUS rmStatus;
rmStatus = os_registry_init();
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "failed to initialize the OS registry!\n");
}
return rmStatus;
}
NV_STATUS RmDestroyRegistry(nv_state_t *nv)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *tmp;
if (nvp != NULL)
{
tmp = nvp->pRegistry;
nvp->pRegistry = NULL;
}
else
{
tmp = the_registry;
the_registry = NULL;
}
while (tmp != NULL)
{
nv_reg_entry_t *entry = tmp;
tmp = tmp->next;
regFreeEntry(entry);
}
return NV_OK;
}
static void regCountEntriesAndSize(
NvU32 *pNumEntries, // Pointer to number of entries
NvU32 *pSize, // Pointer to total size
nv_reg_entry_t *pRegEntry // Pointer local or global registry
)
{
//
// Note that *pNumEntries and *pSize are not initialized here. This is so
// we can accumulate totals of both global and local registries.
//
NvU32 numEntries = *pNumEntries;
NvU32 size = *pSize;
while ((pRegEntry != NULL) && (pRegEntry->regParmStr != NULL))
{
size += portStringLength(pRegEntry->regParmStr) + 1 + pRegEntry->len;
numEntries++;
pRegEntry = pRegEntry->next;
}
*pNumEntries = numEntries;
*pSize = size;
}
static NV_STATUS regCopyEntriesToPackedBuffer(
PACKED_REGISTRY_TABLE *pRegTable, // Pointer to packed record
nv_reg_entry_t *pRegEntry, // Pointer local or global registry
NvU32 *pEntryIndex, // Pointer to next index
NvU32 *pDataOffset // Pointer to offset of next data byte.
)
{
NvU8 *pByte = (NvU8 *)pRegTable; // Byte version of record pointer.
NV_STATUS nvStatus = NV_OK;
NvU32 entryIndex = *pEntryIndex;
NvU32 dataOffset = *pDataOffset;
// Walk the records and copy the data.
while ((pRegEntry != NULL) && (pRegEntry->regParmStr != NULL))
{
PACKED_REGISTRY_ENTRY *pEntry = &pRegTable->entries[entryIndex];
NvU32 slen = portStringLength(pRegEntry->regParmStr) + 1;
// Sanity check the data offset and index against counted totals.
if ((dataOffset + slen + pRegEntry->len > pRegTable->size) ||
(entryIndex >= pRegTable->numEntries))
{
// Something has changed since we counted them?
NV_PRINTF(LEVEL_ERROR, "Registry entry record is full\n");
nvStatus = NV_ERR_INVALID_STATE;
break;
}
// Copy registry entry name to data blob.
pEntry->nameOffset = dataOffset;
portMemCopy(&pByte[dataOffset], slen, pRegEntry->regParmStr, slen);
dataOffset += slen;
switch (pRegEntry->type)
{
case NV_REGISTRY_ENTRY_TYPE_DWORD:
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_DWORD;
pEntry->length = sizeof(NvU32);
pEntry->data = pRegEntry->data;
break;
case NV_REGISTRY_ENTRY_TYPE_BINARY:
case NV_REGISTRY_ENTRY_TYPE_STRING:
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_STRING;
if (pRegEntry->type == NV_REGISTRY_ENTRY_TYPE_BINARY)
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_BINARY;
pEntry->length = pRegEntry->len;
pEntry->data = dataOffset;
portMemCopy(&pByte[dataOffset], pEntry->length,
pRegEntry->pdata, pRegEntry->len);
dataOffset += pRegEntry->len;
break;
default:
// We should never get here.
pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN;
pEntry->length = 0;
pEntry->data = 0;
DBG_BREAKPOINT();
break;
}
pRegEntry = pRegEntry->next;
entryIndex++;
}
*pEntryIndex = entryIndex;
*pDataOffset = dataOffset;
return nvStatus;
}
// Package registry entries
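//
// Two-pass packing: regCountEntriesAndSize() first totals the entry count and
// blob size across the global and per-GPU registries, then
// regCopyEntriesToPackedBuffer() fills in the entry table and appends the
// name/data bytes after it. Passing pRegTable == NULL returns just the
// required size in *pSize.
//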
NV_STATUS RmPackageRegistry(
nv_state_t *nv,
PACKED_REGISTRY_TABLE *pRegTable,
NvU32 *pSize
)
{
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
nv_reg_entry_t *pLocalRegistry = NULL;
NV_STATUS nvStatus = NV_OK;
NvU32 totalSize;
NvU32 numEntries;
if (pSize == NULL)
return NV_ERR_INVALID_ARGUMENT;
// Use the local (per-device) registry if we have one.
if (nvp != NULL)
pLocalRegistry = nvp->pRegistry;
numEntries = 0;
totalSize = NV_OFFSETOF(PACKED_REGISTRY_TABLE, entries);
// Count the number of global entries and total size.
regCountEntriesAndSize(&numEntries, &totalSize, the_registry);
// Count the number of local entries and total size.
regCountEntriesAndSize(&numEntries, &totalSize, pLocalRegistry);
// Add table record size into total size.
totalSize += sizeof(PACKED_REGISTRY_ENTRY) * numEntries;
//
// If this function is called to only compute total size of registry table,
// then we are done here.
//
if (pRegTable == NULL)
{
*pSize = totalSize;
return NV_OK;
}
// Return warning if there are no registry entries.
if (numEntries == 0)
return NV_WARN_NOTHING_TO_DO;
if (totalSize > *pSize)
{
NV_PRINTF(LEVEL_ERROR, "Registry entries overflow RPC record\n");
return NV_ERR_BUFFER_TOO_SMALL;
}
// Fill in our new structure with the first pass (counting) values.
pRegTable->size = totalSize;
*pSize = totalSize;
pRegTable->numEntries = numEntries;
// Offset of first byte after the registry entry table.
totalSize = NV_OFFSETOF(PACKED_REGISTRY_TABLE, entries) +
(sizeof(PACKED_REGISTRY_ENTRY) * numEntries);
// Starting index in the registry entry table.
numEntries = 0;
// Walk the global registry and copy the data.
nvStatus = regCopyEntriesToPackedBuffer(pRegTable,
the_registry, &numEntries, &totalSize);
// Walk the local registry and copy the data.
if (nvStatus == NV_OK)
{
nvStatus = regCopyEntriesToPackedBuffer(pRegTable,
pLocalRegistry, &numEntries, &totalSize);
}
// Sanity check second pass against first pass.
if ((numEntries != pRegTable->numEntries) || (totalSize != pRegTable->size))
{
NV_PRINTF(LEVEL_ERROR, "First/second pass mismatch\n");
nvStatus = NV_ERR_INVALID_STATE;
}
return nvStatus;
}

View File

@@ -0,0 +1,812 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <os/os.h>
#include <osapi.h>
#include <core/thread_state.h>
#include "rmapi/nv_gpu_ops.h"
#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h"
NV_STATUS NV_API_CALL rm_gpu_ops_create_session(
nvidia_stack_t *sp,
struct gpuSession **session)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCreateSession(session);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (
nvidia_stack_t *sp, gpuSessionHandle session)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDestroySession(session);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_device_create (
nvidia_stack_t *sp,
nvgpuSessionHandle_t session,
const gpuInfo *pGpuInfo,
const NvProcessorUuid *gpuUuid,
nvgpuDeviceHandle_t *device,
NvBool bCreateSmcPartition)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDeviceCreate(session, pGpuInfo, gpuUuid, device, bCreateSmcPartition);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (
nvidia_stack_t *sp,
gpuDeviceHandle device)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDeviceDestroy(device);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create (
nvidia_stack_t *sp,
gpuDeviceHandle device,
NvU64 vaBase,
NvU64 vaSize,
gpuAddressSpaceHandle *vaSpace,
gpuAddressSpaceInfo *vaSpaceInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsAddressSpaceCreate(device, vaBase, vaSize, vaSpace,
vaSpaceInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(
nvidia_stack_t *sp,
gpuDeviceHandle device,
NvHandle hUserClient,
NvHandle hUserVASpace,
gpuAddressSpaceHandle *dupedVaspace,
gpuAddressSpaceInfo *vaSpaceInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDupAddressSpace(device, hUserClient, hUserVASpace,
dupedVaspace, vaSpaceInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *sp,
gpuAddressSpaceHandle vaspace)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsAddressSpaceDestroy(vaspace);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsMemoryAllocFb(vaspace, size, gpuOffset, allocInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *sp,
gpuDeviceHandle device1,
gpuDeviceHandle device2,
getP2PCapsParams *pP2pCapsParams)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetP2PCaps(device1, device2, pP2pCapsParams);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_sys(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsMemoryAllocSys(vaspace, size, gpuOffset, allocInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(
nvidia_stack_t *sp,
void *pPma,
pmaEvictPagesCb_t evictPages,
pmaEvictRangeCb_t evictRange,
void *callbackData)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
rmStatus = pmaRegisterEvictionCb(pPma, evictPages, evictRange, callbackData);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(
nvidia_stack_t *sp,
void *pPma)
{
THREAD_STATE_NODE threadState;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
pmaUnregisterEvictionCb(pPma);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(
nvidia_stack_t *sp,
gpuDeviceHandle device,
void **pPma,
const nvgpuPmaStatistics_t *pPmaPubStats)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetPmaObject(device, pPma,
(const UvmPmaStatistics **)pPmaPubStats);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(
nvidia_stack_t *sp, void *pPma,
NvLength pageCount, NvU32 pageSize,
nvgpuPmaAllocationOptions_t pPmaAllocOptions,
NvU64 *pPages)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPmaAllocPages(pPma, pageCount, pageSize,
pPmaAllocOptions, pPages);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(
nvidia_stack_t *sp, void *pPma,
NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPmaPinPages(pPma, pPages, pageCount, pageSize, flags);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(
nvidia_stack_t *sp, void *pPma,
NvU64 *pPages, NvLength pageCount, NvU32 pageSize)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPmaUnpinPages(pPma, pPages, pageCount, pageSize);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
NvU64 gpuOffset, NvLength length, void **cpuPtr, NvU32 pageSize)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsMemoryCpuMap(vaspace, gpuOffset, length, cpuPtr,
pageSize);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, void* cpuPtr)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsMemoryCpuUnMap(vaspace, cpuPtr);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *sp,
gpuAddressSpaceHandle vaspace,
const gpuChannelAllocParams *allocParams,
gpuChannelHandle *channel,
gpuChannelInfo *channelInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsChannelAllocate(vaspace, allocParams, channel,
channelInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t * sp,
nvgpuChannelHandle_t channel)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsChannelDestroy(channel);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *sp,
void *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsPmaFreePages(pPma, pPages, pageCount, pageSize, flags);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_memory_free(
nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, NvU64 gpuOffset)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsMemoryFree(vaspace, gpuOffset);
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuCaps * caps)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsQueryCaps(device, caps);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuCesCaps *caps)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsQueryCesCaps(device, caps);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *sp,
const NvProcessorUuid *pUuid,
const gpuClientInfo *pGpuClientInfo,
gpuInfo *pGpuInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetGpuInfo(pUuid, pGpuClientInfo, pGpuInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *sp,
gpuDeviceHandle device)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsServiceDeviceInterruptsRM(device);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *sp,
gpuAddressSpaceHandle vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsSetPageDirectory(vaSpace, physAddress, numEntries,
bVidMemAperture, pasid);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *sp,
gpuAddressSpaceHandle vaSpace)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsUnsetPageDirectory(vaSpace);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *sp,
gpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
gpuAddressSpaceHandle dstVaSpace,
NvU64 *dstAddress)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDupAllocation(srcVaSpace, srcAddress, dstVaSpace, dstAddress);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *sp,
gpuDeviceHandle device,
NvHandle hClient,
NvHandle hPhysMemory,
NvHandle *hDupMemory,
nvgpuMemoryInfo_t gpuMemoryInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDupMemory(device, hClient, hPhysMemory, hDupMemory, gpuMemoryInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_free_duped_handle (nvidia_stack_t *sp,
gpuDeviceHandle device,
NvHandle hPhysHandle)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsFreeDupedHandle(device, hPhysHandle);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_fb_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuFbInfo * fbInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetFbInfo(device, fbInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_ecc_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuEccInfo * eccInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetEccInfo(device, eccInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
//
// Please see the comments for nvUvmInterfaceOwnPageFaultIntr(), in
// nv_uvm_interface.h, for the recommended way to use this routine.
//
// How it works:
//
// The rmGpuLocksAcquire call generally saves the current GPU interrupt
// state, then disables interrupt generation for one (or all) GPUs.
// Likewise, the rmGpuLocksRelease call restores (re-enables) those
// interrupts to their previous state. However, the rmGpuLocksRelease
// call does NOT restore interrupts that RM does not own.
//
// This is rather hard to find in the code, so: very approximately, the
// following sequence happens: rmGpuLocksRelease, osEnableInterrupts,
// intrRestoreNonStall_HAL, intrEncodeIntrEn_HAL, and that last one skips
// over any interrupts that RM does not own.
//
// This means that things are a bit asymmetric, because this routine
// actually changes that ownership in between the rmGpuLocksAcquire and
// rmGpuLocksRelease calls. So:
//
// -- If you call this routine with bOwnInterrupts == NV_TRUE (UVM is
// taking ownership from the RM), then rmGpuLocksAcquire disables all
// GPU interrupts. Then the ownership is taken away from RM, so the
// rmGpuLocksRelease call leaves the replayable page fault interrupts
// disabled. It is then up to UVM (the caller) to enable replayable
// page fault interrupts when it is ready.
//
// -- If you call this routine with bOwnInterrupts == NV_FALSE (UVM is
// returning ownership to the RM), then rmGpuLocksAcquire disables
// all GPU interrupts that RM owns. Then the ownership is returned to
// RM, so the rmGpuLocksRelease call re-enables replayable page fault
// interrupts. So, that implies that you need to disable replayable page
// fault interrupts before calling this routine, in order to hand
// over a GPU to RM that is not generating interrupts, until RM is
// ready to handle the interrupts.
//
NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *sp,
struct gpuDevice *device,
NvBool bOwnInterrupts)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsOwnPageFaultIntr(device, bOwnInterrupts);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuFaultInfo *pFaultInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsInitFaultInfo(device, pFaultInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info (nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuFaultInfo *pFaultInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDestroyFaultInfo(device, pFaultInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
// Functions
//
// - rm_gpu_ops_has_pending_non_replayable_faults
// - rm_gpu_ops_get_non_replayable_faults
//
// Cannot take the GPU/RM lock because it is called during fault servicing.
// This could produce deadlocks if the UVM bottom half gets stuck behind a
// stalling interrupt that cannot be serviced if UVM is holding the lock.
//
// However, these functions can be safely called with no locks because it is
// just accessing the given client shadow fault buffer, which is implemented
// using a lock-free queue. There is a different client shadow fault buffer
// per GPU: RM top-half producer, UVM top/bottom-half consumer.
NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *sp,
gpuFaultInfo *pFaultInfo,
NvBool *hasPendingFaults)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsHasPendingNonReplayableFaults(pFaultInfo, hasPendingFaults);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *sp,
gpuFaultInfo *pFaultInfo,
void *faultBuffer,
NvU32 *numFaults)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsGetNonReplayableFaults(pFaultInfo, faultBuffer, numFaults);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
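/*
 * Illustrative sketch only, not part of this file: how a caller (e.g. the
 * UVM bottom half) might drain the client shadow fault buffer without taking
 * any RM locks, using the two entry points above. The faultBuffer sizing and
 * the per-packet processing are assumptions left to the caller.
 */
#if 0
static NV_STATUS uvmDrainNonReplayableFaults(nvidia_stack_t *sp,
                                             gpuFaultInfo *pFaultInfo,
                                             void *faultBuffer)
{
    NvBool hasPendingFaults = NV_TRUE;
    NvU32 numFaults = 0;
    NV_STATUS status;

    while (hasPendingFaults)
    {
        // Copies pending fault packets out of the lock-free shadow buffer.
        status = rm_gpu_ops_get_non_replayable_faults(sp, pFaultInfo,
                                                      faultBuffer, &numFaults);
        if (status != NV_OK)
            return status;

        // ...process numFaults packets from faultBuffer here...

        // Re-check in case the RM top half produced more packets meanwhile.
        status = rm_gpu_ops_has_pending_non_replayable_faults(sp, pFaultInfo,
                                                              &hasPendingFaults);
        if (status != NV_OK)
            return status;
    }
    return NV_OK;
}
#endif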
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsInitAccessCntrInfo(device, accessCntrInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDestroyAccessCntrInfo(device, accessCntrInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo,
gpuAccessCntrConfig *accessCntrConfig)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsEnableAccessCntr(device, accessCntrInfo, accessCntrConfig);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsDisableAccessCntr(device, accessCntrInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_p2p_object_create(nvidia_stack_t *sp,
gpuDeviceHandle device1,
gpuDeviceHandle device2,
NvHandle *hP2pObject)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsP2pObjectCreate(device1, device2, hP2pObject);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *sp,
nvgpuSessionHandle_t session,
NvHandle hP2pObject)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
nvGpuOpsP2pObjectDestroy(session, hP2pObject);
NV_EXIT_RM_RUNTIME(sp, fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t* sp,
nvgpuAddressSpaceHandle_t vaSpace,
NvHandle hDupedMemory,
NvU64 offset,
NvU64 size,
nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsGetExternalAllocPtes(vaSpace, hDupedMemory, offset, size,
gpuExternalMappingInfo);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_retain_channel(nvidia_stack_t* sp,
nvgpuAddressSpaceHandle_t vaSpace,
NvHandle hClient,
NvHandle hChannel,
void **retainedChannel,
nvgpuChannelInstanceInfo_t channelInstanceInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsRetainChannel(vaSpace, hClient, hChannel,
(gpuRetainedChannel **)retainedChannel,
channelInstanceInfo);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_bind_channel_resources(nvidia_stack_t* sp,
void *retainedChannel,
nvgpuChannelResourceBindParams_t channelResourceBindParams)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsBindChannelResources(retainedChannel,
channelResourceBindParams);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_release_channel(nvidia_stack_t *sp, void *retainedChannel)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
nvGpuOpsReleaseChannel(retainedChannel);
NV_EXIT_RM_RUNTIME(sp, fp);
}
void NV_API_CALL
rm_gpu_ops_stop_channel(nvidia_stack_t * sp,
void *retainedChannel,
NvBool bImmediate)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsStopChannel(retainedChannel, bImmediate);
NV_EXIT_RM_RUNTIME(sp, fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t* sp,
nvgpuAddressSpaceHandle_t vaSpace,
NvP64 resourceDescriptor,
NvU64 offset,
NvU64 size,
nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp, fp);
rmStatus = nvGpuOpsGetChannelResourcePtes(vaSpace, resourceDescriptor,
offset, size,
gpuExternalMappingInfo);
NV_EXIT_RM_RUNTIME(sp, fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *sp,
nvgpuDeviceHandle_t device,
const void *pFaultPacket)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsReportNonReplayableFault(device, pFaultPacket);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *sp,
gpuDeviceHandle device,
const gpuPagingChannelAllocParams *allocParams,
gpuPagingChannelHandle *channel,
gpuPagingChannelInfo *channelInfo)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPagingChannelAllocate(device, allocParams, channel,
channelInfo);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *sp,
gpuPagingChannelHandle channel)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsPagingChannelDestroy(channel);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channels_map(nvidia_stack_t *sp,
gpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
gpuDeviceHandle device,
NvU64 *dstAddress)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPagingChannelsMap(srcVaSpace, srcAddress, device, dstAddress);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
void NV_API_CALL
rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *sp,
gpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
gpuDeviceHandle device)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
nvGpuOpsPagingChannelsUnmap(srcVaSpace, srcAddress, device);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *sp,
gpuPagingChannelHandle channel,
char *methodStream,
NvU32 methodStreamSize)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsPagingChannelPushStream(channel, methodStream, methodStreamSize);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}

View File

@@ -0,0 +1,624 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* @file
*
* @brief Provides RmExportObject, RmImportObject, RmFreeObjExportHandle and
* RmGetExportObjectInfo interfaces :
*
*        These interfaces allow RM clients to export their objects into
*        a unique RmObjExportHandle which another RM client can import,
*        even if the source RM client gets destroyed.
*
*        An RM device instance may get destroyed asynchronously, in which
*        case exported objects residing on that device instance also get
*        destroyed. This means it is not possible to import such an object
*        back; the RmObjExportHandle into which the object had been exported
*        still remains valid, but no other object can get it.
*
*        There are no init/fini routines; it is the responsibility of the
*        rest of the RM ecosystem to make sure that all RmObjExportHandles
*        get freed during driver unload.
*
*        The API lock is expected to be held before calling into
*        rmobjexportimport.c; do not hold the GPU lock or any other lock.
*/
#include "rmobjexportimport.h"
#include "nvlimits.h"
#include "gpu/device/device.h"
#include "containers/map.h"
#include "rmapi/rmapi.h"
#include "rmapi/rs_utils.h"
#include "class/cl0080.h"
#include "class/cl2080.h"
#include <ctrl/ctrl0000/ctrl0000unix.h>
#include <ctrl/ctrl0000/ctrl0000client.h>
//
// A reference to an RmObjExportHandle
// generated by function RmGenerateObjExportHandle().
//
typedef struct
{
NvU32 deviceInstance;
} RmObjExportHandleRef;
MAKE_MAP(RmObjExportHandleMap, RmObjExportHandleRef);
//
// Memory allocator
//
PORT_MEM_ALLOCATOR *pMemAllocator;
//
// Map RmObjExportHandle -> RmObjExportHandleRef
//
RmObjExportHandleMap objExportHandleMap;
//
// RM client used to dup an object exported to an RmObjExportHandle. The
// minimal requirement for duping is to have a device object allocated. This
// RM client behaves like any other external RM client and gets no special
// handling.
//
// Treating it like any other external RM client means that if a GPU/device
// gets powered down or uninitialized, RM objects allocated by external RM
// clients on that GPU/device are freed (the OS layer does that). Code in
// this file therefore does not need to worry about freeing exported objects
// located on that GPU/device.
//
NvHandle hObjExportRmClient;
//
// Tracker for device and subdevice handles. For now only one subdevice
// (instance 0) is supported per device.
//
typedef struct
{
NvHandle hRmDevice;
NvHandle hRmSubDevice;
} RmObjExportDevice;
RmObjExportDevice objExportDevice[NV_MAX_DEVICES];
//
// Usage reference counter for the static objects in this file (the RM client
// used to dup exported objects, the memory allocator, the map, etc.).
//
NvU64 objExportImportRefCount;
//
// Static functions for internal use by code in this file.
//
static NV_STATUS RmRefObjExportImport (void);
static void RmUnrefObjExportImport (void);
static RmObjExportHandle RmGenerateObjExportHandle (NvU32 deviceInstance);
static NV_STATUS RmUnrefObjExportHandle (RmObjExportHandle hObject);
//
// Free the RmObjExportHandle.
//
static NV_STATUS RmUnrefObjExportHandle(RmObjExportHandle hObject)
{
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
RmObjExportHandleRef *pHandleRef =
mapFind(&objExportHandleMap, hObject);
if (pHandleRef == NULL)
{
return NV_ERR_OBJECT_NOT_FOUND;
}
if (pRmApi->Free(pRmApi,
hObjExportRmClient,
(NvHandle)mapKey(&objExportHandleMap, pHandleRef)) != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
"Exported object trying to free was zombie in %s\n",
__FUNCTION__);
}
mapRemove(&objExportHandleMap, pHandleRef);
return NV_OK;
}
//
// Generate unique RmObjExportHandle.
//
static RmObjExportHandle RmGenerateObjExportHandle(NvU32 deviceInstance)
{
//
// Object export handles lie in the range 0 to (MAX_OBJ_EXPORT_HANDLES - 1).
//
// Handle 0 is treated as an invalid object handle, so this function generates
// handles in the range 1 to (MAX_OBJ_EXPORT_HANDLES - 1).
//
#define MAX_OBJ_EXPORT_HANDLES 0x80000
static NvHandle hObjExportHandleNext = 1;
RmObjExportHandle hStartHandle = hObjExportHandleNext;
RmObjExportHandle hObject = 0;
do
{
RmObjExportHandleRef *pHandleRef;
hObject = hObjExportHandleNext++;
/* Reset hObjExportHandleNext to next valid handle */
if (hObjExportHandleNext == MAX_OBJ_EXPORT_HANDLES) {
hObjExportHandleNext = 1;
}
pHandleRef = mapFind(&objExportHandleMap, hObject);
if (hObject != hObjExportRmClient && pHandleRef == NULL)
{
break;
}
else
{
hObject = 0;
}
} while(hObjExportHandleNext != hStartHandle);
if (hObject != 0)
{
RmObjExportHandleRef *pHandleRef =
mapInsertNew(&objExportHandleMap, hObject);
if (pHandleRef != NULL)
{
pHandleRef->deviceInstance = deviceInstance;
}
else
{
hObject = 0;
}
}
return hObject;
}
//
// Validate that the given hObject is not one of our internally used handles.
//
// Note that mapFind(&objExportHandleMap, hObject) could still fail; checking
// for that is the caller's responsibility.
//
static NvBool RmValidateHandleAgainstInternalHandles(RmObjExportHandle hObject)
{
NvU32 i;
//
// No external RmObjExportHandle could be valid if hObjExportRmClient has
// not been allocated yet, or if it is equal to any of the handles used
// internally by code in this file.
//
if (objExportImportRefCount == 0 || hObjExportRmClient == 0 ||
hObject == hObjExportRmClient)
{
return NV_FALSE;
}
for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++)
{
if (objExportDevice[i].hRmDevice != 0 &&
(hObject == objExportDevice[i].hRmDevice ||
hObject == objExportDevice[i].hRmSubDevice))
{
return NV_FALSE;
}
}
return NV_TRUE;
}
//
// Increment reference count of static objects internally
// used by code in this file.
//
static NV_STATUS RmRefObjExportImport(void)
{
NV_STATUS rmStatus = NV_OK;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
if ((objExportImportRefCount++) != 0)
{
NV_ASSERT(hObjExportRmClient != 0);
NV_ASSERT(pMemAllocator != NULL);
return NV_OK;
}
rmStatus = pRmApi->AllocWithHandle(pRmApi,
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_NULL_OBJECT,
NV01_ROOT,
&hObjExportRmClient);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc root in %s\n", __FUNCTION__);
goto failed;
}
pMemAllocator = portMemAllocatorCreateNonPaged();
if (pMemAllocator == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Failed to alloc memory allocator in %s\n",
__FUNCTION__);
goto failed;
}
mapInit(&objExportHandleMap, pMemAllocator);
return NV_OK;
failed:
RmUnrefObjExportImport();
return rmStatus;
}
//
// Decrement the reference count of the static objects internally used by code
// in this file, and free them once the reference count reaches zero.
//
static void RmUnrefObjExportImport(void)
{
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
if ((--objExportImportRefCount) != 0)
{
return;
}
if (pMemAllocator != NULL)
{
NvU32 i;
for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++)
{
if (objExportDevice[i].hRmDevice != 0)
{
RmUnrefObjExportHandle(objExportDevice[i].hRmSubDevice);
objExportDevice[i].hRmSubDevice = 0;
RmUnrefObjExportHandle(objExportDevice[i].hRmDevice);
objExportDevice[i].hRmDevice = 0;
}
}
mapDestroy(&objExportHandleMap);
portMemAllocatorRelease(pMemAllocator);
pMemAllocator = NULL;
}
if (hObjExportRmClient != 0)
{
NV_STATUS rmStatus = pRmApi->Free(pRmApi,
hObjExportRmClient,
hObjExportRmClient);
NV_ASSERT(rmStatus == NV_OK);
hObjExportRmClient = 0;
}
}
NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance)
{
RmObjExportHandle hDstObject;
NvU32 deviceInstance = NV_MAX_DEVICES;
NvHandle hTmpObject;
NV_STATUS status;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
if (pDstObject == NULL)
{
return NV_ERR_INVALID_ARGUMENT;
}
//
// Find the device instance on which the rm object exists.
//
hTmpObject = hSrcObject;
do
{
RsResourceRef *pResourceRef;
status = serverutilGetResourceRef(hSrcClient, hTmpObject, &pResourceRef);
if (status != NV_OK)
return status;
Device *pDevice = dynamicCast(pResourceRef->pResource, Device);
if (pDevice != NULL)
{
deviceInstance = pDevice->deviceInst;
break;
}
hTmpObject = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0;
} while (hTmpObject != 0);
if ((hTmpObject == 0) || (deviceInstance >= NV_MAX_DEVICES))
{
return NV_ERR_OBJECT_NOT_FOUND;
}
status = RmRefObjExportImport();
if (status != NV_OK)
{
return status;
}
if (objExportDevice[deviceInstance].hRmDevice == 0 ||
serverutilValidateNewResourceHandle(hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice))
{
//
// The device object has not been created yet, or it was destroyed in the
// teardown path of device instance destruction; allocate a fresh device
// object.
//
NV0080_ALLOC_PARAMETERS params;
NV2080_ALLOC_PARAMETERS subdevParams;
if (objExportDevice[deviceInstance].hRmDevice == 0)
{
NV_ASSERT(objExportDevice[deviceInstance].hRmSubDevice == 0);
objExportDevice[deviceInstance].hRmDevice =
RmGenerateObjExportHandle(deviceInstance);
objExportDevice[deviceInstance].hRmSubDevice =
RmGenerateObjExportHandle(deviceInstance);
if (objExportDevice[deviceInstance].hRmDevice == 0 ||
objExportDevice[deviceInstance].hRmSubDevice == 0)
{
NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handles in %s\n",
__FUNCTION__);
status = NV_ERR_NO_MEMORY;
goto done;
}
}
portMemSet(&params, 0, sizeof(NV0080_ALLOC_PARAMETERS));
params.deviceId = deviceInstance;
status = pRmApi->AllocWithHandle(pRmApi,
hObjExportRmClient,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice,
NV01_DEVICE_0,
&params);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc device in %s\n",
__FUNCTION__);
goto done;
}
portMemSet(&subdevParams, 0, sizeof(NV2080_ALLOC_PARAMETERS));
subdevParams.subDeviceId = 0;
status = pRmApi->AllocWithHandle(pRmApi,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice,
objExportDevice[deviceInstance].hRmSubDevice,
NV20_SUBDEVICE_0,
&subdevParams);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Unable to alloc subdevice in %s\n",
__FUNCTION__);
(void) pRmApi->Free(pRmApi, hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice);
goto done;
}
}
hDstObject = RmGenerateObjExportHandle(deviceInstance);
if (hDstObject == 0)
{
NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handle in %s\n",
__FUNCTION__);
status = NV_ERR_NO_MEMORY;
goto done;
}
// If duping under device handle fails, try subdevice handle.
status = pRmApi->DupObject(pRmApi,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice,
&hDstObject,
hSrcClient,
hSrcObject,
0 /* flags */);
if (status != NV_OK)
{
if (status == NV_ERR_INVALID_OBJECT_PARENT)
{
NV_PRINTF(LEVEL_INFO,
"pRmApi->DupObject(Dev, failed due to invalid parent in %s."
" Now attempting DupObject with Subdev handle.\n",
__FUNCTION__);
status = pRmApi->DupObject(pRmApi,
hObjExportRmClient,
objExportDevice[deviceInstance].hRmSubDevice,
&hDstObject,
hSrcClient,
hSrcObject,
0 /* flags */);
if (status != NV_OK)
{
RmUnrefObjExportHandle(hDstObject);
NV_PRINTF(LEVEL_ERROR,
"pRmApi->DupObject(Subdev, failed with error code 0x%x in %s\n",
status, __FUNCTION__);
goto done;
}
}
else
{
RmUnrefObjExportHandle(hDstObject);
NV_PRINTF(LEVEL_ERROR,
"pRmApi->DupObject(Dev, failed with error code 0x%x in %s\n",
status, __FUNCTION__);
goto done;
}
}
if (pDeviceInstance != NULL)
{
*pDeviceInstance = deviceInstance;
}
*pDstObject = hDstObject;
done:
if (status != NV_OK)
{
RmUnrefObjExportImport();
}
return status;
}
void RmFreeObjExportHandle(RmObjExportHandle hObject)
{
if (!RmValidateHandleAgainstInternalHandles(hObject))
{
NV_PRINTF(LEVEL_ERROR, "Invalid handle to exported object in %s\n",
__FUNCTION__);
return;
}
RmUnrefObjExportHandle(hObject);
RmUnrefObjExportImport();
}
NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
NvHandle *phDstObject, RmObjExportHandle hSrcObject,
NvU8 *pObjectType)
{
NV_STATUS status;
NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
if (!RmValidateHandleAgainstInternalHandles(hSrcObject))
{
return NV_ERR_INVALID_ARGUMENT;
}
if (mapFind(&objExportHandleMap, hSrcObject) == NULL)
{
return NV_ERR_INVALID_ARGUMENT;
}
if (pObjectType != NULL)
{
params.hObject = hSrcObject;
params.mapFlags = 0;
params.addrSpaceType = \
NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID;
status = pRmApi->Control(pRmApi, hObjExportRmClient, hObjExportRmClient,
NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE,
&params, sizeof(params));
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"GET_ADDR_SPACE_TYPE failed with error code 0x%x in %s\n",
status, __FUNCTION__);
return status;
}
switch (params.addrSpaceType)
{
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM;
break;
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM;
break;
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC;
break;
default:
NV_ASSERT_OK_OR_RETURN(NV_ERR_INVALID_ARGUMENT);
}
}
status = pRmApi->DupObject(pRmApi, hDstClient, hDstParent, phDstObject,
hObjExportRmClient, hSrcObject,
0 /* flags */);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"pRmApi->DupObject(pRmApi, failed with error code 0x%x in %s\n",
status, __FUNCTION__);
return status;
}
return NV_OK;
}
NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance)
{
RmObjExportHandleRef *pHandleRef = NULL;
if (!RmValidateHandleAgainstInternalHandles(hSrcObject))
{
return NV_ERR_INVALID_ARGUMENT;
}
pHandleRef = mapFind(&objExportHandleMap, hSrcObject);
if (pHandleRef == NULL)
{
return NV_ERR_OBJECT_NOT_FOUND;
}
*deviceInstance = pHandleRef->deviceInstance;
return NV_OK;
}
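/*
 * Illustrative sketch only, not part of this file: the expected lifecycle of
 * an exported object as implied by the interfaces above. The handles
 * hSrcClient/hSrcObject/hDstClient/hDstParent are assumed to exist already,
 * and the API lock is assumed to be held by the caller.
 */
#if 0
static NV_STATUS exportImportExample(NvHandle hSrcClient, NvHandle hSrcObject,
                                     NvHandle hDstClient, NvHandle hDstParent)
{
    RmObjExportHandle hExport = 0;
    NvHandle hImported = 0;
    NvU8 objectType;
    NV_STATUS status;

    // Export: dup hSrcObject under the internal export client and hand back
    // an opaque export handle (the device instance output is optional).
    status = RmExportObject(hSrcClient, hSrcObject, &hExport, NULL);
    if (status != NV_OK)
        return status;

    // Import: another client dups the exported object back in; this works
    // even if the original exporting client has since been destroyed.
    status = RmImportObject(hDstClient, hDstParent, &hImported, hExport,
                            &objectType);

    // Free: release the export handle once no further imports are needed.
    RmFreeObjExportHandle(hExport);
    return status;
}
#endif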

View File

@@ -0,0 +1,100 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h> // NV device driver interface
#include <nv-priv.h>
#include <os/os.h>
#include <nvos.h>
#include <osapi.h>
#include "gpu/gpu.h"
#include "gpu/gpu_resource.h"
#include "gpu/subdevice/subdevice.h"
#include <osfuncs.h>
#include <diagnostics/journal.h>
#include "gpu/mem_mgr/mem_desc.h"
#include "mem_mgr/mem.h"
#include <nvpcie.h>
#include <core/locks.h>
#include "rmapi/rs_utils.h"
#include "rmapi/client_resource.h"
#include <class/cl0000.h>
#include <class/cl90cd.h>
#include <class/cl0005.h> // NV01_EVENT
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl844c.h> // G84_PERFBUFFER
#include <ctrl/ctrl0000/ctrl0000gpu.h>
#include <ctrl/ctrl0000/ctrl0000unix.h>
#include <ctrl/ctrl2080/ctrl2080gpu.h>
#include <ctrl/ctrl2080/ctrl2080unix.h>
/*!
* @brief Implements the NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS
*        RmControl request. It checks whether GPU video memory will remain
*        persistent across a system suspend/resume cycle.
*
* @param[in] pSubdevice
* @param[in,out] pParams
*
* @return
* NV_OK Success
*/
NV_STATUS
subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS *pParams
)
{
OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
pParams->bVidmemPersistent = !gpuIsVidmemPreservationBrokenBug3172217(pGpu) &&
(nv->preserve_vidmem_allocations ||
nvp->s0ix_pm_enabled);
return NV_OK;
}
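/*
 * Illustrative sketch only, not part of this file: how an RM client holding a
 * subdevice handle might issue this control. hClient and hSubdevice are
 * assumed pre-allocated handles, and the locking requirements of the chosen
 * RM_API interface are assumed to be satisfied by the caller.
 */
#if 0
static NvBool queryVidmemPersistence(NvHandle hClient, NvHandle hSubdevice)
{
    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
    NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS params;

    portMemSet(&params, 0, sizeof(params));
    if (pRmApi->Control(pRmApi, hClient, hSubdevice,
                        NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS,
                        &params, sizeof(params)) != NV_OK)
    {
        return NV_FALSE;
    }
    return params.bVidmemPersistent;
}
#endif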
/*!
* @brief Implements the NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS
*        RmControl request. It sets the restore-TGP flag, which is used
*        to restore TGP limits when the client is killed.
*
* @param[in] pSubdevice
* @param[in] pParams
*
* @return
* NV_OK Success
*/
NV_STATUS
subdeviceCtrlCmdOsUnixUpdateTgpStatus_IMPL
(
Subdevice *pSubdevice,
NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS *pParams
)
{
pSubdevice->bUpdateTGP = pParams->bUpdateTGP;
return NV_OK;
}

View File

@@ -0,0 +1,76 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <core/locks.h>
#include <ctrl/ctrl0080/ctrl0080unix.h>
#include <gpu/device/device.h>
#include <gpu/gpu.h>
#include <gpu/mem_mgr/mem_mgr.h>
#include <gpu/mem_mgr/mem_desc.h>
#include <nv-priv.h>
#include <nv.h>
#include <osapi.h>
NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice,
NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams)
{
OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FB) == NV_OK)
{
// See if the console is on one of the subdevices of this device.
portMemSet(pParams, 0, sizeof(*pParams));
SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
if (memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager) != NULL)
{
NvU64 baseAddr;
// There should only be one.
NV_ASSERT(pParams->width == 0);
pParams->subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
// Console is either mapped to BAR1 or BAR2 + 16 MB
os_get_screen_info(&baseAddr, &pParams->width,
&pParams->height, &pParams->depth,
&pParams->pitch,
nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
}
SLI_LOOP_END
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
}
else
{
NV_PRINTF(LEVEL_INFO, "%s: Failed to acquire GPU lock\n", __FUNCTION__);
}
return NV_OK;
}

View File

@@ -0,0 +1,570 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nv.h>
#include <nv-priv.h>
#include <osapi.h>
#include <core/thread_state.h>
#include <core/locks.h>
#include <gpu/gpu.h>
#include "kernel/gpu/intr/intr.h"
#include <gpu/bif/kernel_bif.h>
#include "gpu/disp/kern_disp.h"
#include "objtmr.h"
static NvBool osInterruptPending(
OBJGPU *pGpu,
NvBool *serviced,
THREAD_STATE_NODE *pThreadState
)
{
POBJDISP pDisp;
KernelDisplay *pKernelDisplay;
NvBool pending, sema_release;
THREAD_STATE_NODE threadState;
NvU32 gpuMask, gpuInstance;
Intr *pIntr = NULL;
MC_ENGINE_BITVECTOR intr0Pending;
MC_ENGINE_BITVECTOR intr1Pending;
*serviced = NV_FALSE;
pending = NV_FALSE;
sema_release = NV_TRUE;
OBJGPU *pDeviceLockGpu = pGpu;
NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer
PORT_MEM_ALLOCATOR *pIsrAllocator;
//
// GPU interrupt servicing ("top half")
//
// Top-level processing of GPU interrupts is performed using the
// steps below; although the code is straightforward, there
// are a few points to be aware of:
//
// 1) The GPUs lock is acquired for two reasons: to allow
// looping over GPUs atomically in SLI and to sanity
// check the PCI configuration space of any initialized
// GPUs. If the acquisition fails, the early return
// is acceptable since GPU interrupts are disabled while
// the lock is held; note that returning success
// in this case could interfere with the processing
// of third-party device interrupts if the IRQ is shared.
// Due to the above, some interrupts may be reported as
// unhandled if invocations of the ISR registered with
// the kernel are not serialized. This is bad, but
// ignored by currently supported kernels, provided most
// interrupts are handled.
//
// 2) Since acquisition of the lock disables interrupts
// on all initialized GPUs, NV_PMC_INTR_EN_0 can not be
// relied upon to determine whether interrupts are
// expected from a given GPU. The code below is therefore
// forced to rely on software state. NV_PMC_INTR_EN_0
// is read only as a sanity check to guard against
// invalid GPU state (lack of PCI memory access, etc.).
//
// 3) High priority interrupts (VBLANK, etc.), are serviced in
// this function, service of all other interrupts is
// deferred until a bottom half. If a bottom half needs
// to be scheduled, release of the GPUs lock is
// likewise deferred until completion of the bottom half.
//
// 4) To reduce the risk of starvation, an effort is made to
// consolidate processing of interrupts pending on
// all GPUs sharing a given IRQ.
//
// 5) Care is taken to ensure that the consolidated interrupt
// processing is performed in the context of a GPU
// that has interrupts pending. Else if additional ISR
// processing via a bottom-half is required, this
// bottom-half ISR might race against the GPU's shut-down
// path.
//
pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
tlsIsrInit(pIsrAllocator);
// For SWRL granular locking, process the countdown timer interrupt.
if (pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_SWRL_GRANULAR_LOCKING))
{
threadStateInitISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
gpuMask = gpumgrGetGpuMask(pDeviceLockGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
pIntr = GPU_GET_INTR(pGpu);
if (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr))
{
// If interrupt enable is garbage the GPU is probably in a bad state
if (intrGetIntrEnFromHw_HAL(pGpu, pIntr, &threadState) > INTERRUPT_TYPE_MAX)
{
continue;
}
intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState);
POBJTMR pTmr = GPU_GET_TIMER(pGpu);
*serviced = tmrServiceSwrlWrapper(pGpu, pTmr, &intr0Pending, &threadState);
}
}
threadStateFreeISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
}
// LOCK: try to acquire GPUs lock
if (rmDeviceGpuLocksAcquire(pDeviceLockGpu, GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_ISR) == NV_OK)
{
threadStateInitISRAndDeferredIntHandler(&threadState,
pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR);
gpuMask = gpumgrGetGpuMask(pDeviceLockGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
pIntr = GPU_GET_INTR(pGpu);
pDisp = GPU_GET_DISP(pGpu);
pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
if ((pDisp != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
}
else if ((pIntr != NULL) && INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr))
{
// If interrupt enable is garbage the GPU is probably in a bad state
if (intrGetIntrEnFromHw_HAL(pGpu, pIntr, &threadState) > INTERRUPT_TYPE_MAX)
continue;
intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState);
if (bitVectorTest(&intr0Pending, MC_ENGINE_IDX_DISP))
{
if (pKernelDisplay != NULL)
{
kdispServiceVblank_HAL(pGpu, pKernelDisplay, 0,
(VBLANK_STATE_PROCESS_LOW_LATENCY |
VBLANK_STATE_PROCESS_CALLED_FROM_ISR),
&threadState);
*serviced = NV_TRUE;
intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState);
}
}
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{
pIntr = GPU_GET_INTR(pGpu);
if (pIntr != NULL)
{
NvBool bCtxswLog = NV_FALSE;
intrGetPendingNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState);
intrCheckFecsEventbufferPending(pGpu, pIntr, &intr1Pending, &bCtxswLog);
}
}
if (!bitVectorTestAllCleared(&intr0Pending) ||
!bitVectorTestAllCleared(&intr1Pending))
{
pending = NV_TRUE;
sema_release = NV_FALSE;
}
}
}
threadStateFreeISRAndDeferredIntHandler(&threadState,
pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR);
if (sema_release)
{
NV_ASSERT(!pending);
// UNLOCK: release GPUs lock
rmDeviceGpuLocksRelease(pDeviceLockGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
else
{
rmDeviceGpuLockSetOwner(pDeviceLockGpu, GPUS_LOCK_OWNER_PENDING_DPC_REFRESH);
}
}
if (pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) &&
pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{
threadStateInitISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
gpuMask = gpumgrGetGpuMask(pDeviceLockGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
pIntr = GPU_GET_INTR(pGpu);
if ((pIntr != NULL) && (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr)))
{
NvBool bCtxswLog = NV_FALSE;
intrGetPendingNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState);
intrCheckFecsEventbufferPending(pGpu, pIntr, &intr1Pending, &bCtxswLog);
if (!bitVectorTestAllCleared(&intr1Pending))
{
intrServiceNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState);
*serviced = NV_TRUE;
}
}
}
threadStateFreeISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
}
tlsIsrDestroy(pIsrAllocator);
portMemAllocatorRelease(pIsrAllocator);
return pending;
}
NV_STATUS osIsr(
OBJGPU *pGpu
)
{
NV_STATUS status = NV_OK;
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvBool pending = NV_FALSE;
NvBool serviced = NV_FALSE;
Intr *pIntr;
if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
pending = osInterruptPending(pGpu, &serviced, NULL /* threadstate */);
}
else
{
pIntr = GPU_GET_INTR(pGpu);
if (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr))
{
KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
pending = osInterruptPending(pGpu, &serviced, NULL /* threadstate */);
kbifCheckAndRearmMSI(pGpu, pKernelBif);
}
}
}
if (!pending && (IS_VIRTUAL(pGpu) || !serviced))
status = NV_ERR_NO_INTR_PENDING;
else if (pending)
status = NV_WARN_MORE_PROCESSING_REQUIRED;
return status;
}
/*
* Helper function to determine when the RM SEMA/GPUS LOCK should toggle
* interrupts, based on the state of the GPU. We must add cases here as we
* discover them.
*
* Noteworthy special cases:
*
* - Suspend/resume: the GPU could still be suspended and not accessible
* on the bus, while passive-level threads need to grab the GPUs
* lock, or other GPUs are being resumed and triggering interrupts.
*
* - SLI state transitions: interrupts are disabled manually prior to
* removing GPUs from the lock mask leading up to SLI link/unlink
* operations on UNIX, but since the GPUs lock is not held by design in
* these paths, it needs to be ensured that GPUs lock acquisitions
* occurring asynchronously do not re-enable interrupts on any of the
* GPUs undergoing the SLI state transition.
*
* @param[in] pGpu OBJGPU pointer
*
* @return NV_TRUE if the RM SEMA/GPUS LOCK should toggle interrupts, NV_FALSE
* otherwise.
*/
NvBool osLockShouldToggleInterrupts(OBJGPU *pGpu)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
return NV_TRUE;
return (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) &&
gpuIsStateLoaded(pGpu) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SLI_LINK_CODEPATH));
}
void osEnableInterrupts(OBJGPU *pGpu)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
// enable irq through os call
nv_control_soc_irqs(NV_GET_NV_STATE(pGpu), NV_TRUE);
return;
}
else
{
Intr *pIntr = GPU_GET_INTR(pGpu);
NvU32 intrEn;
if (!pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING))
NV_ASSERT(intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL) == INTERRUPT_TYPE_DISABLED);
intrEn = intrGetIntrEn(pIntr);
intrSetIntrEnInHw_HAL(pGpu, pIntr, intrEn, NULL);
if (pIntr != NULL)
{
intrSetStall_HAL(pGpu, pIntr, intrEn, NULL);
}
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED))
{
if (pIntr != NULL)
{
intrRestoreNonStall_HAL(pGpu, pIntr, intrGetIntrEn(pIntr), NULL);
}
}
}
}
void osDisableInterrupts(
OBJGPU *pGpu,
NvBool bIsr
)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
// disable irq through os call
nv_control_soc_irqs(NV_GET_NV_STATE(pGpu), NV_FALSE);
return;
}
else
{
Intr *pIntr = GPU_GET_INTR(pGpu);
NvU32 new_intr_en_0 = INTERRUPT_TYPE_DISABLED;
intrSetIntrEnInHw_HAL(pGpu, pIntr, new_intr_en_0, NULL);
if (pIntr != NULL)
{
intrSetStall_HAL(pGpu, pIntr, new_intr_en_0, NULL);
}
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED))
{
if (pIntr != NULL)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{
intrRestoreNonStall_HAL(pGpu, pIntr, intrGetIntrEn(pIntr), NULL);
}
else
{
intrRestoreNonStall_HAL(pGpu, pIntr, new_intr_en_0, NULL);
}
}
}
}
}
static void RmIsrBottomHalf(
nv_state_t *pNv
)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
THREAD_STATE_NODE threadState;
OS_THREAD_HANDLE threadId;
NvU32 gpuMask, gpuInstance;
OBJGPU *pDeviceLockGpu = pGpu;
Intr *pIntr = NULL;
POBJDISP pDisp = NULL;
NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer
PORT_MEM_ALLOCATOR *pIsrAllocator;
pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
tlsIsrInit(pIsrAllocator);
//
// The GPUs lock is acquired by the ISR but released by this bottom half, so
// the owning thread changes. Refresh the ThreadId owner here so it is correct
// for the bottom-half context.
//
osGetCurrentThread(&threadId);
rmDeviceGpuLockSetOwner(pDeviceLockGpu, threadId);
gpuMask = gpumgrGetGpuMask(pGpu);
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
{
threadStateInitISRAndDeferredIntHandler(&threadState,
pGpu, THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER);
pIntr = GPU_GET_INTR(pGpu);
pDisp = GPU_GET_DISP(pGpu);
//
// Call the display service in the case of SOC display.
// TODO: with multi-interrupt handling based on the IRQ, aux interrupts are
// serviced by dpAuxService. See JIRA task TDS-4253.
//
if ((pDisp != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
{
}
else if ((pIntr != NULL) && (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr)))
{
intrServiceStall_HAL(pGpu, pIntr);
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS))
{
MC_ENGINE_BITVECTOR intrPending;
intrServiceNonStall_HAL(pGpu, pIntr, &intrPending, &threadState);
}
}
threadStateFreeISRAndDeferredIntHandler(&threadState,
pGpu, THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER);
}
// UNLOCK: release GPUs lock
rmDeviceGpuLocksRelease(pDeviceLockGpu, GPUS_LOCK_FLAGS_NONE, NULL);
tlsIsrDestroy(pIsrAllocator);
portMemAllocatorRelease(pIsrAllocator);
}
static void RmIsrBottomHalfUnlocked(
nv_state_t *pNv
)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
Intr *pIntr;
THREAD_STATE_NODE threadState;
// In the GSP client scenario, the fatal fault interrupt is not shared
// by UVM and CPU-RM. Instead, it is handled entirely by GSP-RM. We
// therefore do not expect this function to be called. But if it is, bail
// without attempting to service interrupts.
if (IS_GSP_CLIENT(pGpu))
{
return;
}
// Grab GPU lock here as this kthread-item was enqueued without grabbing GPU lock
if (rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DPC) == NV_OK)
{
if (FULL_GPU_SANITY_CHECK(pGpu))
{
pIntr = GPU_GET_INTR(pGpu);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if (intrGetIntrEn(pIntr) != INTERRUPT_TYPE_DISABLED)
{
MC_ENGINE_BITVECTOR intrPending;
intrGetPendingStall_HAL(pGpu, pIntr, &intrPending, &threadState);
intrServiceNonStallBottomHalf(pGpu, pIntr, &intrPending, &threadState);
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
}
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
}
NvBool NV_API_CALL rm_isr(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *NeedBottomHalf
)
{
NV_STATUS status;
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
OBJGPU *pGpu;
NvBool retval;
void *fp;
if ((nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) == 0)
{
return NV_FALSE;
}
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (pGpu == NULL)
{
return NV_FALSE;
}
NV_ENTER_RM_RUNTIME(sp,fp);
// call actual isr function here
status = isrWrapper(pGpu->testIntr, pGpu);
switch (status)
{
case NV_OK:
*NeedBottomHalf = NV_FALSE;
retval = NV_TRUE;
break;
case NV_WARN_MORE_PROCESSING_REQUIRED:
*NeedBottomHalf = NV_TRUE;
retval = NV_TRUE;
break;
case NV_ERR_NO_INTR_PENDING:
default:
*NeedBottomHalf = NV_FALSE;
retval = NV_FALSE;
break;
}
NV_EXIT_RM_RUNTIME(sp,fp);
return retval;
}
void NV_API_CALL rm_isr_bh(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
RmIsrBottomHalf(pNv);
NV_EXIT_RM_RUNTIME(sp,fp);
}
void NV_API_CALL rm_isr_bh_unlocked(
nvidia_stack_t *sp,
nv_state_t *pNv
)
{
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
RmIsrBottomHalfUnlocked(pNv);
NV_EXIT_RM_RUNTIME(sp,fp);
}
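/*
 * Illustrative sketch only, not part of this file: how the OS interface layer
 * might wire rm_isr()/rm_isr_bh() into a top-half interrupt handler. The
 * exampleTopHalf() shape and the scheduleBottomHalf() helper are hypothetical;
 * the real kernel-interface-layer glue lives outside this file.
 */
#if 0
static int exampleTopHalf(nvidia_stack_t *sp, nv_state_t *nv)
{
    NvU32 needBottomHalf = NV_FALSE;

    // rm_isr() returns NV_TRUE if the interrupt belonged to (and was at least
    // partially handled by) RM.
    if (!rm_isr(sp, nv, &needBottomHalf))
        return 0; // Not ours; on a shared IRQ, let other handlers run.

    if (needBottomHalf)
    {
        // The GPUs lock is still held at this point and is released by
        // RmIsrBottomHalf() once the deferred work runs via rm_isr_bh().
        scheduleBottomHalf(nv); // hypothetical: queues rm_isr_bh(sp, nv)
    }
    return 1;
}
#endif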