From ebcc6656ff5535308bbb450487a9cbb89f7ddc7c Mon Sep 17 00:00:00 2001 From: Andy Ritger Date: Thu, 30 Mar 2023 10:16:11 -0700 Subject: [PATCH] 525.105.17 --- CHANGELOG.md | 19 +- README.md | 19 +- kernel-open/Kbuild | 2 +- kernel-open/common/inc/nv-mm.h | 18 + kernel-open/conftest.sh | 19 + .../nvidia-drm/nvidia-drm-gem-nvkms-memory.c | 2 +- .../nvidia-drm/nvidia-drm-gem-user-memory.c | 6 +- kernel-open/nvidia-drm/nvidia-drm-gem.c | 2 +- kernel-open/nvidia-drm/nvidia-drm.Kbuild | 1 + kernel-open/nvidia-peermem/nv-p2p.h | 85 +- kernel-open/nvidia-peermem/nvidia-peermem.c | 46 +- kernel-open/nvidia-uvm/nvidia-uvm.Kbuild | 1 + kernel-open/nvidia-uvm/uvm.c | 2 +- kernel-open/nvidia-uvm/uvm_channel_test.c | 1 - kernel-open/nvidia-uvm/uvm_mmu.c | 4 +- kernel-open/nvidia-uvm/uvm_va_policy.h | 6 +- kernel-open/nvidia/nv-mmap.c | 12 +- kernel-open/nvidia/nv-p2p.c | 147 +++- kernel-open/nvidia/nv-p2p.h | 85 +- kernel-open/nvidia/nv.c | 28 +- kernel-open/nvidia/nvidia.Kbuild | 1 + src/common/displayport/inc/dp_connector.h | 4 + src/common/displayport/inc/dp_deviceimpl.h | 1 + .../displayport/src/dp_connectorimpl.cpp | 4 +- src/common/displayport/src/dp_deviceimpl.cpp | 33 +- src/common/inc/displayport/displayport.h | 1 + src/common/inc/nvBldVer.h | 20 +- src/common/inc/nvUnixVersion.h | 2 +- .../published/hopper/gh100/dev_fsp_addendum.h | 7 +- .../published/hopper/gh100/dev_fsp_pri.h | 6 +- .../published/hopper/gh100/dev_gc6_island.h | 10 +- src/common/modeset/timing/nvt_edid.c | 8 +- src/common/modeset/timing/nvt_edidext_861.c | 13 +- .../nvswitch/common/inc/soe/soeifcore.h | 24 +- .../nvswitch/interface/ctrl_dev_nvswitch.h | 27 +- .../nvswitch/kernel/inc/haldef_nvswitch.h | 3 +- .../nvswitch/kernel/inc/lr10/therm_lr10.h | 7 + src/common/nvswitch/kernel/inc/ls10/ls10.h | 8 +- .../ls10/minion_nvlink_defines_public_ls10.h | 17 + .../nvswitch/kernel/inc/ls10/therm_ls10.h | 7 + .../nvswitch/kernel/inc/pmgr_nvswitch.h | 10 - .../kernel/inc/soe/bin/g_soeuc_lr10_dbg.h | 6 +- 
.../kernel/inc/soe/bin/g_soeuc_lr10_prd.h | 6 +- src/common/nvswitch/kernel/lr10/therm_lr10.c | 10 + src/common/nvswitch/kernel/ls10/intr_ls10.c | 454 ++++++---- src/common/nvswitch/kernel/ls10/link_ls10.c | 20 +- src/common/nvswitch/kernel/ls10/ls10.c | 142 +++- src/common/nvswitch/kernel/ls10/pmgr_ls10.c | 7 +- src/common/nvswitch/kernel/ls10/soe_ls10.c | 8 - src/common/nvswitch/kernel/ls10/therm_ls10.c | 85 +- src/common/nvswitch/kernel/nvswitch.c | 36 +- src/common/nvswitch/kernel/smbpbi_nvswitch.c | 2 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h | 24 + .../inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h | 63 ++ src/nvidia/arch/nvalloc/common/inc/nvdevid.h | 5 +- src/nvidia/arch/nvalloc/unix/src/os.c | 29 +- src/nvidia/arch/nvalloc/unix/src/osapi.c | 63 +- src/nvidia/generated/g_gpu_nvoc.h | 2 + src/nvidia/generated/g_hal_stubs.h | 16 +- src/nvidia/generated/g_kern_bus_nvoc.c | 18 +- src/nvidia/generated/g_kern_bus_nvoc.h | 36 +- src/nvidia/generated/g_kern_fsp_nvoc.c | 11 + src/nvidia/generated/g_kern_fsp_nvoc.h | 13 + src/nvidia/generated/g_kern_mem_sys_nvoc.c | 4 +- .../g_kernel_graphics_context_nvoc.h | 11 + src/nvidia/generated/g_kernel_gsp_nvoc.c | 12 +- src/nvidia/generated/g_kernel_gsp_nvoc.h | 12 +- src/nvidia/generated/g_mem_desc_nvoc.h | 5 + src/nvidia/generated/g_nv_name_released.h | 101 ++- src/nvidia/generated/g_objtmr_nvoc.h | 1 + src/nvidia/generated/g_profiler_v2_nvoc.c | 137 ++- src/nvidia/generated/g_profiler_v2_nvoc.h | 47 ++ src/nvidia/generated/g_subdevice_nvoc.c | 779 +++++++++--------- src/nvidia/generated/g_subdevice_nvoc.h | 8 + .../inc/kernel/gpu/gsp/gsp_static_config.h | 1 + src/nvidia/kernel/vgpu/nv/rpc.c | 12 +- src/nvidia/src/kernel/core/locks.c | 11 +- .../gpu/bus/arch/ampere/kern_bus_ga100.c | 33 + .../gpu/bus/arch/maxwell/kern_bus_gm107.c | 11 +- src/nvidia/src/kernel/gpu/bus/kern_bus.c | 8 - .../kernel/gpu/disp/disp_common_ctrl_acpi.c | 97 +++ .../gpu/fsp/arch/hopper/kern_fsp_gh100.c | 24 + .../src/kernel/gpu/gr/kernel_graphics.c | 3 + 
.../kernel/gpu/gr/kernel_graphics_context.c | 70 +- .../gpu/gsp/arch/ada/kernel_gsp_ad102.c | 3 +- .../hwpm/profiler_v2/kern_profiler_v2_ctrl.c | 242 +++++- .../arch/maxwell/virt_mem_allocator_gm107.c | 22 +- src/nvidia/src/kernel/gpu/mem_mgr/heap.c | 5 +- src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c | 23 + .../src/kernel/gpu/mem_mgr/mem_mgr_pwr_mgmt.c | 54 +- .../kernel/gpu/mig_mgr/kernel_mig_manager.c | 280 ++++--- .../src/kernel/gpu/nvlink/kernel_nvlink.c | 8 + .../gpu/subdevice/subdevice_ctrl_gpu_kernel.c | 31 +- .../gpu/timer/arch/hopper/timer_gh100.c | 44 +- .../src/kernel/gpu/uvm/arch/volta/uvm_gv100.c | 16 +- src/nvidia/src/kernel/mem_mgr/mem_list.c | 3 +- .../src/kernel/platform/nbsi/nbsi_init.c | 75 +- .../src/kernel/rmapi/embedded_param_copy.c | 35 + .../libraries/nvport/memory/memory_tracking.c | 25 +- version.mk | 2 +- 100 files changed, 2912 insertions(+), 1117 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b974e657..d7ab6c56b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,24 +2,6 @@ ## Release 525 Entries -### [525.89.02] 2023-02-08 - -### [525.85.12] 2023-01-30 - -### [525.85.05] 2023-01-19 - -#### Fixed - -- Fix build problems with Clang 15.0, [#377](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/377) by @ptr1337 - -### [525.78.01] 2023-01-05 - -### [525.60.13] 2022-12-05 - -### [525.60.11] 2022-11-28 - -### [525.53] 2022-11-10 - #### Changed - GSP firmware is now distributed as multiple firmware files: this release has `gsp_tu10x.bin` and `gsp_ad10x.bin` replacing `gsp.bin` from previous releases. 
@@ -28,6 +10,7 @@ #### Fixed +- Fix build problems with Clang 15.0, [#https://github.com/NVIDIA/open-gpu-kernel-modules/issues/377] by @ptr1337 - Add support for IBT (indirect branch tracking) on supported platforms, [#256](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/256) by @rnd-ash - Return EINVAL when [failing to] allocating memory, [#280](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/280) by @YusufKhan-gamedev - Fix various typos in nvidia/src/kernel, [#16](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/16) by @alexisgeoffrey diff --git a/README.md b/README.md index 4f6759565..d7471ce1d 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # NVIDIA Linux Open GPU Kernel Module Source This is the source release of the NVIDIA Linux open GPU kernel modules, -version 525.89.02. +version 525.105.17. ## How to Build @@ -17,7 +17,7 @@ as root: Note that the kernel modules built here must be used with GSP firmware and user-space NVIDIA GPU driver components from a corresponding -525.89.02 driver release. This can be achieved by installing +525.105.17 driver release. This can be achieved by installing the NVIDIA GPU driver from the .run file using the `--no-kernel-modules` option. E.g., @@ -167,7 +167,7 @@ for the target kernel. ## Compatible GPUs The open-gpu-kernel-modules can be used on any Turing or later GPU -(see the table below). However, in the 525.89.02 release, +(see the table below). However, in the 525.105.17 release, GeForce and Workstation support is still considered alpha-quality. To enable use of the open kernel modules on GeForce and Workstation GPUs, @@ -175,7 +175,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module parameter to 1. 
For more details, see the NVIDIA GPU driver end user README here: -https://us.download.nvidia.com/XFree86/Linux-x86_64/525.89.02/README/kernel_open.html +https://us.download.nvidia.com/XFree86/Linux-x86_64/525.105.17/README/kernel_open.html In the below table, if three IDs are listed, the first is the PCI Device ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI @@ -720,9 +720,13 @@ Subsystem Device ID. | NVIDIA A10 | 2236 10DE 1482 | | NVIDIA A10G | 2237 10DE 152F | | NVIDIA A10M | 2238 10DE 1677 | +| NVIDIA H800 PCIe | 2322 10DE 17A4 | +| NVIDIA H800 | 2324 10DE 17A6 | +| NVIDIA H800 | 2324 10DE 17A8 | | NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 | | NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 | | NVIDIA H100 PCIe | 2331 10DE 1626 | +| NVIDIA H100 | 2339 10DE 17FC | | NVIDIA GeForce RTX 3060 Ti | 2414 | | NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 | | NVIDIA RTX A5500 Laptop GPU | 2438 | @@ -809,11 +813,18 @@ Subsystem Device ID. | NVIDIA RTX 6000 Ada Generation | 26B1 10DE 16A1 | | NVIDIA RTX 6000 Ada Generation | 26B1 17AA 16A1 | | NVIDIA L40 | 26B5 10DE 169D | +| NVIDIA L40 | 26B5 10DE 17DA | | NVIDIA GeForce RTX 4080 | 2704 | | NVIDIA GeForce RTX 4090 Laptop GPU | 2717 | | NVIDIA GeForce RTX 4090 Laptop GPU | 2757 | | NVIDIA GeForce RTX 4070 Ti | 2782 | | NVIDIA GeForce RTX 4080 Laptop GPU | 27A0 | +| NVIDIA RTX 4000 SFF Ada Generation | 27B0 1028 16FA | +| NVIDIA RTX 4000 SFF Ada Generation | 27B0 103C 16FA | +| NVIDIA RTX 4000 SFF Ada Generation | 27B0 10DE 16FA | +| NVIDIA RTX 4000 SFF Ada Generation | 27B0 17AA 16FA | +| NVIDIA L4 | 27B8 10DE 16CA | +| NVIDIA L4 | 27B8 10DE 16EE | | NVIDIA GeForce RTX 4080 Laptop GPU | 27E0 | | NVIDIA GeForce RTX 4070 Laptop GPU | 2820 | | NVIDIA GeForce RTX 4070 Laptop GPU | 2860 | diff --git a/kernel-open/Kbuild b/kernel-open/Kbuild index e33db9f9e..5b3a1e787 100644 --- a/kernel-open/Kbuild +++ b/kernel-open/Kbuild @@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc EXTRA_CFLAGS += -I$(src) EXTRA_CFLAGS 
+= -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM -EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.89.02\" +EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.105.17\" EXTRA_CFLAGS += -Wno-unused-function diff --git a/kernel-open/common/inc/nv-mm.h b/kernel-open/common/inc/nv-mm.h index 44b2bdc3c..9df900c1d 100644 --- a/kernel-open/common/inc/nv-mm.h +++ b/kernel-open/common/inc/nv-mm.h @@ -261,4 +261,22 @@ static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm) #endif } +static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags) +{ +#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) + vm_flags_set(vma, flags); +#else + vma->vm_flags |= flags; +#endif +} + +static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags) +{ +#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) + vm_flags_clear(vma, flags); +#else + vma->vm_flags &= ~flags; +#endif +} + #endif // __NV_MM_H__ diff --git a/kernel-open/conftest.sh b/kernel-open/conftest.sh index 9f1b7a413..49afdc9d5 100755 --- a/kernel-open/conftest.sh +++ b/kernel-open/conftest.sh @@ -5475,6 +5475,25 @@ compile_test() { compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID" "" "types" ;; + vm_area_struct_has_const_vm_flags) + # + # Determine if the 'vm_area_struct' structure has + # const 'vm_flags'. + # + # A union of '__vm_flags' and 'const vm_flags' was added + # by commit bc292ab00f6c ("mm: introduce vma->vm_flags + # wrapper functions") in mm-stable branch (2023-02-09) + # of the akpm/mm maintainer tree. + # + CODE=" + #include + int conftest_vm_area_struct_has_const_vm_flags(void) { + return offsetof(struct vm_area_struct, __vm_flags); + }" + + compile_check_conftest "$CODE" "NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS" "" "types" + ;; + # When adding a new conftest entry, please use the correct format for # specifying the relevant upstream Linux kernel commit. 
# diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c index 21a931967..fdc6de69b 100644 --- a/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c @@ -201,7 +201,7 @@ static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table( nv_dev, "Cannot create sg_table for NvKmsKapiMemory 0x%p", nv_gem->pMemory); - return NULL; + return ERR_PTR(-ENOMEM); } sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev, diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c index 2a15b16e7..938240987 100644 --- a/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c @@ -92,9 +92,9 @@ static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem, return -EINVAL; } - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags &= ~VM_IO; - vma->vm_flags |= VM_MIXEDMAP; + nv_vm_flags_clear(vma, VM_PFNMAP); + nv_vm_flags_clear(vma, VM_IO); + nv_vm_flags_set(vma, VM_MIXEDMAP); return 0; } diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem.c b/kernel-open/nvidia-drm/nvidia-drm-gem.c index 92d61a6ee..00bbc1ce9 100644 --- a/kernel-open/nvidia-drm/nvidia-drm-gem.c +++ b/kernel-open/nvidia-drm/nvidia-drm-gem.c @@ -299,7 +299,7 @@ int nv_drm_mmap(struct file *file, struct vm_area_struct *vma) ret = -EINVAL; goto done; } - vma->vm_flags &= ~VM_MAYWRITE; + nv_vm_flags_clear(vma, VM_MAYWRITE); } #endif diff --git a/kernel-open/nvidia-drm/nvidia-drm.Kbuild b/kernel-open/nvidia-drm/nvidia-drm.Kbuild index 6eef7f534..04233469d 100644 --- a/kernel-open/nvidia-drm/nvidia-drm.Kbuild +++ b/kernel-open/nvidia-drm/nvidia-drm.Kbuild @@ -124,3 +124,4 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg 
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags diff --git a/kernel-open/nvidia-peermem/nv-p2p.h b/kernel-open/nvidia-peermem/nv-p2p.h index a9469bdbb..e19b38a8f 100644 --- a/kernel-open/nvidia-peermem/nv-p2p.h +++ b/kernel-open/nvidia-peermem/nv-p2p.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -94,11 +94,10 @@ struct nvidia_p2p_params { } nvidia_p2p_params_t; /* - * Capability flag for users to detect + * Macro for users to detect * driver support for persistent pages. */ -extern int nvidia_p2p_cap_persistent_pages; -#define NVIDIA_P2P_CAP_PERSISTENT_PAGES +#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API /* * This API is not supported. @@ -173,11 +172,6 @@ struct nvidia_p2p_page_table { * A pointer to the function to be invoked when the pages * underlying the virtual address range are freed * implicitly. - * If NULL, persistent pages will be returned. - * This means the pages underlying the range of GPU virtual memory - * will persist until explicitly freed by nvidia_p2p_put_pages(). - * Persistent GPU memory mappings are not supported on PowerPC, - * MIG-enabled devices and vGPU. * @param[in] data * A non-NULL opaque pointer to private data to be passed to the * callback function. @@ -190,12 +184,48 @@ struct nvidia_p2p_page_table { * insufficient resources were available to complete the operation. * -EIO if an unknown error occurred. 
*/ -int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space, - uint64_t virtual_address, +int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void *data), void *data); + +/* + * @brief + * Pin and make the pages underlying a range of GPU virtual memory + * accessible to a third-party device. The pages will persist until + * explicitly freed by nvidia_p2p_put_pages_persistent(). + * + * Persistent GPU memory mappings are not supported on PowerPC, + * MIG-enabled devices and vGPU. + * + * This API only supports pinned, GPU-resident memory, such as that provided + * by cudaMalloc(). + * + * This API may sleep. + * + * @param[in] virtual_address + * The start address in the specified virtual address space. + * Address must be aligned to the 64KB boundary. + * @param[in] length + * The length of the requested P2P mapping. + * Length must be a multiple of 64KB. + * @param[out] page_table + * A pointer to an array of structures with P2P PTEs. + * @param[in] flags + * Must be set to zero for now. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_get_pages_persistent(uint64_t virtual_address, uint64_t length, struct nvidia_p2p_page_table **page_table, - void (*free_callback)(void *data), - void *data); + uint32_t flags); #define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003 @@ -268,6 +298,8 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, * Release a set of pages previously made accessible to * a third-party device. * + * This API may sleep. + * * @param[in] p2p_token * A token that uniquely identifies the P2P mapping. 
* @param[in] va_space @@ -282,10 +314,33 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, * -EINVAL if an invalid argument was supplied. * -EIO if an unknown error occurred. */ -int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space, - uint64_t virtual_address, +int nvidia_p2p_put_pages(uint64_t p2p_token, + uint32_t va_space, uint64_t virtual_address, struct nvidia_p2p_page_table *page_table); +/* + * @brief + * Release a set of persistent pages previously made accessible to + * a third-party device. + * + * This API may sleep. + * + * @param[in] virtual_address + * The start address in the specified virtual address space. + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * @param[in] flags + * Must be set to zero for now. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_put_pages_persistent(uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table, + uint32_t flags); + /* * @brief * Free a third-party P2P page table. (This function is a no-op.) 
diff --git a/kernel-open/nvidia-peermem/nvidia-peermem.c b/kernel-open/nvidia-peermem/nvidia-peermem.c index 699d1cf17..a20f7d1dd 100644 --- a/kernel-open/nvidia-peermem/nvidia-peermem.c +++ b/kernel-open/nvidia-peermem/nvidia-peermem.c @@ -284,8 +284,9 @@ out: return 0; } - -static void nv_mem_put_pages(struct sg_table *sg_head, void *context) +static void nv_mem_put_pages_common(int nc, + struct sg_table *sg_head, + void *context) { int ret = 0; struct nv_mem_context *nv_mem_context = @@ -302,8 +303,13 @@ static void nv_mem_put_pages(struct sg_table *sg_head, void *context) if (nv_mem_context->callback_task == current) return; - ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start, - nv_mem_context->page_table); + if (nc) { + ret = nvidia_p2p_put_pages_persistent(nv_mem_context->page_virt_start, + nv_mem_context->page_table, 0); + } else { + ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start, + nv_mem_context->page_table); + } #ifdef _DEBUG_ONLY_ /* Here we expect an error in real life cases that should be ignored - not printed. 
@@ -318,6 +324,16 @@ static void nv_mem_put_pages(struct sg_table *sg_head, void *context) return; } +static void nv_mem_put_pages(struct sg_table *sg_head, void *context) +{ + nv_mem_put_pages_common(0, sg_head, context); +} + +static void nv_mem_put_pages_nc(struct sg_table *sg_head, void *context) +{ + nv_mem_put_pages_common(1, sg_head, context); +} + static void nv_mem_release(void *context) { struct nv_mem_context *nv_mem_context = @@ -396,8 +412,9 @@ static int nv_mem_get_pages_nc(unsigned long addr, nv_mem_context->core_context = core_context; nv_mem_context->page_size = GPU_PAGE_SIZE; - ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size, - &nv_mem_context->page_table, NULL, NULL); + ret = nvidia_p2p_get_pages_persistent(nv_mem_context->page_virt_start, + nv_mem_context->mapped_size, + &nv_mem_context->page_table, 0); if (ret < 0) { peer_err("error %d while calling nvidia_p2p_get_pages() with NULL callback\n", ret); return ret; @@ -407,13 +424,13 @@ static int nv_mem_get_pages_nc(unsigned long addr, } static struct peer_memory_client nv_mem_client_nc = { - .acquire = nv_mem_acquire, - .get_pages = nv_mem_get_pages_nc, - .dma_map = nv_dma_map, - .dma_unmap = nv_dma_unmap, - .put_pages = nv_mem_put_pages, - .get_page_size = nv_mem_get_page_size, - .release = nv_mem_release, + .acquire = nv_mem_acquire, + .get_pages = nv_mem_get_pages_nc, + .dma_map = nv_dma_map, + .dma_unmap = nv_dma_unmap, + .put_pages = nv_mem_put_pages_nc, + .get_page_size = nv_mem_get_page_size, + .release = nv_mem_release, }; #endif /* NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT */ @@ -477,9 +494,6 @@ static int __init nv_mem_client_init(void) } // The nc client enables support for persistent pages. - // Thanks to this check, nvidia-peermem requires the new symbol from nvidia.ko, which - // prevents users to unintentionally load this module with unsupported nvidia.ko. 
- BUG_ON(!nvidia_p2p_cap_persistent_pages); strcpy(nv_mem_client_nc.name, DRV_NAME "_nc"); strcpy(nv_mem_client_nc.version, DRV_VERSION); reg_handle_nc = ib_register_peer_memory_client(&nv_mem_client_nc, NULL); diff --git a/kernel-open/nvidia-uvm/nvidia-uvm.Kbuild b/kernel-open/nvidia-uvm/nvidia-uvm.Kbuild index cdc636193..626ab9740 100644 --- a/kernel-open/nvidia-uvm/nvidia-uvm.Kbuild +++ b/kernel-open/nvidia-uvm/nvidia-uvm.Kbuild @@ -101,5 +101,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64 NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags NV_CONFTEST_TYPE_COMPILE_TESTS += make_device_exclusive_range +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg diff --git a/kernel-open/nvidia-uvm/uvm.c b/kernel-open/nvidia-uvm/uvm.c index f537c7e1b..de3f273e2 100644 --- a/kernel-open/nvidia-uvm/uvm.c +++ b/kernel-open/nvidia-uvm/uvm.c @@ -618,7 +618,7 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma) // Using VM_DONTCOPY would be nice, but madvise(MADV_DOFORK) can reset that // so we have to handle vm_open on fork anyway. We could disable MADV_DOFORK // with VM_IO, but that causes other mapping issues. 
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + nv_vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND); vma->vm_ops = &uvm_vm_ops_managed; diff --git a/kernel-open/nvidia-uvm/uvm_channel_test.c b/kernel-open/nvidia-uvm/uvm_channel_test.c index eaca0acd9..840427668 100644 --- a/kernel-open/nvidia-uvm/uvm_channel_test.c +++ b/kernel-open/nvidia-uvm/uvm_channel_test.c @@ -153,7 +153,6 @@ done: static NV_STATUS test_unexpected_completed_values(uvm_va_space_t *va_space) { - NV_STATUS status; uvm_gpu_t *gpu; for_each_va_space_gpu(gpu, va_space) { diff --git a/kernel-open/nvidia-uvm/uvm_mmu.c b/kernel-open/nvidia-uvm/uvm_mmu.c index db4785e1b..c834938e5 100644 --- a/kernel-open/nvidia-uvm/uvm_mmu.c +++ b/kernel-open/nvidia-uvm/uvm_mmu.c @@ -1,5 +1,5 @@ /******************************************************************************* - Copyright (c) 2015-2022 NVIDIA Corporation + Copyright (c) 2015-2023 NVIDIA Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to @@ -102,7 +102,7 @@ static NV_STATUS phys_mem_allocate_sysmem(uvm_page_tree_t *tree, NvLength size, NvU64 dma_addr; unsigned long flags = __GFP_ZERO; uvm_memcg_context_t memcg_context; - uvm_va_space_t *va_space; + uvm_va_space_t *va_space = NULL; struct mm_struct *mm = NULL; if (tree->type == UVM_PAGE_TREE_TYPE_USER && tree->gpu_va_space && UVM_CGROUP_ACCOUNTING_SUPPORTED()) { diff --git a/kernel-open/nvidia-uvm/uvm_va_policy.h b/kernel-open/nvidia-uvm/uvm_va_policy.h index 59963c515..6d3c96434 100644 --- a/kernel-open/nvidia-uvm/uvm_va_policy.h +++ b/kernel-open/nvidia-uvm/uvm_va_policy.h @@ -1,5 +1,5 @@ /******************************************************************************* - Copyright (c) 2022 NVIDIA Corporation + Copyright (c) 2022-2023 NVIDIA Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 
"Software"), to @@ -53,7 +53,7 @@ typedef enum // The VA range is determined from either the uvm_va_range_t for managed // allocations or the uvm_va_policy_node_t for HMM allocations. // -typedef struct uvm_va_policy_struct +struct uvm_va_policy_struct { // Read duplication policy for this VA range (unset, enabled, or disabled). uvm_read_duplication_policy_t read_duplication; @@ -66,7 +66,7 @@ typedef struct uvm_va_policy_struct // their page tables updated to access the (possibly remote) pages. uvm_processor_mask_t accessed_by; -} uvm_va_policy_t; +}; // Policy nodes are used for storing policies in HMM va_blocks. // The va_block lock protects the tree so that invalidation callbacks can diff --git a/kernel-open/nvidia/nv-mmap.c b/kernel-open/nvidia/nv-mmap.c index 35a0347d1..da01e4648 100644 --- a/kernel-open/nvidia/nv-mmap.c +++ b/kernel-open/nvidia/nv-mmap.c @@ -452,7 +452,7 @@ static int nvidia_mmap_numa( } // Needed for the linux kernel for mapping compound pages - vma->vm_flags |= VM_MIXEDMAP; + nv_vm_flags_set(vma, VM_MIXEDMAP); for (i = 0, addr = mmap_context->page_array[0]; i < pages; addr = mmap_context->page_array[++i], start += PAGE_SIZE) @@ -596,7 +596,7 @@ int nvidia_mmap_helper( } up(&nvl->mmap_lock); - vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND; + nv_vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND); } else { @@ -663,15 +663,15 @@ int nvidia_mmap_helper( NV_PRINT_AT(NV_DBG_MEMINFO, at); - vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED); - vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP); + nv_vm_flags_set(vma, VM_IO | VM_LOCKED | VM_RESERVED); + nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); } if ((prot & NV_PROTECT_WRITEABLE) == 0) { vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot); - vma->vm_flags &= ~VM_WRITE; - vma->vm_flags &= ~VM_MAYWRITE; + nv_vm_flags_clear(vma, VM_WRITE); + nv_vm_flags_clear(vma, VM_MAYWRITE); } vma->vm_ops = &nv_vm_ops; diff --git a/kernel-open/nvidia/nv-p2p.c b/kernel-open/nvidia/nv-p2p.c index 
1f090fbd1..45b23ea71 100644 --- a/kernel-open/nvidia/nv-p2p.c +++ b/kernel-open/nvidia/nv-p2p.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2011-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -31,6 +31,11 @@ #include "nv-p2p.h" #include "rmp2pdefines.h" +typedef enum nv_p2p_page_table_type { + NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT = 0, + NV_P2P_PAGE_TABLE_TYPE_PERSISTENT, +} nv_p2p_page_table_type_t; + typedef struct nv_p2p_dma_mapping { struct list_head list_node; struct nvidia_p2p_dma_mapping *dma_mapping; @@ -44,13 +49,9 @@ typedef struct nv_p2p_mem_info { struct list_head list_head; struct semaphore lock; } dma_mapping_list; - NvBool bPersistent; void *private; } nv_p2p_mem_info_t; -int nvidia_p2p_cap_persistent_pages = 1; -EXPORT_SYMBOL(nvidia_p2p_cap_persistent_pages); - // declared and created in nv.c extern void *nvidia_p2p_page_t_cache; @@ -238,6 +239,7 @@ static void nv_p2p_free_page_table( } static NV_STATUS nv_p2p_put_pages( + nv_p2p_page_table_type_t pt_type, nvidia_stack_t * sp, uint64_t p2p_token, uint32_t va_space, @@ -246,9 +248,6 @@ static NV_STATUS nv_p2p_put_pages( ) { NV_STATUS status; - struct nv_p2p_mem_info *mem_info = NULL; - - mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table); /* * rm_p2p_put_pages returns NV_OK if the page_table was found and @@ -258,8 +257,15 @@ static NV_STATUS nv_p2p_put_pages( * rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table * was already unlinked. */ - if (mem_info->bPersistent) + if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT) { + struct nv_p2p_mem_info *mem_info = NULL; + + /* + * It is safe to access persistent page_table as there is no async + * callback which can free it unlike non-persistent page_table. 
+ */ + mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table); status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table); } else @@ -273,7 +279,8 @@ static NV_STATUS nv_p2p_put_pages( nv_p2p_free_page_table(*page_table); *page_table = NULL; } - else if (!mem_info->bPersistent && (status == NV_ERR_OBJECT_NOT_FOUND)) + else if ((pt_type == NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT) && + (status == NV_ERR_OBJECT_NOT_FOUND)) { status = NV_OK; *page_table = NULL; @@ -327,7 +334,8 @@ static void nv_p2p_mem_info_free_callback(void *data) nv_p2p_free_platform_data(&mem_info->page_table); } -int nvidia_p2p_get_pages( +static int nv_p2p_get_pages( + nv_p2p_page_table_type_t pt_type, uint64_t p2p_token, uint32_t va_space, uint64_t virtual_address, @@ -376,9 +384,10 @@ int nvidia_p2p_get_pages( *page_table = &(mem_info->page_table); - mem_info->bPersistent = (free_callback == NULL); - - //asign length to temporary variable since do_div macro does in-place division + /* + * assign length to temporary variable since do_div macro does in-place + * division + */ temp_length = length; do_div(temp_length, page_size); page_count = temp_length; @@ -405,7 +414,7 @@ int nvidia_p2p_get_pages( goto failed; } - if (mem_info->bPersistent) + if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT) { void *gpu_info = NULL; @@ -415,7 +424,8 @@ int nvidia_p2p_get_pages( goto failed; } - status = rm_p2p_get_gpu_info(sp, virtual_address, length, &gpu_uuid, &gpu_info); + status = rm_p2p_get_gpu_info(sp, virtual_address, length, + &gpu_uuid, &gpu_info); if (status != NV_OK) { goto failed; @@ -432,8 +442,10 @@ int nvidia_p2p_get_pages( bGetUuid = NV_TRUE; - status = rm_p2p_get_pages_persistent(sp, virtual_address, length, &mem_info->private, - physical_addresses, &entries, *page_table, gpu_info); + status = rm_p2p_get_pages_persistent(sp, virtual_address, length, + &mem_info->private, + physical_addresses, &entries, + *page_table, gpu_info); if (status != NV_OK) { goto failed; @@ 
-516,10 +528,12 @@ failed: { os_free_mem(physical_addresses); } + if (wreqmb_h != NULL) { os_free_mem(wreqmb_h); } + if (rreqmb_h != NULL) { os_free_mem(rreqmb_h); @@ -527,7 +541,7 @@ failed: if (bGetPages) { - (void)nv_p2p_put_pages(sp, p2p_token, va_space, + (void)nv_p2p_put_pages(pt_type, sp, p2p_token, va_space, virtual_address, page_table); } @@ -546,8 +560,45 @@ failed: return nvidia_p2p_map_status(status); } +int nvidia_p2p_get_pages( + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void * data), + void *data +) +{ + if (free_callback == NULL) + { + return -EINVAL; + } + + return nv_p2p_get_pages(NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT, + p2p_token, va_space, virtual_address, + length, page_table, free_callback, data); +} EXPORT_SYMBOL(nvidia_p2p_get_pages); +int nvidia_p2p_get_pages_persistent( + uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + uint32_t flags +) +{ + if (flags != 0) + { + return -EINVAL; + } + + return nv_p2p_get_pages(NV_P2P_PAGE_TABLE_TYPE_PERSISTENT, 0, 0, + virtual_address, length, page_table, + NULL, NULL); +} +EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent); + /* * This function is a no-op, but is left in place (for now), in order to allow * third-party callers to build and run without errors or warnings. 
This is OK, @@ -568,15 +619,14 @@ int nvidia_p2p_put_pages( struct nvidia_p2p_page_table *page_table ) { - struct nv_p2p_mem_info *mem_info = NULL; - NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0}; NV_STATUS status; nvidia_stack_t *sp = NULL; int rc = 0; - os_mem_copy(uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN); - - mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + if (page_table == NULL) + { + return 0; + } rc = nv_kmem_cache_alloc_stack(&sp); if (rc != 0) @@ -584,21 +634,56 @@ int nvidia_p2p_put_pages( return -ENOMEM; } - status = nv_p2p_put_pages(sp, p2p_token, va_space, + status = nv_p2p_put_pages(NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT, + sp, p2p_token, va_space, virtual_address, &page_table); - if (mem_info->bPersistent) - { - nvidia_dev_put_uuid(uuid, sp); - } - nv_kmem_cache_free_stack(sp); return nvidia_p2p_map_status(status); } - EXPORT_SYMBOL(nvidia_p2p_put_pages); +int nvidia_p2p_put_pages_persistent( + uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table, + uint32_t flags +) +{ + NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0}; + NV_STATUS status; + nvidia_stack_t *sp = NULL; + int rc = 0; + + if (flags != 0) + { + return -EINVAL; + } + + if (page_table == NULL) + { + return 0; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return -ENOMEM; + } + + os_mem_copy(uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN); + + status = nv_p2p_put_pages(NV_P2P_PAGE_TABLE_TYPE_PERSISTENT, + sp, 0, 0, virtual_address, &page_table); + + nvidia_dev_put_uuid(uuid, sp); + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} +EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent); + int nvidia_p2p_dma_map_pages( struct pci_dev *peer, struct nvidia_p2p_page_table *page_table, diff --git a/kernel-open/nvidia/nv-p2p.h b/kernel-open/nvidia/nv-p2p.h index a9469bdbb..e19b38a8f 100644 --- a/kernel-open/nvidia/nv-p2p.h +++ b/kernel-open/nvidia/nv-p2p.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 
2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -94,11 +94,10 @@ struct nvidia_p2p_params { } nvidia_p2p_params_t; /* - * Capability flag for users to detect + * Macro for users to detect * driver support for persistent pages. */ -extern int nvidia_p2p_cap_persistent_pages; -#define NVIDIA_P2P_CAP_PERSISTENT_PAGES +#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API /* * This API is not supported. @@ -173,11 +172,6 @@ struct nvidia_p2p_page_table { * A pointer to the function to be invoked when the pages * underlying the virtual address range are freed * implicitly. - * If NULL, persistent pages will be returned. - * This means the pages underlying the range of GPU virtual memory - * will persist until explicitly freed by nvidia_p2p_put_pages(). - * Persistent GPU memory mappings are not supported on PowerPC, - * MIG-enabled devices and vGPU. * @param[in] data * A non-NULL opaque pointer to private data to be passed to the * callback function. @@ -190,12 +184,48 @@ struct nvidia_p2p_page_table { * insufficient resources were available to complete the operation. * -EIO if an unknown error occurred. */ -int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space, - uint64_t virtual_address, +int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void *data), void *data); + +/* + * @brief + * Pin and make the pages underlying a range of GPU virtual memory + * accessible to a third-party device. The pages will persist until + * explicitly freed by nvidia_p2p_put_pages_persistent(). + * + * Persistent GPU memory mappings are not supported on PowerPC, + * MIG-enabled devices and vGPU. 
+ * + * This API only supports pinned, GPU-resident memory, such as that provided + * by cudaMalloc(). + * + * This API may sleep. + * + * @param[in] virtual_address + * The start address in the specified virtual address space. + * Address must be aligned to the 64KB boundary. + * @param[in] length + * The length of the requested P2P mapping. + * Length must be a multiple of 64KB. + * @param[out] page_table + * A pointer to an array of structures with P2P PTEs. + * @param[in] flags + * Must be set to zero for now. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_get_pages_persistent(uint64_t virtual_address, uint64_t length, struct nvidia_p2p_page_table **page_table, - void (*free_callback)(void *data), - void *data); + uint32_t flags); #define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003 @@ -268,6 +298,8 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, * Release a set of pages previously made accessible to * a third-party device. * + * This API may sleep. + * * @param[in] p2p_token * A token that uniquely identifies the P2P mapping. * @param[in] va_space @@ -282,10 +314,33 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, * -EINVAL if an invalid argument was supplied. * -EIO if an unknown error occurred. */ -int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space, - uint64_t virtual_address, +int nvidia_p2p_put_pages(uint64_t p2p_token, + uint32_t va_space, uint64_t virtual_address, struct nvidia_p2p_page_table *page_table); +/* + * @brief + * Release a set of persistent pages previously made accessible to + * a third-party device. + * + * This API may sleep. + * + * @param[in] virtual_address + * The start address in the specified virtual address space. 
+ * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * @param[in] flags + * Must be set to zero for now. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_put_pages_persistent(uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table, + uint32_t flags); + /* * @brief * Free a third-party P2P page table. (This function is a no-op.) diff --git a/kernel-open/nvidia/nv.c b/kernel-open/nvidia/nv.c index 976449973..737fc567a 100644 --- a/kernel-open/nvidia/nv.c +++ b/kernel-open/nvidia/nv.c @@ -165,7 +165,7 @@ NvBool nv_ats_supported = NVCPU_IS_PPC64LE /* nvos_ functions.. do not take a state device parameter */ static int nvos_count_devices(void); -static nv_alloc_t *nvos_create_alloc(struct device *, int); +static nv_alloc_t *nvos_create_alloc(struct device *, NvU64); static int nvos_free_alloc(nv_alloc_t *); /*** @@ -280,11 +280,12 @@ void nv_sev_init( static nv_alloc_t *nvos_create_alloc( struct device *dev, - int num_pages + NvU64 num_pages ) { - nv_alloc_t *at; - unsigned int pt_size, i; + nv_alloc_t *at; + NvU64 pt_size; + unsigned int i; NV_KZALLOC(at, sizeof(nv_alloc_t)); if (at == NULL) @@ -295,6 +296,24 @@ nv_alloc_t *nvos_create_alloc( at->dev = dev; pt_size = num_pages * sizeof(nvidia_pte_t *); + // + // Check for multiplication overflow and check whether num_pages value can fit in at->num_pages. 
+ // + if ((num_pages != 0) && ((pt_size / num_pages) != sizeof(nvidia_pte_t*))) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Invalid page table allocation - Number of pages exceeds max value.\n"); + NV_KFREE(at, sizeof(nv_alloc_t)); + return NULL; + } + + at->num_pages = num_pages; + if (at->num_pages != num_pages) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Invalid page table allocation - requested size overflows.\n"); + NV_KFREE(at, sizeof(nv_alloc_t)); + return NULL; + } + if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK) { nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n"); @@ -303,7 +322,6 @@ nv_alloc_t *nvos_create_alloc( } memset(at->page_table, 0, pt_size); - at->num_pages = num_pages; NV_ATOMIC_SET(at->usage_count, 0); for (i = 0; i < at->num_pages; i++) diff --git a/kernel-open/nvidia/nvidia.Kbuild b/kernel-open/nvidia/nvidia.Kbuild index 5d8b4d0b8..813d19292 100644 --- a/kernel-open/nvidia/nvidia.Kbuild +++ b/kernel-open/nvidia/nvidia.Kbuild @@ -230,6 +230,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build diff --git a/src/common/displayport/inc/dp_connector.h b/src/common/displayport/inc/dp_connector.h index 8f7a8e04b..b3da28af9 100644 --- a/src/common/displayport/inc/dp_connector.h +++ b/src/common/displayport/inc/dp_connector.h @@ -213,6 +213,10 @@ namespace DisplayPort virtual NvBool isDSCSupported() = 0; + virtual NvBool isDSCDecompressionSupported() = 0; + + virtual NvBool isDSCPassThroughSupported() = 0; + virtual DscCaps getDscCaps() = 0; // diff --git a/src/common/displayport/inc/dp_deviceimpl.h b/src/common/displayport/inc/dp_deviceimpl.h 
index b5bbe6dc4..832d75603 100644 --- a/src/common/displayport/inc/dp_deviceimpl.h +++ b/src/common/displayport/inc/dp_deviceimpl.h @@ -447,6 +447,7 @@ namespace DisplayPort bool getFECSupport(); NvBool isDSCPassThroughSupported(); NvBool isDSCSupported(); + NvBool isDSCDecompressionSupported(); NvBool isDSCPossible(); bool isFECSupported(); bool readAndParseDSCCaps(); diff --git a/src/common/displayport/src/dp_connectorimpl.cpp b/src/common/displayport/src/dp_connectorimpl.cpp index 4ce6142d9..58ef89007 100644 --- a/src/common/displayport/src/dp_connectorimpl.cpp +++ b/src/common/displayport/src/dp_connectorimpl.cpp @@ -5539,7 +5539,8 @@ void ConnectorImpl::notifyLongPulse(bool statusConnected) if (existingDev && existingDev->isFakedMuxDevice() && !bIsMuxOnDgpu) { - DP_LOG((" NotifyLongPulse ignored as mux is not pointing to dGPU and there is a faked device")); + DP_LOG((" NotifyLongPulse ignored as mux is not pointing to dGPU and there is a faked device. Marking detect complete")); + sink->notifyDetectComplete(); return; } @@ -6513,6 +6514,7 @@ void ConnectorImpl::createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize) // Initialize DSC state newDev->dscCaps.bDSCSupported = true; + newDev->dscCaps.bDSCDecompressionSupported = true; newDev->parseDscCaps(buffer, bufferSize); dpMemCopy(newDev->rawDscCaps, buffer, DP_MIN(bufferSize, 16)); newDev->bDSCPossible = true; diff --git a/src/common/displayport/src/dp_deviceimpl.cpp b/src/common/displayport/src/dp_deviceimpl.cpp index b06c0c81a..5ae86d7af 100644 --- a/src/common/displayport/src/dp_deviceimpl.cpp +++ b/src/common/displayport/src/dp_deviceimpl.cpp @@ -1508,7 +1508,11 @@ NvBool DeviceImpl::getDSCSupport() { if (FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DSC_SUPPORT, _YES, byte)) { - dscCaps.bDSCSupported = true; + dscCaps.bDSCDecompressionSupported = true; + } + if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _PASS_THROUGH_SUPPORT, _YES, byte)) + { + dscCaps.bDSCPassThroughSupported = true; } } @@ -1517,6 +1521,11 @@ NvBool 
DeviceImpl::getDSCSupport() DP_LOG(("DP-DEV> DSC Support AUX READ failed for %s!", address.toString(sb))); } + if (dscCaps.bDSCDecompressionSupported || dscCaps.bDSCPassThroughSupported) + { + dscCaps.bDSCSupported = true; + } + return dscCaps.bDSCSupported; } @@ -1636,6 +1645,11 @@ NvBool DeviceImpl::isDSCSupported() return dscCaps.bDSCSupported; } +NvBool DeviceImpl::isDSCDecompressionSupported() +{ + return dscCaps.bDSCDecompressionSupported; +} + NvBool DeviceImpl::isDSCPassThroughSupported() { return dscCaps.bDSCPassThroughSupported; @@ -1974,7 +1988,7 @@ void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent) this->devDoingDscDecompression = this; this->bDSCPossible = true; } - else if (this->parent->isDSCSupported()) + else if (this->parent->isDSCDecompressionSupported()) { // // This condition takes care of DSC capable sink devices @@ -1987,12 +2001,15 @@ void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent) } else { - // This condition takes care of branch device capable of DSC. - this->devDoingDscDecompression = this; - this->bDSCPossible = true; + if (this->isDSCDecompressionSupported()) + { + // This condition takes care of branch device capable of DSC decoding. 
+ this->devDoingDscDecompression = this; + this->bDSCPossible = true; + } } - } - else if (this->parent && this->parent->isDSCSupported()) + } + else if (this->parent && this->parent->isDSCDecompressionSupported()) { // // This condition takes care of sink devices not capable of DSC @@ -2005,7 +2022,7 @@ void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent) } else { - if (this->isDSCSupported()) + if (this->isDSCDecompressionSupported()) { this->bDSCPossible = true; this->devDoingDscDecompression = this; diff --git a/src/common/inc/displayport/displayport.h b/src/common/inc/displayport/displayport.h index 8be93d5c5..5e582dc6c 100644 --- a/src/common/inc/displayport/displayport.h +++ b/src/common/inc/displayport/displayport.h @@ -240,6 +240,7 @@ typedef enum typedef struct DscCaps { NvBool bDSCSupported; + NvBool bDSCDecompressionSupported; NvBool bDSCPassThroughSupported; unsigned versionMajor, versionMinor; unsigned rcBufferBlockSize; diff --git a/src/common/inc/nvBldVer.h b/src/common/inc/nvBldVer.h index 9374a8823..5196b1a68 100644 --- a/src/common/inc/nvBldVer.h +++ b/src/common/inc/nvBldVer.h @@ -36,25 +36,25 @@ // and then checked back in. 
You cannot make changes to these sections without // corresponding changes to the buildmeister script #ifndef NV_BUILD_BRANCH - #define NV_BUILD_BRANCH r528_37 + #define NV_BUILD_BRANCH r528_79 #endif #ifndef NV_PUBLIC_BRANCH - #define NV_PUBLIC_BRANCH r528_37 + #define NV_PUBLIC_BRANCH r528_79 #endif #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) -#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r528_37-265" -#define NV_BUILD_CHANGELIST_NUM (32376659) +#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r528_79-332" +#define NV_BUILD_CHANGELIST_NUM (32663405) #define NV_BUILD_TYPE "Official" -#define NV_BUILD_NAME "rel/gpu_drv/r525/r528_37-265" -#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32376659) +#define NV_BUILD_NAME "rel/gpu_drv/r525/r528_79-332" +#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32663405) #else /* Windows builds */ -#define NV_BUILD_BRANCH_VERSION "r528_37-4" -#define NV_BUILD_CHANGELIST_NUM (32375411) +#define NV_BUILD_BRANCH_VERSION "r528_79-9" +#define NV_BUILD_CHANGELIST_NUM (32663405) #define NV_BUILD_TYPE "Official" -#define NV_BUILD_NAME "528.46" -#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32375411) +#define NV_BUILD_NAME "528.89" +#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32663405) #define NV_BUILD_BRANCH_BASE_VERSION R525 #endif // End buildmeister python edited section diff --git a/src/common/inc/nvUnixVersion.h b/src/common/inc/nvUnixVersion.h index 1b1d10516..f4708e473 100644 --- a/src/common/inc/nvUnixVersion.h +++ b/src/common/inc/nvUnixVersion.h @@ -4,7 +4,7 @@ #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) -#define NV_VERSION_STRING "525.89.02" +#define NV_VERSION_STRING "525.105.17" #else diff --git a/src/common/inc/swref/published/hopper/gh100/dev_fsp_addendum.h b/src/common/inc/swref/published/hopper/gh100/dev_fsp_addendum.h index 137fbe6ac..b1299ba26 100644 --- 
a/src/common/inc/swref/published/hopper/gh100/dev_fsp_addendum.h +++ b/src/common/inc/swref/published/hopper/gh100/dev_fsp_addendum.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,6 +24,11 @@ #ifndef __gh100_dev_fsp_addendum_h__ #define __gh100_dev_fsp_addendum_h__ +#define NV_GFW_FSP_UCODE_VERSION NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3(1) +#define NV_GFW_FSP_UCODE_VERSION_FULL 11:0 +#define NV_GFW_FSP_UCODE_VERSION_MAJOR 11:8 +#define NV_GFW_FSP_UCODE_VERSION_MINOR 7:0 + // // RM uses channel 0 for FSP EMEM on GH100. // diff --git a/src/common/inc/swref/published/hopper/gh100/dev_fsp_pri.h b/src/common/inc/swref/published/hopper/gh100/dev_fsp_pri.h index 6debb1627..22d217203 100644 --- a/src/common/inc/swref/published/hopper/gh100/dev_fsp_pri.h +++ b/src/common/inc/swref/published/hopper/gh100/dev_fsp_pri.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -64,5 +64,9 @@ #define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2__DEVICE_MAP 0x00000016 /* */ #define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2_VAL 31:0 /* RWIVF */ #define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2_VAL_INIT 0x00000000 /* RWI-V */ +#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3(i) (0x008f0330+(i)*4) /* RW-4A */ +#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3__SIZE_1 4 /* */ +#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3_VAL 31:0 /* RWIVF */ +#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3_VAL_INIT 0x00000000 /* RWI-V */ #endif // __gh100_dev_fsp_pri_h__ diff --git 
a/src/common/inc/swref/published/hopper/gh100/dev_gc6_island.h b/src/common/inc/swref/published/hopper/gh100/dev_gc6_island.h index fec06073b..4d33ed0c2 100644 --- a/src/common/inc/swref/published/hopper/gh100/dev_gc6_island.h +++ b/src/common/inc/swref/published/hopper/gh100/dev_gc6_island.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,6 +24,14 @@ #ifndef __gh100_dev_gc6_island_h__ #define __gh100_dev_gc6_island_h__ +#define NV_PGC6_SCI_SEC_TIMER_TIME_0 0x00118f54 /* RW-4R */ +#define NV_PGC6_SCI_SEC_TIMER_TIME_0_NSEC 31:5 /* RWEUF */ +#define NV_PGC6_SCI_SEC_TIMER_TIME_0_NSEC_ZERO 0x00000000 /* RWE-V */ + +#define NV_PGC6_SCI_SEC_TIMER_TIME_1 0x00118f58 /* RW-4R */ +#define NV_PGC6_SCI_SEC_TIMER_TIME_1_NSEC 28:0 /* RWEUF */ +#define NV_PGC6_SCI_SEC_TIMER_TIME_1_NSEC_ZERO 0x00000000 /* RWE-V */ + #define NV_PGC6_SCI_SYS_TIMER_OFFSET_0 0x00118df4 /* RW-4R */ #define NV_PGC6_SCI_SYS_TIMER_OFFSET_0_UPDATE 0:0 /* RWEVF */ #define NV_PGC6_SCI_SYS_TIMER_OFFSET_0_UPDATE_DONE 0x00000000 /* R-E-V */ diff --git a/src/common/modeset/timing/nvt_edid.c b/src/common/modeset/timing/nvt_edid.c index 3a6551e28..0968d332a 100644 --- a/src/common/modeset/timing/nvt_edid.c +++ b/src/common/modeset/timing/nvt_edid.c @@ -2098,8 +2098,8 @@ NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongVal // validate DTD blocks pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset]; - while (pDTD->wDTPixelClock != 0 && - (NvU8 *)pDTD - pExt < (int)sizeof(EIA861EXTENSION)) + while ((pDTD->wDTPixelClock != 0) && + (((NvU8 *)pDTD - pExt + sizeof(DETAILEDTIMINGDESCRIPTOR)) < ((NvU8)sizeof(EIA861EXTENSION) - 1))) { if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS) { @@ 
-2342,8 +2342,8 @@ NvU32 NvTiming_EDIDStrongValidationMask(NvU8 *pEdid, NvU32 length) // validate DTD blocks pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset]; - while (pDTD->wDTPixelClock != 0 && - (NvU8 *)pDTD - pExt < (int)sizeof(EIA861EXTENSION)) + while ((pDTD->wDTPixelClock != 0) && + (((NvU8 *)pDTD - pExt + sizeof(DETAILEDTIMINGDESCRIPTOR)) < ((NvU8)sizeof(EIA861EXTENSION) -1))) { if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS) ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD); diff --git a/src/common/modeset/timing/nvt_edidext_861.c b/src/common/modeset/timing/nvt_edidext_861.c index c3a975ff1..dc58c29dc 100644 --- a/src/common/modeset/timing/nvt_edidext_861.c +++ b/src/common/modeset/timing/nvt_edidext_861.c @@ -397,7 +397,7 @@ void parse861ExtDetailedTiming(NvU8 *pEdidExt, // Get all detailed timings in CEA ext block pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pEdidExt[pEIA861->offset]; - while((NvU8 *)pDTD < (pEdidExt + sizeof(EDIDV1STRUC)) && // Check that we're not going beyond this extension block. 
+ while((NvU8 *)pDTD + sizeof(DETAILEDTIMINGDESCRIPTOR) < (pEdidExt + sizeof(EDIDV1STRUC) - 1) && pDTD->wDTPixelClock != 0) { NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); @@ -1237,6 +1237,12 @@ NVT_STATUS get861ExtInfo(NvU8 *p, NvU32 size, NVT_EDID_CEA861_INFO *p861info) return NVT_STATUS_ERR; } + // DTD offset sanity check + if (p[2] >= 1 && p[2] <= 3) + { + return NVT_STATUS_ERR; + } + // don't do anything further if p861info is NULL if (p861info == NULL) { @@ -1299,6 +1305,11 @@ NVT_STATUS parseCta861DataBlockInfo(NvU8 *p, tag = NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(p[i]); payload = NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(p[i]); + /*don't allow data colleciton totally size larger than [127 - 5 (tag, revision, offset, describing native video format, checksum)]*/ + if ((i + payload > size) || (i + payload > 122)) + { + return NVT_STATUS_ERR; + } // move the pointer to the payload section or extended Tag Code i++; diff --git a/src/common/nvswitch/common/inc/soe/soeifcore.h b/src/common/nvswitch/common/inc/soe/soeifcore.h index c48cb6b70..99c4ca5ba 100644 --- a/src/common/nvswitch/common/inc/soe/soeifcore.h +++ b/src/common/nvswitch/common/inc/soe/soeifcore.h @@ -74,14 +74,18 @@ enum /*! * Read VRs - * Needed to be in sync with chips_a defines */ RM_SOE_CORE_CMD_GET_VOLTAGE_VALUES, /*! * Init PLM2 protected registers */ - RM_SOE_CORE_CMD_INIT_L2_STATE + RM_SOE_CORE_CMD_INIT_L2_STATE, + + /*! 
+ * Read Power + */ + RM_SOE_CORE_CMD_GET_POWER_VALUES, }; // Timeout for SOE reset callback function @@ -153,6 +157,11 @@ typedef struct NvU8 cmdType; } RM_SOE_CORE_CMD_L2_STATE; +typedef struct +{ + NvU8 cmdType; +} RM_SOE_CORE_CMD_GET_POWER; + typedef union { NvU8 cmdType; @@ -164,6 +173,7 @@ typedef union RM_SOE_CORE_CMD_NPORT_TPROD_STATE nportTprodState; RM_SOE_CORE_CMD_GET_VOLTAGE getVoltage; RM_SOE_CORE_CMD_L2_STATE l2State; + RM_SOE_CORE_CMD_GET_POWER getPower; } RM_SOE_CORE_CMD; @@ -176,9 +186,19 @@ typedef struct NvU32 hvdd_mv; } RM_SOE_CORE_MSG_GET_VOLTAGE; +typedef struct +{ + NvU8 msgType; + NvU8 flcnStatus; + NvU32 vdd_w; + NvU32 dvdd_w; + NvU32 hvdd_w; +} RM_SOE_CORE_MSG_GET_POWER; + typedef union { NvU8 msgType; RM_SOE_CORE_MSG_GET_VOLTAGE getVoltage; + RM_SOE_CORE_MSG_GET_POWER getPower; } RM_SOE_CORE_MSG; #endif // _SOECORE_H_ diff --git a/src/common/nvswitch/interface/ctrl_dev_nvswitch.h b/src/common/nvswitch/interface/ctrl_dev_nvswitch.h index 693107b2c..461383e78 100644 --- a/src/common/nvswitch/interface/ctrl_dev_nvswitch.h +++ b/src/common/nvswitch/interface/ctrl_dev_nvswitch.h @@ -751,6 +751,19 @@ typedef struct NvU32 hvdd_mv; } NVSWITCH_CTRL_GET_VOLTAGE_PARAMS; +/* + * CTRL_NVSWITCH_GET_POWER + * + * Zero(0) indicates that a measurement is not available + * on the current platform. + */ +typedef struct +{ + NvU32 vdd_w; + NvU32 dvdd_w; + NvU32 hvdd_w; +} NVSWITCH_GET_POWER_PARAMS; + /* * CTRL_NVSWITCH_GET_ERRORS * @@ -3534,6 +3547,15 @@ typedef struct #define NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX 256 +typedef enum +{ + NVSWITCH_I2C_ACQUIRER_NONE = 0, + NVSWITCH_I2C_ACQUIRER_UNKNOWN, + NVSWITCH_I2C_ACQUIRER_IOCTL, // e.g. MODS + NVSWITCH_I2C_ACQUIRER_EXTERNAL, // e.g. 
Linux Direct + +} NVSWITCH_I2C_ACQUIRER; + /* * CTRL_NVSWITCH_I2C_INDEXED * @@ -3816,8 +3838,9 @@ typedef struct #define CTRL_NVSWITCH_CLEAR_COUNTERS 0x51 #define CTRL_NVSWITCH_SET_NVLINK_ERROR_THRESHOLD 0x52 #define CTRL_NVSWITCH_GET_NVLINK_ERROR_THRESHOLD 0x53 -#define CTRL_NVSWITCH_GET_VOLTAGE 0x55 -#define CTRL_NVSWITCH_GET_BOARD_PART_NUMBER 0x54 +#define CTRL_NVSWITCH_GET_VOLTAGE 0x54 +#define CTRL_NVSWITCH_GET_BOARD_PART_NUMBER 0x55 +#define CTRL_NVSWITCH_GET_POWER 0x56 #ifdef __cplusplus } diff --git a/src/common/nvswitch/kernel/inc/haldef_nvswitch.h b/src/common/nvswitch/kernel/inc/haldef_nvswitch.h index bdb80f5e1..864c553e9 100644 --- a/src/common/nvswitch/kernel/inc/haldef_nvswitch.h +++ b/src/common/nvswitch/kernel/inc/haldef_nvswitch.h @@ -224,7 +224,8 @@ _op(NvlStatus, nvswitch_ctrl_set_nvlink_error_threshold, (nvswitch_device *device, NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams), _arch) \ _op(NvlStatus, nvswitch_ctrl_get_nvlink_error_threshold, (nvswitch_device *device, NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams), _arch) \ _op(NvlStatus, nvswitch_ctrl_therm_read_voltage, (nvswitch_device *device, NVSWITCH_CTRL_GET_VOLTAGE_PARAMS *info), _arch) \ - _op(NvlStatus, nvswitch_ctrl_get_board_part_number, (nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p), _arch) + _op(NvlStatus, nvswitch_ctrl_therm_read_power, (nvswitch_device *device, NVSWITCH_GET_POWER_PARAMS *info), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_board_part_number, (nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p), _arch) \ #define NVSWITCH_HAL_FUNCTION_LIST_LS10(_op, _arch) \ _op(NvlStatus, nvswitch_launch_ALI, (nvswitch_device *device), _arch) \ diff --git a/src/common/nvswitch/kernel/inc/lr10/therm_lr10.h b/src/common/nvswitch/kernel/inc/lr10/therm_lr10.h index c6d3d68ab..ea9298fdd 100644 --- a/src/common/nvswitch/kernel/inc/lr10/therm_lr10.h +++ b/src/common/nvswitch/kernel/inc/lr10/therm_lr10.h @@ -62,4 +62,11 @@ 
nvswitch_ctrl_therm_read_voltage_lr10 NVSWITCH_CTRL_GET_VOLTAGE_PARAMS *info ); +NvlStatus +nvswitch_ctrl_therm_read_power_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_POWER_PARAMS *info +); + #endif //_THERM_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/ls10/ls10.h b/src/common/nvswitch/kernel/inc/ls10/ls10.h index 1ed15760d..b968e9c1d 100644 --- a/src/common/nvswitch/kernel/inc/ls10/ls10.h +++ b/src/common/nvswitch/kernel/inc/ls10/ls10.h @@ -497,8 +497,8 @@ typedef struct NV_NPORT_PORTSTAT_LS10(_block, _reg, _idx, ), _data); \ } -#define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS (10 * NVSWITCH_INTERVAL_1SEC_IN_NS) -#define NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS (10 * NVSWITCH_INTERVAL_1MSEC_IN_NS) +#define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1SEC_IN_NS) +#define NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1MSEC_IN_NS) // Struct used for passing around error masks in error handling functions typedef struct @@ -978,6 +978,7 @@ void nvswitch_link_disable_interrupts_ls10(nvswitch_device *device, NvU32 l void nvswitch_execute_unilateral_link_shutdown_ls10(nvlink_link *link); void nvswitch_init_dlpl_interrupts_ls10(nvlink_link *link); +void nvswitch_set_dlpl_interrupts_ls10(nvlink_link *link); NvlStatus nvswitch_reset_and_drain_links_ls10(nvswitch_device *device, NvU64 link_mask); void nvswitch_service_minion_all_links_ls10(nvswitch_device *device); @@ -997,9 +998,6 @@ NvlStatus nvswitch_launch_ALI_ls10(nvswitch_device *device); NvlStatus nvswitch_ctrl_set_mc_rid_table_ls10(nvswitch_device *device, NVSWITCH_SET_MC_RID_TABLE_PARAMS *p); NvlStatus nvswitch_ctrl_get_mc_rid_table_ls10(nvswitch_device *device, NVSWITCH_GET_MC_RID_TABLE_PARAMS *p); -void nvswitch_init_dlpl_interrupts_ls10(nvlink_link *link); -NvlStatus nvswitch_reset_and_drain_links_ls10(nvswitch_device *device, NvU64 link_mask); - void nvswitch_service_minion_all_links_ls10(nvswitch_device *device); NvBool 
nvswitch_is_inforom_supported_ls10(nvswitch_device *device); diff --git a/src/common/nvswitch/kernel/inc/ls10/minion_nvlink_defines_public_ls10.h b/src/common/nvswitch/kernel/inc/ls10/minion_nvlink_defines_public_ls10.h index 25da73164..e9854ca85 100644 --- a/src/common/nvswitch/kernel/inc/ls10/minion_nvlink_defines_public_ls10.h +++ b/src/common/nvswitch/kernel/inc/ls10/minion_nvlink_defines_public_ls10.h @@ -24,6 +24,21 @@ #ifndef _MINION_NVLINK_DEFINES_PUBLIC_H_ #define _MINION_NVLINK_DEFINES_PUBLIC_H_ +//PAD REG READ API (Bug 2643883) +#define NV_MINION_UCODE_READUPHYPAD_ADDR 11:0 +#define NV_MINION_UCODE_READUPHYPAD_LANE 15:12 + +//FIELD FOR DEBUG_MISC_i DATA REGISTERS +#define NV_MINION_DEBUG_MISC_0_LINK_STATE 7:0 +#define NV_MINION_DEBUG_MISC_0_ISR_ID 15:8 +#define NV_MINION_DEBUG_MISC_0_OTHER_DATA 31:16 + +// Recal values checks +#define NV_MINION_UCODE_L1_EXIT_MARGIN 100 +#define NV_MINION_UCODE_L1_EXIT_MAX 200 +#define NV_MINION_UCODE_RECOVERY_TIME 250 +#define NV_MINION_UCODE_PEQ_TIME 96 + // SUBCODES for DLCMD FAULT (uses DLCMDFAULR code) - dlCmdFault() - NVLINK_LINK_INT typedef enum _MINION_STATUS { @@ -31,4 +46,6 @@ typedef enum _MINION_STATUS MINION_ALARM_BUSY = 80, } MINION_STATUS; + #define LINKSTATUS_EMERGENCY_SHUTDOWN 0x29 +#define LINKSTATUS_INITPHASE1 0x24 #endif // _MINION_NVLINK_DEFINES_PUBLIC_H_ diff --git a/src/common/nvswitch/kernel/inc/ls10/therm_ls10.h b/src/common/nvswitch/kernel/inc/ls10/therm_ls10.h index e49d71039..2f11ea8c6 100644 --- a/src/common/nvswitch/kernel/inc/ls10/therm_ls10.h +++ b/src/common/nvswitch/kernel/inc/ls10/therm_ls10.h @@ -56,4 +56,11 @@ nvswitch_ctrl_therm_read_voltage_ls10 nvswitch_device *device, NVSWITCH_CTRL_GET_VOLTAGE_PARAMS *info ); + +NvlStatus +nvswitch_ctrl_therm_read_power_ls10 +( + nvswitch_device *device, + NVSWITCH_GET_POWER_PARAMS *info +); #endif //_THERM_LS10_H_ diff --git a/src/common/nvswitch/kernel/inc/pmgr_nvswitch.h b/src/common/nvswitch/kernel/inc/pmgr_nvswitch.h index 7f86d5e4c..02e2b5e07 
100644 --- a/src/common/nvswitch/kernel/inc/pmgr_nvswitch.h +++ b/src/common/nvswitch/kernel/inc/pmgr_nvswitch.h @@ -164,16 +164,6 @@ typedef struct NvBool bBlockProtocol; } NVSWITCH_I2C_HW_CMD, *PNVSWITCH_I2C_HW_CMD; - -typedef enum -{ - NVSWITCH_I2C_ACQUIRER_NONE = 0, - NVSWITCH_I2C_ACQUIRER_UNKNOWN, - NVSWITCH_I2C_ACQUIRER_IOCTL, // e.g. MODS - NVSWITCH_I2C_ACQUIRER_EXTERNAL, // e.g. Linux Direct - -} NVSWITCH_I2C_ACQUIRER; - typedef enum { i2cProfile_Standard, i2cProfile_Fast, diff --git a/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_dbg.h b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_dbg.h index 6f8acdc8e..5f4c15f6e 100644 --- a/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_dbg.h +++ b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_dbg.h @@ -569,7 +569,7 @@ const NvU32 soe_ucode_data_lr10_dbg[] = { 0x328908f4, 0xfbfa324f, 0xbf02f971, 0xbcb0b2b9, 0xb9a6b0c9, 0xe41708f4, 0xbcffffd9, 0xfba6f09b, 0x980b08f4, 0xf9a60109, 0xf8050df4, 0xb2dc7202, 0x28d77eed, 0xb201fb00, 0x05ab98b9, 0xdeb2cfb2, 0xfd729cb2, 0x0042a97e, 0xf0fc00f8, 0xf9fc30f4, 0xbf62f9f0, 0x08e1b0b9, 0xd4b2a5b2, 0xa630c9bc, - 0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x010124bd, 0x763efc06, 0x02f80043, + 0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x24bd0101, 0x763efc06, 0x02f80043, 0x853e0101, 0x42bc0043, 0x0096b192, 0x060df401, 0x90010049, 0x96ff0399, 0x0b947e04, 0xb23bb200, 0xdd0c725a, 0x00001200, 0x7e3030bc, 0x320028d7, 0x00a433a1, 0x08b0b434, 0xb209c0b4, 0x1200da2d, 0x20bc0000, 0x01004e20, 0x0021367e, 0x0a00a033, 0x853e02f8, 0x00da0043, 0xbd000012, 0x01004cb4, @@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_dbg[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0xb32dc4cc, 0x58018cca, 
0x7c52cad0, 0x4a5277fe, 0xb22438cf, 0xcfd90bc8, 0xf23ebc55, 0x2e5c0e40, - 0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x0d4a5d7d, 0x9c31ffb3, 0x95bc604f, 0x40cc834d, + 0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xf4188925, 0x3294f034, 0x06c315a3, 0x41c3e219, + 0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x8cd89b95, 0x33df19d3, 0xaba62f3f, 0x5fd448c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, diff --git a/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_prd.h b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_prd.h index eeebeb6dc..16d70610d 100644 --- a/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_prd.h +++ b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_prd.h @@ -569,7 +569,7 @@ const NvU32 soe_ucode_data_lr10_prd[] = { 0x328908f4, 0xfbfa324f, 0xbf02f971, 0xbcb0b2b9, 0xb9a6b0c9, 0xe41708f4, 0xbcffffd9, 0xfba6f09b, 0x980b08f4, 0xf9a60109, 0xf8050df4, 0xb2dc7202, 0x28d77eed, 0xb201fb00, 0x05ab98b9, 0xdeb2cfb2, 0xfd729cb2, 0x0042a97e, 0xf0fc00f8, 0xf9fc30f4, 0xbf62f9f0, 0x08e1b0b9, 0xd4b2a5b2, 0xa630c9bc, - 0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x010124bd, 0x763efc06, 0x02f80043, + 0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x24bd0101, 0x763efc06, 0x02f80043, 0x853e0101, 0x42bc0043, 0x0096b192, 0x060df401, 0x90010049, 0x96ff0399, 0x0b947e04, 0xb23bb200, 0xdd0c725a, 0x00001200, 0x7e3030bc, 0x320028d7, 0x00a433a1, 0x08b0b434, 0xb209c0b4, 0x1200da2d, 0x20bc0000, 0x01004e20, 0x0021367e, 0x0a00a033, 0x853e02f8, 0x00da0043, 0xbd000012, 0x01004cb4, @@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_prd[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xb22438cf, 0xcfd90bc8, 0xf23ebc55, 0x2e5c0e40, - 0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x0d4a5d7d, 0x9c31ffb3, 0x95bc604f, 0x40cc834d, + 0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xf4188925, 0x3294f034, 0x06c315a3, 0x41c3e219, + 0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x8cd89b95, 0x33df19d3, 0xaba62f3f, 0x5fd448c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, diff --git a/src/common/nvswitch/kernel/lr10/therm_lr10.c b/src/common/nvswitch/kernel/lr10/therm_lr10.c index 6e8fc11e4..1bd9f922e 100644 --- a/src/common/nvswitch/kernel/lr10/therm_lr10.c +++ b/src/common/nvswitch/kernel/lr10/therm_lr10.c @@ -314,3 +314,13 @@ nvswitch_ctrl_therm_read_voltage_lr10 return -NVL_ERR_NOT_SUPPORTED; } +NvlStatus +nvswitch_ctrl_therm_read_power_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_POWER_PARAMS *info +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + diff --git a/src/common/nvswitch/kernel/ls10/intr_ls10.c b/src/common/nvswitch/kernel/ls10/intr_ls10.c index b9480c851..1db53815b 100644 --- a/src/common/nvswitch/kernel/ls10/intr_ls10.c +++ b/src/common/nvswitch/kernel/ls10/intr_ls10.c @@ -258,7 +258,7 @@ _nvswitch_initialize_route_interrupts DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _EXTMCRID_ECC_LIMIT_ERR, _ENABLE) | DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _RAM_ECC_LIMIT_ERR, _ENABLE) | DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _INVALID_MCRID_ERR, _ENABLE); - // NOTE: _MC_TRIGGER_ERR is debug-use only + // NOTE: _MC_TRIGGER_ERR is debug-use only } static void @@ -456,8 +456,8 @@ 
_nvswitch_initialize_nport_interrupts_ls10 nvswitch_device *device ) { -// Moving this L2 register access to SOE. Refer bug #3747687 -#if 0 +// Moving this L2 register access to SOE. Refer bug #3747687 +#if 0 NvU32 val; val = @@ -516,7 +516,7 @@ _nvswitch_initialize_nxbar_interrupts_ls10 DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, 1) | DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _EGRESS_CDT_PARITY_ERROR, 1); -// Moving this L2 register access to SOE. Refer bug #3747687 +// Moving this L2 register access to SOE. Refer bug #3747687 #if 0 NVSWITCH_BCAST_WR32_LS10(device, NXBAR, _NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, report_fatal); #endif // 0 @@ -532,7 +532,7 @@ _nvswitch_initialize_nxbar_interrupts_ls10 * IRQMASK is used to read in mask of interrupts * IRQDEST is used to read in enabled interrupts that are routed to the HOST * - * IRQSTAT & IRQMASK gives the pending interrupting on this minion + * IRQSTAT & IRQMASK gives the pending interrupting on this minion * * @param[in] device MINION on this device * @param[in] instance MINION instance @@ -561,7 +561,7 @@ nvswitch_minion_service_falcon_interrupts_ls10 return -NVL_NOT_FOUND; } - unhandled = pending; + unhandled = pending; bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _WDTMR, 1); if (nvswitch_test_flags(pending, bit)) @@ -761,7 +761,7 @@ _nvswitch_service_priv_ring_ls10 if (pending != 0) { - NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_PRIV_ERROR, + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_PRIV_ERROR, "Fatal, Unexpected PRI error\n"); NVSWITCH_LOG_FATAL_DATA(device, _HW, _HW_HOST_PRIV_ERROR, 2, 0, NV_FALSE, &pending); @@ -821,7 +821,7 @@ _nvswitch_collect_nport_error_info_ls10 } while (register_start <= register_end); - + *idx = i; return NVL_SUCCESS; } @@ -2177,7 +2177,7 @@ _nvswitch_service_ingress_nonfatal_ls10_err_status_1: NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_FIRST_1, report.raw_first & report.mask); } - + NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, 
_ERR_STATUS_0, pending_0); NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_STATUS_1, pending_1); @@ -4131,13 +4131,13 @@ _nvswitch_service_npg_fatal_ls10 NvU32 link; pending = NVSWITCH_ENG_RD32(device, NPG, , npg, _NPG, _NPG_INTERRUPT_STATUS); - + if (pending == 0) { return -NVL_NOT_FOUND; } - mask = + mask = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _FATAL) | DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _FATAL) | DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _FATAL) | @@ -4234,7 +4234,7 @@ _nvswitch_service_npg_nonfatal_ls10 return -NVL_NOT_FOUND; } - mask = + mask = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _NONFATAL) | DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _NONFATAL) | DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _NONFATAL) | @@ -4286,10 +4286,11 @@ static NvlStatus _nvswitch_service_nvldl_fatal_ls10 ( nvswitch_device *device, - NvU32 nvlipt_instance + NvU32 nvlipt_instance, + NvU64 intrLinkMask ) { - NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask, runtimeErrorMask = 0; + NvU64 enabledLinkMask, localLinkMask, localIntrLinkMask, runtimeErrorMask = 0; NvU32 i; nvlink_link *link; NvU32 clocksMask = NVSWITCH_PER_LINK_CLOCK_SET(RXCLK) | NVSWITCH_PER_LINK_CLOCK_SET(TXCLK); @@ -4297,11 +4298,22 @@ _nvswitch_service_nvldl_fatal_ls10 NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo = { 0 }; NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo = { 0 }; + // + // The passed in interruptLinkMask should contain a link that is part of the + // given nvlipt instance + // enabledLinkMask = nvswitch_get_enabled_link_mask(device); localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance); - localEnabledLinkMask = enabledLinkMask & localLinkMask; + localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask; - FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + if (localIntrLinkMask == 0) + { + NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for 
link interrupt servicing!\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask) { link = nvswitch_get_link(device, i); if (link == NULL) @@ -4343,7 +4355,7 @@ _nvswitch_service_nvldl_fatal_ls10 NVSWITCH_PRINT(device, ERROR, "%s: NVLDL[0x%x, 0x%llx]: Unable to send Runtime Error bitmask: 0x%llx,\n", __FUNCTION__, - nvlipt_instance, localLinkMask, + nvlipt_instance, localIntrLinkMask, runtimeErrorMask); } @@ -4869,19 +4881,31 @@ NvlStatus _nvswitch_service_nvltlc_fatal_ls10 ( nvswitch_device *device, - NvU32 nvlipt_instance + NvU32 nvlipt_instance, + NvU64 intrLinkMask ) { - NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask; + NvU64 enabledLinkMask, localLinkMask, localIntrLinkMask; NvU32 i; nvlink_link *link; NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED; + // + // The passed in interruptLinkMask should contain a link that is part of the + // given nvlipt instance + // enabledLinkMask = nvswitch_get_enabled_link_mask(device); localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance); - localEnabledLinkMask = enabledLinkMask & localLinkMask; + localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask; - FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + if (localIntrLinkMask == 0) + { + NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask) { link = nvswitch_get_link(device, i); if (link == NULL) @@ -5314,6 +5338,12 @@ _nvswitch_emit_link_errors_nvldl_fatal_link_ls10 { NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_UP, "LTSSM Fault Up", NV_FALSE); } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_DOWN, "LTSSM Fault Down", NV_FALSE); + } } static void @@ -5342,6 +5372,12 @@ 
_nvswitch_emit_link_errors_nvldl_nonfatal_link_ls10 nvswitch_configure_error_rate_threshold_interrupt_ls10(nvlink, NV_FALSE); NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_SHORT_ERROR_RATE, "RX Short Error Rate"); } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_CRC_COUNTER, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_CRC_COUNTER, "RX CRC Error Rate"); + } } static void @@ -5437,11 +5473,11 @@ _nvswitch_deferred_link_state_check_ls10 void *fn_args ) { - NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams = + NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams = (NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS*)fn_args; NvU32 nvlipt_instance = pErrorReportParams->nvlipt_instance; NvU32 link = pErrorReportParams->link; - ls10_device *chip_device; + ls10_device *chip_device; nvlink_link *pLink; NvU64 linkState; @@ -5532,13 +5568,13 @@ _nvswitch_deferred_link_errors_check_ls10 NvU32 nvlipt_instance = pErrorReportParams->nvlipt_instance; NvU32 link = pErrorReportParams->link; ls10_device *chip_device; - NvU32 pending, bit; + NvU32 pending; chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); pending = chip_device->deferredLinkErrors[link].fatalIntrMask.dl; - bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1); - if (nvswitch_test_flags(pending, bit)) + if (FLD_TEST_DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1U, pending) || + FLD_TEST_DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1U, pending) ) { nvswitch_create_deferred_link_state_check_task_ls10(device, nvlipt_instance, link); } @@ -5581,10 +5617,10 @@ _nvswitch_create_deferred_link_errors_task_ls10 pErrorReportParams->nvlipt_instance = nvlipt_instance; pErrorReportParams->link = link; - status = nvswitch_task_create_args(device, (void*)pErrorReportParams, + status = nvswitch_task_create_args(device, (void*)pErrorReportParams, &_nvswitch_deferred_link_errors_check_ls10, NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS, - NVSWITCH_TASK_TYPE_FLAGS_RUN_ONCE | + 
NVSWITCH_TASK_TYPE_FLAGS_RUN_ONCE | NVSWITCH_TASK_TYPE_FLAGS_VOID_PTR_ARGS); } @@ -5645,7 +5681,7 @@ _nvswitch_service_nvldl_nonfatal_link_ls10 if (nvswitch_test_flags(pending, bit)) { chip_device->deferredLinkErrors[link].nonFatalIntrMask.dl |= bit; - _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link); + _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link); nvswitch_clear_flags(&unhandled, bit); } @@ -5666,7 +5702,9 @@ _nvswitch_service_nvldl_nonfatal_link_ls10 bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_CRC_COUNTER, 1); if (nvswitch_test_flags(pending, bit)) { - NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_CRC_COUNTER, "RX CRC Counter"); + + chip_device->deferredLinkErrors[link].nonFatalIntrMask.dl |= bit; + _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link); nvswitch_clear_flags(&unhandled, bit); // @@ -5698,20 +5736,33 @@ static NvlStatus _nvswitch_service_nvldl_nonfatal_ls10 ( nvswitch_device *device, - NvU32 nvlipt_instance + NvU32 nvlipt_instance, + NvU64 intrLinkMask ) { - NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask; + NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask; NvU32 i; nvlink_link *link; NvlStatus status; NvlStatus return_status = -NVL_NOT_FOUND; NvU32 clocksMask = NVSWITCH_PER_LINK_CLOCK_SET(RXCLK) | NVSWITCH_PER_LINK_CLOCK_SET(TXCLK); - enabledLinkMask = nvswitch_get_enabled_link_mask(device); - localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance); - localEnabledLinkMask = enabledLinkMask & localLinkMask; - FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + // + // The passed in interruptLinkMask should contain a link that is part of the + // given nvlipt instance + // + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance); + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask; + + if (localIntrLinkMask == 0) + { + NVSWITCH_PRINT(device, 
ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask) { link = nvswitch_get_link(device, i); if (link == NULL) @@ -6084,20 +6135,28 @@ static NvlStatus _nvswitch_service_nvltlc_nonfatal_ls10 ( nvswitch_device *device, - NvU32 nvlipt_instance + NvU32 nvlipt_instance, + NvU64 intrLinkMask ) { - NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask; + NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask; NvU32 i; nvlink_link *link; NvlStatus status; NvlStatus return_status = NVL_SUCCESS; - enabledLinkMask = nvswitch_get_enabled_link_mask(device); localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance); - localEnabledLinkMask = enabledLinkMask & localLinkMask; + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask; - FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + if (localIntrLinkMask == 0) + { + NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask) { link = nvswitch_get_link(device, i); if (link == NULL) @@ -6199,6 +6258,16 @@ _nvswitch_service_nvlipt_lnk_status_ls10 nvswitch_corelib_training_complete_ls10(link); nvswitch_init_buffer_ready(device, link, NV_TRUE); } + else if (mode == NVLINK_LINKSTATE_FAULT) + { + // + // If we are here then a previous state transition caused + // the link to FAULT as there is no TL Link state requests + // that explicitly transitions a link to fault. 
If that is the + // case, set the DL interrupts so any errors can be handled + // + nvswitch_set_dlpl_interrupts_ls10(link); + } } NVSWITCH_UNHANDLED_CHECK(device, unhandled); @@ -6225,7 +6294,7 @@ _nvswitch_service_nvlipt_lnk_nonfatal_ls10 { ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); nvlink_link *link_info = nvswitch_get_link(device, link); - NvU32 lnkStateRequest, lnkStateStatus; + NvU32 lnkStateRequest, linkState; NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; NvU32 pending, bit, unhandled; @@ -6253,26 +6322,21 @@ _nvswitch_service_nvlipt_lnk_nonfatal_ls10 if (nvswitch_test_flags(pending, bit)) { // - // Read back LINK_STATE_REQUESTS and LINK_STATE_STATUS registers - // If request == ACTIVE, LINK_STATE_STATUS == ACTIVE_PENDING, request == ERROR - // and there is a pending FAULT_UP interrupt then redo reset_and_drain since the - // last try failed - // + // Read back LINK_STATE_REQUESTS and TOP_LINK_STATE registers + // If request == ACTIVE and TOP_LINK_STATE == FAULT there is a pending + // fault on training so re-run reset_and_drain // Mark that the defered link error mechanism as seeing a reset_and_train re-try so // the deferred task needs to re-create itself instead of continuing with the linkstate // checks // - lnkStateStatus = NVSWITCH_LINK_RD32_LS10(device, link_info->linkNumber, NVLIPT_LNK, - _NVLIPT_LNK, _CTRL_LINK_STATE_STATUS); + linkState = NVSWITCH_LINK_RD32_LS10(device, link_info->linkNumber, NVLDL, + _NVLDL, _TOP_LINK_STATE); lnkStateRequest = NVSWITCH_LINK_RD32_LS10(device, link_info->linkNumber, NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_REQUEST); if(FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _REQUEST, _ACTIVE, lnkStateRequest) && - !(FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _STATUS, _REQUEST_SUCCESSFUL, lnkStateRequest) || - FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _STATUS, _INIT, lnkStateRequest))&& - FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, _ACTIVE_PENDING, 
lnkStateStatus) && - DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1) & chip_device->deferredLinkErrors[link].fatalIntrMask.dl) + linkState == NV_NVLDL_TOP_LINK_STATE_STATE_FAULT) { chip_device->deferredLinkErrors[link].bResetAndDrainRetry = NV_TRUE; device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link)); @@ -6335,77 +6399,81 @@ static NvlStatus _nvswitch_service_nvlipt_link_nonfatal_ls10 ( nvswitch_device *device, - NvU32 instance + NvU32 instance, + NvU64 intrLinkMask ) { - NvU32 i, globalLink, bit, intrLink; - NvU32 interruptingLinks = 0; - NvU32 lnkStatusChangeLinks = 0; - NvlStatus status; - NvU64 link_enable_mask; - - link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 | - (NvU64)device->regkeys.link_enable_mask); - for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i) + NvU32 i, intrLink; + NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask; + NvU64 interruptingLinks = 0; + NvU64 lnkStatusChangeLinks = 0; + NvlStatus status = NVL_SUCCESS; + NvlStatus retStatus = NVL_SUCCESS; + + // + // The passed in interruptLinkMask should contain a link that is part of the + // given nvlipt instance + // + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(instance); + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask; + + if (localIntrLinkMask == 0) { - globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i; - if ((NVBIT64(globalLink) & link_enable_mask) == 0) + NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + + FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask) + { + if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != instance) { - continue; + NVSWITCH_ASSERT(0); + break; } - intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0); + + intrLink = NVSWITCH_LINK_RD32(device, i, NVLIPT_LNK, _NVLIPT_LNK, 
_ERR_STATUS_0); if(intrLink) { - interruptingLinks |= NVBIT(i); + interruptingLinks |= NVBIT64(i); } - - intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS); + + intrLink = NVSWITCH_LINK_RD32(device, i, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS); if(intrLink) { - lnkStatusChangeLinks |= NVBIT(i); + lnkStatusChangeLinks |= NVBIT64(i); } } + FOR_EACH_INDEX_IN_MASK_END; - if(lnkStatusChangeLinks) - { - for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i) - { - bit = NVBIT(i); - globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i; - if (nvswitch_test_flags(lnkStatusChangeLinks, bit)) - { - if( _nvswitch_service_nvlipt_lnk_status_ls10(device, instance, globalLink) != NVL_SUCCESS) - { - NVSWITCH_PRINT(device, WARN, "%s: Could not process nvlipt link status interrupt. Continuing. LinkId %d\n", - __FUNCTION__, globalLink); - } - } - } - } - if(interruptingLinks) + FOR_EACH_INDEX_IN_MASK(64, i, lnkStatusChangeLinks) { - for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i) + + if(_nvswitch_service_nvlipt_lnk_status_ls10(device, instance, i) != NVL_SUCCESS) { - bit = NVBIT(i); - globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i; - if (nvswitch_test_flags(interruptingLinks, bit)) - { - status = _nvswitch_service_nvlipt_lnk_nonfatal_ls10(device, instance, globalLink); - if (status != NVL_SUCCESS && status != -NVL_NOT_FOUND) - { - return -NVL_MORE_PROCESSING_REQUIRED; - } - } + NVSWITCH_PRINT(device, WARN, "%s: Could not process nvlipt link status interrupt. Continuing. 
LinkId %d\n", + __FUNCTION__, i); } - return NVL_SUCCESS; } - else + FOR_EACH_INDEX_IN_MASK_END; + + FOR_EACH_INDEX_IN_MASK(64, i, interruptingLinks) { - return -NVL_NOT_FOUND; + + status = _nvswitch_service_nvlipt_lnk_nonfatal_ls10(device, instance, i); + if (status != NVL_SUCCESS && status != -NVL_NOT_FOUND) + { + retStatus = -NVL_MORE_PROCESSING_REQUIRED; + } } + FOR_EACH_INDEX_IN_MASK_END; + + return retStatus; } @@ -6431,7 +6499,7 @@ _nvswitch_service_minion_fatal_ls10 return -NVL_NOT_FOUND; } - unhandled = pending; + unhandled = pending; bit = DRF_NUM(_MINION, _MINION_INTR, _FALCON_STALL, 0x1); if (nvswitch_test_flags(pending, bit)) @@ -6478,11 +6546,27 @@ _nvswitch_service_nvlw_nonfatal_ls10 ) { NvlStatus status[3]; + NvU32 reg; + NvU64 intrLinkMask = 0; - // TODO: @achaudhry invert handling so nvlipt_lnk is first - status[0] = _nvswitch_service_nvldl_nonfatal_ls10(device, instance); - status[1] = _nvswitch_service_nvltlc_nonfatal_ls10(device, instance); - status[2] = _nvswitch_service_nvlipt_link_nonfatal_ls10(device, instance); + reg = NVSWITCH_ENG_RD32_LS10(device, NVLW, instance, _NVLW, _TOP_INTR_1_STATUS); + intrLinkMask = DRF_VAL(_NVLW, _TOP_INTR_1_STATUS, _LINK, reg); + + // + // Shift the mask of interrupting links from the local to the + // NVLW instance to a global mask + // + intrLinkMask = intrLinkMask << (NVSWITCH_LINKS_PER_NVLW_LS10*instance); + + // If there is no pending link interrupts then there is nothing to service + if (intrLinkMask == 0) + { + return NVL_SUCCESS; + } + + status[0] = _nvswitch_service_nvldl_nonfatal_ls10(device, instance, intrLinkMask); + status[1] = _nvswitch_service_nvltlc_nonfatal_ls10(device, instance, intrLinkMask); + status[2] = _nvswitch_service_nvlipt_link_nonfatal_ls10(device, instance, intrLinkMask); if ((status[0] != NVL_SUCCESS) && (status[0] != -NVL_NOT_FOUND) && (status[1] != NVL_SUCCESS) && (status[1] != -NVL_NOT_FOUND) && @@ -6588,45 +6672,44 @@ static NvlStatus _nvswitch_service_nvlipt_link_fatal_ls10 ( 
nvswitch_device *device, - NvU32 instance + NvU32 instance, + NvU64 intrLinkMask ) { - NvU32 i, globalLink, bit, intrLink; - NvU32 interruptingLinks = 0; + NvU32 i, intrLink; + NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask; + NvlStatus status = NVL_SUCCESS; - //read in error status of current link - for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i) + // + // The passed in interruptLinkMask should contain a link that is part of the + // given nvlipt instance + // + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(instance); + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask; + + if (localIntrLinkMask == 0) { - globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i; - - intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0); - - if(intrLink) - { - interruptingLinks |= NVBIT(i); - } + NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; } - if(interruptingLinks) + // read in error status of current link + FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask) { - for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i) + intrLink = NVSWITCH_LINK_RD32(device, i, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0); + if (intrLink != 0) { - bit = NVBIT(i); - globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i; - if (nvswitch_test_flags(interruptingLinks, bit)) + if( _nvswitch_service_nvlipt_lnk_fatal_ls10(device, instance, i) != NVL_SUCCESS) { - if( _nvswitch_service_nvlipt_lnk_fatal_ls10(device, instance, globalLink) != NVL_SUCCESS) - { - return -NVL_MORE_PROCESSING_REQUIRED; - } + status = -NVL_MORE_PROCESSING_REQUIRED; } } - return NVL_SUCCESS; - } - else - { - return -NVL_NOT_FOUND; } + FOR_EACH_INDEX_IN_MASK_END; + + return status; } static NvlStatus @@ -6637,14 +6720,39 @@ _nvswitch_service_nvlw_fatal_ls10 ) { NvlStatus status[6]; + NvU64 
intrLinkMask = 0; + NvU32 reg; + + reg = NVSWITCH_ENG_RD32_LS10(device, NVLW, instance, _NVLW, _TOP_INTR_0_STATUS); + intrLinkMask = DRF_VAL(_NVLW, _TOP_INTR_0_STATUS, _LINK, reg); + + // + // Shift the mask of interrupting links from the local to the + // NVLW instance to a global mask + // + intrLinkMask = intrLinkMask << (NVSWITCH_LINKS_PER_NVLW_LS10*instance); status[0] = device->hal.nvswitch_service_minion_link(device, instance); - status[1] = _nvswitch_service_nvldl_fatal_ls10(device, instance); - status[2] = _nvswitch_service_nvltlc_fatal_ls10(device, instance); - status[3] = _nvswitch_service_minion_fatal_ls10(device, instance); - status[4] = _nvswitch_service_nvlipt_common_fatal_ls10(device, instance); - status[5] = _nvswitch_service_nvlipt_link_fatal_ls10(device, instance); + status[1] = _nvswitch_service_minion_fatal_ls10(device, instance); + status[2] = _nvswitch_service_nvlipt_common_fatal_ls10(device, instance); + // + // If there is a pending link interrupt on this nvlw instance then service + // those interrupts in the handlers below. 
Otherwise, mark the status's + // as success as there is nothing to service + // + if (intrLinkMask != 0) + { + status[3] = _nvswitch_service_nvldl_fatal_ls10(device, instance, intrLinkMask); + status[4] = _nvswitch_service_nvltlc_fatal_ls10(device, instance, intrLinkMask); + status[5] = _nvswitch_service_nvlipt_link_fatal_ls10(device, instance, intrLinkMask); + } + else + { + status[3] = NVL_SUCCESS; + status[4] = NVL_SUCCESS; + status[5] = NVL_SUCCESS; + } if (status[0] != NVL_SUCCESS && status[0] != -NVL_NOT_FOUND && status[1] != NVL_SUCCESS && status[1] != -NVL_NOT_FOUND && @@ -7068,7 +7176,8 @@ nvswitch_service_nvldl_fatal_link_ls10 { ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device); NvU32 pending, bit, unhandled; - NvBool bSkipIntrClear = NV_FALSE; + NvU32 dlDeferredIntrLinkMask = 0; + NvBool bRequireResetAndDrain = NV_FALSE; NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; @@ -7119,13 +7228,6 @@ nvswitch_service_nvldl_fatal_link_ls10 nvswitch_clear_flags(&unhandled, bit); } - bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1); - if (nvswitch_test_flags(pending, bit)) - { - NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_DOWN, "LTSSM Fault Down", NV_FALSE); - nvswitch_clear_flags(&unhandled, bit); - } - bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_PROTOCOL, 1); if (nvswitch_test_flags(pending, bit)) { @@ -7155,22 +7257,19 @@ nvswitch_service_nvldl_fatal_link_ls10 } // - // Note: LTSSM_FAULT_UP must be the last interrupt serviced in the NVLDL + // Note: LTSSM_FAULT_{UP/DOWN} must be the last interrupt serviced in the NVLDL // Fatal tree. The last step of handling this interrupt is going into the // reset_and_drain flow for the given link which will shutdown and reset // the link. The reset portion will also wipe away any link state including // pending DL interrupts. 
In order to log all error before wiping that state, // service all other interrupts before this one // - bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1); + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1); if (nvswitch_test_flags(pending, bit)) { - - chip_device->deferredLinkErrors[link].fatalIntrMask.dl |= bit; - _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link); - + dlDeferredIntrLinkMask |= bit; nvswitch_clear_flags(&unhandled, bit); - device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link)); // // Since reset and drain will reset the link, including clearing @@ -7178,7 +7277,46 @@ nvswitch_service_nvldl_fatal_link_ls10 // where link clocks will not be on after reset and drain so there // maybe PRI errors on writing to the register // - bSkipIntrClear = NV_TRUE; + bRequireResetAndDrain = NV_TRUE; + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1); + if (nvswitch_test_flags(pending, bit)) + { + dlDeferredIntrLinkMask |= bit; + nvswitch_clear_flags(&unhandled, bit); + + // + // Since reset and drain will reset the link, including clearing + // pending interrupts, skip the clear write below. There are cases + // where link clocks will not be on after reset and drain so there + // maybe PRI errors on writing to the register + // + bRequireResetAndDrain = NV_TRUE; + } + + if (bRequireResetAndDrain) + { + // + // If there is a link state callback enabled for this link then + // we hit a consecutive FAULT_UP error. set bResetAndDrainRetry + // so the current callback on completion can create a new + // callback to retry the link state check to account for the added + // delay caused by taking a 2nd fault and having to re-train + // + // If there is no callback enabled then set the error mask + // and create the link errors deferred task. 
+ // + if (chip_device->deferredLinkErrors[link].bLinkStateCallBackEnabled) + { + chip_device->deferredLinkErrors[link].bResetAndDrainRetry = NV_TRUE; + } + else + { + chip_device->deferredLinkErrors[link].fatalIntrMask.dl = dlDeferredIntrLinkMask; + _nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link); + } + device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link)); } NVSWITCH_UNHANDLED_CHECK(device, unhandled); @@ -7190,7 +7328,7 @@ nvswitch_service_nvldl_fatal_link_ls10 report.raw_enable ^ pending); } - if (!bSkipIntrClear) + if (!bRequireResetAndDrain) { NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR, pending); } @@ -7244,7 +7382,7 @@ nvswitch_service_minion_link_ls10 } unhandled = pending; - + FOR_EACH_INDEX_IN_MASK(32, localLinkIdx, pending) { link = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + localLinkIdx; @@ -7308,7 +7446,7 @@ nvswitch_service_minion_link_ls10 case NV_MINION_NVLINK_LINK_INTR_CODE_NEGOTIATION_CONFIG_ERR: NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link Negotiation Config Err Interrupt", NV_FALSE); break; - case NV_MINION_NVLINK_LINK_INTR_CODE_BADINIT: + case NV_MINION_NVLINK_LINK_INTR_CODE_BADINIT: NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link BADINIT interrupt", NV_FALSE); break; case NV_MINION_NVLINK_LINK_INTR_CODE_PMFAIL: diff --git a/src/common/nvswitch/kernel/ls10/link_ls10.c b/src/common/nvswitch/kernel/ls10/link_ls10.c index 237d4113a..a03fe3d01 100644 --- a/src/common/nvswitch/kernel/ls10/link_ls10.c +++ b/src/common/nvswitch/kernel/ls10/link_ls10.c @@ -1230,12 +1230,29 @@ nvswitch_init_dlpl_interrupts_ls10 NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR, 0xffffffff); NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_SW2, 0xffffffff); + // Set the interrupt bits + nvswitch_set_dlpl_interrupts_ls10(link); + + // Setup error rate thresholds + nvswitch_set_error_rate_threshold_ls10(link, NV_TRUE); + 
nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_TRUE); +} + +void +nvswitch_set_dlpl_interrupts_ls10 +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 linkNumber = link->linkNumber; // Stall tree routes to INTR_A which is connected to NVLIPT fatal tree NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_STALL_EN, DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_REPLAY, _DISABLE) | DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_RECOVERY_SHORT, _DISABLE) | DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_FAULT_UP, _ENABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_FAULT_DOWN, _ENABLE) | DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_RAM, _ENABLE) | DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_INTERFACE, _ENABLE) | DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_SUBLINK_CHANGE, _DISABLE) | @@ -1262,9 +1279,6 @@ nvswitch_init_dlpl_interrupts_ls10 DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_CRC_COUNTER, _ENABLE) | DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _LTSSM_PROTOCOL, _DISABLE) | DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _MINION_REQUEST, _DISABLE)); - - nvswitch_set_error_rate_threshold_ls10(link, NV_TRUE); - nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_TRUE); } static NvU32 diff --git a/src/common/nvswitch/kernel/ls10/ls10.c b/src/common/nvswitch/kernel/ls10/ls10.c index 8a33730eb..e317f97b6 100644 --- a/src/common/nvswitch/kernel/ls10/ls10.c +++ b/src/common/nvswitch/kernel/ls10/ls10.c @@ -1103,11 +1103,6 @@ nvswitch_link_disable_interrupts_ls10 instance = link / NVSWITCH_LINKS_PER_NVLIPT_LS10; localLinkIdx = link % NVSWITCH_LINKS_PER_NVLIPT_LS10; - NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT, - DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) | - DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) | - DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0)); - NVSWITCH_ENG_WR32(device, NVLW, , instance, _NVLW, 
_LINK_INTR_0_MASK(localLinkIdx), DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x0) | DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _NONFATAL, 0x0) | @@ -1138,31 +1133,26 @@ _nvswitch_link_reset_interrupts_ls10 NvU32 eng_instance = link / NVSWITCH_LINKS_PER_NVLIPT_LS10; NvU32 localLinkNum = link % NVSWITCH_LINKS_PER_NVLIPT_LS10; - NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT, - DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) | - DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) | - DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1)); + NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_0_MASK(localLinkNum), + DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _NONFATAL, 0x0) | + DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0) | + DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x1) | + DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x0)); - NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_0_MASK(localLinkNum), - DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x1) | - DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _NONFATAL, 0x0) | - DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0) | - DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x1) | - DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x0)); + NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_1_MASK(localLinkNum), + DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _FATAL, 0x0) | + DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1) | + DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x0) | + DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x1)); - NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_1_MASK(localLinkNum), - DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _FATAL, 0x0) | - DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _NONFATAL, 0x1) | - DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1) | - DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x0) | - DRF_NUM(_NVLW_LINK, _INTR_0_MASK, 
_INTR1, 0x1)); - - NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_2_MASK(localLinkNum), - DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _FATAL, 0x0) | - DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _NONFATAL, 0x0) | - DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0) | - DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR0, 0x0) | - DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR1, 0x0)); + NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_2_MASK(localLinkNum), + DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _FATAL, 0x0) | + DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _NONFATAL, 0x0) | + DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0) | + DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR0, 0x0) | + DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR1, 0x0)); // NVLIPT_LNK regval = NVSWITCH_LINK_RD32_LS10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _INTR_CONTROL_LINK); @@ -1357,6 +1347,10 @@ nvswitch_reset_and_drain_links_ls10 NvU32 link_state; NvU32 stat_data; NvU32 link_intr_subcode; + NvBool bKeepPolling; + NvBool bIsLinkInEmergencyShutdown; + NvBool bAreDlClocksOn; + NVSWITCH_TIMEOUT timeout; if (link_mask == 0) { @@ -1425,10 +1419,9 @@ nvswitch_reset_and_drain_links_ls10 if (status != NVL_SUCCESS) { nvswitch_destroy_link(link_info); - return status; } - return -NVL_ERR_INVALID_STATE; + continue; } // @@ -1438,10 +1431,42 @@ nvswitch_reset_and_drain_links_ls10 // // Step 3.0 : - // Prior to starting port reset, perform unilateral shutdown on the - // LS10 side of the link, in case the links are not shutdown. + // Prior to starting port reset, ensure the links is in emergency shutdown // - nvswitch_execute_unilateral_link_shutdown_ls10(link_info); + bIsLinkInEmergencyShutdown = NV_FALSE; + nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + do + { + bKeepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; + + status = nvswitch_minion_get_dl_status(device, link_info->linkNumber, + NV_NVLSTAT_UC01, 0, &stat_data); + + if (status != NVL_SUCCESS) + { + continue; + } + + link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data); + + bIsLinkInEmergencyShutdown = (link_state == LINKSTATUS_EMERGENCY_SHUTDOWN) ? + NV_TRUE:NV_FALSE; + + if (bIsLinkInEmergencyShutdown == NV_TRUE) + { + break; + } + } + while(bKeepPolling); + + if (bIsLinkInEmergencyShutdown == NV_FALSE) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link %d failed to enter emergency shutdown\n", + __FUNCTION__, link); + continue; + } + nvswitch_corelib_clear_link_state_ls10(link_info); // @@ -1483,6 +1508,10 @@ nvswitch_reset_and_drain_links_ls10 { link_intr_subcode = DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_SUBCODE, stat_data); } + else + { + continue; + } if ((link_state == NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_FAIL) && (link_intr_subcode == MINION_ALARM_BUSY)) @@ -1515,9 +1544,8 @@ nvswitch_reset_and_drain_links_ls10 if (status != NVL_SUCCESS) { nvswitch_destroy_link(link_info); - return status; } - return status; + continue; } // @@ -1538,12 +1566,15 @@ nvswitch_reset_and_drain_links_ls10 status = nvlink_lib_register_link(device->nvlink_device, link_info); if (status != NVL_SUCCESS) { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to register link: 0x%x with the corelib\n", + __FUNCTION__, link); nvswitch_destroy_link(link_info); - return status; + continue; } // - // Launch ALI training to re-initialize and train the links + // Step 9.0: Launch ALI training to re-initialize and train the links // nvswitch_launch_ALI_link_training(device, link_info); // // Request active, but don't block. 
FM will come back and check @@ -1558,7 +1589,44 @@ nvswitch_reset_and_drain_links_ls10 NVSWITCH_PRINT(device, ERROR, "%s: TL link state request to active for ALI failed for link: 0x%x\n", __FUNCTION__, link); + continue; } + + bAreDlClocksOn = NV_FALSE; + nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + do + { + bKeepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + status = nvswitch_minion_get_dl_status(device, link_info->linkNumber, + NV_NVLSTAT_UC01, 0, &stat_data); + + if (status != NVL_SUCCESS) + { + continue; + } + + link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data); + + bAreDlClocksOn = (link_state != LINKSTATUS_INITPHASE1) ? + NV_TRUE:NV_FALSE; + + if (bAreDlClocksOn == NV_TRUE) + { + break; + } + } + while(bKeepPolling); + + if (!bAreDlClocksOn) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link: 0x%x doesn't have the TX/RX clocks on, skipping setting DL interrupts!\n", + __FUNCTION__, link); + continue; + } + + nvswitch_set_dlpl_interrupts_ls10(link_info); } FOR_EACH_INDEX_IN_MASK_END; diff --git a/src/common/nvswitch/kernel/ls10/pmgr_ls10.c b/src/common/nvswitch/kernel/ls10/pmgr_ls10.c index dbe8108c7..2fa83b955 100644 --- a/src/common/nvswitch/kernel/ls10/pmgr_ls10.c +++ b/src/common/nvswitch/kernel/ls10/pmgr_ls10.c @@ -301,7 +301,12 @@ nvswitch_ctrl_i2c_indexed_ls10 } return nvswitch_ctrl_i2c_indexed_lr10(device, pParams); } - + + if (pParams->port == NVSWITCH_I2C_PORT_I2CA) + { + pParams->flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _100KHZ, pParams->flags); + } + if (pI2c->soeI2CSupported) { return soeI2CAccess_HAL(device, pParams); diff --git a/src/common/nvswitch/kernel/ls10/soe_ls10.c b/src/common/nvswitch/kernel/ls10/soe_ls10.c index 8a4b50047..0488b9154 100644 --- a/src/common/nvswitch/kernel/ls10/soe_ls10.c +++ b/src/common/nvswitch/kernel/ls10/soe_ls10.c @@ -480,14 +480,6 @@ nvswitch_init_soe_ls10 return status; } - // - // Set TRACEPC to stack mode for better ucode trace - // In 
Vulcan CR firmware, this is set to reduced mode in the SOE's manifest - // - data = flcnRiscvRegRead_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL); - data = FLD_SET_DRF(_PRISCV, _RISCV_TRACECTL, _MODE, _STACK, data); - flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL, data); - // Sanity the command and message queues as a final check if (_nvswitch_soe_send_test_cmd(device) != NV_OK) { diff --git a/src/common/nvswitch/kernel/ls10/therm_ls10.c b/src/common/nvswitch/kernel/ls10/therm_ls10.c index e4081ca43..9a2c1e7de 100644 --- a/src/common/nvswitch/kernel/ls10/therm_ls10.c +++ b/src/common/nvswitch/kernel/ls10/therm_ls10.c @@ -460,7 +460,7 @@ nvswitch_therm_soe_callback_ls10 } // -// nvswitch_therm_read_voltage +// nvswitch_ctrl_therm_read_voltage // // Temperature and voltage are only available on SKUs which have thermal and // voltage sensors. @@ -543,3 +543,86 @@ nvswitch_ctrl_therm_read_voltage_ls10 return NVL_SUCCESS; } +// +// nvswitch_ctrl_therm_read_power +// +// Power is only available on SKUs which have thermal and +// voltage sensors. 
+// +NvlStatus +nvswitch_ctrl_therm_read_power_ls10 +( + nvswitch_device *device, + NVSWITCH_GET_POWER_PARAMS *pParams +) +{ + FLCN *pFlcn; + NvU32 cmdSeqDesc; + NV_STATUS status; + NvU8 flcnStatus; + RM_FLCN_CMD_SOE cmd; + RM_FLCN_MSG_SOE msg; + RM_SOE_CORE_CMD_GET_POWER *pGetPowerCmd; + NVSWITCH_TIMEOUT timeout; + + if (!nvswitch_is_soe_supported(device)) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + if (pParams == NULL) + { + return -NVL_BAD_ARGS; + } + + pFlcn = device->pSoe->pFlcn; + + nvswitch_os_memset(pParams, 0, sizeof(NVSWITCH_GET_POWER_PARAMS)); + nvswitch_os_memset(&cmd, 0, sizeof(RM_FLCN_CMD_SOE)); + nvswitch_os_memset(&msg, 0, sizeof(RM_FLCN_MSG_SOE)); + + cmd.hdr.unitId = RM_SOE_UNIT_CORE; + cmd.hdr.size = RM_SOE_CMD_SIZE(CORE, GET_POWER); + + msg.hdr.unitId = RM_SOE_UNIT_CORE; + msg.hdr.size = RM_SOE_MSG_SIZE(CORE, GET_POWER); + + pGetPowerCmd = &cmd.cmd.core.getPower; + pGetPowerCmd->cmdType = RM_SOE_CORE_CMD_GET_POWER_VALUES; + + cmdSeqDesc = 0; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + (PRM_FLCN_MSG)&msg, // pMsg + NULL, // pPayload + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to read power 0x%x\n", + __FUNCTION__, status); + return -NVL_ERR_INVALID_STATE; + } + + flcnStatus = msg.msg.core.getPower.flcnStatus; + if (flcnStatus != FLCN_OK) + { + if (flcnStatus == FLCN_ERR_MORE_PROCESSING_REQUIRED) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + else + { + return -NVL_ERR_GENERIC; + } + } + + pParams->vdd_w = msg.msg.core.getPower.vdd_w; + pParams->dvdd_w = msg.msg.core.getPower.dvdd_w; + pParams->hvdd_w = msg.msg.core.getPower.hvdd_w; + + return NVL_SUCCESS; +} \ No newline at end of file diff --git a/src/common/nvswitch/kernel/nvswitch.c b/src/common/nvswitch/kernel/nvswitch.c index 0d9d3f310..b1c7fae4f 100644 --- a/src/common/nvswitch/kernel/nvswitch.c +++ 
b/src/common/nvswitch/kernel/nvswitch.c @@ -3253,13 +3253,26 @@ _nvswitch_ctrl_get_board_part_number NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p ) { - if (!nvswitch_is_inforom_supported(device)) + if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)) { - NVSWITCH_PRINT(device, ERROR, "InfoROM is not supported\n"); - return -NVL_ERR_NOT_SUPPORTED; - } + NVSWITCH_PRINT(device, INFO, + "%s: Skipping retrieval of board part number on FSF\n", + __FUNCTION__); - return device->hal.nvswitch_ctrl_get_board_part_number(device, p); + nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR)); + + return NVL_SUCCESS; + } + else + { + if (!nvswitch_is_inforom_supported(device)) + { + NVSWITCH_PRINT(device, ERROR, "InfoROM is not supported\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + return device->hal.nvswitch_ctrl_get_board_part_number(device, p); + } } static NvlStatus @@ -4732,6 +4745,16 @@ _nvswitch_ctrl_therm_read_voltage return device->hal.nvswitch_ctrl_therm_read_voltage(device, info); } +static NvlStatus +_nvswitch_ctrl_therm_read_power +( + nvswitch_device *device, + NVSWITCH_GET_POWER_PARAMS *info +) +{ + return device->hal.nvswitch_ctrl_therm_read_power(device, info); +} + NvlStatus nvswitch_lib_ctrl ( @@ -5071,6 +5094,9 @@ nvswitch_lib_ctrl NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_VOLTAGE, _nvswitch_ctrl_therm_read_voltage, NVSWITCH_CTRL_GET_VOLTAGE_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_POWER, + _nvswitch_ctrl_therm_read_power, + NVSWITCH_GET_POWER_PARAMS); default: nvswitch_os_print(NVSWITCH_DBG_LEVEL_INFO, "unknown ioctl %x\n", cmd); diff --git a/src/common/nvswitch/kernel/smbpbi_nvswitch.c b/src/common/nvswitch/kernel/smbpbi_nvswitch.c index 3816cecbe..8cefb389d 100644 --- a/src/common/nvswitch/kernel/smbpbi_nvswitch.c +++ b/src/common/nvswitch/kernel/smbpbi_nvswitch.c @@ -90,8 +90,10 @@ nvswitch_smbpbi_post_init if (status == NVL_SUCCESS) { +#if defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS) 
nvswitch_lib_smbpbi_log_sxid(device, NVSWITCH_ERR_NO_ERROR, "NVSWITCH SMBPBI server is online."); +#endif // defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS) NVSWITCH_PRINT(device, INFO, "%s: SMBPBI POST INIT completed\n", __FUNCTION__); } diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h index 0b975697a..72b542a53 100644 --- a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h @@ -3701,6 +3701,9 @@ typedef struct NV2080_CTRL_GPU_GET_GFID_PARAMS { * bEnable [IN] * - Set to NV_TRUE if the GPU partition has been activated. * - Set to NV_FALSE if the GPU partition will be deactivated. + * fabricPartitionId [IN] + * - Set the fabric manager partition ID dring partition activation. + * - Ignored during partition deactivation. * * Possible status values returned are: * NV_OK @@ -3716,6 +3719,7 @@ typedef struct NV2080_CTRL_GPU_GET_GFID_PARAMS { typedef struct NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS { NvU32 gfid; NvBool bEnable; + NvU32 fabricPartitionId; } NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS; /*! @@ -4112,4 +4116,24 @@ typedef NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS NV2080_CTRL_GPU_MIGRATABLE_OPS #define NV2080_CTRL_GPU_MIGRATABLE_OPS_VGPU_PARAMS_MESSAGE_ID (0xA8U) typedef NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS NV2080_CTRL_GPU_MIGRATABLE_OPS_VGPU_PARAMS; + +/* + * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2 + * + * This command returns NVENC software sessions information for the associate GPU. + * This command is similar to NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO but doesn't have + * embedded pointers. + * + * Check NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO for detailed information. 
+ */ + +#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS_MESSAGE_ID (0xA9U) + +typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS { + NvU32 sessionInfoTblEntry; + NV2080_CTRL_NVENC_SW_SESSION_INFO sessionInfoTbl[NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES]; +} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS; + +#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2 (0x208001a9U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS_MESSAGE_ID" */ + /* _ctrl2080gpu_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h index 54e2dbdc4..81a2d85ec 100644 --- a/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h @@ -76,4 +76,67 @@ typedef struct NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS { NvBool bMemoryProfilingPermitted; } NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS; +/*! + * NVB0CC_CTRL_CMD_INTERNAL_FREE_PMA_STREAM + * + * Internal logic for PMA Stream Free + */ +#define NVB0CC_CTRL_CMD_INTERNAL_FREE_PMA_STREAM (0xb0cc0206) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS { + /*! + * [in] The PMA channel index associated with a given PMA stream. + */ + NvU32 pmaChannelIdx; +} NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS; + +/*! 
+ * NVB0CC_CTRL_CMD_INTERNAL_GET_MAX_PMAS + * + * Get the maximum number of PMA channels + */ +#define NVB0CC_CTRL_CMD_INTERNAL_GET_MAX_PMAS (0xb0cc0207) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS { + /*! + * [out] Max number of PMA channels + */ + NvU32 maxPmaChannels; +} NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS; + +/*! + * NVB0CC_CTRL_CMD_INTERNAL_BIND_PM_RESOURCES + * + * Internally bind PM resources. + */ +#define NVB0CC_CTRL_CMD_INTERNAL_BIND_PM_RESOURCES (0xb0cc0208) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | 0x8" */ + + +/*! + * NVB0CC_CTRL_CMD_INTERNAL_UNBIND_PM_RESOURCES + * + * Internally unbind PM resources. + */ +#define NVB0CC_CTRL_CMD_INTERNAL_UNBIND_PM_RESOURCES (0xb0cc0209) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | 0x9" */ + +/*! + * NVB0CC_CTRL_CMD_INTERNAL_RESERVE_HWPM_LEGACY + * + * Reserve legacy HWPM resources + */ +#define NVB0CC_CTRL_CMD_INTERNAL_RESERVE_HWPM_LEGACY (0xb0cc020a) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS_MESSAGE_ID (0xaU) + +typedef struct NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS { + /*! + * [in] Enable ctxsw for HWPM. + */ + NvBool ctxsw; +} NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS; + /* _ctrlb0ccinternal_h_ */ diff --git a/src/nvidia/arch/nvalloc/common/inc/nvdevid.h b/src/nvidia/arch/nvalloc/common/inc/nvdevid.h index a1cf28646..75da409c6 100644 --- a/src/nvidia/arch/nvalloc/common/inc/nvdevid.h +++ b/src/nvidia/arch/nvalloc/common/inc/nvdevid.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 200-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 200-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -468,6 +468,9 @@ #define ESC_4000_G4_DEVID 0xA1C1 #define ESC_4000_G4_SSDEVID 0x871E +// Lenovo Tomcat Workstation +#define LENOVO_TOMCAT_DEVID 0x1B81 +#define LENOVO_TOMCAT_SSDEVID 0x104e // NVIDIA C51 #define NVIDIA_C51_DEVICE_ID_MIN 0x2F0 diff --git a/src/nvidia/arch/nvalloc/unix/src/os.c b/src/nvidia/arch/nvalloc/unix/src/os.c index 2bba0c0ae..2e568f571 100644 --- a/src/nvidia/arch/nvalloc/unix/src/os.c +++ b/src/nvidia/arch/nvalloc/unix/src/os.c @@ -923,16 +923,23 @@ NV_STATUS osAllocPagesInternal( if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE))) nv->force_dma32_alloc = NV_TRUE; - status = nv_alloc_pages( - NV_GET_NV_STATE(pGpu), - NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), - memdescGetContiguity(pMemDesc, AT_CPU), - memdescGetCpuCacheAttrib(pMemDesc), - pSys->getProperty(pSys, - PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), - unencrypted, - memdescGetPteArray(pMemDesc, AT_CPU), - &pMemData); + if (NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount) > NV_U32_MAX) + { + status = NV_ERR_INVALID_LIMIT; + } + else + { + status = nv_alloc_pages( + NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + pSys->getProperty(pSys, + PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + unencrypted, + memdescGetPteArray(pMemDesc, AT_CPU), + &pMemData); + } if (nv && nv->force_dma32_alloc) nv->force_dma32_alloc = NV_FALSE; @@ -942,7 +949,7 @@ NV_STATUS osAllocPagesInternal( { return status; } - + // // If the OS layer doesn't think in RM page size, we need to inflate the // PTE array into RM pages. 
diff --git a/src/nvidia/arch/nvalloc/unix/src/osapi.c b/src/nvidia/arch/nvalloc/unix/src/osapi.c index 83fec382c..e5505dad4 100644 --- a/src/nvidia/arch/nvalloc/unix/src/osapi.c +++ b/src/nvidia/arch/nvalloc/unix/src/osapi.c @@ -167,12 +167,25 @@ const NvU8 * RmGetGpuUuidRaw( ) { NV_STATUS rmStatus; - OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + OBJGPU *pGpu = NULL; NvU32 gidFlags; NvBool isApiLockTaken = NV_FALSE; if (pNv->nv_uuid_cache.valid) - goto done; + return pNv->nv_uuid_cache.uuid; + + if (!rmapiLockIsOwner()) + { + rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU); + if (rmStatus != NV_OK) + { + return NULL; + } + + isApiLockTaken = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); // // PBI is not present in simulation and the loop inside @@ -193,7 +206,7 @@ const NvU8 * RmGetGpuUuidRaw( rmStatus = gpumgrSetUuid(pNv->gpu_id, pNv->nv_uuid_cache.uuid); if (rmStatus != NV_OK) { - return NULL; + goto err; } pNv->nv_uuid_cache.valid = NV_TRUE; @@ -209,45 +222,35 @@ const NvU8 * RmGetGpuUuidRaw( gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1) | DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY); - if (!rmapiLockIsOwner()) - { - rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU); - if (rmStatus != NV_OK) - { - return NULL; - } - - isApiLockTaken = NV_TRUE; - } - - if (pGpu == NULL) - { - if (isApiLockTaken == NV_TRUE) - { - rmapiLockRelease(); - } - - return NULL; - } + if (!pGpu) + goto err; rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags); - if (isApiLockTaken == NV_TRUE) - { - rmapiLockRelease(); - } - if (rmStatus != NV_OK) - return NULL; + goto err; if (!pGpu->gpuUuid.isInitialized) - return NULL; + goto err; // copy the uuid from the OBJGPU uuid cache os_mem_copy(pNv->nv_uuid_cache.uuid, pGpu->gpuUuid.uuid, GPU_UUID_LEN); pNv->nv_uuid_cache.valid = NV_TRUE; done: + if (isApiLockTaken) + { + rmapiLockRelease(); + } + return pNv->nv_uuid_cache.uuid; + +err: + if (isApiLockTaken) + { + 
rmapiLockRelease(); + } + + return NULL; } static NV_STATUS RmGpuUuidRawToString( diff --git a/src/nvidia/generated/g_gpu_nvoc.h b/src/nvidia/generated/g_gpu_nvoc.h index 1b4e83d8b..8931cf8fa 100644 --- a/src/nvidia/generated/g_gpu_nvoc.h +++ b/src/nvidia/generated/g_gpu_nvoc.h @@ -733,6 +733,7 @@ typedef struct PSRIOV_P2P_INFO pP2PInfo; NvBool bP2PAllocated; NvU32 maxP2pGfid; + NvU32 p2pFabricPartitionId; } _GPU_SRIOV_STATE; // Max # of instances for GPU children @@ -3795,6 +3796,7 @@ NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *p #define IS_GFID_VF(gfid) (((NvU32)(gfid)) != GPU_GFID_PF) // Invalid P2P GFID #define INVALID_P2P_GFID (0xFFFFFFFF) +#define INVALID_FABRIC_PARTITION_ID (0xFFFFFFFF) // // Generates GPU child accessor macros (i.e.: GPU_GET_{ENG}) diff --git a/src/nvidia/generated/g_hal_stubs.h b/src/nvidia/generated/g_hal_stubs.h index e02bacb06..117496700 100644 --- a/src/nvidia/generated/g_hal_stubs.h +++ b/src/nvidia/generated/g_hal_stubs.h @@ -550,8 +550,9 @@ NV_STATUS gpioWritePinHwEnum_MISSING( return NV_ERR_NOT_SUPPORTED; } -// GPIO:hal:CHECK_PROTECTION - GPIO disabled -NV_STATUS gpioCheckProtection_MISSING( +// GPIO:hal:OUTPUT_CNTL_CHECK_PROTECTION - GPIO disabled +NV_STATUS gpioOutputCntlCheckProtection_MISSING( + POBJGPU pGpu, POBJGPIO pGpio, NvU32 gpioPin, NvBool *pbIsProtected @@ -560,6 +561,17 @@ NV_STATUS gpioCheckProtection_MISSING( return NV_ERR_NOT_SUPPORTED; } +// GPIO:hal:INPUT_CNTL_CHECK_PROTECTION - GPIO disabled +NV_STATUS gpioInputCntlCheckProtection_MISSING( + POBJGPU pGpu, + POBJGPIO pGpio, + NvU32 inputHwEnum, + NvBool *pbIsProtected +) +{ + return NV_ERR_NOT_SUPPORTED; +} + // GPIO:hal:READ_INPUT - GPIO disabled NV_STATUS gpioReadInput_FWCLIENT( POBJGPIO pGpio, diff --git a/src/nvidia/generated/g_kern_bus_nvoc.c b/src/nvidia/generated/g_kern_bus_nvoc.c index c4ea40087..474c3bcde 100644 --- a/src/nvidia/generated/g_kern_bus_nvoc.c +++ b/src/nvidia/generated/g_kern_bus_nvoc.c @@ -98,6 +98,10 @@ 
static NV_STATUS __nvoc_thunk_KernelBus_engstateStateUnload(OBJGPU *pGpu, struct return kbusStateUnload(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), flags); } +static NV_STATUS __nvoc_thunk_KernelBus_engstateStatePostUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) { + return kbusStatePostUnload(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); +} + static void __nvoc_thunk_KernelBus_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus) { kbusStateDestroy(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); } @@ -106,10 +110,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState(POBJGPU pGpu return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState); } -static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePostUnload(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { - return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); -} - static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked(POBJGPU pGpu, struct KernelBus *pEngstate) { return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); } @@ -311,6 +311,12 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner * pThis->__kbusStateUnload__ = &kbusStateUnload_GM107; } + // Hal function -- kbusStatePostUnload + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__kbusStatePostUnload__ = &kbusStatePostUnload_56cd7a; + } + // Hal function -- kbusStateDestroy if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << 
(chipHal_HalVarIdx & 0x1f)) & 0x11f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */ { @@ -844,12 +850,12 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner * pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelBus_engstateStateUnload; + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostUnload__ = &__nvoc_thunk_KernelBus_engstateStatePostUnload; + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelBus_engstateStateDestroy; pThis->__kbusReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState; - pThis->__kbusStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePostUnload; - pThis->__kbusStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked; pThis->__kbusInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kbusInitMissing; diff --git a/src/nvidia/generated/g_kern_bus_nvoc.h b/src/nvidia/generated/g_kern_bus_nvoc.h index be49a8cab..56525d232 100644 --- a/src/nvidia/generated/g_kern_bus_nvoc.h +++ b/src/nvidia/generated/g_kern_bus_nvoc.h @@ -216,6 +216,7 @@ struct __nvoc_inner_struc_KernelBus_2__ { NvU32 pageTblSize; NvU32 pageDirInit; NvU32 pageTblInit; + NvU32 cpuVisiblePgTblSize; }; struct __nvoc_inner_struc_KernelBus_3__ { @@ -302,6 +303,7 @@ struct KernelBus { NV_STATUS (*__kbusStatePostLoad__)(OBJGPU *, struct KernelBus *, NvU32); NV_STATUS (*__kbusStatePreUnload__)(OBJGPU *, struct KernelBus *, NvU32); NV_STATUS (*__kbusStateUnload__)(OBJGPU *, struct KernelBus *, NvU32); + NV_STATUS (*__kbusStatePostUnload__)(OBJGPU *, struct KernelBus *, NvU32); void (*__kbusStateDestroy__)(OBJGPU *, struct KernelBus *); NV_STATUS (*__kbusTeardownBar2CpuAperture__)(OBJGPU *, struct KernelBus *, NvU32); void (*__kbusGetP2PMailboxAttributes__)(OBJGPU *, struct KernelBus *, NvU32 *, NvU32 *, NvU32 *); @@ -349,7 +351,6 @@ struct KernelBus { void 
(*__kbusUnmapCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, PMEMORY_DESCRIPTOR); void (*__kbusTeardownCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, NvBool); NV_STATUS (*__kbusReconcileTunableState__)(POBJGPU, struct KernelBus *, void *); - NV_STATUS (*__kbusStatePostUnload__)(POBJGPU, struct KernelBus *, NvU32); NV_STATUS (*__kbusStateInitUnlocked__)(POBJGPU, struct KernelBus *); void (*__kbusInitMissing__)(POBJGPU, struct KernelBus *); NV_STATUS (*__kbusStatePreInitUnlocked__)(POBJGPU, struct KernelBus *); @@ -462,6 +463,8 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32); #define kbusStatePreUnload_HAL(pGpu, pKernelBus, arg0) kbusStatePreUnload_DISPATCH(pGpu, pKernelBus, arg0) #define kbusStateUnload(pGpu, pKernelBus, flags) kbusStateUnload_DISPATCH(pGpu, pKernelBus, flags) #define kbusStateUnload_HAL(pGpu, pKernelBus, flags) kbusStateUnload_DISPATCH(pGpu, pKernelBus, flags) +#define kbusStatePostUnload(pGpu, pKernelBus, arg0) kbusStatePostUnload_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusStatePostUnload_HAL(pGpu, pKernelBus, arg0) kbusStatePostUnload_DISPATCH(pGpu, pKernelBus, arg0) #define kbusStateDestroy(pGpu, pKernelBus) kbusStateDestroy_DISPATCH(pGpu, pKernelBus) #define kbusStateDestroy_HAL(pGpu, pKernelBus) kbusStateDestroy_DISPATCH(pGpu, pKernelBus) #define kbusTeardownBar2CpuAperture(pGpu, pKernelBus, gfid) kbusTeardownBar2CpuAperture_DISPATCH(pGpu, pKernelBus, gfid) @@ -557,7 +560,6 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32); #define kbusTeardownCoherentCpuMapping(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0) #define kbusTeardownCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0) #define kbusReconcileTunableState(pGpu, pEngstate, pTunableState) kbusReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) -#define kbusStatePostUnload(pGpu, pEngstate, arg0) 
kbusStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) #define kbusStateInitUnlocked(pGpu, pEngstate) kbusStateInitUnlocked_DISPATCH(pGpu, pEngstate) #define kbusInitMissing(pGpu, pEngstate) kbusInitMissing_DISPATCH(pGpu, pEngstate) #define kbusStatePreInitUnlocked(pGpu, pEngstate) kbusStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) @@ -1090,6 +1092,24 @@ static inline void kbusDestroyPeerAccess(OBJGPU *pGpu, struct KernelBus *pKernel #define kbusDestroyPeerAccess_HAL(pGpu, pKernelBus, peerNum) kbusDestroyPeerAccess(pGpu, pKernelBus, peerNum) +NvU32 kbusGetNvlinkPeerId_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu); + +static inline NvU32 kbusGetNvlinkPeerId_c732fb(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu) { + return 4294967295U; +} + + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetNvlinkPeerId(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetNvlinkPeerId(pGpu, pKernelBus, pPeerGpu) kbusGetNvlinkPeerId_c732fb(pGpu, pKernelBus, pPeerGpu) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetNvlinkPeerId_HAL(pGpu, pKernelBus, pPeerGpu) kbusGetNvlinkPeerId(pGpu, pKernelBus, pPeerGpu) + NvU32 kbusGetPeerIdFromTable_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 locPeerIdx, NvU32 remPeerIdx); @@ -1637,6 +1657,14 @@ static inline NV_STATUS kbusStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelBus return pKernelBus->__kbusStateUnload__(pGpu, pKernelBus, flags); } +static inline NV_STATUS kbusStatePostUnload_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return NV_OK; +} + +static inline NV_STATUS kbusStatePostUnload_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return pKernelBus->__kbusStatePostUnload__(pGpu, pKernelBus, arg0); +} + void kbusStateDestroy_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); static inline void 
kbusStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) { @@ -2093,10 +2121,6 @@ static inline NV_STATUS kbusReconcileTunableState_DISPATCH(POBJGPU pGpu, struct return pEngstate->__kbusReconcileTunableState__(pGpu, pEngstate, pTunableState); } -static inline NV_STATUS kbusStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { - return pEngstate->__kbusStatePostUnload__(pGpu, pEngstate, arg0); -} - static inline NV_STATUS kbusStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate) { return pEngstate->__kbusStateInitUnlocked__(pGpu, pEngstate); } diff --git a/src/nvidia/generated/g_kern_fsp_nvoc.c b/src/nvidia/generated/g_kern_fsp_nvoc.c index 111578c8b..47ad15d46 100644 --- a/src/nvidia/generated/g_kern_fsp_nvoc.c +++ b/src/nvidia/generated/g_kern_fsp_nvoc.c @@ -441,6 +441,17 @@ static void __nvoc_init_funcTable_KernelFsp_1(KernelFsp *pThis, RmHalspecOwner * pThis->__kfspCheckGspSecureScratch__ = &kfspCheckGspSecureScratch_491d52; } + // Hal function -- kfspRequiresBug3957833WAR + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */ + { + pThis->__kfspRequiresBug3957833WAR__ = &kfspRequiresBug3957833WAR_GH100; + } + // default + else + { + pThis->__kfspRequiresBug3957833WAR__ = &kfspRequiresBug3957833WAR_491d52; + } + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelFsp_engstateConstructEngine; pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelFsp_engstateStateDestroy; diff --git a/src/nvidia/generated/g_kern_fsp_nvoc.h b/src/nvidia/generated/g_kern_fsp_nvoc.h index 0fa2171ee..79cfb7454 100644 --- a/src/nvidia/generated/g_kern_fsp_nvoc.h +++ b/src/nvidia/generated/g_kern_fsp_nvoc.h @@ -166,6 +166,7 @@ struct KernelFsp { NV_STATUS (*__kfspErrorCode2NvStatusMap__)(struct OBJGPU *, struct KernelFsp *, NvU32); NvU64 (*__kfspGetExtraReservedMemorySize__)(struct OBJGPU *, struct KernelFsp *); 
NvBool (*__kfspCheckGspSecureScratch__)(struct OBJGPU *, struct KernelFsp *); + NvBool (*__kfspRequiresBug3957833WAR__)(struct OBJGPU *, struct KernelFsp *); NV_STATUS (*__kfspReconcileTunableState__)(POBJGPU, struct KernelFsp *, void *); NV_STATUS (*__kfspStateLoad__)(POBJGPU, struct KernelFsp *, NvU32); NV_STATUS (*__kfspStateUnload__)(POBJGPU, struct KernelFsp *, NvU32); @@ -293,6 +294,8 @@ NV_STATUS __nvoc_objCreate_KernelFsp(KernelFsp**, Dynamic*, NvU32); #define kfspGetExtraReservedMemorySize_HAL(pGpu, pKernelFsp) kfspGetExtraReservedMemorySize_DISPATCH(pGpu, pKernelFsp) #define kfspCheckGspSecureScratch(pGpu, pKernelFsp) kfspCheckGspSecureScratch_DISPATCH(pGpu, pKernelFsp) #define kfspCheckGspSecureScratch_HAL(pGpu, pKernelFsp) kfspCheckGspSecureScratch_DISPATCH(pGpu, pKernelFsp) +#define kfspRequiresBug3957833WAR(pGpu, pKernelFsp) kfspRequiresBug3957833WAR_DISPATCH(pGpu, pKernelFsp) +#define kfspRequiresBug3957833WAR_HAL(pGpu, pKernelFsp) kfspRequiresBug3957833WAR_DISPATCH(pGpu, pKernelFsp) #define kfspReconcileTunableState(pGpu, pEngstate, pTunableState) kfspReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) #define kfspStateLoad(pGpu, pEngstate, arg0) kfspStateLoad_DISPATCH(pGpu, pEngstate, arg0) #define kfspStateUnload(pGpu, pEngstate, arg0) kfspStateUnload_DISPATCH(pGpu, pEngstate, arg0) @@ -585,6 +588,16 @@ static inline NvBool kfspCheckGspSecureScratch_DISPATCH(struct OBJGPU *pGpu, str return pKernelFsp->__kfspCheckGspSecureScratch__(pGpu, pKernelFsp); } +NvBool kfspRequiresBug3957833WAR_GH100(struct OBJGPU *pGpu, struct KernelFsp *pKernelFsp); + +static inline NvBool kfspRequiresBug3957833WAR_491d52(struct OBJGPU *pGpu, struct KernelFsp *pKernelFsp) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kfspRequiresBug3957833WAR_DISPATCH(struct OBJGPU *pGpu, struct KernelFsp *pKernelFsp) { + return pKernelFsp->__kfspRequiresBug3957833WAR__(pGpu, pKernelFsp); +} + static inline NV_STATUS kfspReconcileTunableState_DISPATCH(POBJGPU pGpu, 
struct KernelFsp *pEngstate, void *pTunableState) { return pEngstate->__kfspReconcileTunableState__(pGpu, pEngstate, pTunableState); } diff --git a/src/nvidia/generated/g_kern_mem_sys_nvoc.c b/src/nvidia/generated/g_kern_mem_sys_nvoc.c index d279d724c..8b53a6859 100644 --- a/src/nvidia/generated/g_kern_mem_sys_nvoc.c +++ b/src/nvidia/generated/g_kern_mem_sys_nvoc.c @@ -326,11 +326,11 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis // Hal function -- kmemsysProgramSysmemFlushBuffer if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ { - if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | AD102 | AD103 | AD104 | AD106 | AD107 */ + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ { pThis->__kmemsysProgramSysmemFlushBuffer__ = &kmemsysProgramSysmemFlushBuffer_GM107; } - else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */ { pThis->__kmemsysProgramSysmemFlushBuffer__ = &kmemsysProgramSysmemFlushBuffer_GA100; } diff --git a/src/nvidia/generated/g_kernel_graphics_context_nvoc.h b/src/nvidia/generated/g_kernel_graphics_context_nvoc.h index 34006173b..560afce87 100644 --- a/src/nvidia/generated/g_kernel_graphics_context_nvoc.h +++ b/src/nvidia/generated/g_kernel_graphics_context_nvoc.h @@ -904,6 +904,17 @@ static inline void kgrctxFreeAssociatedCtxBuffers(struct OBJGPU *arg0, struct Ke #define kgrctxFreeAssociatedCtxBuffers(arg0, arg1) 
kgrctxFreeAssociatedCtxBuffers_IMPL(arg0, arg1) #endif //__nvoc_kernel_graphics_context_h_disabled +NvBool kgrctxIsFinalGlobalBufMapRefDuped_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2, GR_GLOBALCTX_BUFFER bufId); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NvBool kgrctxIsFinalGlobalBufMapRefDuped(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2, GR_GLOBALCTX_BUFFER bufId) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxIsFinalGlobalBufMapRefDuped(arg0, arg1, arg2, bufId) kgrctxIsFinalGlobalBufMapRefDuped_IMPL(arg0, arg1, arg2, bufId) +#endif //__nvoc_kernel_graphics_context_h_disabled + #undef PRIVATE_FIELD diff --git a/src/nvidia/generated/g_kernel_gsp_nvoc.c b/src/nvidia/generated/g_kernel_gsp_nvoc.c index de45262c0..1aaadae38 100644 --- a/src/nvidia/generated/g_kernel_gsp_nvoc.c +++ b/src/nvidia/generated/g_kernel_gsp_nvoc.c @@ -679,18 +679,18 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner * // Hal function -- kgspGetWprHeapSize if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ { - if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x100007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GH100 */ + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ { - pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_5661b8; - } - else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ - { - pThis->__kgspGetWprHeapSize__ = 
&kgspGetWprHeapSize_15390a; + pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_e3e8a1; } else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f00000UL) )) /* ChipHal: AD102 | AD103 | AD104 | AD106 | AD107 */ { pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_AD102; } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */ + { + pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_cffea5; + } } // Hal function -- kgspInitVgpuPartitionLogging diff --git a/src/nvidia/generated/g_kernel_gsp_nvoc.h b/src/nvidia/generated/g_kernel_gsp_nvoc.h index d6219e777..0ee39fa4f 100644 --- a/src/nvidia/generated/g_kernel_gsp_nvoc.h +++ b/src/nvidia/generated/g_kernel_gsp_nvoc.h @@ -828,16 +828,16 @@ static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_DISPATCH return pKernelGsp->__kgspGetBinArchiveBooterUnloadUcode__(pKernelGsp); } -static inline NvU64 kgspGetWprHeapSize_5661b8(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { - return 64 * 1024 * 1024; -} - -static inline NvU64 kgspGetWprHeapSize_15390a(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { - return 80 * 1024 * 1024; +static inline NvU64 kgspGetWprHeapSize_e3e8a1(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return 96 * 1024 * 1024; } NvU64 kgspGetWprHeapSize_AD102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); +static inline NvU64 kgspGetWprHeapSize_cffea5(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return 104 * 1024 * 1024; +} + static inline NvU64 kgspGetWprHeapSize_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { return pKernelGsp->__kgspGetWprHeapSize__(pGpu, pKernelGsp); } diff --git a/src/nvidia/generated/g_mem_desc_nvoc.h b/src/nvidia/generated/g_mem_desc_nvoc.h index 69dabeb12..8276d276d 100644 --- a/src/nvidia/generated/g_mem_desc_nvoc.h +++ b/src/nvidia/generated/g_mem_desc_nvoc.h @@ -344,6 +344,9 @@ typedef struct MEMORY_DESCRIPTOR 
// Serve as a head node in a list of submemdescs MEMORY_DESCRIPTOR_LIST *pSubMemDescList; + // Reserved for RM exclusive use + NvBool bRmExclusiveUse; + // If strung in a intrusive linked list ListNode node; @@ -650,6 +653,8 @@ PMEMORY_DESCRIPTOR memdescGetRootMemDesc(PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pRo void memdescSetCustomHeap(PMEMORY_DESCRIPTOR); NvBool memdescGetCustomHeap(PMEMORY_DESCRIPTOR); +NvBool memdescAcquireRmExclusiveUse(MEMORY_DESCRIPTOR *pMemDesc); + /*! * @brief Get PTE kind * diff --git a/src/nvidia/generated/g_nv_name_released.h b/src/nvidia/generated/g_nv_name_released.h index da6f41067..7ace7bb87 100644 --- a/src/nvidia/generated/g_nv_name_released.h +++ b/src/nvidia/generated/g_nv_name_released.h @@ -884,9 +884,13 @@ static const CHIPS_RELEASED sChipsReleased[] = { { 0x2236, 0x1482, 0x10de, "NVIDIA A10" }, { 0x2237, 0x152f, 0x10de, "NVIDIA A10G" }, { 0x2238, 0x1677, 0x10de, "NVIDIA A10M" }, + { 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" }, + { 0x2324, 0x17a6, 0x10de, "NVIDIA H800" }, + { 0x2324, 0x17a8, 0x10de, "NVIDIA H800" }, { 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" }, { 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" }, { 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" }, + { 0x2339, 0x17fc, 0x10de, "NVIDIA H100" }, { 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" }, { 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" }, { 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" }, @@ -973,11 +977,18 @@ static const CHIPS_RELEASED sChipsReleased[] = { { 0x26B1, 0x16a1, 0x10de, "NVIDIA RTX 6000 Ada Generation" }, { 0x26B1, 0x16a1, 0x17aa, "NVIDIA RTX 6000 Ada Generation" }, { 0x26B5, 0x169d, 0x10de, "NVIDIA L40" }, + { 0x26B5, 0x17da, 0x10de, "NVIDIA L40" }, { 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" }, { 0x2717, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" }, { 0x2757, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" }, { 0x2782, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti" }, { 0x27A0, 0x0000, 
0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" }, + { 0x27B0, 0x16fa, 0x1028, "NVIDIA RTX 4000 SFF Ada Generation" }, + { 0x27B0, 0x16fa, 0x103c, "NVIDIA RTX 4000 SFF Ada Generation" }, + { 0x27B0, 0x16fa, 0x10de, "NVIDIA RTX 4000 SFF Ada Generation" }, + { 0x27B0, 0x16fa, 0x17aa, "NVIDIA RTX 4000 SFF Ada Generation" }, + { 0x27B8, 0x16ca, 0x10de, "NVIDIA L4" }, + { 0x27B8, 0x16ee, 0x10de, "NVIDIA L4" }, { 0x27E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" }, { 0x2820, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" }, { 0x2860, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" }, @@ -1717,20 +1728,20 @@ static const CHIPS_RELEASED sChipsReleased[] = { { 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" }, { 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" }, { 0x2238, 0x16e6, 0x10DE, "NVIDIA A10M-1" }, - { 0x2322, 0x17e2, 0x10DE, "NVIDIA GPU-2322-17E2" }, - { 0x2322, 0x17e3, 0x10DE, "NVIDIA GPU-2322-17E3" }, - { 0x2322, 0x17e4, 0x10DE, "NVIDIA GPU-2322-17E4" }, - { 0x2322, 0x17e5, 0x10DE, "NVIDIA GPU-2322-17E5" }, - { 0x2322, 0x17e6, 0x10DE, "NVIDIA GPU-2322-17E6" }, - { 0x2322, 0x17e7, 0x10DE, "NVIDIA GPU-2322-17E7" }, - { 0x2322, 0x17e8, 0x10DE, "NVIDIA GPU-2322-17E8" }, - { 0x2322, 0x17e9, 0x10DE, "NVIDIA GPU-2322-17E9" }, - { 0x2322, 0x17ea, 0x10DE, "NVIDIA GPU-2322-17EA" }, - { 0x2322, 0x17eb, 0x10DE, "NVIDIA GPU-2322-17EB" }, - { 0x2322, 0x17ec, 0x10DE, "NVIDIA GPU-2322-17EC" }, - { 0x2322, 0x17ed, 0x10DE, "NVIDIA GPU-2322-17ED" }, - { 0x2322, 0x17ee, 0x10DE, "NVIDIA GPU-2322-17EE" }, - { 0x2322, 0x17ef, 0x10DE, "NVIDIA GPU-2322-17EF" }, + { 0x2322, 0x17e2, 0x10DE, "NVIDIA H800-1-10CME" }, + { 0x2322, 0x17e3, 0x10DE, "NVIDIA H800-1-10C" }, + { 0x2322, 0x17e4, 0x10DE, "NVIDIA H800-2-20C" }, + { 0x2322, 0x17e5, 0x10DE, "NVIDIA H800-3-40C" }, + { 0x2322, 0x17e6, 0x10DE, "NVIDIA H800-4-40C" }, + { 0x2322, 0x17e7, 0x10DE, "NVIDIA H800-7-80C" }, + { 0x2322, 0x17e8, 0x10DE, "NVIDIA H800-4C" }, + { 0x2322, 0x17e9, 0x10DE, "NVIDIA H800-5C" }, + { 0x2322, 0x17ea, 
0x10DE, "NVIDIA H800-8C" }, + { 0x2322, 0x17eb, 0x10DE, "NVIDIA H800-10C" }, + { 0x2322, 0x17ec, 0x10DE, "NVIDIA H800-16C" }, + { 0x2322, 0x17ed, 0x10DE, "NVIDIA H800-20C" }, + { 0x2322, 0x17ee, 0x10DE, "NVIDIA H800-40C" }, + { 0x2322, 0x17ef, 0x10DE, "NVIDIA H800-80C" }, { 0x2331, 0x16d3, 0x10DE, "NVIDIA H100-1-10C" }, { 0x2331, 0x16d4, 0x10DE, "NVIDIA H100-2-20C" }, { 0x2331, 0x16d5, 0x10DE, "NVIDIA H100-3-40C" }, @@ -1887,37 +1898,37 @@ static const CHIPS_RELEASED sChipsReleased[] = { { 0x26B8, 0x176a, 0x10DE, "NVIDIA L40G-8C" }, { 0x26B8, 0x176b, 0x10DE, "NVIDIA L40G-12C" }, { 0x26B8, 0x176c, 0x10DE, "NVIDIA L40G-24C" }, - { 0x27B8, 0x172f, 0x10DE, "NVIDIA GPU-27B8-172F" }, - { 0x27B8, 0x1730, 0x10DE, "NVIDIA GPU-27B8-1730" }, - { 0x27B8, 0x1731, 0x10DE, "NVIDIA GPU-27B8-1731" }, - { 0x27B8, 0x1732, 0x10DE, "NVIDIA GPU-27B8-1732" }, - { 0x27B8, 0x1733, 0x10DE, "NVIDIA GPU-27B8-1733" }, - { 0x27B8, 0x1734, 0x10DE, "NVIDIA GPU-27B8-1734" }, - { 0x27B8, 0x1735, 0x10DE, "NVIDIA GPU-27B8-1735" }, - { 0x27B8, 0x1736, 0x10DE, "NVIDIA GPU-27B8-1736" }, - { 0x27B8, 0x1737, 0x10DE, "NVIDIA GPU-27B8-1737" }, - { 0x27B8, 0x1738, 0x10DE, "NVIDIA GPU-27B8-1738" }, - { 0x27B8, 0x1739, 0x10DE, "NVIDIA GPU-27B8-1739" }, - { 0x27B8, 0x173a, 0x10DE, "NVIDIA GPU-27B8-173A" }, - { 0x27B8, 0x173b, 0x10DE, "NVIDIA GPU-27B8-173B" }, - { 0x27B8, 0x173c, 0x10DE, "NVIDIA GPU-27B8-173C" }, - { 0x27B8, 0x173d, 0x10DE, "NVIDIA GPU-27B8-173D" }, - { 0x27B8, 0x173e, 0x10DE, "NVIDIA GPU-27B8-173E" }, - { 0x27B8, 0x173f, 0x10DE, "NVIDIA GPU-27B8-173F" }, - { 0x27B8, 0x1740, 0x10DE, "NVIDIA GPU-27B8-1740" }, - { 0x27B8, 0x1741, 0x10DE, "NVIDIA GPU-27B8-1741" }, - { 0x27B8, 0x1742, 0x10DE, "NVIDIA GPU-27B8-1742" }, - { 0x27B8, 0x1743, 0x10DE, "NVIDIA GPU-27B8-1743" }, - { 0x27B8, 0x1744, 0x10DE, "NVIDIA GPU-27B8-1744" }, - { 0x27B8, 0x1745, 0x10DE, "NVIDIA GPU-27B8-1745" }, - { 0x27B8, 0x1746, 0x10DE, "NVIDIA GPU-27B8-1746" }, - { 0x27B8, 0x1747, 0x10DE, "NVIDIA GPU-27B8-1747" }, - { 0x27B8, 
0x1748, 0x10DE, "NVIDIA GPU-27B8-1748" }, - { 0x27B8, 0x1749, 0x10DE, "NVIDIA GPU-27B8-1749" }, - { 0x27B8, 0x174a, 0x10DE, "NVIDIA GPU-27B8-174A" }, - { 0x27B8, 0x174b, 0x10DE, "NVIDIA GPU-27B8-174B" }, - { 0x27B8, 0x174c, 0x10DE, "NVIDIA GPU-27B8-174C" }, - { 0x27B8, 0x174d, 0x10DE, "NVIDIA GPU-27B8-174D" }, + { 0x27B8, 0x172f, 0x10DE, "NVIDIA L4-1B" }, + { 0x27B8, 0x1730, 0x10DE, "NVIDIA L4-2B" }, + { 0x27B8, 0x1731, 0x10DE, "NVIDIA L4-1Q" }, + { 0x27B8, 0x1732, 0x10DE, "NVIDIA L4-2Q" }, + { 0x27B8, 0x1733, 0x10DE, "NVIDIA L4-3Q" }, + { 0x27B8, 0x1734, 0x10DE, "NVIDIA L4-4Q" }, + { 0x27B8, 0x1735, 0x10DE, "NVIDIA L4-6Q" }, + { 0x27B8, 0x1736, 0x10DE, "NVIDIA L4-8Q" }, + { 0x27B8, 0x1737, 0x10DE, "NVIDIA L4-12Q" }, + { 0x27B8, 0x1738, 0x10DE, "NVIDIA L4-24Q" }, + { 0x27B8, 0x1739, 0x10DE, "NVIDIA L4-1A" }, + { 0x27B8, 0x173a, 0x10DE, "NVIDIA L4-2A" }, + { 0x27B8, 0x173b, 0x10DE, "NVIDIA L4-3A" }, + { 0x27B8, 0x173c, 0x10DE, "NVIDIA L4-4A" }, + { 0x27B8, 0x173d, 0x10DE, "NVIDIA L4-6A" }, + { 0x27B8, 0x173e, 0x10DE, "NVIDIA L4-8A" }, + { 0x27B8, 0x173f, 0x10DE, "NVIDIA L4-12A" }, + { 0x27B8, 0x1740, 0x10DE, "NVIDIA L4-24A" }, + { 0x27B8, 0x1741, 0x10DE, "NVIDIA L4-1" }, + { 0x27B8, 0x1742, 0x10DE, "NVIDIA L4-2" }, + { 0x27B8, 0x1743, 0x10DE, "NVIDIA L4-3" }, + { 0x27B8, 0x1744, 0x10DE, "NVIDIA L4-4" }, + { 0x27B8, 0x1745, 0x10DE, "NVIDIA L4-6" }, + { 0x27B8, 0x1746, 0x10DE, "NVIDIA L4-8" }, + { 0x27B8, 0x1747, 0x10DE, "NVIDIA L4-12" }, + { 0x27B8, 0x1748, 0x10DE, "NVIDIA L4-24" }, + { 0x27B8, 0x1749, 0x10DE, "NVIDIA L4-4C" }, + { 0x27B8, 0x174a, 0x10DE, "NVIDIA L4-6C" }, + { 0x27B8, 0x174b, 0x10DE, "NVIDIA L4-8C" }, + { 0x27B8, 0x174c, 0x10DE, "NVIDIA L4-12C" }, + { 0x27B8, 0x174d, 0x10DE, "NVIDIA L4-24C" }, }; #endif // G_NV_NAME_RELEASED_H diff --git a/src/nvidia/generated/g_objtmr_nvoc.h b/src/nvidia/generated/g_objtmr_nvoc.h index 7ebaf7444..9c764a3aa 100644 --- a/src/nvidia/generated/g_objtmr_nvoc.h +++ b/src/nvidia/generated/g_objtmr_nvoc.h @@ -239,6 +239,7 
@@ struct OBJTMR { NvBool bAlarmIntrEnabled; PENG_INFO_LINK_NODE infoList; struct OBJREFCNT *pGrTickFreqRefcnt; + NvU64 sysTimerOffsetNs; }; #ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__ diff --git a/src/nvidia/generated/g_profiler_v2_nvoc.c b/src/nvidia/generated/g_profiler_v2_nvoc.c index d772b1d31..1f9d99a80 100644 --- a/src/nvidia/generated/g_profiler_v2_nvoc.c +++ b/src/nvidia/generated/g_profiler_v2_nvoc.c @@ -192,12 +192,12 @@ static NvBool __nvoc_thunk_RmResource_profilerBaseAccessCallback(struct Profiler static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ProfilerBase[] = { { /* [0] */ -#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) /*pFunc=*/ (void (*)(void)) NULL, #else /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReserveHwpmLegacy_IMPL, -#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) - /*flags=*/ 0x210u, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*flags=*/ 0x2010u, /*accessRight=*/0x0u, /*methodId=*/ 0xb0cc0101u, /*paramSize=*/ sizeof(NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS), @@ -267,12 +267,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler #endif }, { /* [5] */ -#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdFreePmaStream_IMPL, -#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) - /*flags=*/ 0x210u, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, /*accessRight=*/0x0u, /*methodId=*/ 0xb0cc0106u, /*paramSize=*/ sizeof(NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS), @@ -282,12 +282,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler #endif }, { /* [6] */ -#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) /*pFunc=*/ (void (*)(void)) NULL, #else /*pFunc=*/ (void (*)(void)) 
profilerBaseCtrlCmdBindPmResources_IMPL, -#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) - /*flags=*/ 0x210u, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*flags=*/ 0x2010u, /*accessRight=*/0x0u, /*methodId=*/ 0xb0cc0107u, /*paramSize=*/ 0, @@ -297,12 +297,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler #endif }, { /* [7] */ -#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) /*pFunc=*/ (void (*)(void)) NULL, #else /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdUnbindPmResources_IMPL, -#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) - /*flags=*/ 0x210u, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*flags=*/ 0x2010u, /*accessRight=*/0x0u, /*methodId=*/ 0xb0cc0108u, /*paramSize=*/ 0, @@ -327,12 +327,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler #endif }, { /* [9] */ -#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdExecRegops_IMPL, -#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) - /*flags=*/ 0x210u, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, /*accessRight=*/0x0u, /*methodId=*/ 0xb0cc010au, /*paramSize=*/ sizeof(NVB0CC_CTRL_EXEC_REG_OPS_PARAMS), @@ -444,6 +444,81 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), #if NV_PRINTF_STRINGS_ALLOWED /*func=*/ "profilerBaseCtrlCmdInternalPermissionsInit" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalFreePmaStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 
0xb0cc0206u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdInternalFreePmaStream" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalGetMaxPmas_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0207u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdInternalGetMaxPmas" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalBindPmResources_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0208u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdInternalBindPmResources" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalUnbindPmResources_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0209u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdInternalUnbindPmResources" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalReserveHwpmLegacy_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc020au, + /*paramSize=*/ sizeof(NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdInternalReserveHwpmLegacy" #endif }, @@ -451,7 +526,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler const struct NVOC_EXPORT_INFO __nvoc_export_info_ProfilerBase = { - /*numEntries=*/ 17, + /*numEntries=*/ 22, /*pExportEntries=*/ __nvoc_exported_method_def_ProfilerBase }; @@ -498,10 +573,14 @@ static void __nvoc_init_funcTable_ProfilerBase_1(ProfilerBase *pThis, RmHalspecO PORT_UNREFERENCED_VARIABLE(rmVariantHal); PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); -#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) pThis->__profilerBaseCtrlCmdReserveHwpmLegacy__ = &profilerBaseCtrlCmdReserveHwpmLegacy_IMPL; #endif +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__profilerBaseCtrlCmdInternalReserveHwpmLegacy__ = &profilerBaseCtrlCmdInternalReserveHwpmLegacy_IMPL; +#endif + #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) pThis->__profilerBaseCtrlCmdReleaseHwpmLegacy__ = &profilerBaseCtrlCmdReleaseHwpmLegacy_IMPL; #endif @@ -518,23 +597,39 @@ static void __nvoc_init_funcTable_ProfilerBase_1(ProfilerBase *pThis, RmHalspecO pThis->__profilerBaseCtrlCmdAllocPmaStream__ = &profilerBaseCtrlCmdAllocPmaStream_IMPL; #endif -#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) pThis->__profilerBaseCtrlCmdFreePmaStream__ = &profilerBaseCtrlCmdFreePmaStream_IMPL; #endif -#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__profilerBaseCtrlCmdInternalFreePmaStream__ = &profilerBaseCtrlCmdInternalFreePmaStream_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__profilerBaseCtrlCmdInternalGetMaxPmas__ = &profilerBaseCtrlCmdInternalGetMaxPmas_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) pThis->__profilerBaseCtrlCmdBindPmResources__ = &profilerBaseCtrlCmdBindPmResources_IMPL; #endif -#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) pThis->__profilerBaseCtrlCmdUnbindPmResources__ = &profilerBaseCtrlCmdUnbindPmResources_IMPL; #endif +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__profilerBaseCtrlCmdInternalBindPmResources__ = &profilerBaseCtrlCmdInternalBindPmResources_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__profilerBaseCtrlCmdInternalUnbindPmResources__ = &profilerBaseCtrlCmdInternalUnbindPmResources_IMPL; +#endif + #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) pThis->__profilerBaseCtrlCmdPmaStreamUpdateGetPut__ = &profilerBaseCtrlCmdPmaStreamUpdateGetPut_IMPL; #endif -#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) pThis->__profilerBaseCtrlCmdExecRegops__ = &profilerBaseCtrlCmdExecRegops_IMPL; #endif diff --git a/src/nvidia/generated/g_profiler_v2_nvoc.h b/src/nvidia/generated/g_profiler_v2_nvoc.h index 945cbf211..1639697e8 100644 --- a/src/nvidia/generated/g_profiler_v2_nvoc.h +++ b/src/nvidia/generated/g_profiler_v2_nvoc.h @@ -67,13 +67,18 @@ struct ProfilerBase { struct GpuResource *__nvoc_pbase_GpuResource; struct ProfilerBase *__nvoc_pbase_ProfilerBase; NV_STATUS (*__profilerBaseCtrlCmdReserveHwpmLegacy__)(struct ProfilerBase *, NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdInternalReserveHwpmLegacy__)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS *); NV_STATUS (*__profilerBaseCtrlCmdReleaseHwpmLegacy__)(struct ProfilerBase *); NV_STATUS (*__profilerBaseCtrlCmdReservePmAreaSmpc__)(struct ProfilerBase *, 
NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *); NV_STATUS (*__profilerBaseCtrlCmdReleasePmAreaSmpc__)(struct ProfilerBase *); NV_STATUS (*__profilerBaseCtrlCmdAllocPmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *); NV_STATUS (*__profilerBaseCtrlCmdFreePmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdInternalFreePmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdInternalGetMaxPmas__)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS *); NV_STATUS (*__profilerBaseCtrlCmdBindPmResources__)(struct ProfilerBase *); NV_STATUS (*__profilerBaseCtrlCmdUnbindPmResources__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdInternalBindPmResources__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdInternalUnbindPmResources__)(struct ProfilerBase *); NV_STATUS (*__profilerBaseCtrlCmdPmaStreamUpdateGetPut__)(struct ProfilerBase *, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *); NV_STATUS (*__profilerBaseCtrlCmdExecRegops__)(struct ProfilerBase *, NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *); NV_STATUS (*__profilerBaseCtrlCmdInternalAllocPmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *); @@ -106,6 +111,13 @@ struct ProfilerBase { NV_STATUS (*__profilerBaseControlLookup__)(struct ProfilerBase *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); NV_STATUS (*__profilerBaseMap__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); NvBool (*__profilerBaseAccessCallback__)(struct ProfilerBase *, struct RsClient *, void *, RsAccessRight); + NvU32 maxPmaChannels; + NvU32 pmaVchIdx; + NvBool bLegacyHwpm; + struct RsResourceRef **ppBytesAvailable; + struct RsResourceRef **ppStreamBuffers; + struct RsResourceRef *pBoundCntBuf; + struct RsResourceRef *pBoundPmaBuf; }; #ifndef __NVOC_CLASS_ProfilerBase_TYPEDEF__ @@ 
-137,13 +149,18 @@ NV_STATUS __nvoc_objCreate_ProfilerBase(ProfilerBase**, Dynamic*, NvU32, struct __nvoc_objCreate_ProfilerBase((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) #define profilerBaseCtrlCmdReserveHwpmLegacy(pProfiler, pParams) profilerBaseCtrlCmdReserveHwpmLegacy_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdInternalReserveHwpmLegacy(pProfiler, pParams) profilerBaseCtrlCmdInternalReserveHwpmLegacy_DISPATCH(pProfiler, pParams) #define profilerBaseCtrlCmdReleaseHwpmLegacy(pProfiler) profilerBaseCtrlCmdReleaseHwpmLegacy_DISPATCH(pProfiler) #define profilerBaseCtrlCmdReservePmAreaSmpc(pProfiler, pParams) profilerBaseCtrlCmdReservePmAreaSmpc_DISPATCH(pProfiler, pParams) #define profilerBaseCtrlCmdReleasePmAreaSmpc(pProfiler) profilerBaseCtrlCmdReleasePmAreaSmpc_DISPATCH(pProfiler) #define profilerBaseCtrlCmdAllocPmaStream(pProfiler, pParams) profilerBaseCtrlCmdAllocPmaStream_DISPATCH(pProfiler, pParams) #define profilerBaseCtrlCmdFreePmaStream(pProfiler, pParams) profilerBaseCtrlCmdFreePmaStream_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdInternalFreePmaStream(pProfiler, pParams) profilerBaseCtrlCmdInternalFreePmaStream_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdInternalGetMaxPmas(pProfiler, pParams) profilerBaseCtrlCmdInternalGetMaxPmas_DISPATCH(pProfiler, pParams) #define profilerBaseCtrlCmdBindPmResources(pProfiler) profilerBaseCtrlCmdBindPmResources_DISPATCH(pProfiler) #define profilerBaseCtrlCmdUnbindPmResources(pProfiler) profilerBaseCtrlCmdUnbindPmResources_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdInternalBindPmResources(pProfiler) profilerBaseCtrlCmdInternalBindPmResources_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdInternalUnbindPmResources(pProfiler) profilerBaseCtrlCmdInternalUnbindPmResources_DISPATCH(pProfiler) #define profilerBaseCtrlCmdPmaStreamUpdateGetPut(pProfiler, pParams) profilerBaseCtrlCmdPmaStreamUpdateGetPut_DISPATCH(pProfiler, pParams) 
#define profilerBaseCtrlCmdExecRegops(pProfiler, pParams) profilerBaseCtrlCmdExecRegops_DISPATCH(pProfiler, pParams) #define profilerBaseCtrlCmdInternalAllocPmaStream(pProfiler, pParams) profilerBaseCtrlCmdInternalAllocPmaStream_DISPATCH(pProfiler, pParams) @@ -213,6 +230,12 @@ static inline NV_STATUS profilerBaseCtrlCmdReserveHwpmLegacy_DISPATCH(struct Pro return pProfiler->__profilerBaseCtrlCmdReserveHwpmLegacy__(pProfiler, pParams); } +NV_STATUS profilerBaseCtrlCmdInternalReserveHwpmLegacy_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdInternalReserveHwpmLegacy_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdInternalReserveHwpmLegacy__(pProfiler, pParams); +} + NV_STATUS profilerBaseCtrlCmdReleaseHwpmLegacy_IMPL(struct ProfilerBase *pProfiler); static inline NV_STATUS profilerBaseCtrlCmdReleaseHwpmLegacy_DISPATCH(struct ProfilerBase *pProfiler) { @@ -243,6 +266,18 @@ static inline NV_STATUS profilerBaseCtrlCmdFreePmaStream_DISPATCH(struct Profile return pProfiler->__profilerBaseCtrlCmdFreePmaStream__(pProfiler, pParams); } +NV_STATUS profilerBaseCtrlCmdInternalFreePmaStream_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdInternalFreePmaStream_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdInternalFreePmaStream__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdInternalGetMaxPmas_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdInternalGetMaxPmas_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS *pParams) { + return 
pProfiler->__profilerBaseCtrlCmdInternalGetMaxPmas__(pProfiler, pParams); +} + NV_STATUS profilerBaseCtrlCmdBindPmResources_IMPL(struct ProfilerBase *pProfiler); static inline NV_STATUS profilerBaseCtrlCmdBindPmResources_DISPATCH(struct ProfilerBase *pProfiler) { @@ -255,6 +290,18 @@ static inline NV_STATUS profilerBaseCtrlCmdUnbindPmResources_DISPATCH(struct Pro return pProfiler->__profilerBaseCtrlCmdUnbindPmResources__(pProfiler); } +NV_STATUS profilerBaseCtrlCmdInternalBindPmResources_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS profilerBaseCtrlCmdInternalBindPmResources_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdInternalBindPmResources__(pProfiler); +} + +NV_STATUS profilerBaseCtrlCmdInternalUnbindPmResources_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS profilerBaseCtrlCmdInternalUnbindPmResources_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdInternalUnbindPmResources__(pProfiler); +} + NV_STATUS profilerBaseCtrlCmdPmaStreamUpdateGetPut_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams); static inline NV_STATUS profilerBaseCtrlCmdPmaStreamUpdateGetPut_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams) { diff --git a/src/nvidia/generated/g_subdevice_nvoc.c b/src/nvidia/generated/g_subdevice_nvoc.c index bad622685..e93212fbd 100644 --- a/src/nvidia/generated/g_subdevice_nvoc.c +++ b/src/nvidia/generated/g_subdevice_nvoc.c @@ -1415,6 +1415,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic #endif }, { /* [79] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001a9u, + /*paramSize=*/ 
sizeof(NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2" +#endif + }, + { /* [80] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1429,7 +1444,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdEventSetNotification" #endif }, - { /* [80] */ + { /* [81] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1444,7 +1459,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdEventSetTrigger" #endif }, - { /* [81] */ + { /* [82] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1459,7 +1474,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdEventSetMemoryNotifies" #endif }, - { /* [82] */ + { /* [83] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1474,7 +1489,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdEventSetSemaphoreMemory" #endif }, - { /* [83] */ + { /* [84] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1489,7 +1504,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdEventSetSemaMemValidation" #endif }, - { /* [84] */ + { /* [85] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1504,7 +1519,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdEventSetTriggerFifo" #endif }, - { /* [85] */ + { /* [86] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void 
(*)(void)) NULL, #else @@ -1519,7 +1534,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdTimerSchedule" #endif }, - { /* [86] */ + { /* [87] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1534,7 +1549,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdTimerCancel" #endif }, - { /* [87] */ + { /* [88] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1549,7 +1564,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdTimerGetTime" #endif }, - { /* [88] */ + { /* [89] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1564,7 +1579,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdTimerGetRegisterOffset" #endif }, - { /* [89] */ + { /* [90] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1579,7 +1594,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo" #endif }, - { /* [90] */ + { /* [91] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1594,7 +1609,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdTimerSetGrTickFreq" #endif }, - { /* [91] */ + { /* [92] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1609,7 +1624,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdI2cReadBuffer" #endif }, - { /* [92] */ + { /* [93] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1624,7 +1639,7 @@ 
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdI2cWriteBuffer" #endif }, - { /* [93] */ + { /* [94] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1639,7 +1654,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdI2cReadReg" #endif }, - { /* [94] */ + { /* [95] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1654,7 +1669,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdI2cWriteReg" #endif }, - { /* [95] */ + { /* [96] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1669,7 +1684,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBiosGetSKUInfo" #endif }, - { /* [96] */ + { /* [97] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1684,7 +1699,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBiosGetPostTime" #endif }, - { /* [97] */ + { /* [98] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1699,7 +1714,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBiosGetUefiSupport" #endif }, - { /* [98] */ + { /* [99] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1714,7 +1729,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBiosGetNbsiV2" #endif }, - { /* [99] */ + { /* [100] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1729,7 +1744,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF 
__nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBiosGetInfoV2" #endif }, - { /* [100] */ + { /* [101] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1744,7 +1759,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDisplayGetStaticInfo" #endif }, - { /* [101] */ + { /* [102] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1759,7 +1774,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMemSysGetStaticConfig" #endif }, - { /* [102] */ + { /* [103] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1774,7 +1789,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer" #endif }, - { /* [103] */ + { /* [104] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1789,7 +1804,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer" #endif }, - { /* [104] */ + { /* [105] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1804,7 +1819,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetCaps" #endif }, - { /* [105] */ + { /* [106] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1819,7 +1834,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer" #endif }, - { /* [106] */ + { /* [107] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1834,7 +1849,7 @@ static 
const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder" #endif }, - { /* [107] */ + { /* [108] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1849,7 +1864,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMsencGetCaps" #endif }, - { /* [108] */ + { /* [109] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1864,7 +1879,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks" #endif }, - { /* [109] */ + { /* [110] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x80000u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1879,7 +1894,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferPtes" #endif }, - { /* [110] */ + { /* [111] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1894,7 +1909,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize" #endif }, - { /* [111] */ + { /* [112] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1909,7 +1924,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetInfo" #endif }, - { /* [112] */ + { /* [113] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1924,7 +1939,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetZcullInfo" #endif }, - { /* [113] */ + { /* [114] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void 
(*)(void)) NULL, #else @@ -1939,7 +1954,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetRopInfo" #endif }, - { /* [114] */ + { /* [115] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1954,7 +1969,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetPpcMasks" #endif }, - { /* [115] */ + { /* [116] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1969,7 +1984,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo" #endif }, - { /* [116] */ + { /* [117] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1984,7 +1999,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier" #endif }, - { /* [117] */ + { /* [118] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -1999,7 +2014,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGetChipInfo" #endif }, - { /* [118] */ + { /* [119] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2014,7 +2029,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize" #endif }, - { /* [119] */ + { /* [120] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2029,7 +2044,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines" #endif }, - { /* [120] 
*/ + { /* [121] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2044,7 +2059,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGetDeviceInfoTable" #endif }, - { /* [121] */ + { /* [122] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2059,7 +2074,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGetUserRegisterAccessMap" #endif }, - { /* [122] */ + { /* [123] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2074,7 +2089,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGetConstructedFalconInfo" #endif }, - { /* [123] */ + { /* [124] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2089,7 +2104,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetPdbProperties" #endif }, - { /* [124] */ + { /* [125] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2104,7 +2119,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDisplayWriteInstMem" #endif }, - { /* [125] */ + { /* [126] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2119,7 +2134,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalRecoverAllComputeContexts" #endif }, - { /* [126] */ + { /* [127] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2134,7 +2149,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdDisplayGetIpVersion" #endif }, - { /* [127] */ + { /* [128] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2149,7 +2164,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGetSmcMode" #endif }, - { /* [128] */ + { /* [129] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2164,7 +2179,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDisplaySetupRgLineIntr" #endif }, - { /* [129] */ + { /* [130] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2179,7 +2194,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMemSysSetPartitionableMem" #endif }, - { /* [130] */ + { /* [131] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2194,7 +2209,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers" #endif }, - { /* [131] */ + { /* [132] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2209,7 +2224,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDisplaySetImportedImpData" #endif }, - { /* [132] */ + { /* [133] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2224,7 +2239,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDisplaySetChannelPushbuffer" #endif }, - { /* [133] */ + { /* [134] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2239,7 +2254,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF 
__nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGmmuGetStaticInfo" #endif }, - { /* [134] */ + { /* [135] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2254,7 +2269,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetHeapReservationSize" #endif }, - { /* [135] */ + { /* [136] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2269,7 +2284,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdIntrGetKernelTable" #endif }, - { /* [136] */ + { /* [137] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2284,7 +2299,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDisplayGetDisplayMask" #endif }, - { /* [137] */ + { /* [138] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2299,7 +2314,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalFifoGetNumChannels" #endif }, - { /* [138] */ + { /* [139] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2314,7 +2329,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles" #endif }, - { /* [139] */ + { /* [140] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2329,7 +2344,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines" #endif }, - { /* [140] */ + { /* [141] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2344,7 +2359,7 @@ static 
const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges" #endif }, - { /* [141] */ + { /* [142] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2359,7 +2374,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKMemSysGetMIGMemoryConfig" #endif }, - { /* [142] */ + { /* [143] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2374,7 +2389,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbSetZbcReferenced" #endif }, - { /* [143] */ + { /* [144] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2389,7 +2404,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalRcWatchdogTimeout" #endif }, - { /* [144] */ + { /* [145] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2404,7 +2419,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable" #endif }, - { /* [145] */ + { /* [146] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2419,7 +2434,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMemSysL2InvalidateEvict" #endif }, - { /* [146] */ + { /* [147] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2434,7 +2449,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches" #endif }, - { /* [147] */ + { /* [148] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void 
(*)(void)) NULL, #else @@ -2449,7 +2464,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMemSysDisableNvlinkPeers" #endif }, - { /* [148] */ + { /* [149] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2464,7 +2479,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMemSysProgramRawCompressionMode" #endif }, - { /* [149] */ + { /* [150] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2479,7 +2494,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalBusFlushWithSysmembar" #endif }, - { /* [150] */ + { /* [151] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2494,7 +2509,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal" #endif }, - { /* [151] */ + { /* [152] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2509,7 +2524,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote" #endif }, - { /* [152] */ + { /* [153] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2524,7 +2539,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalBusDestroyP2pMailbox" #endif }, - { /* [153] */ + { /* [154] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2539,7 +2554,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalBusCreateC2cPeerMapping" #endif }, - { /* [154] */ + { /* [155] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2554,7 +2569,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping" #endif }, - { /* [155] */ + { /* [156] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2569,7 +2584,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfCudaLimitDisable" #endif }, - { /* [156] */ + { /* [157] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2584,7 +2599,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPmgrUnsetDynamicBoostLimit" #endif }, - { /* [157] */ + { /* [158] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2599,7 +2614,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfOptpCliClear" #endif }, - { /* [158] */ + { /* [159] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2614,7 +2629,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl" #endif }, - { /* [159] */ + { /* [160] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2629,7 +2644,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits" #endif }, - { /* [160] */ + { /* [161] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2644,7 +2659,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo" #endif }, - { /* [161] */ + { /* [162] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2659,7 +2674,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfGetAuxPowerState" #endif }, - { /* [162] */ + { /* [163] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2674,7 +2689,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdHshubPeerConnConfig" #endif }, - { /* [163] */ + { /* [164] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2689,7 +2704,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdHshubFirstLinkPeerId" #endif }, - { /* [164] */ + { /* [165] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2704,7 +2719,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdHshubGetHshubIdForLinks" #endif }, - { /* [165] */ + { /* [166] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2719,7 +2734,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdHshubGetNumUnits" #endif }, - { /* [166] */ + { /* [167] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2734,7 +2749,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdHshubNextHshubId" #endif }, - { /* [167] */ + { /* [168] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2749,7 +2764,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic 
/*func=*/ "subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck" #endif }, - { /* [168] */ + { /* [169] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2764,7 +2779,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet" #endif }, - { /* [169] */ + { /* [170] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2779,7 +2794,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfBoostSet_2x" #endif }, - { /* [170] */ + { /* [171] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2794,7 +2809,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer" #endif }, - { /* [171] */ + { /* [172] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2809,7 +2824,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer" #endif }, - { /* [172] */ + { /* [173] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2824,7 +2839,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer" #endif }, - { /* [173] */ + { /* [174] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2839,7 +2854,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer" #endif }, - { /* [174] */ + { /* [175] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else 
@@ -2854,7 +2869,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGmmuCopyReservedSplitGVASpacePdesServer" #endif }, - { /* [175] */ + { /* [176] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2869,7 +2884,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfBoostSet_3x" #endif }, - { /* [176] */ + { /* [177] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2884,7 +2899,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfBoostClear_3x" #endif }, - { /* [177] */ + { /* [178] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2899,7 +2914,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance" #endif }, - { /* [178] */ + { /* [179] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2914,7 +2929,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance" #endif }, - { /* [179] */ + { /* [180] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2929,7 +2944,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBifGetStaticInfo" #endif }, - { /* [180] */ + { /* [181] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2944,7 +2959,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr" #endif }, - { /* [181] */ + { /* [182] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2959,7 +2974,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr" #endif }, - { /* [182] */ + { /* [183] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2974,7 +2989,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBifGetAspmL1Flags" #endif }, - { /* [183] */ + { /* [184] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -2989,7 +3004,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount" #endif }, - { /* [184] */ + { /* [185] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3004,7 +3019,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCcuMap" #endif }, - { /* [185] */ + { /* [186] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3019,7 +3034,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCcuUnmap" #endif }, - { /* [186] */ + { /* [187] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3034,7 +3049,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalSetP2pCaps" #endif }, - { /* [187] */ + { /* [188] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3049,7 +3064,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalRemoveP2pCaps" #endif }, - { /* [188] */ + { /* [189] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3064,7 +3079,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGetPcieP2pCaps" #endif }, - { /* [189] */ + { /* [190] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3079,7 +3094,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBifSetPcieRo" #endif }, - { /* [190] */ + { /* [191] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3094,7 +3109,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles" #endif }, - { /* [191] */ + { /* [192] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3109,7 +3124,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCcuSetStreamState" #endif }, - { /* [192] */ + { /* [193] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3124,7 +3139,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalInitGpuIntr" #endif }, - { /* [193] */ + { /* [194] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3139,7 +3154,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGsyncOptimizeTiming" #endif }, - { /* [194] */ + { /* [195] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3154,7 +3169,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGsyncGetDisplayIds" #endif }, - { /* [195] */ 
+ { /* [196] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3169,7 +3184,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGsyncSetStereoSync" #endif }, - { /* [196] */ + { /* [197] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3184,7 +3199,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalFbsrInit" #endif }, - { /* [197] */ + { /* [198] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3199,7 +3214,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalFbsrSendRegionInfo" #endif }, - { /* [198] */ + { /* [199] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3214,7 +3229,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGsyncGetVactiveLines" #endif }, - { /* [199] */ + { /* [200] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3229,7 +3244,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalMemmgrGetVgpuHostRmReservedFb" #endif }, - { /* [200] */ + { /* [201] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3244,7 +3259,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalPostInitBrightcStateLoad" #endif }, - { /* [201] */ + { /* [202] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3259,7 +3274,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdInternalNvlinkGetNumActiveLinksPerIoctrl" #endif }, - { /* [202] */ + { /* [203] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3274,7 +3289,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalNvlinkGetTotalNumLinksPerIoctrl" #endif }, - { /* [203] */ + { /* [204] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3289,7 +3304,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGsyncIsDisplayIdValid" #endif }, - { /* [204] */ + { /* [205] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3304,7 +3319,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGsyncSetOrRestoreGpioRasterSync" #endif }, - { /* [205] */ + { /* [206] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3319,7 +3334,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdInternalGetCoherentFbApertureSize" #endif }, - { /* [206] */ + { /* [207] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3334,7 +3349,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdSetGpfifo" #endif }, - { /* [207] */ + { /* [208] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3349,7 +3364,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoBindEngines" #endif }, - { /* [208] */ + { /* [209] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3364,7 +3379,7 @@ static const struct 
NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdSetOperationalProperties" #endif }, - { /* [209] */ + { /* [210] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3379,7 +3394,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGetPhysicalChannelCount" #endif }, - { /* [210] */ + { /* [211] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3394,7 +3409,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoGetInfo" #endif }, - { /* [211] */ + { /* [212] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3409,7 +3424,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoDisableChannels" #endif }, - { /* [212] */ + { /* [213] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3424,7 +3439,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoGetChannelMemInfo" #endif }, - { /* [213] */ + { /* [214] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3439,7 +3454,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoGetUserdLocation" #endif }, - { /* [214] */ + { /* [215] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3454,7 +3469,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoGetDeviceInfoTable" #endif }, - { /* [215] */ + { /* [216] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3469,7 +3484,7 @@ static const struct 
NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoClearFaultedBit" #endif }, - { /* [216] */ + { /* [217] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2310u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3484,7 +3499,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoRunlistSetSchedPolicy" #endif }, - { /* [217] */ + { /* [218] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3499,7 +3514,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoUpdateChannelInfo" #endif }, - { /* [218] */ + { /* [219] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3514,7 +3529,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoDisableUsermodeChannels" #endif }, - { /* [219] */ + { /* [220] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3529,7 +3544,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoSetupVfZombieSubctxPdb" #endif }, - { /* [220] */ + { /* [221] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3544,7 +3559,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFifoGetAllocatedChannels" #endif }, - { /* [221] */ + { /* [222] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3559,7 +3574,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetInfo" #endif }, - { /* [222] */ + { /* [223] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3574,7 +3589,7 @@ static const struct 
NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrCtxswZcullMode" #endif }, - { /* [223] */ + { /* [224] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3589,7 +3604,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetZcullInfo" #endif }, - { /* [224] */ + { /* [225] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3604,7 +3619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrCtxswPmMode" #endif }, - { /* [225] */ + { /* [226] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3619,7 +3634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrCtxswZcullBind" #endif }, - { /* [226] */ + { /* [227] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3634,7 +3649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrCtxswPmBind" #endif }, - { /* [227] */ + { /* [228] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3649,7 +3664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrSetGpcTileMap" #endif }, - { /* [228] */ + { /* [229] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3664,7 +3679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrCtxswSmpcMode" #endif }, - { /* [229] */ + { /* [230] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3679,7 +3694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF 
__nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetSmToGpcTpcMappings" #endif }, - { /* [230] */ + { /* [231] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3694,7 +3709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrSetCtxswPreemptionMode" #endif }, - { /* [231] */ + { /* [232] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3709,7 +3724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrCtxswPreemptionBind" #endif }, - { /* [232] */ + { /* [233] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3724,7 +3739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrPcSamplingMode" #endif }, - { /* [233] */ + { /* [234] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3739,7 +3754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetROPInfo" #endif }, - { /* [234] */ + { /* [235] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3754,7 +3769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetCtxswStats" #endif }, - { /* [235] */ + { /* [236] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3769,7 +3784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferSize" #endif }, - { /* [236] */ + { /* [237] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x80000u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3784,7 +3799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF 
__nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferInfo" #endif }, - { /* [237] */ + { /* [238] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3799,7 +3814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetGlobalSmOrder" #endif }, - { /* [238] */ + { /* [239] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3814,7 +3829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetCurrentResidentChannel" #endif }, - { /* [239] */ + { /* [240] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3829,7 +3844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetVatAlarmData" #endif }, - { /* [240] */ + { /* [241] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3844,7 +3859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetAttributeBufferSize" #endif }, - { /* [241] */ + { /* [242] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3859,7 +3874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGfxPoolQuerySize" #endif }, - { /* [242] */ + { /* [243] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3874,7 +3889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGfxPoolInitialize" #endif }, - { /* [243] */ + { /* [244] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3889,7 +3904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF 
__nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGfxPoolAddSlots" #endif }, - { /* [244] */ + { /* [245] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3904,7 +3919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGfxPoolRemoveSlots" #endif }, - { /* [245] */ + { /* [246] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3919,7 +3934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetCapsV2" #endif }, - { /* [246] */ + { /* [247] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3934,7 +3949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetInfoV2" #endif }, - { /* [247] */ + { /* [248] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3949,7 +3964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetGpcMask" #endif }, - { /* [248] */ + { /* [249] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3964,7 +3979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetTpcMask" #endif }, - { /* [249] */ + { /* [250] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3979,7 +3994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrSetTpcPartitionMode" #endif }, - { /* [250] */ + { /* [251] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -3994,7 +4009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic 
/*func=*/ "subdeviceCtrlCmdKGrGetEngineContextProperties" #endif }, - { /* [251] */ + { /* [252] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4009,7 +4024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetSmIssueRateModifier" #endif }, - { /* [252] */ + { /* [253] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4024,7 +4039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUid" #endif }, - { /* [253] */ + { /* [254] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4039,7 +4054,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetPhysGpcMask" #endif }, - { /* [254] */ + { /* [255] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4054,7 +4069,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetPpcMask" #endif }, - { /* [255] */ + { /* [256] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4069,7 +4084,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetNumTpcsForGpc" #endif }, - { /* [256] */ + { /* [257] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4084,7 +4099,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetCtxswModes" #endif }, - { /* [257] */ + { /* [258] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4099,7 +4114,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdKGrGetGpcTileMap" #endif }, - { /* [258] */ + { /* [259] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4114,7 +4129,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetZcullMask" #endif }, - { /* [259] */ + { /* [260] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8010u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4129,7 +4144,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2" #endif }, - { /* [260] */ + { /* [261] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4144,7 +4159,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKGrGetGfxGpcAndTpcInfo" #endif }, - { /* [261] */ + { /* [262] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4159,7 +4174,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetInfo" #endif }, - { /* [262] */ + { /* [263] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4174,7 +4189,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetInfoV2" #endif }, - { /* [263] */ + { /* [264] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4189,7 +4204,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetCarveoutAddressInfo" #endif }, - { /* [264] */ + { /* [265] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4204,7 +4219,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdFbGetCalibrationLockFailed" #endif }, - { /* [265] */ + { /* [266] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4219,7 +4234,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbFlushGpuCache" #endif }, - { /* [266] */ + { /* [267] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4234,7 +4249,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbSetGpuCacheAllocPolicy" #endif }, - { /* [267] */ + { /* [268] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4249,7 +4264,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetBar1Offset" #endif }, - { /* [268] */ + { /* [269] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4264,7 +4279,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetGpuCacheAllocPolicy" #endif }, - { /* [269] */ + { /* [270] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4279,7 +4294,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbIsKind" #endif }, - { /* [270] */ + { /* [271] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4294,7 +4309,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetGpuCacheInfo" #endif }, - { /* [271] */ + { /* [272] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4309,7 +4324,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2" #endif }, - { /* [272] */ + { /* [273] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4324,7 +4339,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2" #endif }, - { /* [273] */ + { /* [274] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4339,7 +4354,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetFBRegionInfo" #endif }, - { /* [274] */ + { /* [275] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4354,7 +4369,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetOfflinedPages" #endif }, - { /* [275] */ + { /* [276] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4369,7 +4384,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetLTCInfoForFBP" #endif }, - { /* [276] */ + { /* [277] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4384,7 +4399,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbCBCOp" #endif }, - { /* [277] */ + { /* [278] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4399,7 +4414,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetCtagsForCbcEviction" #endif }, - { /* [278] */ + { /* [279] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4414,7 +4429,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdFbSetupVprRegion" #endif }, - { /* [279] */ + { /* [280] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4429,7 +4444,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetCliManagedOfflinedPages" #endif }, - { /* [280] */ + { /* [281] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4444,7 +4459,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetCompBitCopyConstructInfo" #endif }, - { /* [281] */ + { /* [282] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4459,7 +4474,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbSetRrd" #endif }, - { /* [282] */ + { /* [283] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4474,7 +4489,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbSetReadLimit" #endif }, - { /* [283] */ + { /* [284] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4489,7 +4504,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbSetWriteLimit" #endif }, - { /* [284] */ + { /* [285] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4504,7 +4519,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbPatchPbrForMining" #endif }, - { /* [285] */ + { /* [286] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4519,7 +4534,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdFbGetMemAlignment" #endif }, - { /* [286] */ + { /* [287] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4534,7 +4549,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetRemappedRows" #endif }, - { /* [287] */ + { /* [288] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4549,7 +4564,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetFsInfo" #endif }, - { /* [288] */ + { /* [289] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4564,7 +4579,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetRowRemapperHistogram" #endif }, - { /* [289] */ + { /* [290] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4579,7 +4594,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetDynamicOfflinedPages" #endif }, - { /* [290] */ + { /* [291] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4594,7 +4609,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbUpdateNumaStatus" #endif }, - { /* [291] */ + { /* [292] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4609,7 +4624,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFbGetNumaInfo" #endif }, - { /* [292] */ + { /* [293] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4624,7 +4639,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdMcGetArchInfo" #endif }, - { /* [293] */ + { /* [294] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4639,7 +4654,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMcServiceInterrupts" #endif }, - { /* [294] */ + { /* [295] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4654,7 +4669,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMcGetManufacturer" #endif }, - { /* [295] */ + { /* [296] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4669,7 +4684,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMcQueryHostclkSlowdownStatus" #endif }, - { /* [296] */ + { /* [297] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4684,7 +4699,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMcSetHostclkSlowdownStatus" #endif }, - { /* [297] */ + { /* [298] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4699,7 +4714,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdMcChangeReplayableFaultOwnership" #endif }, - { /* [298] */ + { /* [299] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4714,7 +4729,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetPciInfo" #endif }, - { /* [299] */ + { /* [300] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4729,7 +4744,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdBusGetInfo" #endif }, - { /* [300] */ + { /* [301] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4744,7 +4759,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetPciBarInfo" #endif }, - { /* [301] */ + { /* [302] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4759,7 +4774,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusSetPcieLinkWidth" #endif }, - { /* [302] */ + { /* [303] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4774,7 +4789,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusSetPcieSpeed" #endif }, - { /* [303] */ + { /* [304] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4789,7 +4804,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed" #endif }, - { /* [304] */ + { /* [305] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4804,7 +4819,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed" #endif }, - { /* [305] */ + { /* [306] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4819,7 +4834,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusHWBCGetUpstreamBAR0" #endif }, - { /* [306] */ + { /* [307] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4834,7 +4849,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdBusServiceGpuMultifunctionState" #endif }, - { /* [307] */ + { /* [308] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4849,7 +4864,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetPexCounters" #endif }, - { /* [308] */ + { /* [309] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4864,7 +4879,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusClearPexCounters" #endif }, - { /* [309] */ + { /* [310] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4879,7 +4894,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusFreezePexCounters" #endif }, - { /* [310] */ + { /* [311] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4894,7 +4909,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetPexLaneCounters" #endif }, - { /* [311] */ + { /* [312] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4909,7 +4924,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetPcieLtrLatency" #endif }, - { /* [312] */ + { /* [313] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4924,7 +4939,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusSetPcieLtrLatency" #endif }, - { /* [313] */ + { /* [314] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4939,7 +4954,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdBusGetPexUtilCounters" #endif }, - { /* [314] */ + { /* [315] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4954,7 +4969,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusClearPexUtilCounters" #endif }, - { /* [315] */ + { /* [316] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4969,7 +4984,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetBFD" #endif }, - { /* [316] */ + { /* [317] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4984,7 +4999,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetAspmDisableFlags" #endif }, - { /* [317] */ + { /* [318] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -4999,7 +5014,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetInfoV2" #endif }, - { /* [318] */ + { /* [319] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5014,7 +5029,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusControlPublicAspmBits" #endif }, - { /* [319] */ + { /* [320] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5029,7 +5044,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetNvlinkPeerIdMask" #endif }, - { /* [320] */ + { /* [321] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5044,7 +5059,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdBusSetEomParameters" #endif }, - { /* [321] */ + { /* [322] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5059,7 +5074,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetUphyDlnCfgSpace" #endif }, - { /* [322] */ + { /* [323] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5074,7 +5089,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetEomStatus" #endif }, - { /* [323] */ + { /* [324] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5089,7 +5104,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetPcieReqAtomicsCaps" #endif }, - { /* [324] */ + { /* [325] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5104,7 +5119,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics" #endif }, - { /* [325] */ + { /* [326] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5119,7 +5134,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetC2CInfo" #endif }, - { /* [326] */ + { /* [327] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5134,7 +5149,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusSysmemAccess" #endif }, - { /* [327] */ + { /* [328] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5149,7 +5164,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdBusGetC2CErrorInfo" #endif }, - { /* [328] */ + { /* [329] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5164,7 +5179,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusSetP2pMapping" #endif }, - { /* [329] */ + { /* [330] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5179,7 +5194,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusUnsetP2pMapping" #endif }, - { /* [330] */ + { /* [331] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5194,7 +5209,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdKPerfBoost" #endif }, - { /* [331] */ + { /* [332] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5209,7 +5224,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdPerfSetPowerstate" #endif }, - { /* [332] */ + { /* [333] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5224,7 +5239,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdPerfRatedTdpGetControl" #endif }, - { /* [333] */ + { /* [334] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5239,7 +5254,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdPerfRatedTdpSetControl" #endif }, - { /* [334] */ + { /* [335] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5254,7 +5269,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdPerfSetAuxPowerState" #endif }, - { /* [335] */ + { /* [336] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5269,7 +5284,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdPerfReservePerfmonHw" #endif }, - { /* [336] */ + { /* [337] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5284,7 +5299,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2" #endif }, - { /* [337] */ + { /* [338] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5299,7 +5314,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcReadVirtualMem" #endif }, - { /* [338] */ + { /* [339] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5314,7 +5329,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcGetErrorCount" #endif }, - { /* [339] */ + { /* [340] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5329,7 +5344,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcSetCleanErrorHistory" #endif }, - { /* [340] */ + { /* [341] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5344,7 +5359,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcGetWatchdogInfo" #endif }, - { /* [341] */ + { /* [342] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5359,7 +5374,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdRcDisableWatchdog" #endif }, - { /* [342] */ + { /* [343] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5374,7 +5389,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcEnableWatchdog" #endif }, - { /* [343] */ + { /* [344] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5389,7 +5404,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcReleaseWatchdogRequests" #endif }, - { /* [344] */ + { /* [345] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5404,7 +5419,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdSetRcRecovery" #endif }, - { /* [345] */ + { /* [346] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5419,7 +5434,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGetRcRecovery" #endif }, - { /* [346] */ + { /* [347] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5434,7 +5449,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcSoftDisableWatchdog" #endif }, - { /* [347] */ + { /* [348] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5449,7 +5464,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdSetRcInfo" #endif }, - { /* [348] */ + { /* [349] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5464,7 +5479,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGetRcInfo" #endif }, - 
{ /* [349] */ + { /* [350] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5479,7 +5494,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdRcGetErrorV2" #endif }, - { /* [350] */ + { /* [351] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5494,7 +5509,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvdGetDumpSize" #endif }, - { /* [351] */ + { /* [352] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5509,7 +5524,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvdGetDump" #endif }, - { /* [352] */ + { /* [353] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5524,7 +5539,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvdGetNocatJournalRpt" #endif }, - { /* [353] */ + { /* [354] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5539,7 +5554,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvdSetNocatJournalData" #endif }, - { /* [354] */ + { /* [355] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5554,7 +5569,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDmaInvalidateTLB" #endif }, - { /* [355] */ + { /* [356] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5569,7 +5584,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdDmaGetInfo" #endif }, - { /* [356] */ + { /* [357] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5584,7 +5599,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdPmgrGetModuleInfo" #endif }, - { /* [357] */ + { /* [358] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5599,7 +5614,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdLpwrDifrCtrl" #endif }, - { /* [358] */ + { /* [359] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5614,7 +5629,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdLpwrDifrPrefetchResponse" #endif }, - { /* [359] */ + { /* [360] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5629,7 +5644,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetCaps" #endif }, - { /* [360] */ + { /* [361] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5644,7 +5659,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetCePceMask" #endif }, - { /* [361] */ + { /* [362] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5659,7 +5674,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetCapsV2" #endif }, - { /* [362] */ + { /* [363] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5674,7 +5689,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeUpdatePceLceMappings" #endif }, - { /* [363] */ + { /* [364] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5689,7 +5704,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeUpdateClassDB" #endif }, - { /* [364] */ + { /* [365] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100e40u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5704,7 +5719,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetPhysicalCaps" #endif }, - { /* [365] */ + { /* [366] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c0200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5719,7 +5734,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetFaultMethodBufferSize" #endif }, - { /* [366] */ + { /* [367] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5734,7 +5749,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetHubPceMask" #endif }, - { /* [367] */ + { /* [368] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5749,7 +5764,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetAllCaps" #endif }, - { /* [368] */ + { /* [369] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5764,7 +5779,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdCeGetAllPhysicalCaps" #endif }, - { /* [369] */ + { /* [370] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5779,7 +5794,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetNvlinkCaps" #endif }, - { /* [370] */ + { /* [371] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5794,7 +5809,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetNvlinkStatus" #endif }, - { /* [371] */ + { /* [372] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5809,7 +5824,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdBusGetNvlinkErrInfo" #endif }, - { /* [372] */ + { /* [373] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5824,7 +5839,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGetNvlinkCounters" #endif }, - { /* [373] */ + { /* [374] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5839,7 +5854,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdClearNvlinkCounters" #endif }, - { /* [374] */ + { /* [375] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5854,7 +5869,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts" #endif }, - { /* [375] */ + { /* [376] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5869,7 +5884,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkSetupEom" #endif }, - { /* [376] */ + { /* [377] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5884,7 +5899,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetPowerState" #endif }, - { /* [377] */ + { /* [378] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5899,7 +5914,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinGetLinkFomValues" #endif }, - { /* [378] */ + { /* [379] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5914,7 +5929,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetNvlinkEccErrors" #endif }, - { /* [379] */ + { /* [380] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5929,7 +5944,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkReadTpCounters" #endif }, - { /* [380] */ + { /* [381] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5944,7 +5959,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkEnableNvlinkPeer" #endif }, - { /* [381] */ + { /* [382] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5959,7 +5974,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetLpCounters" #endif }, - { /* [382] */ + { /* [383] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5974,7 +5989,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkCoreCallback" #endif }, - { /* [383] */ + { /* [384] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -5989,7 +6004,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetAliEnabled" #endif }, - { /* [384] */ + { /* [385] */ #if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6004,7 +6019,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid" #endif }, - { /* [385] */ + { /* [386] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6019,7 +6034,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkUpdateHshubMux" #endif }, - { /* [386] */ + { /* [387] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6034,7 +6049,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer" #endif }, - { /* [387] */ + { /* [388] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6049,7 +6064,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer" #endif }, - { /* [388] */ + { /* [389] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6064,7 +6079,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkRemoveNvlinkMapping" #endif }, - { /* [389] */ + { /* [390] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6079,7 +6094,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkSaveRestoreHshubState" #endif }, - { /* [390] */ + { /* [391] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6094,7 +6109,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkProgramBufferready" #endif }, - { /* [391] */ 
+ { /* [392] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6109,7 +6124,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkUpdateCurrentConfig" #endif }, - { /* [392] */ + { /* [393] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6124,7 +6139,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkSetLoopbackMode" #endif }, - { /* [393] */ + { /* [394] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6139,7 +6154,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkUpdatePeerLinkMask" #endif }, - { /* [394] */ + { /* [395] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6154,7 +6169,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkUpdateLinkConnection" #endif }, - { /* [395] */ + { /* [396] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6169,7 +6184,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkEnableLinksPostTopology" #endif }, - { /* [396] */ + { /* [397] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6184,7 +6199,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkPreLinkTrainAli" #endif }, - { /* [397] */ + { /* [398] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6199,7 +6214,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetRefreshCounters" #endif 
}, - { /* [398] */ + { /* [399] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6214,7 +6229,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkClearRefreshCounters" #endif }, - { /* [399] */ + { /* [400] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6229,7 +6244,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet" #endif }, - { /* [400] */ + { /* [401] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6244,7 +6259,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkLinkTrainAli" #endif }, - { /* [401] */ + { /* [402] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6259,7 +6274,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo" #endif }, - { /* [402] */ + { /* [403] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6274,7 +6289,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo" #endif }, - { /* [403] */ + { /* [404] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6289,7 +6304,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkProgramLinkSpeed" #endif }, - { /* [404] */ + { /* [405] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6304,7 +6319,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ 
"subdeviceCtrlCmdNvlinkAreLinksTrained" #endif }, - { /* [405] */ + { /* [406] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6319,7 +6334,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkResetLinks" #endif }, - { /* [406] */ + { /* [407] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6334,7 +6349,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkDisableDlInterrupts" #endif }, - { /* [407] */ + { /* [408] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6349,7 +6364,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetLinkAndClockInfo" #endif }, - { /* [408] */ + { /* [409] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6364,7 +6379,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkSetupNvlinkSysmem" #endif }, - { /* [409] */ + { /* [410] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6379,7 +6394,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkProcessForcedConfigs" #endif }, - { /* [410] */ + { /* [411] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6394,7 +6409,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkSyncLaneShutdownProps" #endif }, - { /* [411] */ + { /* [412] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6409,7 +6424,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF 
__nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts" #endif }, - { /* [412] */ + { /* [413] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6424,7 +6439,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask" #endif }, - { /* [413] */ + { /* [414] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6439,7 +6454,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr" #endif }, - { /* [414] */ + { /* [415] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100201u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6454,7 +6469,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo" #endif }, - { /* [415] */ + { /* [416] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6469,7 +6484,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkEnableLinks" #endif }, - { /* [416] */ + { /* [417] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6484,7 +6499,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkProcessInitDisabledLinks" #endif }, - { /* [417] */ + { /* [418] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6499,7 +6514,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkEomControl" #endif }, - { /* [418] */ + { /* [419] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6514,7 +6529,7 @@ static const 
struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkSetL1Threshold" #endif }, - { /* [419] */ + { /* [420] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6529,7 +6544,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkGetL1Threshold" #endif }, - { /* [420] */ + { /* [421] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1240u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6544,7 +6559,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkInbandSendData" #endif }, - { /* [421] */ + { /* [422] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6559,7 +6574,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkDirectConnectCheck" #endif }, - { /* [422] */ + { /* [423] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6574,7 +6589,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdNvlinkPostFaultUp" #endif }, - { /* [423] */ + { /* [424] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6589,7 +6604,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlcnGetDmemUsage" #endif }, - { /* [424] */ + { /* [425] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6604,7 +6619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlcnGetEngineArch" #endif }, - { /* [425] */ + { /* [426] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6619,7 +6634,7 @@ static const struct 
NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlcnUstreamerQueueInfo" #endif }, - { /* [426] */ + { /* [427] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6634,7 +6649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlGet" #endif }, - { /* [427] */ + { /* [428] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6649,7 +6664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlSet" #endif }, - { /* [428] */ + { /* [429] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6664,7 +6679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferInfo" #endif }, - { /* [429] */ + { /* [430] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6679,7 +6694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferSize" #endif }, - { /* [430] */ + { /* [431] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6694,7 +6709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters" #endif }, - { /* [431] */ + { /* [432] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6709,7 +6724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlaRange" #endif }, - { /* [432] */ + { /* [433] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6724,7 +6739,7 @@ static const struct 
NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock" #endif }, - { /* [433] */ + { /* [434] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100004u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6739,7 +6754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlaGetRange" #endif }, - { /* [434] */ + { /* [435] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6754,7 +6769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats" #endif }, - { /* [435] */ + { /* [436] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6769,7 +6784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGspGetFeatures" #endif }, - { /* [436] */ + { /* [437] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6784,7 +6799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo" #endif }, - { /* [437] */ + { /* [438] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6799,7 +6814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt" #endif }, - { /* [438] */ + { /* [439] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6814,7 +6829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff" #endif }, - { /* [439] */ + { /* [440] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6829,7 +6844,7 @@ static const struct 
NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower" #endif }, - { /* [440] */ + { /* [441] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6844,7 +6859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus" #endif }, - { /* [441] */ + { /* [442] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6859,7 +6874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus" #endif }, - { /* [442] */ + { /* [443] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6874,7 +6889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalBootloadGspVgpuPluginTask" #endif }, - { /* [443] */ + { /* [444] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6889,7 +6904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalShutdownGspVgpuPluginTask" #endif }, - { /* [444] */ + { /* [445] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6904,7 +6919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalPgpuAddVgpuType" #endif }, - { /* [445] */ + { /* [446] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6919,7 +6934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalEnumerateVgpuPerPgpu" #endif }, - { /* [446] */ + { /* [447] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void 
(*)(void)) NULL, #else @@ -6934,7 +6949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalClearGuestVmInfo" #endif }, - { /* [447] */ + { /* [448] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6949,7 +6964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetVgpuFbUsage" #endif }, - { /* [448] */ + { /* [449] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6964,7 +6979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuEncoderCapacity" #endif }, - { /* [449] */ + { /* [450] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6979,7 +6994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalCleanupGspVgpuPluginResources" #endif }, - { /* [450] */ + { /* [451] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -6994,7 +7009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuFsEncoding" #endif }, - { /* [451] */ + { /* [452] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -7009,7 +7024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuMigrationSupport" #endif }, - { /* [452] */ + { /* [453] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -7024,7 +7039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuMgrConfig" #endif }, - { /* 
[453] */ + { /* [454] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -7039,7 +7054,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic /*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask" #endif }, - { /* [454] */ + { /* [455] */ #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) /*pFunc=*/ (void (*)(void)) NULL, #else @@ -7059,7 +7074,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice = { - /*numEntries=*/ 455, + /*numEntries=*/ 456, /*pExportEntries=*/ __nvoc_exported_method_def_Subdevice }; @@ -8063,6 +8078,10 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner * pThis->__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__ = &subdeviceCtrlCmdGpuGetNvencSwSessionInfo_IMPL; #endif +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2__ = &subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_IMPL; +#endif + #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) pThis->__subdeviceCtrlCmdGpuGetNvfbcSwSessionStats__ = &subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL; #endif @@ -8166,10 +8185,6 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner * #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) pThis->__subdeviceCtrlCmdGpuHandleGpuSR__ = &subdeviceCtrlCmdGpuHandleGpuSR_IMPL; #endif - -#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u) - pThis->__subdeviceCtrlCmdGpuSetComputeModeRules__ = &subdeviceCtrlCmdGpuSetComputeModeRules_IMPL; -#endif } static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) { @@ -8180,6 +8195,10 @@ static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner * PORT_UNREFERENCED_VARIABLE(rmVariantHal); PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u) + 
pThis->__subdeviceCtrlCmdGpuSetComputeModeRules__ = &subdeviceCtrlCmdGpuSetComputeModeRules_IMPL; +#endif + #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) pThis->__subdeviceCtrlCmdGpuQueryComputeModeRules__ = &subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL; #endif diff --git a/src/nvidia/generated/g_subdevice_nvoc.h b/src/nvidia/generated/g_subdevice_nvoc.h index 24b283ce6..870f7463f 100644 --- a/src/nvidia/generated/g_subdevice_nvoc.h +++ b/src/nvidia/generated/g_subdevice_nvoc.h @@ -353,6 +353,7 @@ struct Subdevice { NV_STATUS (*__subdeviceCtrlCmdGpuGetEncoderCapacity__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS *); NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionStats__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS *); NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS *); NV_STATUS (*__subdeviceCtrlCmdGpuGetNvfbcSwSessionStats__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *); NV_STATUS (*__subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS *); NV_STATUS (*__subdeviceCtrlCmdGpuSetFabricAddr__)(struct Subdevice *, NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS *); @@ -898,6 +899,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C #define subdeviceCtrlCmdGpuGetEncoderCapacity(pSubdevice, pEncoderCapacityParams) subdeviceCtrlCmdGpuGetEncoderCapacity_DISPATCH(pSubdevice, pEncoderCapacityParams) #define subdeviceCtrlCmdGpuGetNvencSwSessionStats(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionStats_DISPATCH(pSubdevice, pParams) #define subdeviceCtrlCmdGpuGetNvencSwSessionInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionInfo_DISPATCH(pSubdevice, pParams) +#define 
subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_DISPATCH(pSubdevice, pParams) #define subdeviceCtrlCmdGpuGetNvfbcSwSessionStats(pSubdevice, params) subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_DISPATCH(pSubdevice, params) #define subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo(pSubdevice, params) subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_DISPATCH(pSubdevice, params) #define subdeviceCtrlCmdGpuSetFabricAddr(pSubdevice, pParams) subdeviceCtrlCmdGpuSetFabricAddr_DISPATCH(pSubdevice, pParams) @@ -2568,6 +2570,12 @@ static inline NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfo_DISPATCH(struct return pSubdevice->__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__(pSubdevice, pParams); } +NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2__(pSubdevice, pParams); +} + NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *params); static inline NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *params) { diff --git a/src/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/src/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h index 816318b0f..63bc22004 100644 --- a/src/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h +++ b/src/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h @@ -153,6 +153,7 @@ typedef struct GspSystemInfo BUSINFO FHBBusInfo; BUSINFO chipsetIDInfo; ACPI_METHOD_DATA acpiMethodData; + NvU64 sysTimerOffsetNs; } GspSystemInfo; diff --git a/src/nvidia/kernel/vgpu/nv/rpc.c b/src/nvidia/kernel/vgpu/nv/rpc.c index 
30e9e5bf4..d5dd85b2a 100644 --- a/src/nvidia/kernel/vgpu/nv/rpc.c +++ b/src/nvidia/kernel/vgpu/nv/rpc.c @@ -52,6 +52,7 @@ #include "virtualization/hypervisor/hypervisor.h" #include "finn_rm_api.h" #include "os/os.h" +#include "objtmr.h" #define SDK_ALL_CLASSES_INCLUDE_FULL_HEADER #include "g_allclasses.h" @@ -362,7 +363,8 @@ static NV_STATUS _issueRpcLarge pBuf8 = (NvU8 *)pBuffer; remainingSize = bufSize; - entryLength = NV_MIN(bufSize, pRpc->maxRpcSize); + entryLength = NV_MIN(bufSize, vgpu_rpc_message_header_v->length); + NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength <= pRpc->maxRpcSize, NV_ERR_INVALID_STATE); if (((NvU8 *)vgpu_rpc_message_header_v != pBuf8) && bBidirectional) portMemCopy(pBuf8, entryLength, vgpu_rpc_message_header_v, entryLength); @@ -393,8 +395,11 @@ static NV_STATUS _issueRpcLarge NV_ASSERT(0); return nvStatus; } - entryLength = vgpu_rpc_message_header_v->length - sizeof(rpc_message_header_v); + + entryLength = vgpu_rpc_message_header_v->length; NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength <= pRpc->maxRpcSize, NV_ERR_INVALID_STATE); + NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength >= sizeof(rpc_message_header_v), NV_ERR_INVALID_STATE); + entryLength -= sizeof(rpc_message_header_v); if (entryLength > remainingSize) entryLength = remainingSize; @@ -1312,6 +1317,9 @@ NV_STATUS rpcGspSetSystemInfo_v17_00 rpcInfo->bUpstreamL1PorMobileOnly = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY); rpcInfo->upstreamAddressValid = pGpu->gpuClData.upstreamPort.addr.valid; + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + rpcInfo->sysTimerOffsetNs = pTmr->sysTimerOffsetNs; + status = _issueRpcAsync(pGpu, pRpc); } diff --git a/src/nvidia/src/kernel/core/locks.c b/src/nvidia/src/kernel/core/locks.c index 626127445..7c611d601 100644 --- a/src/nvidia/src/kernel/core/locks.c +++ b/src/nvidia/src/kernel/core/locks.c @@ -563,12 +563,11 @@ _rmGpuLocksAcquire(NvU32 gpuMask, NvU32 flags, NvU32 module, void *ra, NvU32 *pG // If we own a higher order lock than one 
of the needed ones, we are // violating the locking order and need to do a conditional acquire // clz32(0) == ctz(0) == 32: - // owned=0b00110000, needed=0b00001100: (4 < (32-28)), bCond=FALSE - // owned=0b00110010, needed=0b00001100: (1 < (32-28)), bCond=TRUE - // owned=0b00010000, needed=0b11000011: (4 < (32-24)), bCond=TRUE - // owned=0b00000000, needed=0b00001100: (32 < (32-28)), bCond=FALSE - // owned=0b00000001, needed=0b00000000: (0 < (32-32)), bCond=FALSE - if (portUtilCountTrailingZeros32(ownedMask) < (32-portUtilCountLeadingZeros32(gpuMask))) + // owned=0b00001100, needed=0b00110000: ((32-28) > 4), bCond=FALSE + // owned=0b00001100, needed=0b00110010: ((32-28) > 1), bCond=TRUE + // owned=0b11000011, needed=0b00010000: ((32-24) > 4), bCond=TRUE + // owned=0b00000000, needed=0b00000001: ((32-32) > 0), bCond=FALSE + if ((32-portUtilCountLeadingZeros32(ownedMask)) > portUtilCountTrailingZeros32(gpuMask)) { bCondAcquireCheck = NV_TRUE; } diff --git a/src/nvidia/src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c b/src/nvidia/src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c index ccae855e4..b0a9e1458 100644 --- a/src/nvidia/src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c +++ b/src/nvidia/src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c @@ -1375,3 +1375,36 @@ kbusGetFlaRange_GA100 return NV_OK; } + +/*! + * @brief Returns the Nvlink specific peer number from pGpu (Local) to pGpuPeer. + * Used only by VF. 
+ * + * @param[in] pGpu Local + * @param[in] pKernelBus Local + * @param[in] pGpuPeer Remote + * + * @returns NvU32 bus peer number + */ +NvU32 +kbusGetNvlinkPeerId_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + OBJGPU *pGpuPeer +) +{ + NvU32 gpuPeerInst = gpuGetInstance(pGpuPeer); + NvU32 peerId = pKernelBus->p2p.busNvlinkPeerNumberMask[gpuPeerInst]; + + if (peerId == 0) + { + NV_PRINTF(LEVEL_INFO, + "NVLINK P2P not set up between GPU%u and GPU%u, checking for PCIe P2P...\n", + gpuGetInstance(pGpu), gpuPeerInst); + return BUS_INVALID_PEER; + } + + LOWESTBITIDX_32(peerId); + return peerId; +} diff --git a/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c b/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c index f86741762..7c7c50fe3 100644 --- a/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c +++ b/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c @@ -475,16 +475,12 @@ kbusStateUnload_GM107 kbusUnlinkP2P_HAL(pGpu, pKernelBus); } - if (flags & GPU_STATE_FLAGS_PRESERVING) + if ((flags & GPU_STATE_FLAGS_PRESERVING) && !IS_VIRTUAL_WITH_SRIOV(pGpu)) { if (!IS_GPU_GC6_STATE_ENTERING(pGpu)) { status = kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, GPU_GFID_PF); - if (!IS_VIRTUAL_WITH_SRIOV(pGpu)) - { - // Do not use BAR2 physical mode for bootstrapping BAR2 across S/R. 
- pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE; - } + pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE; } } else @@ -1439,6 +1435,9 @@ kbusSetupBar2GpuVaSpace_GM107 status = mmuWalkReserveEntries(pWalk, pLevelFmt, pKernelBus->bar2[gfid].cpuVisibleBase, pKernelBus->bar2[gfid].cpuVisibleLimit, NV_FALSE); NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + + pKernelBus->bar2[gfid].cpuVisiblePgTblSize = pKernelBus->bar2[gfid].pageTblInit * pKernelBus->bar2[gfid].pageTblSize; + status = mmuWalkSparsify(pWalk, pKernelBus->bar2[gfid].cpuVisibleBase, pKernelBus->bar2[gfid].cpuVisibleLimit, NV_TRUE); NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); } diff --git a/src/nvidia/src/kernel/gpu/bus/kern_bus.c b/src/nvidia/src/kernel/gpu/bus/kern_bus.c index 801b6f2a8..f3d2be829 100644 --- a/src/nvidia/src/kernel/gpu/bus/kern_bus.c +++ b/src/nvidia/src/kernel/gpu/bus/kern_bus.c @@ -1243,11 +1243,3 @@ kbusIsGpuP2pAlive_IMPL return (pKernelBus->totalP2pObjectsAliveRefCount > 0); } -/** - * @brief Setup VF BAR2 during hibernate resume - * - * @param[in] pGpu - * @param[in] pKernelBus - * @param[in] flags - */ - diff --git a/src/nvidia/src/kernel/gpu/disp/disp_common_ctrl_acpi.c b/src/nvidia/src/kernel/gpu/disp/disp_common_ctrl_acpi.c index 2112260ad..ecaa8866e 100644 --- a/src/nvidia/src/kernel/gpu/disp/disp_common_ctrl_acpi.c +++ b/src/nvidia/src/kernel/gpu/disp/disp_common_ctrl_acpi.c @@ -167,6 +167,11 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL { case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX: { + if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } // // get display mask from input buffer // display mask is 4 byte long and available at byte 1 @@ -181,27 +186,55 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL // get acpi id acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_MXMX(pGpu, acpiId, pInOutData); break; } case 
NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUON: { + if (inOutDataSize < sizeof(NvU32)) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_NVHG_GPUON(pGpu, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUOFF: { + if (inOutDataSize < sizeof(NvU32)) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_NVHG_GPUOFF(pGpu, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUSTA: { + if (inOutDataSize < sizeof(NvU32)) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_NVHG_GPUSTA(pGpu, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS: { + if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + // // get acpi id from input buffer // acpi id is 4 byte long and available at byte 4 @@ -213,11 +246,18 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL ((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET, sizeof(NvU32)); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_NVHG_MXDS(pGpu, acpiId, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS: { + if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + // // get acpi id from input buffer // acpi id is 4 byte long and available at byte 4 @@ -229,11 +269,18 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL ((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS_DISP_MASK_OFFSET, sizeof(NvU32)); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_MXDS(pGpu, acpiId, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM: { + if 
(inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + // // get acpi id from input buffer // acpi id is 4 byte long and available at byte 4 @@ -245,31 +292,53 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL ((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM_DISP_MASK_OFFSET, sizeof(NvU32)); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_MXDM(pGpu, acpiId, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID: { + if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + // get acpi id from input buffer portMemCopy(&acpiId, sizeof(NvU32), ((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID_DISP_MASK_OFFSET, sizeof(NvU32)); + + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_MXID(pGpu, acpiId, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST: { + if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + portMemCopy(&acpiId, sizeof(NvU32), ((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST_DISP_MASK_OFFSET, sizeof(NvU32)); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_LRST(pGpu, acpiId, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID: { + if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + portMemCopy(&acpiId, sizeof(NvU32), ((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID_DISP_MASK_OFFSET, @@ -283,6 +352,12 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX: { + if (inOutDataSize < 
(NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + // // get acpi id from input buffer // acpi id is 4 byte long and available at byte 4 @@ -297,11 +372,17 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL // get acpi id acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_NVHG_MXMX(pGpu, acpiId, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS: { + if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS_DISP_MASK_OFFSET + sizeof(NvU32))) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } // // get acpi id from input buffer // acpi id is 4 byte long and available at byte 4 @@ -316,19 +397,35 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL // get acpi id acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_NVHG_DOS(pGpu, acpiId, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_ROM: { + NvU32 *pBuffer = (NvU32*) pInOutData; + if ((inOutDataSize < (2 * sizeof(NvU32))) || (inOutDataSize < pBuffer[1])) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + outDataSize = pBuffer[1]; outStatus = pGpu->pOS->osCallACPI_NVHG_ROM(pGpu, (NvU32*) pInOutData, (NvU32*) pInOutData); break; } case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DCS: { + if (inOutDataSize < sizeof(NvU32)) + { + outStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + // get display mask from input buffer portMemCopy(&acpiId, sizeof(NvU32), pInOutData, sizeof(NvU32)); + outDataSize = sizeof(NvU32); outStatus = pOS->osCallACPI_NVHG_DCS(pGpu, acpiId, (NvU32*) pInOutData); break; } diff --git a/src/nvidia/src/kernel/gpu/fsp/arch/hopper/kern_fsp_gh100.c b/src/nvidia/src/kernel/gpu/fsp/arch/hopper/kern_fsp_gh100.c index ed3114efc..59ddfe926 100644 --- a/src/nvidia/src/kernel/gpu/fsp/arch/hopper/kern_fsp_gh100.c +++ 
b/src/nvidia/src/kernel/gpu/fsp/arch/hopper/kern_fsp_gh100.c @@ -953,6 +953,18 @@ kfspDumpDebugState_GH100 KernelFsp *pKernelFsp ) { + // + // Older microcodes did not have the version populated in scratch. + // They will report a version of 0. + // + const NvU32 fspUcodeVersion = GPU_REG_RD_DRF(pGpu, _GFW, _FSP_UCODE_VERSION, _FULL); + if (fspUcodeVersion > 0) + { + NV_PRINTF(LEVEL_ERROR, "FSP microcode v%u.%u\n", + DRF_VAL(_GFW, _FSP_UCODE_VERSION, _MAJOR, fspUcodeVersion), + DRF_VAL(_GFW, _FSP_UCODE_VERSION, _MINOR, fspUcodeVersion)); + } + NV_PRINTF(LEVEL_ERROR, "NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(0) = 0x%x\n", GPU_REG_RD32(pGpu, NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(0))); NV_PRINTF(LEVEL_ERROR, "NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(1) = 0x%x\n", @@ -1209,3 +1221,15 @@ kfspGetExtraReservedMemorySize_GH100 // Bug: 3763996 return 4 * 1024; } + +NvBool +kfspRequiresBug3957833WAR_GH100 +( + OBJGPU *pGpu, + KernelFsp *pKernelFsp +) +{ + const NvU32 FSP_BUG_3957833_FIX_VERSION = 0x44C; + const NvU32 fspUcodeVersion = GPU_REG_RD_DRF(pGpu, _GFW, _FSP_UCODE_VERSION, _FULL); + return fspUcodeVersion < FSP_BUG_3957833_FIX_VERSION; +} diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_graphics.c b/src/nvidia/src/kernel/gpu/gr/kernel_graphics.c index 076ea39c9..b9f699608 100644 --- a/src/nvidia/src/kernel/gpu/gr/kernel_graphics.c +++ b/src/nvidia/src/kernel/gpu/gr/kernel_graphics.c @@ -2132,6 +2132,9 @@ deviceCtrlCmdKGrGetCaps_IMPL return NV_ERR_NOT_SUPPORTED; } + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGrCaps != NULL, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pParams->capsTblSize == NV0080_CTRL_GR_CAPS_TBL_SIZE, NV_ERR_INVALID_ARGUMENT); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) { KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_graphics_context.c b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_context.c index 510478207..cebaa508a 100644 --- 
a/src/nvidia/src/kernel/gpu/gr/kernel_graphics_context.c +++ b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_context.c @@ -2244,6 +2244,73 @@ kgrctxUnmapCtxPreemptionBuffers_IMPL kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, pVAS, &pKernelGraphicsContextUnicast->rtvCbCtxswBuffer.vAddrList); } +/*! + * @brief This funciton operates as a last step check before proceeding with unmapping + * various global ctx buffers. + * + * @param[in] pGpu + * @param[in] pKernelGraphicsContext + * @param[in] pKernelChannel KernelChannel attempting to be unmapped + * @param[in] bufId GR_GLOBALCTX_BUFFER checking for un-accounted references + * + * @return NV_FALSE unmapping on bufId can proceed for pKernelChannel's VAS + * @return NV_TRUE References which use the mapping are still alive + */ +NvBool +kgrctxIsFinalGlobalBufMapRefDuped_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelChannel *pKernelChannel, + GR_GLOBALCTX_BUFFER bufId +) +{ + CHANNEL_NODE *pChanNode; + CHANNEL_LIST *pChanList; + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status = NV_OK; + NvU64 refCount; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelChannel != NULL, NV_FALSE); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelChannel->pKernelChannelGroupApi != NULL, NV_FALSE); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup != NULL, NV_FALSE); + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, + pKernelGraphicsContext, + &pKernelGraphicsContextUnicast), + return NV_FALSE;); + + // + // Return NV_FALSE if the VA is not found or the refCount for the channels pVAS is not exactly 1. + // Both cases we want to handle in the unmapping call itself. 
+ // + status = vaListGetRefCount(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[bufId], pKernelChannel->pVAS, &refCount); + if (status != NV_OK || refCount != 1) + { + return NV_FALSE; + } + + pChanList = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList; + NV_CHECK_OR_RETURN(LEVEL_ERROR, pChanList != NULL, NV_FALSE); + + for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext) + { + // Skip the channel we are looking to unmap + if (kchannelGetDebugTag(pKernelChannel) == kchannelGetDebugTag(pChanNode->pKernelChannel)) + continue; + + if (pKernelChannel->pVAS == pChanNode->pKernelChannel->pVAS) + { + NV_PRINTF(LEVEL_INFO, "Channel %d shares a pVAS with channel %d\n", + kchannelGetDebugTag(pKernelChannel), + kchannelGetDebugTag(pChanNode->pKernelChannel)); + return NV_TRUE; + } + } + return NV_FALSE; +} + /** * @brief Unmap associated ctx buffers (main, patch, global buffers etc). * @@ -2304,7 +2371,8 @@ kgrctxUnmapAssociatedCtxBuffers_IMPL (vaListGetRefCount(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[GR_GLOBALCTX_BUFFER_FECS_EVENT], pKernelChannel->pVAS, &refCount) == NV_OK) && (refCount == 1)) { - kgrctxUnmapGlobalCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel->pVAS, GR_GLOBALCTX_BUFFER_FECS_EVENT); + if (!(kgrctxIsFinalGlobalBufMapRefDuped(pGpu, pKernelGraphicsContext, pKernelChannel, GR_GLOBALCTX_BUFFER_FECS_EVENT))) + kgrctxUnmapGlobalCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel->pVAS, GR_GLOBALCTX_BUFFER_FECS_EVENT); } if ((pKernelGraphicsContextUnicast->pmCtxswBuffer.pMemDesc != NULL) && diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/ada/kernel_gsp_ad102.c b/src/nvidia/src/kernel/gpu/gsp/arch/ada/kernel_gsp_ad102.c index 0ba69346f..538e6703d 100644 --- a/src/nvidia/src/kernel/gpu/gsp/arch/ada/kernel_gsp_ad102.c +++ b/src/nvidia/src/kernel/gpu/gsp/arch/ada/kernel_gsp_ad102.c @@ -106,6 +106,7 @@ kgspGetWprHeapSize_AD102 } else { - return 80 * 1024 * 
1024; + // GSP-RM WPR heap (96MB) + libos3 (16MB) + return 112 * 1024 * 1024; } } \ No newline at end of file diff --git a/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c b/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c index 712d9d6f9..b2613afc7 100644 --- a/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c +++ b/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c @@ -22,9 +22,185 @@ */ #include "gpu/gpu.h" +#include "nvoc/prelude.h" +#include "nvstatuscodes.h" #include "rmapi/rs_utils.h" #include "gpu/hwpm/profiler_v2.h" #include "ctrl/ctrlb0cc/ctrlb0ccinternal.h" +#include "mem_mgr/mem.h" + +NV_STATUS +profilerBaseCtrlCmdFreePmaStream_IMPL +( + ProfilerBase *pProfiler, + NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *pParams +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(GPU_RES_GET_GPU(pProfiler)); + NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS internalParams; + + portMemSet(&internalParams, 0, sizeof(NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS)); + internalParams.pmaChannelIdx = pParams->pmaChannelIdx; + { + RsResourceRef *pCountRef = NULL; + RsResourceRef *pBufferRef = NULL; + + if (pProfiler->maxPmaChannels <= pParams->pmaChannelIdx) + { + goto err; + } + + pCountRef = pProfiler->ppBytesAvailable[pParams->pmaChannelIdx]; + pProfiler->ppBytesAvailable[pParams->pmaChannelIdx] = NULL; + pBufferRef = pProfiler->ppStreamBuffers[pParams->pmaChannelIdx]; + pProfiler->ppStreamBuffers[pParams->pmaChannelIdx] = NULL; + + if(pProfiler->pBoundCntBuf == pCountRef && pProfiler->pBoundPmaBuf == pBufferRef) + { + Memory *pCntMem = dynamicCast(pCountRef->pResource, Memory); + Memory *pBufMem = dynamicCast(pBufferRef->pResource, Memory); + pProfiler->pBoundCntBuf = NULL; + pProfiler->pBoundPmaBuf = NULL; + pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + + } + if (pCountRef != NULL) + { + refRemoveDependant(pCountRef, RES_GET_REF(pProfiler)); + } + if (pBufferRef != 
NULL) + { + refRemoveDependant(pBufferRef, RES_GET_REF(pProfiler)); + } + } +err: + + return pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pProfiler), + RES_GET_HANDLE(pProfiler), + NVB0CC_CTRL_CMD_INTERNAL_FREE_PMA_STREAM, + &internalParams, sizeof(internalParams)); +} + +NV_STATUS +profilerBaseCtrlCmdBindPmResources_IMPL +( + ProfilerBase *pProfiler +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfiler); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfiler); + NvHandle hObject = RES_GET_HANDLE(pProfiler); + NV_STATUS status = NV_OK; + RsResourceRef *pCntRef = NULL; + RsResourceRef *pBufRef = NULL; + Memory *pCntMem = NULL; + Memory *pBufMem = NULL; + + NV_CHECK_OR_GOTO(LEVEL_INFO, + !pProfiler->bLegacyHwpm && pProfiler->maxPmaChannels != 0, physical_control); + + if (pProfiler->maxPmaChannels <= pProfiler->pmaVchIdx) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pCntRef = pProfiler->ppBytesAvailable[pProfiler->pmaVchIdx]; + pBufRef = pProfiler->ppStreamBuffers[pProfiler->pmaVchIdx]; + + NV_CHECK_OR_GOTO(LEVEL_INFO, + pCntRef != NULL && pBufRef != NULL, physical_control); + + pCntMem = dynamicCast(pCntRef->pResource, Memory); + pBufMem = dynamicCast(pBufRef->pResource, Memory); + + NV_ASSERT_OR_RETURN(pCntMem != NULL && pBufMem != NULL, NV_ERR_INVALID_STATE); + + if (!memdescAcquireRmExclusiveUse(pCntMem->pMemDesc) || + !memdescAcquireRmExclusiveUse(pBufMem->pMemDesc)) + { + pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + return NV_ERR_INVALID_ARGUMENT; + } + + pProfiler->pBoundCntBuf = pCntRef; + pProfiler->pBoundPmaBuf = pBufRef; +physical_control: + + status = pRmApi->Control(pRmApi, hClient, hObject, + NVB0CC_CTRL_CMD_INTERNAL_BIND_PM_RESOURCES, + NULL, 0); + if (status != NV_OK && pCntMem != NULL && pBufMem != NULL) + { + pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + pProfiler->pBoundCntBuf = NULL; + 
pProfiler->pBoundPmaBuf = NULL; + } + return status; +} + +NV_STATUS +profilerBaseCtrlCmdUnbindPmResources_IMPL +( + ProfilerBase *pProfiler +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfiler); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfiler); + NvHandle hObject = RES_GET_HANDLE(pProfiler); + RsResourceRef *pCntRef = NULL; + RsResourceRef *pBufRef = NULL; + + pCntRef = pProfiler->pBoundCntBuf; + pBufRef = pProfiler->pBoundPmaBuf; + + if (pCntRef != NULL) + { + Memory *pCntMem = dynamicCast(pCntRef->pResource, Memory); + if (pCntMem != NULL) + { + pCntMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + } + pProfiler->pBoundCntBuf = NULL; + } + + if (pBufRef != NULL) + { + Memory *pBufMem = dynamicCast(pBufRef->pResource, Memory); + if (pBufMem != NULL) + { + pBufMem->pMemDesc->bRmExclusiveUse = NV_FALSE; + } + pProfiler->pBoundPmaBuf = NULL; + } + + return pRmApi->Control(pRmApi, hClient, hObject, + NVB0CC_CTRL_CMD_INTERNAL_UNBIND_PM_RESOURCES, + NULL, 0); +} + +NV_STATUS +profilerBaseCtrlCmdReserveHwpmLegacy_IMPL +( + ProfilerBase *pProfiler, + NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfiler); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfiler); + NvHandle hObject = RES_GET_HANDLE(pProfiler); + + pProfiler->bLegacyHwpm = NV_TRUE; + return pRmApi->Control(pRmApi, hClient, hObject, + NVB0CC_CTRL_CMD_INTERNAL_RESERVE_HWPM_LEGACY, + pParams, sizeof(*pParams)); +} NV_STATUS profilerBaseCtrlCmdAllocPmaStream_IMPL @@ -41,39 +217,57 @@ profilerBaseCtrlCmdAllocPmaStream_IMPL NvHandle hObject = RES_GET_HANDLE(pProfiler); NvBool bMemPmaBufferRegistered = NV_FALSE; NvBool bMemPmaBytesAvailableRegistered = NV_FALSE; - + RsResourceRef *pMemoryRef = NULL; // // REGISTER MEMDESCs TO GSP // These are no-op with BareMetal/No GSP // - status = memdescRegisterToGSP(pGpu, hClient, hParent, pParams->hMemPmaBuffer); - if (status != NV_OK) - { - 
goto fail; - } + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescRegisterToGSP(pGpu, hClient, hParent, pParams->hMemPmaBuffer), + fail); bMemPmaBufferRegistered = NV_TRUE; - status = memdescRegisterToGSP(pGpu, hClient, hParent, pParams->hMemPmaBytesAvailable); - if (status != NV_OK) - { - goto fail; - } + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescRegisterToGSP(pGpu, hClient, hParent, pParams->hMemPmaBytesAvailable), + fail); bMemPmaBytesAvailableRegistered = NV_TRUE; - // - // With BareMetal/No GSP: this control is a direct call to - // profilerBaseCtrlCmdInternalReleaseHwpmLegacy_IMPL - // - status = pRmApi->Control(pRmApi, - hClient, - hObject, - NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM, - pParams, sizeof(*pParams)); - if (status != NV_OK) - { - goto fail; - } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hObject, + NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM, + pParams, sizeof(*pParams)), fail); + + if (pProfiler->ppBytesAvailable == NULL) + { + NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS maxPmaParams; + portMemSet(&maxPmaParams, 0, sizeof(NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS)); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, hClient, hObject, + NVB0CC_CTRL_CMD_INTERNAL_GET_MAX_PMAS, + &maxPmaParams, sizeof(maxPmaParams)), fail); + + pProfiler->maxPmaChannels = maxPmaParams.maxPmaChannels; + pProfiler->ppBytesAvailable = (RsResourceRef**)portMemAllocNonPaged(maxPmaParams.maxPmaChannels * sizeof(RsResourceRef*)); + pProfiler->ppStreamBuffers = (RsResourceRef**)portMemAllocNonPaged(maxPmaParams.maxPmaChannels * sizeof(RsResourceRef*)); + } + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + serverutilGetResourceRef(hClient, pParams->hMemPmaBytesAvailable, &pMemoryRef), fail); + pProfiler->ppBytesAvailable[pParams->pmaChannelIdx] = pMemoryRef; + refAddDependant(pMemoryRef, RES_GET_REF(pProfiler)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + serverutilGetResourceRef(hClient, pParams->hMemPmaBuffer, 
&pMemoryRef), fail); + pProfiler->ppStreamBuffers[pParams->pmaChannelIdx] = pMemoryRef; + refAddDependant(pMemoryRef, RES_GET_REF(pProfiler)); + + // Copy output params to external struct. + pProfiler->pmaVchIdx = pParams->pmaChannelIdx; + pProfiler->bLegacyHwpm = NV_FALSE; + return status; fail: diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c index c0085172c..7d5197968 100644 --- a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c @@ -894,13 +894,23 @@ dmaAllocMapping_GM107 if (pPeerGpu != NULL) { - KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu); - - if ((pKernelNvlink != NULL) && - knvlinkIsNvlinkP2pSupported(pMappingGpu, pKernelNvlink, pPeerGpu)) + if (IS_VIRTUAL_WITH_SRIOV(pMappingGpu) && + !gpuIsWarBug200577889SriovHeavyEnabled(pMappingGpu)) { - pLocals->peerNumber = kbusGetPeerId_HAL(pMappingGpu, GPU_GET_KERNEL_BUS(pMappingGpu), - pPeerGpu); + pLocals->peerNumber = kbusGetNvlinkPeerId_HAL(pMappingGpu, + GPU_GET_KERNEL_BUS(pMappingGpu), + pPeerGpu); + } + else + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu); + + if ((pKernelNvlink != NULL) && + knvlinkIsNvlinkP2pSupported(pMappingGpu, pKernelNvlink, pPeerGpu)) + { + pLocals->peerNumber = kbusGetPeerId_HAL(pMappingGpu, GPU_GET_KERNEL_BUS(pMappingGpu), + pPeerGpu); + } } } else diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/heap.c b/src/nvidia/src/kernel/gpu/mem_mgr/heap.c index 501085d45..916cb2eac 100644 --- a/src/nvidia/src/kernel/gpu/mem_mgr/heap.c +++ b/src/nvidia/src/kernel/gpu/mem_mgr/heap.c @@ -4735,8 +4735,11 @@ NV_STATUS heapResize_IMPL if (resizeBy < 0) // Shrink the allocation { + NvS64 newSize; + NV_ASSERT_OR_RETURN(pBlockLast->owner == NVOS32_BLOCK_TYPE_FREE, NV_ERR_NO_MEMORY); - NV_ASSERT_OR_RETURN((pBlockLast->end - pBlockLast->begin + 
resizeBy > 0), NV_ERR_INVALID_LIMIT); + NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddS64(pBlockLast->end - pBlockLast->begin, resizeBy, &newSize) && + (newSize > 0), NV_ERR_INVALID_LIMIT); pBlockLast->end += resizeBy; } else // Grow the allocation diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c index c93ec6c65..b28daeeb5 100644 --- a/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c @@ -2317,6 +2317,28 @@ memdescFillPages } } +/*! + * @brief Acquire exclusive use for memdesc for RM. + * + * @param[inout] pMemDesc Memory descriptor + * + * @returns Boolean indicating whether we successfully acquired the memdesc for exclusive use + */ +NvBool +memdescAcquireRmExclusiveUse +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pParentDescriptor == NULL && + !pMemDesc->bRmExclusiveUse && + pMemDesc->DupCount == 1, + NV_FALSE); + + pMemDesc->bRmExclusiveUse = NV_TRUE; + return NV_TRUE; +} + // // SubMemory per subdevice chart: (MD - Memory Descriptor, SD - subdevice) // @@ -2438,6 +2460,7 @@ memdescCreateSubMem pMemDescNew->bUsingSuballocator = pMemDesc->bUsingSuballocator; pMemDescNew->_pParentDescriptor = pMemDesc; pMemDesc->childDescriptorCnt++; + pMemDescNew->bRmExclusiveUse = pMemDesc->bRmExclusiveUse; pMemDescNew->subMemOffset = Offset; diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_pwr_mgmt.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_pwr_mgmt.c index 3198a7775..3473d3e32 100644 --- a/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_pwr_mgmt.c +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_pwr_mgmt.c @@ -292,28 +292,43 @@ _memmgrAllocFbsrReservedRanges // Alloc the Memory descriptors for Fbsr Reserved regions, if not allocated. 
if (pMemoryManager->fbsrReservedRanges[FBSR_RESERVED_INST_MEMORY_BEFORE_BAR2PTE] == NULL) { - // Allocate Vid Mem descriptor for RM INSTANCE memory from start to BAR2PTE - size = memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc, AT_GPU, 0) - pMemoryManager->rsvdMemoryBase; + if(IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + // Allocate Vid Mem descriptor for RM INSTANCE memory from start to BAR2 PDE base. + size = memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPageLevelsMemDesc, AT_GPU, 0) - pMemoryManager->rsvdMemoryBase; + } + else + { + // Allocate Vid Mem descriptor for RM INSTANCE memory from start to BAR2PTE + size = memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc, AT_GPU, 0) - pMemoryManager->rsvdMemoryBase; + } + NV_ASSERT_OK_OR_GOTO(status, memdescCreate(&pMemoryManager->fbsrReservedRanges[FBSR_RESERVED_INST_MEMORY_BEFORE_BAR2PTE], pGpu, size, 0, NV_TRUE, ADDR_FBMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE), fail); - // Describe the MemDescriptor for RM Instance Memory from start to BAR2PTE + // Describe the MemDescriptor for RM Instance Memory from start to BAR2 PDE base. memdescDescribe(pMemoryManager->fbsrReservedRanges[FBSR_RESERVED_INST_MEMORY_BEFORE_BAR2PTE], ADDR_FBMEM, pMemoryManager->rsvdMemoryBase, size); } if (pMemoryManager->fbsrReservedRanges[FBSR_RESERVED_INST_MEMORY_AFTER_BAR2PTE] == NULL) { - // Allocate Mem descriptors for AFTER_BAR2PTE, GSP HEAP, WPR, NON WPR and VGA Workspace regions + RmPhysAddr afterBar2PteRegionStart = 0; + NvU64 afterBar2PteRegionSize = 0; + + /* + * Allocate Mem descriptors for AFTER_BAR2PTE, GSP HEAP, WPR, NON WPR and VGA Workspace regions. 
+ */ if (IS_GSP_CLIENT(pGpu)) { KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); - RmPhysAddr afterBar2PteRegionStart = memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc, AT_GPU, 0) + + NvU64 afterBar2PteRegionEnd = 0; + afterBar2PteRegionStart = memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc, AT_GPU, 0) + pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc->Size; - NvU64 afterBar2PteRegionEnd = pMemoryManager->rsvdMemoryBase + pMemoryManager->rsvdMemorySize; - NvU64 afterBar2PteRegionSize = afterBar2PteRegionEnd - afterBar2PteRegionStart; + afterBar2PteRegionEnd = pMemoryManager->rsvdMemoryBase + pMemoryManager->rsvdMemorySize; + afterBar2PteRegionSize = afterBar2PteRegionEnd - afterBar2PteRegionStart; NvU64 gspHeapRegionStart = afterBar2PteRegionEnd; NvU64 gspHeapRegionSize = pKernelGsp->pWprMeta->gspFwRsvdStart - gspHeapRegionStart; NvU64 gspNonWprRegionSize = pKernelGsp->pWprMeta->gspFwWprStart - pKernelGsp->pWprMeta->gspFwRsvdStart; @@ -362,17 +377,34 @@ _memmgrAllocFbsrReservedRanges // Allocate Vid Mem descriptor for RM INSTANCE memory, specific to VGA i.e. after BAR2PTE to end. else { - NvU64 fbAddrSpaceSize = _memmgrGetFbEndExcludingLostOnSuspendRegions(pGpu, pMemoryManager); - size = (fbAddrSpaceSize) - memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc, AT_GPU, 0) - pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc->Size; + NvU64 fbAddrSpaceSize = _memmgrGetFbEndExcludingLostOnSuspendRegions(pGpu, pMemoryManager); + + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + /* + * From BAR2 region we skip BAR2 PDEs and CPU visible region PTEs as we rebuild them on restore. + * But we need to save CPU invisible region PTEs across S/R, hence AFTER_BAR2PTE range starts + * after CPU visible region PTEs ends. 
+ */ + afterBar2PteRegionStart = pKernelBus->bar2[GPU_GFID_PF].pteBase + + pKernelBus->bar2[GPU_GFID_PF].cpuVisiblePgTblSize; + } + else + { + afterBar2PteRegionStart = memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc, AT_GPU, 0) + + pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc->Size; + } + + afterBar2PteRegionSize = fbAddrSpaceSize - afterBar2PteRegionStart; NV_ASSERT_OK_OR_GOTO(status, memdescCreate(&pMemoryManager->fbsrReservedRanges[FBSR_RESERVED_INST_MEMORY_AFTER_BAR2PTE], - pGpu, size, 0, NV_TRUE, ADDR_FBMEM, + pGpu, afterBar2PteRegionSize, 0, NV_TRUE, ADDR_FBMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE), fail); memdescDescribe(pMemoryManager->fbsrReservedRanges[FBSR_RESERVED_INST_MEMORY_AFTER_BAR2PTE], ADDR_FBMEM, - memdescGetPhysAddr(pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc, AT_GPU, 0) + pKernelBus->virtualBar2[GPU_GFID_PF].pPTEMemDesc->Size, size); + afterBar2PteRegionStart, afterBar2PteRegionSize); } } diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c b/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c index c884c5d11..0d2b870f8 100644 --- a/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c +++ b/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c @@ -3482,12 +3482,6 @@ kmigmgrCreateComputeInstances_VF (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST) ? 
params.inst.request.pReqComputeInstanceInfo[CIIdx].sharedEngFlag : params.inst.restore.pComputeInstanceSave->ciInfo.sharedEngFlags; - NvU32 grCount; - NvU32 ceCount; - NvU32 decCount; - NvU32 encCount; - NvU32 jpgCount; - NvU32 ofaCount; NvU32 spanStart; NvU32 ctsId; @@ -3592,112 +3586,214 @@ kmigmgrCreateComputeInstances_VF remainingGpcCount -= pCIProfile->gpcCount; } - if (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST) - { - grCount = 1; - ceCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].ceCount; - decCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvDecCount; - encCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvEncCount; - jpgCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvJpgCount; - ofaCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].ofaCount; - } - else + if (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE) { ENGTYPE_BIT_VECTOR engines; + bitVectorClrAll(&pResourceAllocation->engines); - bitVectorFromRaw(&engines, + // Set engines requested directly in resource allocation mask + bitVectorFromRaw(&pResourceAllocation->engines, params.inst.restore.pComputeInstanceSave->ciInfo.enginesMask, sizeof(params.inst.restore.pComputeInstanceSave->ciInfo.enginesMask)); - grCount = kmigmgrCountEnginesOfType(&engines, - RM_ENGINE_TYPE_GR(0)); + // Sanity check that all engines requested exist in the GI engine mask + bitVectorClrAll(&engines); + bitVectorAnd(&engines, &pResourceAllocation->engines, &pKernelMIGGpuInstance->resourceAllocation.localEngines); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + bitVectorTestEqual(&engines, &pResourceAllocation->engines), + status = NV_ERR_INVALID_ARGUMENT; goto done;); - ceCount = kmigmgrCountEnginesOfType(&engines, - RM_ENGINE_TYPE_COPY(0)); + // Set Shared/Exclusive Engine Masks for GRs restored + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, RM_ENGINE_RANGE_GR()); + bitVectorAnd(&engines, &engines, &pResourceAllocation->engines); - 
decCount = kmigmgrCountEnginesOfType(&engines, - RM_ENGINE_TYPE_NVDEC(0)); + // Only 1 GR can be requested per compute instance + NV_CHECK_OR_ELSE(LEVEL_ERROR, + (kmigmgrCountEnginesOfType(&engines, RM_ENGINE_TYPE_GR(0)) == 1), + status = NV_ERR_INVALID_ARGUMENT; goto done;); - encCount = kmigmgrCountEnginesOfType(&engines, - RM_ENGINE_TYPE_NVENC(0)); + if ((pMIGComputeInstance->sharedEngFlag & NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NONE) != 0x0) + bitVectorOr(&shadowSharedEngMask, &shadowSharedEngMask, &engines); + else + { + ENGTYPE_BIT_VECTOR tempVector; - jpgCount = kmigmgrCountEnginesOfType(&engines, - RM_ENGINE_TYPE_NVJPEG(0)); + // Exclusive engine mask should not intersect with the current exclusive mask + bitVectorAnd(&tempVector, &engines, &shadowExclusiveEngMask); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + bitVectorTestAllCleared(&tempVector), + status = NV_ERR_STATE_IN_USE; goto done;); + bitVectorOr(&shadowExclusiveEngMask, &shadowExclusiveEngMask, &engines); + } - ofaCount = kmigmgrCountEnginesOfType(&engines, - RM_ENGINE_TYPE_OFA); + // Set Shared/Exclusive Engine Masks for CEs restored + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, RM_ENGINE_RANGE_COPY()); + bitVectorAnd(&engines, &engines, &pResourceAllocation->engines); + if ((pMIGComputeInstance->sharedEngFlag & NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_CE) != 0x0) + bitVectorOr(&shadowSharedEngMask, &shadowSharedEngMask, &engines); + else + { + ENGTYPE_BIT_VECTOR tempVector; - NV_ASSERT(grCount == 1); + // Exclusive engine mask should not intersect with the current exclusive mask + bitVectorAnd(&tempVector, &engines, &shadowExclusiveEngMask); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + bitVectorTestAllCleared(&tempVector), + status = NV_ERR_STATE_IN_USE; goto done;); + bitVectorOr(&shadowExclusiveEngMask, &shadowExclusiveEngMask, &engines); + } + + // Set Shared/Exclusive Engine Masks for NVDECs restored + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, RM_ENGINE_RANGE_NVDEC()); + 
bitVectorAnd(&engines, &engines, &pResourceAllocation->engines); + if ((pMIGComputeInstance->sharedEngFlag & NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVDEC) != 0x0) + bitVectorOr(&shadowSharedEngMask, &shadowSharedEngMask, &engines); + else + { + ENGTYPE_BIT_VECTOR tempVector; + + // Exclusive engine mask should not intersect with the current exclusive mask + bitVectorAnd(&tempVector, &engines, &shadowExclusiveEngMask); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + bitVectorTestAllCleared(&tempVector), + status = NV_ERR_STATE_IN_USE; goto done;); + bitVectorOr(&shadowExclusiveEngMask, &shadowExclusiveEngMask, &engines); + } + + // Set Shared/Exclusive Engine Masks for NVENCs restored + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, RM_ENGINE_RANGE_NVENC()); + bitVectorAnd(&engines, &engines, &pResourceAllocation->engines); + if ((pMIGComputeInstance->sharedEngFlag & NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVENC) != 0x0) + bitVectorOr(&shadowSharedEngMask, &shadowSharedEngMask, &engines); + else + { + ENGTYPE_BIT_VECTOR tempVector; + + // Exclusive engine mask should not intersect with the current exclusive mask + bitVectorAnd(&tempVector, &engines, &shadowExclusiveEngMask); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + bitVectorTestAllCleared(&tempVector), + status = NV_ERR_STATE_IN_USE; goto done;); + bitVectorOr(&shadowExclusiveEngMask, &shadowExclusiveEngMask, &engines); + } + + // Set Shared/Exclusive Engine Masks for NVJPEGs restored + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, RM_ENGINE_RANGE_NVJPEG()); + bitVectorAnd(&engines, &engines, &pResourceAllocation->engines); + if ((pMIGComputeInstance->sharedEngFlag & NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVJPG) != 0x0) + bitVectorOr(&shadowSharedEngMask, &shadowSharedEngMask, &engines); + else + { + ENGTYPE_BIT_VECTOR tempVector; + + // Exclusive engine mask should not intersect with the current exclusive mask + bitVectorAnd(&tempVector, &engines, &shadowExclusiveEngMask); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + 
bitVectorTestAllCleared(&tempVector), + status = NV_ERR_STATE_IN_USE; goto done;); + bitVectorOr(&shadowExclusiveEngMask, &shadowExclusiveEngMask, &engines); + } + + // Set Shared/Exclusive Engine Masks for OFAs restored + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, rangeMake(RM_ENGINE_TYPE_OFA, RM_ENGINE_TYPE_OFA)); + bitVectorAnd(&engines, &engines, &pResourceAllocation->engines); + if ((pMIGComputeInstance->sharedEngFlag & NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_OFA) != 0x0) + bitVectorOr(&shadowSharedEngMask, &shadowSharedEngMask, &engines); + else + { + ENGTYPE_BIT_VECTOR tempVector; + + // Exclusive engine mask should not intersect with the current exclusive mask + bitVectorAnd(&tempVector, &engines, &shadowExclusiveEngMask); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + bitVectorTestAllCleared(&tempVector), + status = NV_ERR_STATE_IN_USE; goto done;); + bitVectorOr(&shadowExclusiveEngMask, &shadowExclusiveEngMask, &engines); + } } + else + { + NvU32 grCount = 1; + NvU32 ceCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].ceCount; + NvU32 decCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvDecCount; + NvU32 encCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvEncCount; + NvU32 jpgCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvJpgCount; + NvU32 ofaCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].ofaCount; - bitVectorClrAll(&pResourceAllocation->engines); + bitVectorClrAll(&pResourceAllocation->engines); - // Allocate the GR engines for this compute instance - NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, - kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, - ((pMIGComputeInstance->sharedEngFlag & - NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NONE) != 0x0), - RM_ENGINE_RANGE_GR(), - grCount, - &pResourceAllocation->engines, - &shadowExclusiveEngMask, - &shadowSharedEngMask), done); + // Allocate the GR engines for this compute instance + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + 
kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NONE) != 0x0), + RM_ENGINE_RANGE_GR(), + grCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask), done); - // Allocate the Copy engines for this compute instance - NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, - kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, - ((pMIGComputeInstance->sharedEngFlag & - NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_CE) != 0x0), - RM_ENGINE_RANGE_COPY(), - ceCount, - &pResourceAllocation->engines, - &shadowExclusiveEngMask, - &shadowSharedEngMask), done); + // Allocate the Copy engines for this compute instance + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_CE) != 0x0), + RM_ENGINE_RANGE_COPY(), + ceCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask), done); - // Allocate the NVDEC engines for this compute instance - NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, - kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, - ((pMIGComputeInstance->sharedEngFlag & - NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVDEC) != 0x0), - RM_ENGINE_RANGE_NVDEC(), - decCount, - &pResourceAllocation->engines, - &shadowExclusiveEngMask, - &shadowSharedEngMask), done); + // Allocate the NVDEC engines for this compute instance + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVDEC) != 0x0), + RM_ENGINE_RANGE_NVDEC(), + decCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask), done); - // Allocate the NVENC engines for 
this compute instance - NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, - kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, - ((pMIGComputeInstance->sharedEngFlag & - NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVENC) != 0x0), - RM_ENGINE_RANGE_NVENC(), - encCount, - &pResourceAllocation->engines, - &shadowExclusiveEngMask, - &shadowSharedEngMask), done); + // Allocate the NVENC engines for this compute instance + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVENC) != 0x0), + RM_ENGINE_RANGE_NVENC(), + encCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask), done); - // Allocate the NVJPG engines for this compute instance - NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, - kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, - ((pMIGComputeInstance->sharedEngFlag & - NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVJPG) != 0x0), - RM_ENGINE_RANGE_NVJPEG(), - jpgCount, - &pResourceAllocation->engines, - &shadowExclusiveEngMask, - &shadowSharedEngMask), done); - - // Allocate the NVOFA engines for this compute instance - NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, - kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, - ((pMIGComputeInstance->sharedEngFlag & - NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_OFA) != 0x0), - rangeMake(RM_ENGINE_TYPE_OFA, RM_ENGINE_TYPE_OFA), - ofaCount, - &pResourceAllocation->engines, - &shadowExclusiveEngMask, - &shadowSharedEngMask), done); + // Allocate the NVJPG engines for this compute instance + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVJPG) != 0x0), + RM_ENGINE_RANGE_NVJPEG(), + jpgCount, + 
&pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask), done); + // Allocate the NVOFA engines for this compute instance + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_OFA) != 0x0), + rangeMake(RM_ENGINE_TYPE_OFA, RM_ENGINE_TYPE_OFA), + ofaCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask), done); + } // Cache local mask of engine IDs for this compute instance kmigmgrGetLocalEngineMask(&pResourceAllocation->engines, diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlink.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlink.c index 5d35cb3a8..946d867a8 100644 --- a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlink.c +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlink.c @@ -630,6 +630,14 @@ knvlinkInbandMsgCallbackDispatcher_WORKITEM pHeader = (nvlink_inband_msg_header_t *)pMessage->data; + if (pKernelNvlink->inbandCallback[pHeader->type].pCallback == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "No Callback Registered for type %d. 
Dropping the msg\n", + pHeader->type); + return; + } + // Assert reserved in msgHdr are zero pRsvd = &pHeader->reserved[0]; NV_ASSERT((pRsvd[0] == 0) && portMemCmp(pRsvd, pRsvd + 1, diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c index de1f711f9..658019254 100644 --- a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c @@ -721,6 +721,19 @@ subdeviceCtrlCmdGpuGetNvencSwSessionStats_IMPL return NV_OK; } +NV_STATUS +_subdeviceCtrlCmdGpuGetNvencSwSessionInfo +( + OBJGPU *pGpu, + NvU32 sessionInfoTblEntry, + NV2080_CTRL_NVENC_SW_SESSION_INFO *pSessionInfo, + NvU32 *entryCount +) +{ + + return NV_OK; +} + // // subdeviceCtrlCmdGpuGetNvencSwSessionInfo // @@ -735,9 +748,23 @@ subdeviceCtrlCmdGpuGetNvencSwSessionInfo_IMPL NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS *pParams ) { + NV_STATUS status = NV_OK; pParams->sessionInfoTblEntry = 0; - return NV_OK; + return status; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + pParams->sessionInfoTblEntry = 0; + + return status; } // @@ -2432,6 +2459,7 @@ subdeviceCtrlCmdUpdateGfidP2pCapability_IMPL if (pP2PInfo[idx].gfid == INVALID_P2P_GFID) { pP2PInfo[idx].gfid = pParams->gfid; + pGpu->sriovState.p2pFabricPartitionId = pParams->fabricPartitionId; bSetP2PAccess = NV_TRUE; break; } @@ -2441,6 +2469,7 @@ subdeviceCtrlCmdUpdateGfidP2pCapability_IMPL if (pP2PInfo[idx].gfid == pParams->gfid) { pP2PInfo[idx].gfid = INVALID_P2P_GFID; + pGpu->sriovState.p2pFabricPartitionId = INVALID_FABRIC_PARTITION_ID; bSetP2PAccess = NV_TRUE; break; } diff --git a/src/nvidia/src/kernel/gpu/timer/arch/hopper/timer_gh100.c b/src/nvidia/src/kernel/gpu/timer/arch/hopper/timer_gh100.c index 127515e36..900f636b5 100644 --- 
a/src/nvidia/src/kernel/gpu/timer/arch/hopper/timer_gh100.c +++ b/src/nvidia/src/kernel/gpu/timer/arch/hopper/timer_gh100.c @@ -29,6 +29,7 @@ /* ------------------------- Includes --------------------------------------- */ #include "gpu/gpu.h" #include "objtmr.h" +#include "gpu/fsp/kern_fsp.h" #include "published/hopper/gh100/dev_vm.h" #include "published/hopper/gh100/dev_timer.h" #include "published/hopper/gh100/dev_gc6_island.h" @@ -36,6 +37,7 @@ /* ------------------------- Macros ----------------------------------------- */ /* ------------------------- Static Function Prototypes --------------------- */ /* ------------------------- Public Functions ------------------------------ */ + /* * @brief Sets the GPU time to the current wall-clock time. * @@ -50,7 +52,9 @@ NV_STATUS tmrSetCurrentTime_GH100 OBJTMR *pTmr ) { - NvU64 ns; + KernelFsp *pKernelFsp = GPU_GET_KERNEL_FSP(pGpu); + NvU64 osTimeNs, secTimerNs, sysTimerOffsetNs; + NvU32 secTimerLo, secTimerHi, secTimerHi2; NvU32 seconds; NvU32 useconds; @@ -60,10 +64,42 @@ NV_STATUS tmrSetCurrentTime_GH100 "osGetCurrentTime returns 0x%x seconds, 0x%x useconds\n", seconds, useconds); - ns = ((NvU64)seconds * 1000000 + useconds) * 1000; + osTimeNs = ((NvU64)seconds * 1000000 + useconds) * 1000; - GPU_REG_WR32(pGpu, NV_PGC6_SCI_SYS_TIMER_OFFSET_1, NvU64_HI32(ns)); - GPU_REG_WR32(pGpu, NV_PGC6_SCI_SYS_TIMER_OFFSET_0, NvU64_LO32(ns)); + // + // Get the current secure timer value to calculate the offset to apply + // Use hi-lo-hi reading to ensure a consistent value. 
+ // + secTimerHi2 = GPU_REG_RD32(pGpu, NV_PGC6_SCI_SEC_TIMER_TIME_1); + do + { + secTimerHi = secTimerHi2; + secTimerLo = GPU_REG_RD32(pGpu, NV_PGC6_SCI_SEC_TIMER_TIME_0); + secTimerHi2 = GPU_REG_RD32(pGpu, NV_PGC6_SCI_SEC_TIMER_TIME_1); + } while (secTimerHi != secTimerHi2); + secTimerNs = secTimerLo | (((NvU64)secTimerHi) << 32); + + NV_ASSERT_OR_RETURN(secTimerNs < osTimeNs, NV_ERR_INVALID_STATE); + sysTimerOffsetNs = osTimeNs - secTimerNs; + + if ((pKernelFsp == NULL) || !kfspRequiresBug3957833WAR_HAL(pGpu, pKernelFsp)) + { + // + // We can only safely program the timer offset if FSP includes the fix + // for bug 3957833. + // + GPU_REG_WR32(pGpu, NV_PGC6_SCI_SYS_TIMER_OFFSET_1, NvU64_HI32(sysTimerOffsetNs)); + GPU_REG_WR32(pGpu, NV_PGC6_SCI_SYS_TIMER_OFFSET_0, NvU64_LO32(sysTimerOffsetNs) | + DRF_DEF(_PGC6, _SCI_SYS_TIMER_OFFSET_0, _UPDATE, _TRIGGER)); + } + + // + // PTIMER (the system timer) may need to be manually adjusted by the offset + // everywhere it is supposed to match the host timestamp (for cases where + // the above writes didn't stick, or where the calling code doesn't have + // ready access to NV_PTIMER_TIME). + // + pTmr->sysTimerOffsetNs = sysTimerOffsetNs; return NV_OK; } diff --git a/src/nvidia/src/kernel/gpu/uvm/arch/volta/uvm_gv100.c b/src/nvidia/src/kernel/gpu/uvm/arch/volta/uvm_gv100.c index 457f79b11..bcfde68b2 100644 --- a/src/nvidia/src/kernel/gpu/uvm/arch/volta/uvm_gv100.c +++ b/src/nvidia/src/kernel/gpu/uvm/arch/volta/uvm_gv100.c @@ -138,9 +138,9 @@ uvmDisableAccessCntr_GV100 uvmReadAccessCntrBufferGetPtr_HAL(pGpu, pUvm, &getPtr); if (getPtr != putPtr) { - MEMORY_DESCRIPTOR *pMemDesc = IS_GSP_CLIENT(pGpu) ? - pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc : - pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc; + MEMORY_DESCRIPTOR *pMemDesc = RMCFG_FEATURE_PLATFORM_GSP ? 
+ pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc : + pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc; NvU8 *pAccessCntrBufferPage; NvU32 entriesPerPage = RM_PAGE_SIZE / NVC365_NOTIFY_BUF_SIZE; NvU32 pageSizeModBufSize = RM_PAGE_SIZE % NVC365_NOTIFY_BUF_SIZE; @@ -156,7 +156,8 @@ uvmDisableAccessCntr_GV100 NV_TRUE, NV_PROTECT_READ_WRITE, &pAddr, &pPriv); if (status != NV_OK) { - NV_PRINTF(LEVEL_ERROR, "Failed to map access counter buffer while disabling it.\n"); + NV_PRINTF(LEVEL_ERROR, "Failed to map access counter buffer while disabling it: %d\n", + status); return status; } @@ -180,7 +181,12 @@ uvmDisableAccessCntr_GV100 NV_TRUE, NV_PROTECT_READ_WRITE, &pAddr, &pPriv); if (status != NV_OK) { - NV_PRINTF(LEVEL_ERROR, "Failed to map access counter buffer while disabling it.\n"); + NV_PRINTF(LEVEL_ERROR, "Failed to map access counter buffer while disabling it: %d\n", + status); + + // Write get progress so far, all entries in [get, put) + // are valid or will become valid. + uvmWriteAccessCntrBufferGetPtr_HAL(pGpu, pUvm, getPtr); return status; } } diff --git a/src/nvidia/src/kernel/mem_mgr/mem_list.c b/src/nvidia/src/kernel/mem_mgr/mem_list.c index 0188ccc4f..f8e49bb05 100644 --- a/src/nvidia/src/kernel/mem_mgr/mem_list.c +++ b/src/nvidia/src/kernel/mem_mgr/mem_list.c @@ -310,7 +310,8 @@ continue_alloc_object: pPteArray = memdescGetPteArray(pMemDesc, AT_GPU); - if (!portSafeMulU32(sizeof(NvU64), pAllocParams->pageCount, &result)) + if ((pAllocParams->pageCount > pMemDesc->PageCount) || + !portSafeMulU32(sizeof(NvU64), pAllocParams->pageCount, &result)) { memdescDestroy(pMemDesc); return NV_ERR_INVALID_ARGUMENT; diff --git a/src/nvidia/src/kernel/platform/nbsi/nbsi_init.c b/src/nvidia/src/kernel/platform/nbsi/nbsi_init.c index ef16379f1..fab940120 100644 --- a/src/nvidia/src/kernel/platform/nbsi/nbsi_init.c +++ b/src/nvidia/src/kernel/platform/nbsi/nbsi_init.c @@ -1346,27 +1346,37 @@ static NV_STATUS getNbsiObjFromCache // If we found it in cache... 
return it (if they have enough room) if (status == NV_OK) { + NvU32 rtnObjSizeWithOffset; + // return the full table size *pTotalObjSize = tempGlobSize; - // is rtnsize larger than remaining part of table? - if (*pRtnObjSize >= (*pTotalObjSize - rtnObjOffset)) + if (!portSafeSubU32(*pTotalObjSize, rtnObjOffset, &rtnObjSizeWithOffset)) { - // then we can return it all this time. - *pRtnObjSize = *pTotalObjSize - rtnObjOffset; - *globTypeRtnStatus = NV2080_CTRL_BIOS_GET_NBSI_SUCCESS; + // Failed argument validation. + status = NV_ERR_INVALID_OFFSET; } else { - // return what we can and indicate incomplete. - *globTypeRtnStatus = NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE; - } + if (*pRtnObjSize >= rtnObjSizeWithOffset) + { + // if rtnsize is larger than remaining part of table, + // then we can return it all this time. + *pRtnObjSize = rtnObjSizeWithOffset; + *globTypeRtnStatus = NV2080_CTRL_BIOS_GET_NBSI_SUCCESS; + } + else + { + // return what we can and indicate incomplete. + *globTypeRtnStatus = NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE; + } - if (*pRtnObjSize > 0) - { - bufPtr = (NvU8 *) pTempGlob; - bufPtr = &bufPtr[rtnObjOffset]; - portMemCopy(pRtnObj, *pRtnObjSize, bufPtr, *pRtnObjSize); + if (*pRtnObjSize > 0) + { + bufPtr = (NvU8 *) pTempGlob; + bufPtr = &bufPtr[rtnObjOffset]; + portMemCopy(pRtnObj, *pRtnObjSize, bufPtr, *pRtnObjSize); + } } } return status; @@ -2835,6 +2845,7 @@ NV_STATUS getNbsiObjByType if (globType == GLOB_TYPE_GET_NBSI_ACPI_RAW) { + // TODO: Add offset arg validation when ACPI calls get support from GSP firmware. NvU16 rtnSize; // // (IN) wantedRtnObjOffset = acpi function, @@ -2878,7 +2889,7 @@ NV_STATUS getNbsiObjByType pRtnObjSize, pTotalObjSize, pRtnGlobStatus); - if (status == NV_OK) + if (status != NV_ERR_GENERIC) { // It's in the cache, it may or may not fit. return status; @@ -3013,6 +3024,8 @@ NV_STATUS getNbsiObjByType if (bFound == NV_TRUE) { + NvU32 rtnObjSizeWithOffset; + // Currently only NBSI and NBCI objects are cached... 
if ((acpiFunction == ACPI_DSM_FUNCTION_NBSI) || (acpiFunction == ACPI_DSM_FUNCTION_NBCI)) @@ -3046,24 +3059,32 @@ NV_STATUS getNbsiObjByType // return the full table size *pTotalObjSize = testObjSize; - // is rtnsize larger than remaining part of table? - if (*pRtnObjSize >= (*pTotalObjSize - wantedRtnObjOffset)) + if (!portSafeSubU32(*pTotalObjSize, wantedRtnObjOffset, &rtnObjSizeWithOffset)) { - // then we can return it all this time. - *pRtnObjSize = *pTotalObjSize - wantedRtnObjOffset; - *pRtnGlobStatus = NV2080_CTRL_BIOS_GET_NBSI_SUCCESS; + // Failed argument validation. + status = NV_ERR_INVALID_OFFSET; } else { - // return what we can and indicate incomplete. - *pRtnGlobStatus = NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE; - } + if (*pRtnObjSize >= rtnObjSizeWithOffset) + { + // if rtnsize is larger than remaining part of table, + // then we can return it all this time. + *pRtnObjSize = rtnObjSizeWithOffset; + *pRtnGlobStatus = NV2080_CTRL_BIOS_GET_NBSI_SUCCESS; + } + else + { + // return what we can and indicate incomplete. 
+ *pRtnGlobStatus = NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE; + } - if (*pRtnObjSize > 0) - { - bufPtr = (NvU8 *) pTestObj; - bufPtr = &bufPtr[wantedRtnObjOffset]; - portMemCopy(pRtnObj, *pRtnObjSize, bufPtr, *pRtnObjSize); + if (*pRtnObjSize > 0) + { + bufPtr = (NvU8 *) pTestObj; + bufPtr = &bufPtr[wantedRtnObjOffset]; + portMemCopy(pRtnObj, *pRtnObjSize, bufPtr, *pRtnObjSize); + } } } else diff --git a/src/nvidia/src/kernel/rmapi/embedded_param_copy.c b/src/nvidia/src/kernel/rmapi/embedded_param_copy.c index 2d9c200e6..83ba12f32 100644 --- a/src/nvidia/src/kernel/rmapi/embedded_param_copy.c +++ b/src/nvidia/src/kernel/rmapi/embedded_param_copy.c @@ -334,6 +334,27 @@ NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCt break; } + case NV0073_CTRL_CMD_SYSTEM_EXECUTE_ACPI_METHOD: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->inData, + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->inData, + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->inDataSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + RMAPI_PARAM_COPY_INIT(paramCopies[1], + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->outData, + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->outData, + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->outDataSize, 1); + paramCopies[1].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[1].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + paramsCnt++; + + break; + } case NV0080_CTRL_CMD_HOST_GET_CAPS: { CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_HOST_GET_CAPS_PARAMS); @@ -840,6 +861,20 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmC status = inParamsStatus; break; } + case NV0073_CTRL_CMD_SYSTEM_EXECUTE_ACPI_METHOD: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, 
NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS); + + NV_STATUS inParamsStatus = rmapiParamsRelease(&paramCopies[0]); + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->inData = paramCopies[0].pUserParams; + + status = rmapiParamsRelease(&paramCopies[1]); + ((NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS*)pParams)->outData = paramCopies[1].pUserParams; + + if (inParamsStatus != NV_OK) + status = inParamsStatus; + break; + } case NV83DE_CTRL_CMD_DEBUG_READ_MEMORY: { CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS); diff --git a/src/nvidia/src/libraries/nvport/memory/memory_tracking.c b/src/nvidia/src/libraries/nvport/memory/memory_tracking.c index b0a880d3e..fc0ff1634 100644 --- a/src/nvidia/src/libraries/nvport/memory/memory_tracking.c +++ b/src/nvidia/src/libraries/nvport/memory/memory_tracking.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a @@ -182,10 +182,9 @@ _portMemCounterInc activeAllocs = PORT_MEM_ATOMIC_INC_U32(&pCounter->activeAllocs); PORT_MEM_ATOMIC_INC_U32(&pCounter->totalAllocs); - if (PORT_MEM_TRACK_USE_FENCEPOSTS) - { - activeSize = PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->activeSize, size); - } +#if PORT_MEM_TRACK_USE_FENCEPOSTS + activeSize = PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->activeSize, size); +#endif PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->totalSize, size); // Atomically compare the peak value with the active, and update if greater. 
@@ -211,12 +210,12 @@ _portMemCounterDec void *pMem ) { + PORT_UNREFERENCED_VARIABLE(pMem); PORT_MEM_ATOMIC_DEC_U32(&pCounter->activeAllocs); - if (PORT_MEM_TRACK_USE_FENCEPOSTS) - { - PORT_MEM_ATOMIC_SUB_SIZE(&pCounter->activeSize, - ((PORT_MEM_FENCE_HEAD *)pMem-1)->blockSize); - } +#if PORT_MEM_TRACK_USE_FENCEPOSTS + PORT_MEM_ATOMIC_SUB_SIZE(&pCounter->activeSize, + ((PORT_MEM_HEADER *)pMem-1)->fence.blockSize); +#endif } #define PORT_MEM_COUNTER_INIT(pCounter) _portMemCounterInit(pCounter) @@ -538,11 +537,10 @@ static struct PORT_MEM_GLOBALS static NV_INLINE void _portMemLimitInc(NvU32 pid, void *pMem, NvU64 size) { + PORT_MEM_HEADER *pMemHeader = PORT_MEM_SUB_HEADER_PTR(pMem); + pMemHeader->pid = pid; if (portMemGlobals.bLimitEnabled) { - PORT_MEM_HEADER *pMemHeader = PORT_MEM_SUB_HEADER_PTR(pMem); - pMemHeader->pid = pid; - if ((pid > 0) && (pid <= PORT_MEM_LIMIT_MAX_PIDS)) { NvU32 pidIdx = pid - 1; @@ -565,7 +563,6 @@ _portMemLimitDec(void *pMem) NvU32 pidIdx = pid - 1; if (portMemGlobals.counterPid[pidIdx] < pMemHeader->blockSize) { - // TODO: How do we protect against double frees? PORT_MEM_PRINT_ERROR("memory free error: counter underflow\n"); PORT_BREAKPOINT_CHECKED(); } diff --git a/version.mk b/version.mk index f86e1b58a..e0a7d27ac 100644 --- a/version.mk +++ b/version.mk @@ -1,4 +1,4 @@ -NVIDIA_VERSION = 525.89.02 +NVIDIA_VERSION = 525.105.17 # This file. VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))