diff --git a/README.md b/README.md index 2600ed18b..836d6a927 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # NVIDIA Linux Open GPU Kernel Module Source This is the source release of the NVIDIA Linux open GPU kernel modules, -version 570.195.03. +version 570.207. ## How to Build @@ -17,7 +17,7 @@ as root: Note that the kernel modules built here must be used with GSP firmware and user-space NVIDIA GPU driver components from a corresponding -570.195.03 driver release. This can be achieved by installing +570.207 driver release. This can be achieved by installing the NVIDIA GPU driver from the .run file using the `--no-kernel-modules` option. E.g., @@ -185,7 +185,7 @@ table below). For details on feature support and limitations, see the NVIDIA GPU driver end user README here: -https://us.download.nvidia.com/XFree86/Linux-x86_64/570.195.03/README/kernel_open.html +https://us.download.nvidia.com/XFree86/Linux-x86_64/570.207/README/kernel_open.html For vGPU support, please refer to the README.vgpu packaged in the vGPU Host Package for more details. @@ -981,6 +981,7 @@ Subsystem Device ID. | NVIDIA RTX PRO 4000 Blackwell Generation Laptop GPU | 2C39 | | NVIDIA GeForce RTX 5090 Laptop GPU | 2C58 | | NVIDIA GeForce RTX 5080 Laptop GPU | 2C59 | +| NVIDIA RTX PRO 4000 Blackwell Embedded GPU | 2C79 | | NVIDIA GeForce RTX 5060 Ti | 2D04 | | NVIDIA GeForce RTX 5060 | 2D05 | | NVIDIA GeForce RTX 5070 Laptop GPU | 2D18 | @@ -991,10 +992,12 @@ Subsystem Device ID. | NVIDIA RTX PRO 2000 Blackwell Generation Laptop GPU | 2D39 | | NVIDIA GeForce RTX 5070 Laptop GPU | 2D58 | | NVIDIA GeForce RTX 5060 Laptop GPU | 2D59 | +| NVIDIA RTX PRO 2000 Blackwell Embedded GPU | 2D79 | | NVIDIA GeForce RTX 5050 Laptop GPU | 2D98 | | NVIDIA RTX PRO 1000 Blackwell Generation Laptop GPU | 2DB8 | | NVIDIA RTX PRO 500 Blackwell Generation Laptop GPU | 2DB9 | | NVIDIA GeForce RTX 5050 Laptop GPU | 2DD8 | +| NVIDIA RTX PRO 500 Blackwell Embedded GPU | 2DF9 | | NVIDIA GeForce RTX 5070 | 2F04 | | NVIDIA GeForce RTX 5070 Ti Laptop GPU | 2F18 | | NVIDIA RTX PRO 3000 Blackwell Generation Laptop GPU | 2F38 | diff --git a/kernel-open/Kbuild b/kernel-open/Kbuild index 3d2240d87..545f92057 100644 --- a/kernel-open/Kbuild +++ b/kernel-open/Kbuild @@ -79,7 +79,7 @@ ccflags-y += -I$(src)/common/inc ccflags-y += -I$(src) ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args ccflags-y += -D__KERNEL__ -DMODULE -DNVRM -ccflags-y += -DNV_VERSION_STRING=\"570.195.03\" +ccflags-y += -DNV_VERSION_STRING=\"570.207\" ifneq ($(SYSSRCHOST1X),) ccflags-y += -I$(SYSSRCHOST1X) diff --git a/kernel-open/common/inc/nv-linux.h b/kernel-open/common/inc/nv-linux.h index 8c6b4b7a1..d7aeabcdc 100644 --- a/kernel-open/common/inc/nv-linux.h +++ b/kernel-open/common/inc/nv-linux.h @@ -1486,7 +1486,8 @@ typedef struct typedef struct nv_linux_state_s { nv_state_t nv_state; - atomic_t usage_count; + atomic_t usage_count; + NvU32 suspend_count; struct device *dev; diff --git a/kernel-open/common/inc/nv-lock.h b/kernel-open/common/inc/nv-lock.h index 5d58b73a9..665cc5999 100644 --- a/kernel-open/common/inc/nv-lock.h +++ b/kernel-open/common/inc/nv-lock.h @@ -35,18 +35,6 @@ #include /* signal_pending for kernels >= 4.11 */ #endif -#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL) -typedef raw_spinlock_t nv_spinlock_t; -#define NV_DEFINE_SPINLOCK(lock) DEFINE_RAW_SPINLOCK(lock) -#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock) -#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock) -#define NV_SPIN_UNLOCK_IRQ(lock) 
raw_spin_unlock_irq(lock) -#define NV_SPIN_LOCK_IRQSAVE(lock,flags) raw_spin_lock_irqsave(lock,flags) -#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) raw_spin_unlock_irqrestore(lock,flags) -#define NV_SPIN_LOCK(lock) raw_spin_lock(lock) -#define NV_SPIN_UNLOCK(lock) raw_spin_unlock(lock) -#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock) -#else typedef spinlock_t nv_spinlock_t; #define NV_DEFINE_SPINLOCK(lock) DEFINE_SPINLOCK(lock) #define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock) @@ -57,7 +45,6 @@ typedef spinlock_t nv_spinlock_t; #define NV_SPIN_LOCK(lock) spin_lock(lock) #define NV_SPIN_UNLOCK(lock) spin_unlock(lock) #define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock) -#endif #define NV_INIT_MUTEX(mutex) sema_init(mutex, 1) diff --git a/kernel-open/common/inc/nvkms-kapi.h b/kernel-open/common/inc/nvkms-kapi.h index 630b28321..edc3b7cae 100644 --- a/kernel-open/common/inc/nvkms-kapi.h +++ b/kernel-open/common/inc/nvkms-kapi.h @@ -1559,6 +1559,26 @@ struct NvKmsKapiFunctionsTable { NvS32 index ); + /*! + * Check or wait on a head's LUT notifier. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] head The head to check for LUT completion. + * + * \param [in] waitForCompletion If true, wait for the notifier in NvKms + * before returning. + * + * \param [out] complete Returns whether the notifier has completed. + */ + NvBool + (*checkLutNotifier) + ( + struct NvKmsKapiDevice *device, + NvU32 head, + NvBool waitForCompletion + ); + /* * Notify NVKMS that the system's framebuffer console has been disabled and * the reserved allocation for the old framebuffer console can be unmapped. diff --git a/kernel-open/common/inc/os-interface.h b/kernel-open/common/inc/os-interface.h index 2e1ab481c..b01d280f0 100644 --- a/kernel-open/common/inc/os-interface.h +++ b/kernel-open/common/inc/os-interface.h @@ -62,6 +62,11 @@ struct os_work_queue; /* Each OS defines its own version of this opaque type */ typedef struct os_wait_queue os_wait_queue; +/* Flags needed by os_get_current_proccess_flags */ +#define OS_CURRENT_PROCESS_FLAG_NONE 0x0 +#define OS_CURRENT_PROCESS_FLAG_KERNEL_THREAD 0x1 +#define OS_CURRENT_PROCESS_FLAG_EXITING 0x2 + /* * --------------------------------------------------------------------------- * @@ -194,6 +199,7 @@ NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **); NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64); NvBool NV_API_CALL os_is_nvswitch_present (void); NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16); +NvU32 NV_API_CALL os_get_current_process_flags(void); NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **); void NV_API_CALL os_free_wait_queue (os_wait_queue *); void NV_API_CALL os_wait_uninterruptible (os_wait_queue *); diff --git a/kernel-open/nvidia-drm/nvidia-drm-modeset.c b/kernel-open/nvidia-drm/nvidia-drm-modeset.c index adaee1148..028ff2388 100644 --- a/kernel-open/nvidia-drm/nvidia-drm-modeset.c +++ b/kernel-open/nvidia-drm/nvidia-drm-modeset.c @@ -677,6 +677,33 @@ int nv_drm_atomic_commit(struct drm_device *dev, "Flip event timeout on head %u", nv_crtc->head); } } + +#if defined(NV_DRM_COLOR_MGMT_AVAILABLE) + /* + * If the legacy LUT needs to be updated, ensure that the previous LUT + * update is complete first. + */ + if (crtc_state->color_mgmt_changed) { + NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice, + nv_crtc->head, + !nonblock /* waitForCompletion */); + + /* If checking the LUT notifier failed, assume no LUT notifier is set. 
*/ + if (!complete) { + if (nonblock) { + return -EBUSY; + } else { + /* + * checkLutNotifier should wait on the notifier in this + * case, so we should only get here if the wait timed out. + */ + NV_DRM_DEV_LOG_ERR( + nv_dev, + "LUT notifier timeout on head %u", nv_crtc->head); + } + } + } +#endif } #if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG) @@ -810,6 +837,19 @@ int nv_drm_atomic_commit(struct drm_device *dev, __nv_drm_handle_flip_event(nv_crtc); } } + +#if defined(NV_DRM_COLOR_MGMT_AVAILABLE) + if (crtc_state->color_mgmt_changed) { + NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice, + nv_crtc->head, + true /* waitForCompletion */); + if (!complete) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "LUT notifier timeout on head %u", nv_crtc->head); + } + } +#endif } } diff --git a/kernel-open/nvidia/nv.c b/kernel-open/nvidia/nv.c index 9e29338c3..5d7731dae 100644 --- a/kernel-open/nvidia/nv.c +++ b/kernel-open/nvidia/nv.c @@ -1627,6 +1627,7 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp) nv_assert_not_in_gpu_exclusion_list(sp, nv); NV_ATOMIC_INC(nvl->usage_count); + return 0; } diff --git a/kernel-open/nvidia/os-interface.c b/kernel-open/nvidia/os-interface.c index 24669d310..b6bfd4ce3 100644 --- a/kernel-open/nvidia/os-interface.c +++ b/kernel-open/nvidia/os-interface.c @@ -2073,6 +2073,22 @@ NV_STATUS NV_API_CALL os_get_random_bytes return NV_OK; } +NvU32 NV_API_CALL os_get_current_process_flags +( + void +) +{ + NvU32 flags = OS_CURRENT_PROCESS_FLAG_NONE; + + if (current->flags & PF_EXITING) + flags |= OS_CURRENT_PROCESS_FLAG_EXITING; + + if (current->flags & PF_KTHREAD) + flags |= OS_CURRENT_PROCESS_FLAG_KERNEL_THREAD; + + return flags; +} + NV_STATUS NV_API_CALL os_alloc_wait_queue ( os_wait_queue **wq diff --git a/src/common/displayport/inc/dp_connectorimpl.h b/src/common/displayport/inc/dp_connectorimpl.h index b4cfd9410..6475d890b 100644 --- a/src/common/displayport/inc/dp_connectorimpl.h +++ b/src/common/displayport/inc/dp_connectorimpl.h @@ -266,6 +266,12 @@ namespace DisplayPort // bool bHDMIOnDPPlusPlus; + // + // Flag to enable accounting available DP tunnelling BW while generating PPS + // for the mode + // + bool bOptimizeDscBppForTunnellingBw; + bool bSkipResetLinkStateDuringPlug; // Flag to check if LT should be skipped. diff --git a/src/common/displayport/inc/dp_regkeydatabase.h b/src/common/displayport/inc/dp_regkeydatabase.h index f74609810..f648e87c9 100644 --- a/src/common/displayport/inc/dp_regkeydatabase.h +++ b/src/common/displayport/inc/dp_regkeydatabase.h @@ -113,6 +113,9 @@ // This regkey ensures DPLib takes into account Displayport++ supports HDMI. #define NV_DP_REGKEY_HDMI_ON_DP_PLUS_PLUS "HDMI_ON_DP_PLUS_PLUS" +// This regkey ensures DP IMP takes DP tunnelling BW into account while calculating DSC BPP +#define NV_DP_REGKEY_OPTIMIZE_DSC_BPP_FOR_TUNNELLING_BW "OPTIMIZE_DSC_BPP_FOR_TUNNELLING_BW" + // Data Base used to store all the regkey values. // The actual data base is declared statically in dp_evoadapter.cpp. // All entries set to 0 before initialized by the first EvoMainLink constructor. 
@@ -157,6 +160,7 @@ struct DP_REGKEY_DATABASE bool bSkipSettingLinkStateDuringUnplug; bool bEnableDevId; bool bHDMIOnDPPlusPlus; + bool bOptimizeDscBppForTunnellingBw; }; extern struct DP_REGKEY_DATABASE dpRegkeyDatabase; diff --git a/src/common/displayport/src/dp_connectorimpl.cpp b/src/common/displayport/src/dp_connectorimpl.cpp index c36916afe..2def89684 100644 --- a/src/common/displayport/src/dp_connectorimpl.cpp +++ b/src/common/displayport/src/dp_connectorimpl.cpp @@ -52,13 +52,13 @@ #include "dp_tracing.h" /* - * This is needed by Synaptics to disable DisplayExpand feature - * in some of their docking station based on if GPU supports DSC. + * This is needed by Synaptics to disable DisplayExpand feature + * in some of their docking station based on if GPU supports DSC. * Feature is not needed if DSC is supported. * Customers reported problems with the feature enabled on GB20x devices * and requested GPU DSC detection to disable DisplayExpand feature. * DSC is supported in Turing and later SKUs hence - * exposing Turing DevId to customers to address their requirement. + * exposing Turing DevId to customers to address their requirement. */ #define TURING_DEV_ID 0x1E @@ -200,6 +200,7 @@ void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatab this->bSkipSettingLinkStateDuringUnplug = dpRegkeyDatabase.bSkipSettingLinkStateDuringUnplug; this->bEnableDevId = dpRegkeyDatabase.bEnableDevId; this->bHDMIOnDPPlusPlus = dpRegkeyDatabase.bHDMIOnDPPlusPlus; + this->bOptimizeDscBppForTunnellingBw = dpRegkeyDatabase.bOptimizeDscBppForTunnellingBw; } void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled) @@ -1202,12 +1203,14 @@ bool ConnectorImpl::compoundQueryAttachTunneling(const DpModesetParams &modesetP } NvU64 bpp = modesetParams.modesetInfo.depth; + NvU32 dscFactor = 1U; + if (pDscParams->bEnableDsc) { - bpp = divide_ceil(pDscParams->bitsPerPixelX16, 16); + dscFactor = 16U; } - NvU64 modeBwRequired = modesetParams.modesetInfo.pixelClockHz * bpp; + NvU64 modeBwRequired = (modesetParams.modesetInfo.pixelClockHz * bpp)/dscFactor; NvU64 freeTunnelingBw = allocatedDpTunnelBw - compoundQueryUsedTunnelingBw; if (modeBwRequired > freeTunnelingBw) @@ -1405,13 +1408,13 @@ bool ConnectorImpl::compoundQueryAttachMST(Group * target, { return false; } - + compoundQueryResult = compoundQueryAttachMSTGeneric(target, modesetParams, &localInfo, - pDscParams, pErrorCode); + pDscParams, pErrorCode); // // compoundQueryAttachMST Generic might fail due to the insufficient bandwidth , // We only check whether bpp can be fit in the available bandwidth based on the tranied link config in compoundQueryAttachMSTDsc function. - // There might be cases where the default 10 bpp might fit in the available bandwidth based on the trained link config, + // There might be cases where the default 10 bpp might fit in the available bandwidth based on the trained link config, // however, the bandwidth might be insufficient at the actual bottleneck link between source and sink to drive the mode, causing CompoundQueryAttachMSTGeneric to fail. // Incase of CompoundQueryAttachMSTGeneric failure, instead of returning false, check whether the mode can be supported with the max dsc compression bpp // and return true if it can be supported. 
@@ -1694,7 +1697,7 @@ bool ConnectorImpl::compoundQueryAttachMSTDsc(Group * target, localInfo->localModesetInfo.bEnableDsc = true; localInfo->localModesetInfo.depth = bitsPerPixelX16; if (modesetParams.colorFormat == dpColorFormat_YCbCr422 && - dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422 && + dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422 && (dscInfo.gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && (dscInfo.sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) { @@ -1863,11 +1866,11 @@ bool ConnectorImpl::compoundQueryAttachMSTGeneric(Group * target, } // If the compoundQueryResult is false, we need to reset the compoundQueryLocalLinkPBN - if (!compoundQueryResult && this->bEnableLowerBppCheckForDsc) + if (!compoundQueryResult && this->bEnableLowerBppCheckForDsc) { compoundQueryLocalLinkPBN -= slots_pbn; } - + return compoundQueryResult; } bool ConnectorImpl::compoundQueryAttachSST(Group * target, @@ -1989,6 +1992,15 @@ bool ConnectorImpl::compoundQueryAttachSST(Group * target, availableBandwidthBitsPerSecond = lc.convertMinRateToDataRate() * 8 * lc.lanes; + if (this-> bOptimizeDscBppForTunnellingBw && hal->isDpTunnelBwAllocationEnabled()) + { + NvU64 freeTunnelingBw = allocatedDpTunnelBw - compoundQueryUsedTunnelingBw; + if (freeTunnelingBw < availableBandwidthBitsPerSecond) + { + availableBandwidthBitsPerSecond = freeTunnelingBw; + } + } + warData.dpData.linkRateHz = lc.peakRate; warData.dpData.bIs128b132bChannelCoding = lc.bIs128b132bChannelCoding; @@ -2073,6 +2085,7 @@ bool ConnectorImpl::compoundQueryAttachSST(Group * target, { pDscParams->bEnableDsc = true; compoundQueryResult = true; + pDscParams->bitsPerPixelX16 = bitsPerPixelX16; if (pDscParams->pDscOutParams != NULL) { @@ -2081,7 +2094,6 @@ bool ConnectorImpl::compoundQueryAttachSST(Group * target, // possible with DSC and calculated PPS and bits per pixel. // dpMemCopy(pDscParams->pDscOutParams->PPS, PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); - pDscParams->bitsPerPixelX16 = bitsPerPixelX16; } else { @@ -6088,7 +6100,7 @@ bool ConnectorImpl::allocateTimeslice(GroupImpl * targetGroup) // Check for available timeslots if (slot_count > freeSlots) { - DP_PRINTF(DP_ERROR, "DP-TS> Failed to allocate timeslot!! Not enough free slots. slot_count: %d, freeSlots: %d", + DP_PRINTF(DP_ERROR, "DP-TS> Failed to allocate timeslot!! Not enough free slots. slot_count: %d, freeSlots: %d", slot_count, freeSlots); return false; } @@ -6758,7 +6770,9 @@ void ConnectorImpl::notifyLongPulseInternal(bool statusConnected) // Some panels whose TCON erroneously sets DPCD 0x200 SINK_COUNT=0. 
if (main->isEDP() && hal->getSinkCount() == 0) + { hal->setSinkCount(1); + } // disconnect all devices for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) { @@ -7433,7 +7447,7 @@ void ConnectorImpl::notifyShortPulse() } disableFlush(); } - + if (!this->bDisable5019537Fix) { main->invalidateLinkRatesInFallbackTable(originalActiveLinkConfig.peakRate); @@ -7499,7 +7513,7 @@ void ConnectorImpl::notifyShortPulse() bool ConnectorImpl::detectSinkCountChange() { - if (this->linkUseMultistream()) + if (this->linkUseMultistream() || main->isEDP()) return false; DeviceImpl * existingDev = findDeviceInList(Address()); diff --git a/src/common/displayport/src/dp_evoadapter.cpp b/src/common/displayport/src/dp_evoadapter.cpp index 6d759f933..2fad1359f 100644 --- a/src/common/displayport/src/dp_evoadapter.cpp +++ b/src/common/displayport/src/dp_evoadapter.cpp @@ -109,7 +109,8 @@ const struct {NV_DP_REGKEY_ENABLE_LOWER_BPP_CHECK_FOR_DSC, &dpRegkeyDatabase.bEnableLowerBppCheckForDsc, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_SKIP_SETTING_LINK_STATE_DURING_UNPLUG, &dpRegkeyDatabase.bSkipSettingLinkStateDuringUnplug, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_EXPOSE_DSC_DEVID_WAR, &dpRegkeyDatabase.bEnableDevId, DP_REG_VAL_BOOL}, - {NV_DP_REGKEY_HDMI_ON_DP_PLUS_PLUS, &dpRegkeyDatabase.bHDMIOnDPPlusPlus, DP_REG_VAL_BOOL} + {NV_DP_REGKEY_HDMI_ON_DP_PLUS_PLUS, &dpRegkeyDatabase.bHDMIOnDPPlusPlus, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_OPTIMIZE_DSC_BPP_FOR_TUNNELLING_BW, &dpRegkeyDatabase.bOptimizeDscBppForTunnellingBw, DP_REG_VAL_BOOL} }; EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) : diff --git a/src/common/displayport/src/dp_wardatabase.cpp b/src/common/displayport/src/dp_wardatabase.cpp index 9bfbd535b..cb0078b5d 100644 --- a/src/common/displayport/src/dp_wardatabase.cpp +++ b/src/common/displayport/src/dp_wardatabase.cpp @@ -125,7 +125,7 @@ void ConnectorImpl2x::applyOuiWARs() bStuffDummySymbolsFor8b10b = true; } break; - + } } @@ -513,16 +513,26 @@ void Edid::applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDen // LG case 0xE430: - if (ProductID == 0x0469) + switch (ProductID) { - // - // The LG display can't be driven at FHD with 2*RBR. - // Force max link config - // - this->WARFlags.forceMaxLinkConfig = true; - DP_PRINTF(DP_NOTICE, "DP-WAR> Force maximum link config WAR required on LG panel."); - DP_PRINTF(DP_NOTICE, "DP-WAR> bug 1649626"); - break; + case 0x0469: + { + // + // The LG display can't be driven at FHD with 2*RBR. + // Force max link config + // + this->WARFlags.forceMaxLinkConfig = true; + DP_PRINTF(DP_NOTICE, "DP-WAR> Force maximum link config WAR required on LG panel."); + DP_PRINTF(DP_NOTICE, "DP-WAR> bug 1649626"); + break; + } + case 0x06DB: + { + this->WARFlags.useLegacyAddress = true; + DP_PRINTF(DP_NOTICE, "DP-WAR> LG eDP"); + DP_PRINTF(DP_NOTICE, "implements only Legacy interrupt address range"); + break; + } } break; case 0x8F34: diff --git a/src/common/inc/nvBldVer.h b/src/common/inc/nvBldVer.h index 950302998..0936fe53f 100644 --- a/src/common/inc/nvBldVer.h +++ b/src/common/inc/nvBldVer.h @@ -36,25 +36,25 @@ // and then checked back in. 
You cannot make changes to these sections without // corresponding changes to the buildmeister script #ifndef NV_BUILD_BRANCH - #define NV_BUILD_BRANCH r573_76 + #define NV_BUILD_BRANCH r570_00 #endif #ifndef NV_PUBLIC_BRANCH - #define NV_PUBLIC_BRANCH r573_76 + #define NV_PUBLIC_BRANCH r570_00 #endif #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) -#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r570/r573_76-590" -#define NV_BUILD_CHANGELIST_NUM (36569223) +#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r570/r570_00-658" +#define NV_BUILD_CHANGELIST_NUM (36886698) #define NV_BUILD_TYPE "Official" -#define NV_BUILD_NAME "rel/gpu_drv/r570/r573_76-590" -#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36569223) +#define NV_BUILD_NAME "rel/gpu_drv/r570/r570_00-658" +#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36886698) #else /* Windows builds */ -#define NV_BUILD_BRANCH_VERSION "r573_76-1" -#define NV_BUILD_CHANGELIST_NUM (36518415) -#define NV_BUILD_TYPE "Nightly" -#define NV_BUILD_NAME "r573_76-250909" -#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36506718) +#define NV_BUILD_BRANCH_VERSION "r570_00-640" +#define NV_BUILD_CHANGELIST_NUM (36886698) +#define NV_BUILD_TYPE "Official" +#define NV_BUILD_NAME "573.92" +#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36886698) #define NV_BUILD_BRANCH_BASE_VERSION R570 #endif // End buildmeister python edited section diff --git a/src/common/inc/nvUnixVersion.h b/src/common/inc/nvUnixVersion.h index 22459a025..fb4a97b49 100644 --- a/src/common/inc/nvUnixVersion.h +++ b/src/common/inc/nvUnixVersion.h @@ -4,7 +4,7 @@ #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) -#define NV_VERSION_STRING "570.195.03" +#define NV_VERSION_STRING "570.207" #else diff --git a/src/common/nvlink/inband/interface/nvlink_inband_msg.h b/src/common/nvlink/inband/interface/nvlink_inband_msg.h index 6249254d5..06737c7e8 100644 --- a/src/common/nvlink/inband/interface/nvlink_inband_msg.h +++ b/src/common/nvlink/inband/interface/nvlink_inband_msg.h @@ -82,6 +82,7 @@ typedef struct #define NVLINK_INBAND_GPU_PROBE_CAPS_PROBE_UPDATE NVBIT(1) #define NVLINK_INBAND_GPU_PROBE_CAPS_EGM_SUPPORT NVBIT(2) #define NVLINK_INBAND_GPU_PROBE_CAPS_ATS_SUPPORT NVBIT(3) +#define NVLINK_INBAND_GPU_PROBE_CAPS_MC_RETRY NVBIT(8) /* Add more caps as need in the future */ diff --git a/src/nvidia-modeset/interface/nvkms-api.h b/src/nvidia-modeset/interface/nvkms-api.h index b1def4d70..ef7ad48b4 100644 --- a/src/nvidia-modeset/interface/nvkms-api.h +++ b/src/nvidia-modeset/interface/nvkms-api.h @@ -221,6 +221,7 @@ enum NvKmsIoctlCommand { NVKMS_IOCTL_SET_CURSOR_IMAGE, NVKMS_IOCTL_MOVE_CURSOR, NVKMS_IOCTL_SET_LUT, + NVKMS_IOCTL_CHECK_LUT_NOTIFIER, NVKMS_IOCTL_IDLE_BASE_CHANNEL, NVKMS_IOCTL_FLIP, NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, @@ -2196,6 +2197,27 @@ struct NvKmsSetLutParams { struct NvKmsSetLutReply reply; /*! out */ }; +/*! + * NVKMS_IOCTL_CHECK_LUT_NOTIFIER: Check or wait on the LUT notifier for the + * specified apiHead. + */ + +struct NvKmsCheckLutNotifierRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + NvBool waitForCompletion; +}; + +struct NvKmsCheckLutNotifierReply { + NvBool complete; +}; + +struct NvKmsCheckLutNotifierParams { + struct NvKmsCheckLutNotifierRequest request; /*! in */ + struct NvKmsCheckLutNotifierReply reply; /*! out */ +}; /*! 
* NVKMS_IOCTL_IDLE_BASE_CHANNEL: Wait for the base channel to be idle on diff --git a/src/nvidia-modeset/kapi/interface/nvkms-kapi.h b/src/nvidia-modeset/kapi/interface/nvkms-kapi.h index 630b28321..edc3b7cae 100644 --- a/src/nvidia-modeset/kapi/interface/nvkms-kapi.h +++ b/src/nvidia-modeset/kapi/interface/nvkms-kapi.h @@ -1559,6 +1559,26 @@ struct NvKmsKapiFunctionsTable { NvS32 index ); + /*! + * Check or wait on a head's LUT notifier. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] head The head to check for LUT completion. + * + * \param [in] waitForCompletion If true, wait for the notifier in NvKms + * before returning. + * + * \param [out] complete Returns whether the notifier has completed. + */ + NvBool + (*checkLutNotifier) + ( + struct NvKmsKapiDevice *device, + NvU32 head, + NvBool waitForCompletion + ); + /* * Notify NVKMS that the system's framebuffer console has been disabled and * the reserved allocation for the old framebuffer console can be unmapped. diff --git a/src/nvidia-modeset/kapi/src/nvkms-kapi.c b/src/nvidia-modeset/kapi/src/nvkms-kapi.c index 3138c6aff..92c83281a 100644 --- a/src/nvidia-modeset/kapi/src/nvkms-kapi.c +++ b/src/nvidia-modeset/kapi/src/nvkms-kapi.c @@ -3807,6 +3807,31 @@ static NvBool SignalVrrSemaphore return status; } +static NvBool CheckLutNotifier +( + struct NvKmsKapiDevice *device, + NvU32 head, + NvBool waitForCompletion +) +{ + NvBool status = NV_TRUE; + struct NvKmsCheckLutNotifierParams params = { }; + params.request.deviceHandle = device->hKmsDevice; + params.request.dispHandle = device->hKmsDisp; + params.request.head = head; + params.request.waitForCompletion = waitForCompletion; + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_CHECK_LUT_NOTIFIER, + ¶ms, sizeof(params)); + + /* + * In cases where we're first enabling a head, we would expect status to be + * false, but in that case, there's no LUT notifier to wait for, so treat + * that case as complete. + */ + return !status || params.reply.complete; +} + static void FramebufferConsoleDisabled ( struct NvKmsKapiDevice *device @@ -3913,6 +3938,7 @@ NvBool nvKmsKapiGetFunctionsTableInternal funcsTable->signalDisplaySemaphore = nvKmsKapiSignalDisplaySemaphore; funcsTable->cancelDisplaySemaphore = nvKmsKapiCancelDisplaySemaphore; funcsTable->signalVrrSemaphore = SignalVrrSemaphore; + funcsTable->checkLutNotifier = CheckLutNotifier; return NV_TRUE; } diff --git a/src/nvidia-modeset/src/nvkms-hdmi.c b/src/nvidia-modeset/src/nvkms-hdmi.c index aca7eeefa..c162f0e42 100644 --- a/src/nvidia-modeset/src/nvkms-hdmi.c +++ b/src/nvidia-modeset/src/nvkms-hdmi.c @@ -2017,7 +2017,13 @@ NvBool nvHdmiDpySupportsFrl(const NVDpyEvoRec *pDpyEvo) NvU32 passiveDpDongleMaxPclkKHz; const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; - nvAssert(nvDpyIsHdmiEvo(pDpyEvo)); + /* + * Can't use FRL if HDMI is not supported by the GPU and the monitor + * connection. + */ + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return FALSE; + } /* Can't use FRL if disabled by kernel module param. */ if (nvkms_disable_hdmi_frl()) { diff --git a/src/nvidia-modeset/src/nvkms-modepool.c b/src/nvidia-modeset/src/nvkms-modepool.c index ddcfba100..c80d9444c 100644 --- a/src/nvidia-modeset/src/nvkms-modepool.c +++ b/src/nvidia-modeset/src/nvkms-modepool.c @@ -1263,43 +1263,66 @@ static NvBool ValidateModeTimings( } } - /* reject modes with too high pclk */ + /* + * Reject modes with too high pclk, except when using HDMI FRL or + * DisplayPort. 
FRL and DP have features like DSC that cannot be trivially + * checked against a pixel clock rate limit. Instead: + * + * - DPlib will perform link assessment to determine whether both the + * monitor and GPU can drive a particular bandwidth. + * + * - hdmipacket will perform the equivalent for FRL. + * + * TMDS will only be considered on a connection capable of HDMI FRL for the + * mode being validated if nvHdmiIsTmdsPossible returns TRUE in the + * following callpath: + * + * ValidateMode + * |_ ValidateModeTimings + * |_ nvConstructHwModeTimingsEvo + * |_ GetDfpProtocol + * |_ GetDfpHdmiProtocol + * |_ nvHdmiIsTmdsPossible + */ - if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0) { + if (!(nvHdmiDpySupportsFrl(pDpyEvo) || + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo))) { + if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0) { - NvU32 maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; - NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); - if (pModeTimings->yuv420Mode != NV_YUV420_MODE_NONE) { - realPixelClock /= 2; - } - - if (realPixelClock > maxPixelClockKHz) { - NvU32 hdmi3DPixelClock = realPixelClock; - - if (pModeTimings->hdmi3D) { - hdmi3DPixelClock /= 2; + NvU32 maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; + NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); + if (pModeTimings->yuv420Mode != NV_YUV420_MODE_NONE) { + realPixelClock /= 2; } - if (is3DVisionStereo && - pDpyEvo->stereo3DVision.requiresModetimingPatching && - (realPixelClock - maxPixelClockKHz < 5000)) { + if (realPixelClock > maxPixelClockKHz) { + NvU32 hdmi3DPixelClock = realPixelClock; - nvAssert(!pModeTimings->hdmi3D); + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } - nvEvoLogInfoString(pInfoString, - "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than Display Device maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", - NV_VA_DIV_1000_POINT_1(realPixelClock), - NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + (realPixelClock - maxPixelClockKHz < 5000)) { - } else { + nvAssert(!pModeTimings->hdmi3D); - LogModeValidationEnd(pDispEvo, pInfoString, - "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for Display Device (Max: " NV_FMT_DIV_1000_POINT_1 " MHz)", - NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), - pModeTimings->hdmi3D ? - ", doubled for HDMI 3D" : "", - NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); - return FALSE; + nvEvoLogInfoString(pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than Display Device maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", + NV_VA_DIV_1000_POINT_1(realPixelClock), + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for Display Device (Max: " NV_FMT_DIV_1000_POINT_1 " MHz)", + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + return FALSE; + } } } } diff --git a/src/nvidia-modeset/src/nvkms.c b/src/nvidia-modeset/src/nvkms.c index 4603a6ed0..18bbf9303 100644 --- a/src/nvidia-modeset/src/nvkms.c +++ b/src/nvidia-modeset/src/nvkms.c @@ -1,6 +1,6 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. + * All rights reserved. SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,42 +21,41 @@ * DEALINGS IN THE SOFTWARE. */ -#include "nvkms.h" -#include "nvkms-private.h" #include "nvkms-api.h" +#include "nvkms-private.h" +#include "nvkms.h" -#include "nvkms-types.h" -#include "nvkms-utils.h" +#include "nvkms-3dvision.h" +#include "nvkms-attributes.h" #include "nvkms-console-restore.h" -#include "nvkms-dpy.h" #include "nvkms-dma.h" +#include "nvkms-dpy-override.h" +#include "nvkms-dpy.h" #include "nvkms-evo.h" -#include "nvkms-rm.h" -#include "nvkms-rmapi.h" +#include "nvkms-framelock.h" +#include "nvkms-ioctl.h" #include "nvkms-modepool.h" #include "nvkms-modeset.h" -#include "nvkms-attributes.h" -#include "nvkms-dpy-override.h" -#include "nvkms-framelock.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" #include "nvkms-stereo.h" #include "nvkms-surface.h" -#include "nvkms-3dvision.h" -#include "nvkms-ioctl.h" +#include "nvkms-types.h" +#include "nvkms-utils.h" #include "nvkms-vblank-sem-control.h" -#include "nvkms-headsurface.h" #include "nvkms-headsurface-ioctl.h" #include "nvkms-headsurface-swapgroup.h" +#include "nvkms-headsurface.h" #include "nvkms-flip.h" /* nvFlipEvo */ #include "nvkms-vrr.h" #include "dp/nvdp-connector.h" #include "nvUnixVersion.h" /* NV_VERSION_STRING */ -#include /* NV01_NULL_OBJECT/NV01_ROOT */ +#include /* NV01_NULL_OBJECT/NV01_ROOT */ #include "nv_list.h" - /*! \file * * This source file implements the API of NVKMS, built around open, @@ -72,7 +71,6 @@ * nvKmsIoctl(ALLOC_DEVICE), and freed during nvKmsIoctl(FREE_DEVICE). */ - /* * When the NVKMS device file is opened, the per-open structure could * be used for one of several actions, denoted by its "type". The @@ -81,144 +79,144 @@ * anything, it can never transition to any other type. */ enum NvKmsPerOpenType { - /* - * The per-open is used for making ioctl calls to make requests of - * NVKMS. - */ - NvKmsPerOpenTypeIoctl, + /* + * The per-open is used for making ioctl calls to make requests of + * NVKMS. + */ + NvKmsPerOpenTypeIoctl, - /* - * The per-open is used for granting access to a NVKMS registered - * surface. - */ - NvKmsPerOpenTypeGrantSurface, + /* + * The per-open is used for granting access to a NVKMS registered + * surface. + */ + NvKmsPerOpenTypeGrantSurface, - /* - * The per-open is used for granting permissions. - */ - NvKmsPerOpenTypeGrantPermissions, + /* + * The per-open is used for granting permissions. + */ + NvKmsPerOpenTypeGrantPermissions, - /* - * The per-open is used for granting access to a swap group - */ - NvKmsPerOpenTypeGrantSwapGroup, + /* + * The per-open is used for granting access to a swap group + */ + NvKmsPerOpenTypeGrantSwapGroup, - /* - * The per-open is used to unicast a specific event. - */ - NvKmsPerOpenTypeUnicastEvent, + /* + * The per-open is used to unicast a specific event. + */ + NvKmsPerOpenTypeUnicastEvent, - /* - * The per-open is currently undefined (this is the initial - * state). - */ - NvKmsPerOpenTypeUndefined, + /* + * The per-open is currently undefined (this is the initial + * state). 
+ */ + NvKmsPerOpenTypeUndefined, }; enum NvKmsUnicastEventType { - /* Used by: - * NVKMS_IOCTL_JOIN_SWAP_GROUP */ - NvKmsUnicastEventTypeDeferredRequest, + /* Used by: + * NVKMS_IOCTL_JOIN_SWAP_GROUP */ + NvKmsUnicastEventTypeDeferredRequest, - /* Used by: - * NVKMS_IOCTL_NOTIFY_VBLANK */ - NvKmsUnicastEventTypeVblankNotification, + /* Used by: + * NVKMS_IOCTL_NOTIFY_VBLANK */ + NvKmsUnicastEventTypeVblankNotification, - /* Undefined, this indicates the unicast fd is available for use. */ - NvKmsUnicastEventTypeUndefined, + /* Undefined, this indicates the unicast fd is available for use. */ + NvKmsUnicastEventTypeUndefined, }; struct NvKmsPerOpenConnector { - NVConnectorEvoPtr pConnectorEvo; - NvKmsConnectorHandle nvKmsApiHandle; + NVConnectorEvoPtr pConnectorEvo; + NvKmsConnectorHandle nvKmsApiHandle; }; struct NvKmsPerOpenFrameLock { - NVFrameLockEvoPtr pFrameLockEvo; - int refCnt; - NvKmsFrameLockHandle nvKmsApiHandle; + NVFrameLockEvoPtr pFrameLockEvo; + int refCnt; + NvKmsFrameLockHandle nvKmsApiHandle; }; struct NvKmsPerOpenDisp { - NVDispEvoPtr pDispEvo; - NvKmsDispHandle nvKmsApiHandle; - NvKmsFrameLockHandle frameLockHandle; - NVEvoApiHandlesRec connectorHandles; - struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP]; - NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP]; - NVEvoApiHandlesRec vblankCallbackHandles[NVKMS_MAX_HEADS_PER_DISP]; - NVEvoApiHandlesRec vblankSemControlHandles; + NVDispEvoPtr pDispEvo; + NvKmsDispHandle nvKmsApiHandle; + NvKmsFrameLockHandle frameLockHandle; + NVEvoApiHandlesRec connectorHandles; + struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP]; + NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoApiHandlesRec vblankCallbackHandles[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoApiHandlesRec vblankSemControlHandles; }; struct NvKmsPerOpenDev { - NVDevEvoPtr pDevEvo; - NvKmsDeviceHandle nvKmsApiHandle; - NVEvoApiHandlesRec dispHandles; - NVEvoApiHandlesRec surfaceHandles; - struct NvKmsFlipPermissions flipPermissions; - struct NvKmsModesetPermissions modesetPermissions; - struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES]; - NvBool isPrivileged; - NVEvoApiHandlesRec deferredRequestFifoHandles; - NVEvoApiHandlesRec swapGroupHandles; + NVDevEvoPtr pDevEvo; + NvKmsDeviceHandle nvKmsApiHandle; + NVEvoApiHandlesRec dispHandles; + NVEvoApiHandlesRec surfaceHandles; + struct NvKmsFlipPermissions flipPermissions; + struct NvKmsModesetPermissions modesetPermissions; + struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES]; + NvBool isPrivileged; + NVEvoApiHandlesRec deferredRequestFifoHandles; + NVEvoApiHandlesRec swapGroupHandles; }; struct NvKmsPerOpenEventListEntry { - NVListRec eventListEntry; - struct NvKmsEvent event; + NVListRec eventListEntry; + struct NvKmsEvent event; }; struct NvKmsPerOpen { - nvkms_per_open_handle_t *pOpenKernel; - NvU32 pid; - enum NvKmsClientType clientType; - NVListRec perOpenListEntry; - NVListRec perOpenIoctlListEntry; - enum NvKmsPerOpenType type; + nvkms_per_open_handle_t *pOpenKernel; + NvU32 pid; + enum NvKmsClientType clientType; + NVListRec perOpenListEntry; + NVListRec perOpenIoctlListEntry; + enum NvKmsPerOpenType type; - union { + union { + struct { + NVListRec eventList; + NvU32 eventInterestMask; + NVEvoApiHandlesRec devHandles; + NVEvoApiHandlesRec frameLockHandles; + } ioctl; + + struct { + NVSurfaceEvoPtr pSurfaceEvo; + } grantSurface; + + struct { + NVDevEvoPtr pDevEvo; + NVSwapGroupPtr pSwapGroup; + } grantSwapGroup; + + struct { + NVDevEvoPtr 
pDevEvo; + struct NvKmsPermissions permissions; + } grantPermissions; + + struct { + /* + * A unicast event NvKmsPerOpen is assigned to an object, so that + * that object can generate events on the unicast event. Store a + * pointer to that object, so that we can clear the pointer when the + * unicast event NvKmsPerOpen is closed. + */ + enum NvKmsUnicastEventType type; + union { struct { - NVListRec eventList; - NvU32 eventInterestMask; - NVEvoApiHandlesRec devHandles; - NVEvoApiHandlesRec frameLockHandles; - } ioctl; + NVDeferredRequestFifoPtr pDeferredRequestFifo; + } deferred; struct { - NVSurfaceEvoPtr pSurfaceEvo; - } grantSurface; - - struct { - NVDevEvoPtr pDevEvo; - NVSwapGroupPtr pSwapGroup; - } grantSwapGroup; - - struct { - NVDevEvoPtr pDevEvo; - struct NvKmsPermissions permissions; - } grantPermissions; - - struct { - /* - * A unicast event NvKmsPerOpen is assigned to an object, so that - * that object can generate events on the unicast event. Store a - * pointer to that object, so that we can clear the pointer when the - * unicast event NvKmsPerOpen is closed. - */ - enum NvKmsUnicastEventType type; - union { - struct { - NVDeferredRequestFifoPtr pDeferredRequestFifo; - } deferred; - - struct { - NvKmsGenericHandle hCallback; - struct NvKmsPerOpenDisp *pOpenDisp; - NvU32 apiHead; - } vblankNotification; - } e; - } unicastEvent; - }; + NvKmsGenericHandle hCallback; + struct NvKmsPerOpenDisp *pOpenDisp; + NvU32 apiHead; + } vblankNotification; + } e; + } unicastEvent; + }; }; static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo); @@ -234,158 +232,141 @@ static NVListRec perOpenIoctlList = NV_LIST_INIT(&perOpenIoctlList); * Check if there is an NvKmsPerOpenDev on this NvKmsPerOpen that has * the specified deviceId. */ -static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, NvU32 deviceId) -{ - struct NvKmsPerOpenDev *pOpenDev; - NvKmsGenericHandle dev; +static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, + NvU32 deviceId) { + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, - pOpenDev, dev) { - if (pOpenDev->pDevEvo->usesTegraDevice && - (deviceId == NVKMS_DEVICE_ID_TEGRA)) { - return TRUE; - } else if (pOpenDev->pDevEvo->deviceId == deviceId) { - return TRUE; - } + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, pOpenDev, dev) { + if (pOpenDev->pDevEvo->usesTegraDevice && + (deviceId == NVKMS_DEVICE_ID_TEGRA)) { + return TRUE; + } else if (pOpenDev->pDevEvo->deviceId == deviceId) { + return TRUE; } + } - return FALSE; + return FALSE; } - /*! * Get the NvKmsPerOpenDev described by NvKmsPerOpen + deviceHandle. */ -static struct NvKmsPerOpenDev *GetPerOpenDev( - const struct NvKmsPerOpen *pOpen, - const NvKmsDeviceHandle deviceHandle) -{ - if (pOpen == NULL) { - return NULL; - } +static struct NvKmsPerOpenDev * +GetPerOpenDev(const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle) { + if (pOpen == NULL) { + return NULL; + } - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle); + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle); } - /*! * Get the NvKmsPerOpenDev and NvKmsPerOpenDisp described by * NvKmsPerOpen + deviceHandle + dispHandle. 
*/ -static NvBool GetPerOpenDevAndDisp( - const struct NvKmsPerOpen *pOpen, - const NvKmsDeviceHandle deviceHandle, - const NvKmsDispHandle dispHandle, - struct NvKmsPerOpenDev **ppOpenDev, - struct NvKmsPerOpenDisp **ppOpenDisp) -{ - struct NvKmsPerOpenDev *pOpenDev; - struct NvKmsPerOpenDisp *pOpenDisp; +static NvBool GetPerOpenDevAndDisp(const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + struct NvKmsPerOpenDev **ppOpenDev, + struct NvKmsPerOpenDisp **ppOpenDisp) { + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; - pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); - if (pOpenDev == NULL) { - return FALSE; - } + if (pOpenDev == NULL) { + return FALSE; + } - pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, - dispHandle); + pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); - if (pOpenDisp == NULL) { - return FALSE; - } + if (pOpenDisp == NULL) { + return FALSE; + } - *ppOpenDev = pOpenDev; - *ppOpenDisp = pOpenDisp; + *ppOpenDev = pOpenDev; + *ppOpenDisp = pOpenDisp; - return TRUE; + return TRUE; } - /*! * Get the NvKmsPerOpenDisp described by NvKmsPerOpen + deviceHandle + * dispHandle. */ -static struct NvKmsPerOpenDisp *GetPerOpenDisp( - const struct NvKmsPerOpen *pOpen, - const NvKmsDeviceHandle deviceHandle, - const NvKmsDispHandle dispHandle) -{ - struct NvKmsPerOpenDev *pOpenDev; +static struct NvKmsPerOpenDisp * +GetPerOpenDisp(const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle) { + struct NvKmsPerOpenDev *pOpenDev; - pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); - if (pOpenDev == NULL) { - return NULL; - } + if (pOpenDev == NULL) { + return NULL; + } - return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); + return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); } - /*! * Get the NvKmsPerOpenConnector described by NvKmsPerOpen + * deviceHandle + dispHandle + connectorHandle. */ -static struct NvKmsPerOpenConnector *GetPerOpenConnector( - const struct NvKmsPerOpen *pOpen, - const NvKmsDeviceHandle deviceHandle, - const NvKmsDispHandle dispHandle, - const NvKmsConnectorHandle connectorHandle) -{ - struct NvKmsPerOpenDisp *pOpenDisp; +static struct NvKmsPerOpenConnector * +GetPerOpenConnector(const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NvKmsConnectorHandle connectorHandle) { + struct NvKmsPerOpenDisp *pOpenDisp; - pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); - if (pOpenDisp == NULL) { - return NULL; - } + if (pOpenDisp == NULL) { + return NULL; + } - return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles, - connectorHandle); + return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles, + connectorHandle); } - /*! * Get the NVDpyEvoRec described by NvKmsPerOpen + deviceHandle + * dispHandle + dpyId. 
*/ -static NVDpyEvoRec *GetPerOpenDpy( - const struct NvKmsPerOpen *pOpen, - const NvKmsDeviceHandle deviceHandle, - const NvKmsDispHandle dispHandle, - const NVDpyId dpyId) -{ - struct NvKmsPerOpenDisp *pOpenDisp; +static NVDpyEvoRec *GetPerOpenDpy(const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NVDpyId dpyId) { + struct NvKmsPerOpenDisp *pOpenDisp; - pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); - if (pOpenDisp == NULL) { - return NULL; - } + if (pOpenDisp == NULL) { + return NULL; + } - return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId); + return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId); } - /*! * Get the NvKmsPerOpenFrameLock described by pOpen + frameLockHandle. */ -static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock( - const struct NvKmsPerOpen *pOpen, - NvKmsFrameLockHandle frameLockHandle) -{ - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); +static struct NvKmsPerOpenFrameLock * +GetPerOpenFrameLock(const struct NvKmsPerOpen *pOpen, + NvKmsFrameLockHandle frameLockHandle) { + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, - frameLockHandle); + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + frameLockHandle); } - /*! * Free the NvKmsPerOpenFrameLock associated with this NvKmsPerOpenDisp. * @@ -399,33 +380,30 @@ static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock( * NvKmsPerOpenFrameLock should be freed. */ static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDisp *pOpenDisp) -{ - struct NvKmsPerOpenFrameLock *pOpenFrameLock; + struct NvKmsPerOpenDisp *pOpenDisp) { + struct NvKmsPerOpenFrameLock *pOpenFrameLock; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - pOpenFrameLock = - nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, - pOpenDisp->frameLockHandle); - if (pOpenFrameLock == NULL) { - return; - } + pOpenFrameLock = nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenDisp->frameLockHandle); + if (pOpenFrameLock == NULL) { + return; + } - pOpenDisp->frameLockHandle = 0; + pOpenDisp->frameLockHandle = 0; - pOpenFrameLock->refCnt--; + pOpenFrameLock->refCnt--; - if (pOpenFrameLock->refCnt != 0) { - return; - } + if (pOpenFrameLock->refCnt != 0) { + return; + } - nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles, - pOpenFrameLock->nvKmsApiHandle); - nvFree(pOpenFrameLock); + nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock->nvKmsApiHandle); + nvFree(pOpenFrameLock); } - /*! * Allocate and initialize an NvKmsPerOpenFrameLock. * @@ -443,175 +421,158 @@ static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen, * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding * NvKmsPerOpenFrameLock should be allocated. 
*/ -static NvBool AllocPerOpenFrameLock( - struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDisp *pOpenDisp) -{ - struct NvKmsPerOpenFrameLock *pOpenFrameLock; - NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo; - NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; - NvKmsGenericHandle handle; +static NvBool AllocPerOpenFrameLock(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDisp *pOpenDisp) { + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvKmsGenericHandle handle; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - if (pFrameLockEvo == NULL) { - return TRUE; + if (pFrameLockEvo == NULL) { + return TRUE; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock, handle) { + if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { + goto done; } + } - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, - pOpenFrameLock, handle) { - if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { - goto done; - } - } + pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock)); - pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock)); + if (pOpenFrameLock == NULL) { + return FALSE; + } - if (pOpenFrameLock == NULL) { - return FALSE; - } + pOpenFrameLock->pFrameLockEvo = pFrameLockEvo; + pOpenFrameLock->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock); - pOpenFrameLock->pFrameLockEvo = pFrameLockEvo; - pOpenFrameLock->nvKmsApiHandle = - nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock); - - if (pOpenFrameLock->nvKmsApiHandle == 0) { - nvFree(pOpenFrameLock); - return FALSE; - } + if (pOpenFrameLock->nvKmsApiHandle == 0) { + nvFree(pOpenFrameLock); + return FALSE; + } done: - pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle; - pOpenFrameLock->refCnt++; - return TRUE; + pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle; + pOpenFrameLock->refCnt++; + return TRUE; } - /*! * Get the NvKmsConnectorHandle that corresponds to the given * NVConnectorEvoRec on the NvKmsPerOpen + deviceHandle + dispHandle. */ static NvKmsConnectorHandle ConnectorEvoToConnectorHandle( - const struct NvKmsPerOpen *pOpen, - const NvKmsDeviceHandle deviceHandle, - const NvKmsDispHandle dispHandle, - const NVConnectorEvoRec *pConnectorEvo) -{ - struct NvKmsPerOpenDisp *pOpenDisp; - struct NvKmsPerOpenConnector *pOpenConnector; - NvKmsGenericHandle connector; + const struct NvKmsPerOpen *pOpen, const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, const NVConnectorEvoRec *pConnectorEvo) { + struct NvKmsPerOpenDisp *pOpenDisp; + struct NvKmsPerOpenConnector *pOpenConnector; + NvKmsGenericHandle connector; - pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); - - if (pOpenDisp == NULL) { - return 0; - } - - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, - pOpenConnector, connector) { - if (pOpenConnector->pConnectorEvo == pConnectorEvo) { - return pOpenConnector->nvKmsApiHandle; - } - } + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + if (pOpenDisp == NULL) { return 0; -} + } + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, + pOpenConnector, connector) { + if (pOpenConnector->pConnectorEvo == pConnectorEvo) { + return pOpenConnector->nvKmsApiHandle; + } + } + + return 0; +} /*! 
* Get the NvKmsDeviceHandle and NvKmsDispHandle that corresponds to * the given NVDispEvoRec on the NvKmsPerOpen. */ -static NvBool DispEvoToDevAndDispHandles( - const struct NvKmsPerOpen *pOpen, - const NVDispEvoRec *pDispEvo, - NvKmsDeviceHandle *pDeviceHandle, - NvKmsDispHandle *pDispHandle) -{ - NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; - struct NvKmsPerOpenDev *pOpenDev; - NvKmsGenericHandle dev; +static NvBool DispEvoToDevAndDispHandles(const struct NvKmsPerOpen *pOpen, + const NVDispEvoRec *pDispEvo, + NvKmsDeviceHandle *pDeviceHandle, + NvKmsDispHandle *pDispHandle) { + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, - pOpenDev, dev) { + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, pOpenDev, dev) { - struct NvKmsPerOpenDisp *pOpenDisp; - NvKmsGenericHandle disp; + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; - if (pOpenDev->pDevEvo != pDevEvo) { - continue; - } - - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, - pOpenDisp, disp) { - if (pOpenDisp->pDispEvo != pDispEvo) { - continue; - } - - *pDeviceHandle = pOpenDev->nvKmsApiHandle; - *pDispHandle = pOpenDisp->nvKmsApiHandle; - - return TRUE; - } + if (pOpenDev->pDevEvo != pDevEvo) { + continue; } - return FALSE; -} + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, pOpenDisp, + disp) { + if (pOpenDisp->pDispEvo != pDispEvo) { + continue; + } + *pDeviceHandle = pOpenDev->nvKmsApiHandle; + *pDispHandle = pOpenDisp->nvKmsApiHandle; + + return TRUE; + } + } + + return FALSE; +} /*! * Get the NvKmsPerOpenDev that corresponds to the given NVDevEvoRec * on the NvKmsPerOpen. */ -static struct NvKmsPerOpenDev *DevEvoToOpenDev( - const struct NvKmsPerOpen *pOpen, - const NVDevEvoRec *pDevEvo) -{ - struct NvKmsPerOpenDev *pOpenDev; - NvKmsGenericHandle dev; +static struct NvKmsPerOpenDev *DevEvoToOpenDev(const struct NvKmsPerOpen *pOpen, + const NVDevEvoRec *pDevEvo) { + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, - pOpenDev, dev) { - if (pOpenDev->pDevEvo == pDevEvo) { - return pOpenDev; - } + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, pOpenDev, dev) { + if (pOpenDev->pDevEvo == pDevEvo) { + return pOpenDev; } + } - return NULL; + return NULL; } - /*! * Get the NvKmsFrameLockHandle that corresponds to the given * NVFrameLockEvoRec on the NvKmsPerOpen. 
*/ -static NvBool FrameLockEvoToFrameLockHandle( - const struct NvKmsPerOpen *pOpen, - const NVFrameLockEvoRec *pFrameLockEvo, - NvKmsFrameLockHandle *pFrameLockHandle) -{ - struct NvKmsPerOpenFrameLock *pOpenFrameLock; - NvKmsGenericHandle handle; +static NvBool +FrameLockEvoToFrameLockHandle(const struct NvKmsPerOpen *pOpen, + const NVFrameLockEvoRec *pFrameLockEvo, + NvKmsFrameLockHandle *pFrameLockHandle) { + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NvKmsGenericHandle handle; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, - pOpenFrameLock, handle) { + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock, handle) { - if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { - *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle; - return TRUE; - } + if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { + *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle; + return TRUE; } + } - return FALSE; + return FALSE; } - /*! * Clear the specified NvKmsPerOpenConnector. * @@ -619,16 +580,14 @@ static NvBool FrameLockEvoToFrameLockHandle( * NvKmsPerOpenConnector is assigned. * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to be cleared. */ -static void ClearPerOpenConnector( - struct NvKmsPerOpenDisp *pOpenDisp, - struct NvKmsPerOpenConnector *pOpenConnector) -{ - nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles, - pOpenConnector->nvKmsApiHandle); - nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector)); +static void +ClearPerOpenConnector(struct NvKmsPerOpenDisp *pOpenDisp, + struct NvKmsPerOpenConnector *pOpenConnector) { + nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles, + pOpenConnector->nvKmsApiHandle); + nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector)); } - /*! * Initialize an NvKmsPerOpenConnector. * @@ -641,25 +600,23 @@ static void ClearPerOpenConnector( * \return If the NvKmsPerOpenConnector is successfully initialized, * return TRUE. Otherwise, return FALSE. */ -static NvBool InitPerOpenConnector( - struct NvKmsPerOpenDisp *pOpenDisp, - struct NvKmsPerOpenConnector *pOpenConnector, - NVConnectorEvoPtr pConnectorEvo) -{ - pOpenConnector->nvKmsApiHandle = - nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector); +static NvBool InitPerOpenConnector(struct NvKmsPerOpenDisp *pOpenDisp, + struct NvKmsPerOpenConnector *pOpenConnector, + NVConnectorEvoPtr pConnectorEvo) { + pOpenConnector->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector); - if (pOpenConnector->nvKmsApiHandle == 0) { - goto fail; - } + if (pOpenConnector->nvKmsApiHandle == 0) { + goto fail; + } - pOpenConnector->pConnectorEvo = pConnectorEvo; + pOpenConnector->pConnectorEvo = pConnectorEvo; - return TRUE; + return TRUE; fail: - ClearPerOpenConnector(pOpenDisp, pOpenConnector); - return FALSE; + ClearPerOpenConnector(pOpenDisp, pOpenConnector); + return FALSE; } /*! @@ -669,45 +626,42 @@ fail: * is assigned. * \param[in,out] pDispEvo The NvKmsPerOpenDisp to be cleared. 
*/ -static void ClearPerOpenDisp( - struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDev *pOpenDev, - struct NvKmsPerOpenDisp *pOpenDisp) -{ - struct NvKmsPerOpenConnector *pOpenConnector; - NvKmsGenericHandle connector; +static void ClearPerOpenDisp(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsPerOpenDisp *pOpenDisp) { + struct NvKmsPerOpenConnector *pOpenConnector; + NvKmsGenericHandle connector; - NVVBlankCallbackPtr pCallbackData; - NvKmsGenericHandle callback; + NVVBlankCallbackPtr pCallbackData; + NvKmsGenericHandle callback; - FreePerOpenFrameLock(pOpen, pOpenDisp); + FreePerOpenFrameLock(pOpen, pOpenDisp); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, - pOpenConnector, connector) { - ClearPerOpenConnector(pOpenDisp, pOpenConnector); + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, + pOpenConnector, connector) { + ClearPerOpenConnector(pOpenDisp, pOpenConnector); + } + + /* Destroy the API handle structures. */ + nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles); + + for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { + nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankCallbackHandles[i], + pCallbackData, callback) { + nvRemoveUnicastEvent(pCallbackData->pUserData); } + nvEvoDestroyApiHandles(&pOpenDisp->vblankCallbackHandles[i]); + } - /* Destroy the API handle structures. */ - nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles); + nvEvoDestroyApiHandles(&pOpenDisp->vblankSemControlHandles); - for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { - nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]); + nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankCallbackHandles[i], - pCallbackData, callback) { - nvRemoveUnicastEvent(pCallbackData->pUserData); - } - nvEvoDestroyApiHandles(&pOpenDisp->vblankCallbackHandles[i]); - } - - nvEvoDestroyApiHandles(&pOpenDisp->vblankSemControlHandles); - - nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle); - - nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp)); + nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp)); } - /*! * Initialize an NvKmsPerOpenDisp. * @@ -719,230 +673,217 @@ static void ClearPerOpenDisp( * \return If the NvKmsPerOpenDisp is successfully initialized, return TRUE. * Otherwise, return FALSE. 
*/ -static NvBool InitPerOpenDisp( - struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDev *pOpenDev, - struct NvKmsPerOpenDisp *pOpenDisp, - NVDispEvoPtr pDispEvo) -{ - NVConnectorEvoPtr pConnectorEvo; - NvU32 connector; +static NvBool InitPerOpenDisp(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsPerOpenDisp *pOpenDisp, + NVDispEvoPtr pDispEvo) { + NVConnectorEvoPtr pConnectorEvo; + NvU32 connector; - pOpenDisp->nvKmsApiHandle = - nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp); + pOpenDisp->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp); - if (pOpenDisp->nvKmsApiHandle == 0) { - goto fail; + if (pOpenDisp->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenDisp->pDispEvo = pDispEvo; + + if (nvListCount(&pDispEvo->connectorList) >= + ARRAY_LEN(pOpenDisp->connector)) { + nvAssert(!"More connectors on this disp than NVKMS can handle."); + goto fail; + } + + if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles, + ARRAY_LEN(pOpenDisp->connector))) { + goto fail; + } + + connector = 0; + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector], + pConnectorEvo)) { + goto fail; } + connector++; + } - pOpenDisp->pDispEvo = pDispEvo; - - if (nvListCount(&pDispEvo->connectorList) >= - ARRAY_LEN(pOpenDisp->connector)) { - nvAssert(!"More connectors on this disp than NVKMS can handle."); - goto fail; - } - - if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles, - ARRAY_LEN(pOpenDisp->connector))) { - goto fail; - } - - connector = 0; - FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { - if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector], - pConnectorEvo)) { - goto fail; - } - connector++; - } - - /* Initialize the vblankSyncObjectHandles for each head. */ - for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { - if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i], - NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { - goto fail; - } - } - - /* Initialize the vblankCallbackHandles for each head. - * - * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply - * here, but we need something. */ - for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { - if (!nvEvoInitApiHandles(&pOpenDisp->vblankCallbackHandles[i], - NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { - goto fail; - } - } - - /* Initialize the vblankSemControlHandles. - * - * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply - * here, but we need something. */ - if (!nvEvoInitApiHandles(&pOpenDisp->vblankSemControlHandles, + /* Initialize the vblankSyncObjectHandles for each head. */ + for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { + if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i], NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { - goto fail; + goto fail; } + } - if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) { - goto fail; + /* Initialize the vblankCallbackHandles for each head. + * + * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply + * here, but we need something. */ + for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { + if (!nvEvoInitApiHandles(&pOpenDisp->vblankCallbackHandles[i], + NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { + goto fail; } + } - return TRUE; + /* Initialize the vblankSemControlHandles. + * + * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply + * here, but we need something. 
*/ + if (!nvEvoInitApiHandles(&pOpenDisp->vblankSemControlHandles, + NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { + goto fail; + } + + if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) { + goto fail; + } + + return TRUE; fail: - ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); - return FALSE; + ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); + return FALSE; } /*! * Free any SwapGroups tracked by this pOpenDev. */ -static void FreeSwapGroups(struct NvKmsPerOpenDev *pOpenDev) -{ - NVSwapGroupRec *pSwapGroup; - NvKmsSwapGroupHandle handle; - NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; +static void FreeSwapGroups(struct NvKmsPerOpenDev *pOpenDev) { + NVSwapGroupRec *pSwapGroup; + NvKmsSwapGroupHandle handle; + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->swapGroupHandles, - pSwapGroup, - handle) { - nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->swapGroupHandles, pSwapGroup, + handle) { + nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); - if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { - nvHsFreeSwapGroup(pDevEvo, pSwapGroup); - } else { - nvHsDecrementSwapGroupRefCnt(pSwapGroup); - } + if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + nvHsFreeSwapGroup(pDevEvo, pSwapGroup); + } else { + nvHsDecrementSwapGroupRefCnt(pSwapGroup); } + } } /*! * Check that the NvKmsPermissions make sense. */ -static NvBool ValidateNvKmsPermissions( - const NVDevEvoRec *pDevEvo, - const struct NvKmsPermissions *pPermissions, - enum NvKmsClientType clientType) -{ - if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { - NvU32 d, h; +static NvBool +ValidateNvKmsPermissions(const NVDevEvoRec *pDevEvo, + const struct NvKmsPermissions *pPermissions, + enum NvKmsClientType clientType) { + if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; - for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { - for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { + for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { - NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask; + NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask; - if (layerMask == 0) { - continue; - } - - if (nvHasBitAboveMax(layerMask, pDevEvo->apiHead[h].numLayers)) { - return FALSE; - } - - /* - * If the above blocks didn't 'continue', then there - * are permissions specified for this disp+head. Is - * the specified disp+head in range for the current - * configuration? - */ - if (d >= pDevEvo->nDispEvo) { - return FALSE; - } - - if (h >= pDevEvo->numApiHeads) { - return FALSE; - } - } - } - } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { - NvU32 d, h; - - for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { - for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { - - NVDpyIdList dpyIdList = - pPermissions->modeset.disp[d].head[h].dpyIdList; - - if (nvDpyIdListIsEmpty(dpyIdList)) { - continue; - } - - /* - * If the above blocks didn't 'continue', then there - * are permissions specified for this disp+head. Is - * the specified disp+head in range for the current - * configuration? 
- */ - if (d >= pDevEvo->nDispEvo) { - return FALSE; - } - - if (h >= pDevEvo->numApiHeads) { - return FALSE; - } - } - } - } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) { - - /* Only kapi uses this permission type, so disallow it from userspace */ - if (clientType != NVKMS_CLIENT_KERNEL_SPACE) { - return FALSE; + if (layerMask == 0) { + continue; } - } else { - return FALSE; + if (nvHasBitAboveMax(layerMask, pDevEvo->apiHead[h].numLayers)) { + return FALSE; + } + + /* + * If the above blocks didn't 'continue', then there + * are permissions specified for this disp+head. Is + * the specified disp+head in range for the current + * configuration? + */ + if (d >= pDevEvo->nDispEvo) { + return FALSE; + } + + if (h >= pDevEvo->numApiHeads) { + return FALSE; + } + } + } + } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { + + NVDpyIdList dpyIdList = pPermissions->modeset.disp[d].head[h].dpyIdList; + + if (nvDpyIdListIsEmpty(dpyIdList)) { + continue; + } + + /* + * If the above blocks didn't 'continue', then there + * are permissions specified for this disp+head. Is + * the specified disp+head in range for the current + * configuration? + */ + if (d >= pDevEvo->nDispEvo) { + return FALSE; + } + + if (h >= pDevEvo->numApiHeads) { + return FALSE; + } + } + } + } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) { + + /* Only kapi uses this permission type, so disallow it from userspace */ + if (clientType != NVKMS_CLIENT_KERNEL_SPACE) { + return FALSE; } - return TRUE; + } else { + return FALSE; + } + + return TRUE; } /*! * Assign pPermissions with the maximum permissions possible for * the pDevEvo. 
*/ -static void AssignFullNvKmsFlipPermissions( - const NVDevEvoRec *pDevEvo, - struct NvKmsFlipPermissions *pPermissions) -{ - NvU32 dispIndex, apiHead; +static void +AssignFullNvKmsFlipPermissions(const NVDevEvoRec *pDevEvo, + struct NvKmsFlipPermissions *pPermissions) { + NvU32 dispIndex, apiHead; - nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); + nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); - for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { - for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { - pPermissions->disp[dispIndex].head[apiHead].layerMask = - NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1; - } + for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + pPermissions->disp[dispIndex].head[apiHead].layerMask = + NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1; } + } } static void AssignFullNvKmsModesetPermissions( - const NVDevEvoRec *pDevEvo, - struct NvKmsModesetPermissions *pPermissions) -{ - NvU32 dispIndex, apiHead; + const NVDevEvoRec *pDevEvo, struct NvKmsModesetPermissions *pPermissions) { + NvU32 dispIndex, apiHead; - nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); + nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); - for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { - for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { - pPermissions->disp[dispIndex].head[apiHead].dpyIdList = - nvAllDpyIdList(); - } + for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + pPermissions->disp[dispIndex].head[apiHead].dpyIdList = nvAllDpyIdList(); } + } } -static void AssignFullNvKmsPermissions( - struct NvKmsPerOpenDev *pOpenDev -) -{ - NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; +static void AssignFullNvKmsPermissions(struct NvKmsPerOpenDev *pOpenDev) { + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; - AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions); - AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions); + AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions); + AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions); } /*! @@ -951,87 +892,86 @@ static void AssignFullNvKmsPermissions( * \param pOpenDev The per-open device structure for the new modeset owner. * \return FALSE if there was already a modeset owner. TRUE otherwise. */ -static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) -{ - NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; +static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) { + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; - if (pDevEvo->modesetOwner == pOpenDev) { - return TRUE; - } - - if (pDevEvo->modesetOwner != NULL) { - return FALSE; - } - - /* - * If claiming modeset ownership, undo any SST forcing imposed by - * console restore. - */ - if (pOpenDev != pDevEvo->pNvKmsOpenDev) { - nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); - } - - pDevEvo->modesetOwner = pOpenDev; - pDevEvo->modesetOwnerChanged = TRUE; - - AssignFullNvKmsPermissions(pOpenDev); + if (pDevEvo->modesetOwner == pOpenDev) { return TRUE; + } + + if (pDevEvo->modesetOwner != NULL) { + return FALSE; + } + + /* + * If claiming modeset ownership, undo any SST forcing imposed by + * console restore. 
+ */ + if (pOpenDev != pDevEvo->pNvKmsOpenDev) { + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + } + + pDevEvo->modesetOwner = pOpenDev; + pDevEvo->modesetOwnerChanged = TRUE; + + AssignFullNvKmsPermissions(pOpenDev); + return TRUE; } /* * If not NULL, remove pRemoveFlip from pFlip. Returns true if there are still * some remaining permissions. */ -static NvBool RemoveFlipPermissions(struct NvKmsFlipPermissions *pFlip, - const struct NvKmsFlipPermissions *pRemoveFlip) -{ - NvU32 d, h, dLen, hLen; - NvBool remainingPermissions = FALSE; +static NvBool +RemoveFlipPermissions(struct NvKmsFlipPermissions *pFlip, + const struct NvKmsFlipPermissions *pRemoveFlip) { + NvU32 d, h, dLen, hLen; + NvBool remainingPermissions = FALSE; - dLen = ARRAY_LEN(pFlip->disp); - for (d = 0; d < dLen; d++) { - hLen = ARRAY_LEN(pFlip->disp[d].head); - for (h = 0; h < hLen; h++) { + dLen = ARRAY_LEN(pFlip->disp); + for (d = 0; d < dLen; d++) { + hLen = ARRAY_LEN(pFlip->disp[d].head); + for (h = 0; h < hLen; h++) { - if (pRemoveFlip) { - pFlip->disp[d].head[h].layerMask &= - ~pRemoveFlip->disp[d].head[h].layerMask; - } + if (pRemoveFlip) { + pFlip->disp[d].head[h].layerMask &= + ~pRemoveFlip->disp[d].head[h].layerMask; + } - remainingPermissions |= (pFlip->disp[d].head[h].layerMask != 0); - } + remainingPermissions |= (pFlip->disp[d].head[h].layerMask != 0); } + } - return remainingPermissions; + return remainingPermissions; } /* * If not NULL, remove pRemoveModeset from pModeset. Returns true if there are * still some remaining permissions. */ -static NvBool RemoveModesetPermissions(struct NvKmsModesetPermissions *pModeset, - const struct NvKmsModesetPermissions *pRemoveModeset) -{ - NvU32 d, h, dLen, hLen; - NvBool remainingPermissions = FALSE; +static NvBool +RemoveModesetPermissions(struct NvKmsModesetPermissions *pModeset, + const struct NvKmsModesetPermissions *pRemoveModeset) { + NvU32 d, h, dLen, hLen; + NvBool remainingPermissions = FALSE; - dLen = ARRAY_LEN(pModeset->disp); - for (d = 0; d < dLen; d++) { - hLen = ARRAY_LEN(pModeset->disp[d].head); - for (h = 0; h < hLen; h++) { + dLen = ARRAY_LEN(pModeset->disp); + for (d = 0; d < dLen; d++) { + hLen = ARRAY_LEN(pModeset->disp[d].head); + for (h = 0; h < hLen; h++) { - if (pRemoveModeset) { - pModeset->disp[d].head[h].dpyIdList = nvDpyIdListMinusDpyIdList( - pModeset->disp[d].head[h].dpyIdList, - pRemoveModeset->disp[d].head[h].dpyIdList); - } + if (pRemoveModeset) { + pModeset->disp[d].head[h].dpyIdList = nvDpyIdListMinusDpyIdList( + pModeset->disp[d].head[h].dpyIdList, + pRemoveModeset->disp[d].head[h].dpyIdList); + } - remainingPermissions |= - !nvDpyIdListIsEmpty(pModeset->disp[d].head[h].dpyIdList); - } + remainingPermissions |= + !nvDpyIdListIsEmpty(pModeset->disp[d].head[h].dpyIdList); } + } - return remainingPermissions; + return remainingPermissions; } /*! @@ -1044,87 +984,83 @@ static NvBool RemoveModesetPermissions(struct NvKmsModesetPermissions *pModeset, * NvKmsPerOpen::grantPermissions and reset NvKmsPerOpen::type to * Undefined. 
*/ -static void RevokePermissionsInternal( - const NvU32 typeBitmask, - NVDevEvoRec *pDevEvo, - const struct NvKmsPerOpenDev *pOpenDevExclude) -{ - struct NvKmsPerOpen *pOpen; +static void +RevokePermissionsInternal(const NvU32 typeBitmask, NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDevExclude) { + struct NvKmsPerOpen *pOpen; - nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { - if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) && - (pOpen->grantPermissions.pDevEvo == pDevEvo) && - (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) { - nvkms_memset(&pOpen->grantPermissions, 0, - sizeof(pOpen->grantPermissions)); - pOpen->type = NvKmsPerOpenTypeUndefined; - } - - if (pOpen->type == NvKmsPerOpenTypeIoctl) { - - struct NvKmsPerOpenDev *pOpenDev = - DevEvoToOpenDev(pOpen, pDevEvo); - - if (pOpenDev == NULL) { - continue; - } - - if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) { - continue; - } - - if (pOpenDev == pDevEvo->modesetSubOwner && - (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER))) { - FreeSwapGroups(pOpenDev); - pDevEvo->modesetSubOwner = NULL; - } - - /* - * Clients with sub-owner permission (or better) don't get flipping - * or modeset permission revoked. - */ - if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { - continue; - } - - if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) { - nvkms_memset(&pOpenDev->flipPermissions, 0, - sizeof(pOpenDev->flipPermissions)); - } - - if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) { - nvkms_memset(&pOpenDev->modesetPermissions, 0, - sizeof(pOpenDev->modesetPermissions)); - } - } + if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) && + (pOpen->grantPermissions.pDevEvo == pDevEvo) && + (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) { + nvkms_memset(&pOpen->grantPermissions, 0, + sizeof(pOpen->grantPermissions)); + pOpen->type = NvKmsPerOpenTypeUndefined; } + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + if (pOpenDev == NULL) { + continue; + } + + if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) { + continue; + } + + if (pOpenDev == pDevEvo->modesetSubOwner && + (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER))) { + FreeSwapGroups(pOpenDev); + pDevEvo->modesetSubOwner = NULL; + } + + /* + * Clients with sub-owner permission (or better) don't get flipping + * or modeset permission revoked. + */ + if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + continue; + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) { + nvkms_memset(&pOpenDev->flipPermissions, 0, + sizeof(pOpenDev->flipPermissions)); + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) { + nvkms_memset(&pOpenDev->modesetPermissions, 0, + sizeof(pOpenDev->modesetPermissions)); + } + } + } } -static void RestoreConsole(NVDevEvoPtr pDevEvo) -{ - // Try to issue a modeset and flip to the framebuffer console surface. - const NvBool bFail = nvkms_test_fail_alloc_core_channel( - FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE); - - if (bFail || !nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) { - // If that didn't work, free the core channel to trigger RM's console - // restore code. 
- FreeSurfaceCtxDmasForAllOpens(pDevEvo); - DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo); - nvFreeCoreChannelEvo(pDevEvo); +static void RestoreConsole(NVDevEvoPtr pDevEvo) { + // Try to issue a modeset and flip to the framebuffer console surface. + const NvBool bFail = nvkms_test_fail_alloc_core_channel( + FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE); - // Reallocate the core channel right after freeing it. This makes sure - // that it's allocated and ready right away if another NVKMS client is - // started. - if ((!bFail) && nvAllocCoreChannelEvo(pDevEvo)) { - nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); - EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo); - AllocSurfaceCtxDmasForAllOpens(pDevEvo); - } else { - nvRevokeDevice(pDevEvo); - } + if (bFail || !nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) { + // If that didn't work, free the core channel to trigger RM's console + // restore code. + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo); + nvFreeCoreChannelEvo(pDevEvo); + + // Reallocate the core channel right after freeing it. This makes sure + // that it's allocated and ready right away if another NVKMS client is + // started. + if ((!bFail) && nvAllocCoreChannelEvo(pDevEvo)) { + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo); + AllocSurfaceCtxDmasForAllOpens(pDevEvo); + } else { + nvRevokeDevice(pDevEvo); } + } } /*! @@ -1134,27 +1070,26 @@ static void RestoreConsole(NVDevEvoPtr pDevEvo) * ownership. * \return FALSE if pOpenDev is not the modeset owner, TRUE otherwise. */ -static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) -{ - NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; +static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) { + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; - if (pDevEvo->modesetOwner != pOpenDev) { - // Only the current owner can release ownership. - return FALSE; - } + if (pDevEvo->modesetOwner != pOpenDev) { + // Only the current owner can release ownership. + return FALSE; + } - FreeSwapGroups(pOpenDev); + FreeSwapGroups(pOpenDev); - pDevEvo->modesetOwner = NULL; - pDevEvo->modesetOwnerChanged = TRUE; - pDevEvo->handleConsoleHotplugs = TRUE; + pDevEvo->modesetOwner = NULL; + pDevEvo->modesetOwnerChanged = TRUE; + pDevEvo->handleConsoleHotplugs = TRUE; - RestoreConsole(pDevEvo); - RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | - NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) | - NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER), - pDevEvo, NULL /* pOpenDevExclude */); - return TRUE; + RestoreConsole(pDevEvo); + RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER), + pDevEvo, NULL /* pOpenDevExclude */); + return TRUE; } /*! @@ -1165,36 +1100,33 @@ static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) * \param[in,out] pOpenDev The NvKmsPerOpenDev to free. 
*/ void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDev *pOpenDev) -{ - struct NvKmsPerOpenDisp *pOpenDisp; - NvKmsGenericHandle disp; + struct NvKmsPerOpenDev *pOpenDev) { + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - if (pOpenDev == NULL) { - return; - } + if (pOpenDev == NULL) { + return; + } - nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles); + nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, - pOpenDisp, disp) { - ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); - } + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, pOpenDisp, disp) { + ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); + } - nvEvoDestroyApiHandles(&pOpenDev->dispHandles); + nvEvoDestroyApiHandles(&pOpenDev->dispHandles); - nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle); + nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle); - nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles); + nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles); - nvEvoDestroyApiHandles(&pOpenDev->swapGroupHandles); + nvEvoDestroyApiHandles(&pOpenDev->swapGroupHandles); - nvFree(pOpenDev); + nvFree(pOpenDev); } - /*! * Allocate and initialize an NvKmsPerOpenDev. * @@ -1209,68 +1141,66 @@ void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, * On failure, return NULL. */ struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, - NVDevEvoPtr pDevEvo, NvBool isPrivileged) -{ - struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev)); - NVDispEvoPtr pDispEvo; - NvU32 disp; + NVDevEvoPtr pDevEvo, + NvBool isPrivileged) { + struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev)); + NVDispEvoPtr pDispEvo; + NvU32 disp; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - if (pOpenDev == NULL) { - goto fail; + if (pOpenDev == NULL) { + goto fail; + } + + pOpenDev->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev); + + if (pOpenDev->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenDev->pDevEvo = pDevEvo; + + if (!nvEvoInitApiHandles(&pOpenDev->dispHandles, ARRAY_LEN(pOpenDev->disp))) { + goto fail; + } + + if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) { + nvAssert(!"More disps on this device than NVKMS can handle."); + goto fail; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) { + if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) { + goto fail; } + } - pOpenDev->nvKmsApiHandle = - nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev); + if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) { + goto fail; + } - if (pOpenDev->nvKmsApiHandle == 0) { - goto fail; - } + pOpenDev->isPrivileged = isPrivileged; + if (pOpenDev->isPrivileged) { + AssignFullNvKmsPermissions(pOpenDev); + } - pOpenDev->pDevEvo = pDevEvo; + if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) { + goto fail; + } - if (!nvEvoInitApiHandles(&pOpenDev->dispHandles, - ARRAY_LEN(pOpenDev->disp))) { - goto fail; - } + if (!nvEvoInitApiHandles(&pOpenDev->swapGroupHandles, 4)) { + goto fail; + } - if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) { - nvAssert(!"More disps on this device than NVKMS can handle."); - goto fail; - } - - FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) { - if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) { - goto 
fail; - } - } - - if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) { - goto fail; - } - - pOpenDev->isPrivileged = isPrivileged; - if (pOpenDev->isPrivileged) { - AssignFullNvKmsPermissions(pOpenDev); - } - - if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) { - goto fail; - } - - if (!nvEvoInitApiHandles(&pOpenDev->swapGroupHandles, 4)) { - goto fail; - } - - return pOpenDev; + return pOpenDev; fail: - nvFreePerOpenDev(pOpen, pOpenDev); - return NULL; + nvFreePerOpenDev(pOpen, pOpenDev); + return NULL; } - /*! * Assign NvKmsPerOpen::type. * @@ -1279,1059 +1209,983 @@ fail: */ static NvBool AssignNvKmsPerOpenType(struct NvKmsPerOpen *pOpen, enum NvKmsPerOpenType type, - NvBool allowRedundantAssignment) -{ - if ((pOpen->type == type) && allowRedundantAssignment) { - return TRUE; - } - - if (pOpen->type != NvKmsPerOpenTypeUndefined) { - return FALSE; - } - - switch (type) { - case NvKmsPerOpenTypeIoctl: - nvListInit(&pOpen->ioctl.eventList); - - if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) { - return FALSE; - } - - if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) { - nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); - return FALSE; - } - - nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList); - break; - - case NvKmsPerOpenTypeGrantSurface: - /* Nothing to do, here. */ - break; - - case NvKmsPerOpenTypeGrantSwapGroup: - /* Nothing to do, here. */ - break; - - case NvKmsPerOpenTypeGrantPermissions: - /* Nothing to do, here. */ - break; - - case NvKmsPerOpenTypeUnicastEvent: - /* Nothing to do, here. */ - break; - - case NvKmsPerOpenTypeUndefined: - nvAssert(!"unexpected NvKmsPerOpenType"); - break; - } - - pOpen->type = type; + NvBool allowRedundantAssignment) { + if ((pOpen->type == type) && allowRedundantAssignment) { return TRUE; + } + + if (pOpen->type != NvKmsPerOpenTypeUndefined) { + return FALSE; + } + + switch (type) { + case NvKmsPerOpenTypeIoctl: + nvListInit(&pOpen->ioctl.eventList); + + if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) { + return FALSE; + } + + if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) { + nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); + return FALSE; + } + + nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList); + break; + + case NvKmsPerOpenTypeGrantSurface: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeGrantSwapGroup: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeGrantPermissions: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUnicastEvent: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUndefined: + nvAssert(!"unexpected NvKmsPerOpenType"); + break; + } + + pOpen->type = type; + return TRUE; } /*! * Return whether the PerOpen can be used as a unicast event. */ -static inline NvBool PerOpenIsValidForUnicastEvent( - const struct NvKmsPerOpen *pOpen) -{ - /* If the type is Undefined, it can be made a unicast event. */ +static inline NvBool +PerOpenIsValidForUnicastEvent(const struct NvKmsPerOpen *pOpen) { + /* If the type is Undefined, it can be made a unicast event. */ - if (pOpen->type == NvKmsPerOpenTypeUndefined) { - return TRUE; - } + if (pOpen->type == NvKmsPerOpenTypeUndefined) { + return TRUE; + } - /* - * If the type is already UnicastEvent but there is no active user, it can - * be made a unicast event. 
- */ - if ((pOpen->type == NvKmsPerOpenTypeUnicastEvent) && - (pOpen->unicastEvent.type == NvKmsUnicastEventTypeUndefined)) { - return TRUE; - } + /* + * If the type is already UnicastEvent but there is no active user, it can + * be made a unicast event. + */ + if ((pOpen->type == NvKmsPerOpenTypeUnicastEvent) && + (pOpen->unicastEvent.type == NvKmsUnicastEventTypeUndefined)) { + return TRUE; + } - return FALSE; + return FALSE; } /*! * Allocate the specified device. */ -static NvBool AllocDevice(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsAllocDeviceParams *pParams = pParamsVoid; - NVDevEvoPtr pDevEvo; - struct NvKmsPerOpenDev *pOpenDev; - NvU32 disp, apiHead; - NvU8 layer; +static NvBool AllocDevice(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsAllocDeviceParams *pParams = pParamsVoid; + NVDevEvoPtr pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvU32 disp, apiHead; + NvU8 layer; - nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); - if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) { - pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH; - return FALSE; - } + if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH; + return FALSE; + } - /* - * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times - * on the same device with the same fd. - */ - if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) { - pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; - return FALSE; - } + /* + * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times + * on the same device with the same fd. + */ + if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } - pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId); + pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId); + if (pDevEvo == NULL) { + pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status); if (pDevEvo == NULL) { - pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status); - if (pDevEvo == NULL) { - return FALSE; - } - } else { - if (!pParams->request.tryInferSliMosaicFromExistingDevice && - (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) { - pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; - return FALSE; - } - - if (pDevEvo->usesTegraDevice && - (pParams->request.deviceId != NVKMS_DEVICE_ID_TEGRA)) { - pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; - return FALSE; - } - pDevEvo->allocRefCnt++; + return FALSE; + } + } else { + if (!pParams->request.tryInferSliMosaicFromExistingDevice && + (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; } - pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */); - - if (pOpenDev == NULL) { - nvFreeDevEvo(pDevEvo); - pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; - return FALSE; + if (pDevEvo->usesTegraDevice && + (pParams->request.deviceId != NVKMS_DEVICE_ID_TEGRA)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; } + pDevEvo->allocRefCnt++; + } - /* Beyond this point, the function cannot fail. 
*/ + pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */); - if (pParams->request.enableConsoleHotplugHandling) { - pDevEvo->handleConsoleHotplugs = TRUE; - } + if (pOpenDev == NULL) { + nvFreeDevEvo(pDevEvo); + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + return FALSE; + } - pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; - pParams->reply.subDeviceMask = - NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices); - pParams->reply.numHeads = pDevEvo->numApiHeads; - pParams->reply.numDisps = pDevEvo->nDispEvo; + /* Beyond this point, the function cannot fail. */ - ct_assert(ARRAY_LEN(pParams->reply.dispHandles) == - ARRAY_LEN(pOpenDev->disp)); + if (pParams->request.enableConsoleHotplugHandling) { + pDevEvo->handleConsoleHotplugs = TRUE; + } - for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) { - pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle; - } + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.subDeviceMask = NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices); + pParams->reply.numHeads = pDevEvo->numApiHeads; + pParams->reply.numDisps = pDevEvo->nDispEvo; - pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase; + ct_assert(ARRAY_LEN(pParams->reply.dispHandles) == ARRAY_LEN(pOpenDev->disp)); - ct_assert(ARRAY_LEN(pParams->reply.layerCaps) == - ARRAY_LEN(pDevEvo->caps.layerCaps)); + for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) { + pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle; + } - for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { - pParams->reply.numLayers[apiHead] = pDevEvo->apiHead[apiHead].numLayers; - } + pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase; - for (layer = 0; - layer < ARRAY_LEN(pParams->reply.layerCaps); - layer++) { - pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer]; - } - pParams->reply.olutCaps = pDevEvo->caps.olut; + ct_assert(ARRAY_LEN(pParams->reply.layerCaps) == + ARRAY_LEN(pDevEvo->caps.layerCaps)); - pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT; - pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + pParams->reply.numLayers[apiHead] = pDevEvo->apiHead[apiHead].numLayers; + } - pParams->reply.nIsoSurfacesInVidmemOnly = - !!NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, - NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY); + for (layer = 0; layer < ARRAY_LEN(pParams->reply.layerCaps); layer++) { + pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer]; + } + pParams->reply.olutCaps = pDevEvo->caps.olut; - pParams->reply.requiresAllAllocationsInSysmem = - pDevEvo->requiresAllAllocationsInSysmem; - pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported; + pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT; + pParams->reply.requiresVrrSemaphores = + !pDevEvo->hal->caps.supportsDisplayRate; - pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask; + pParams->reply.nIsoSurfacesInVidmemOnly = !!NV5070_CTRL_SYSTEM_GET_CAP( + pDevEvo->capsBits, NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY); - pParams->reply.maxWidthInBytes = pDevEvo->caps.maxWidthInBytes; - pParams->reply.maxWidthInPixels = pDevEvo->caps.maxWidthInPixels; - pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight; - pParams->reply.cursorCompositionCaps = pDevEvo->caps.cursorCompositionCaps; + 
pParams->reply.requiresAllAllocationsInSysmem = + pDevEvo->requiresAllAllocationsInSysmem; + pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported; - pParams->reply.maxCursorSize = pDevEvo->cursorHal->caps.maxSize; + pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask; - /* NVKMS swap groups and warp&blend depends on headSurface functionality. */ - pParams->reply.supportsSwapGroups = pDevEvo->isHeadSurfaceSupported; - pParams->reply.supportsWarpAndBlend = pDevEvo->isHeadSurfaceSupported; + pParams->reply.maxWidthInBytes = pDevEvo->caps.maxWidthInBytes; + pParams->reply.maxWidthInPixels = pDevEvo->caps.maxWidthInPixels; + pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight; + pParams->reply.cursorCompositionCaps = pDevEvo->caps.cursorCompositionCaps; - pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms; + pParams->reply.maxCursorSize = pDevEvo->cursorHal->caps.maxSize; - pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes; - pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes; + /* NVKMS swap groups and warp&blend depends on headSurface functionality. */ + pParams->reply.supportsSwapGroups = pDevEvo->isHeadSurfaceSupported; + pParams->reply.supportsWarpAndBlend = pDevEvo->isHeadSurfaceSupported; - /* - * TODO: Replace the isSOCDisplay check with an RM query. See Bug 3689635. - */ - pParams->reply.displayIsGpuL2Coherent = !pDevEvo->isSOCDisplay; + pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms; - pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts; + pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes; + pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes; - pParams->reply.supportsIndependentAcqRelSemaphore = - pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore; + /* + * TODO: Replace the isSOCDisplay check with an RM query. See Bug 3689635. 
+ */ + pParams->reply.displayIsGpuL2Coherent = !pDevEvo->isSOCDisplay; - pParams->reply.supportsVblankSyncObjects = - pDevEvo->hal->caps.supportsVblankSyncObjects; + pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts; - pParams->reply.supportsVblankSemControl = pDevEvo->supportsVblankSemControl; + pParams->reply.supportsIndependentAcqRelSemaphore = + pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore; - pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + pParams->reply.supportsVblankSyncObjects = + pDevEvo->hal->caps.supportsVblankSyncObjects; - return TRUE; + pParams->reply.supportsVblankSemControl = pDevEvo->supportsVblankSemControl; + + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + + return TRUE; } -static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev) -{ - NVDeferredRequestFifoRec *pDeferredRequestFifo; - NvKmsGenericHandle handle; +static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev) { + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle handle; - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles, - pDeferredRequestFifo, - handle) { + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, handle) { - nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); + nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); - nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, - pDeferredRequestFifo); - } + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, pDeferredRequestFifo); + } } /* * Forward declaration since this function is used by * DisableRemainingVblankSyncObjects(). */ -static void DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo, - const NvU32 apiHead, - NVVblankSyncObjectRec *pVblankSyncObject, - NVEvoUpdateState *pUpdateState); +static void +DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo, const NvU32 apiHead, + NVVblankSyncObjectRec *pVblankSyncObject, + NVEvoUpdateState *pUpdateState); -static void DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDev *pOpenDev) -{ - struct NvKmsPerOpenDisp *pOpenDisp; - NvKmsGenericHandle disp; - NVVblankSyncObjectRec *pVblankSyncObject; - NvKmsVblankSyncObjectHandle handle; - NvU32 apiHead = 0; +static void +DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) { + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + NVVblankSyncObjectRec *pVblankSyncObject; + NvKmsVblankSyncObjectHandle handle; + NvU32 apiHead = 0; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - if (pOpenDev == NULL) { - return; + if (pOpenDev == NULL) { + return; + } + + /* For each pOpenDisp: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, pOpenDisp, disp) { + /* + * A single update state can handle changes across multiple heads on a + * given Disp. + */ + NVEvoUpdateState updateState = {}; + + /* For each head: */ + for (apiHead = 0; apiHead < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); + apiHead++) { + NVEvoApiHandlesRec *pHandles = + &pOpenDisp->vblankSyncObjectHandles[apiHead]; + + /* For each still-active vblank sync object: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, pVblankSyncObject, handle) { + DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead, + pVblankSyncObject, &updateState); + /* Remove the handle from the map. 
*/ + nvEvoDestroyApiHandle(pHandles, handle); + } } - /* For each pOpenDisp: */ - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, - pOpenDisp, disp) { - /* - * A single update state can handle changes across multiple heads on a - * given Disp. - */ - NVEvoUpdateState updateState = { }; - - /* For each head: */ - for (apiHead = 0; apiHead < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) { - NVEvoApiHandlesRec *pHandles = - &pOpenDisp->vblankSyncObjectHandles[apiHead]; - - /* For each still-active vblank sync object: */ - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, - pVblankSyncObject, handle) { - DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead, - pVblankSyncObject, - &updateState); - /* Remove the handle from the map. */ - nvEvoDestroyApiHandle(pHandles, handle); - } - } - - if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { - /* - * Instruct hardware to execute the staged commands from the - * ConfigureVblankSyncObject() calls (inherent in - * DisableAndCleanVblankSyncObject()) above. This will set up - * and wait for a notification that the hardware execution - * has completed. - */ - nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, - TRUE); - } + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() calls (inherent in + * DisableAndCleanVblankSyncObject()) above. This will set up + * and wait for a notification that the hardware execution + * has completed. + */ + nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, TRUE); } + } } -static void DisableRemainingVblankSemControls( - struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDev *pOpenDev) -{ - struct NvKmsPerOpenDisp *pOpenDisp; - NvKmsGenericHandle dispHandle; - NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; +static void +DisableRemainingVblankSemControls(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) { + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle dispHandle; + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; - nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, - pOpenDisp, - dispHandle) { + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, pOpenDisp, + dispHandle) { - NVVblankSemControl *pVblankSemControl; - NvKmsGenericHandle vblankSemControlHandle; + NVVblankSemControl *pVblankSemControl; + NvKmsGenericHandle vblankSemControlHandle; - FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankSemControlHandles, - pVblankSemControl, - vblankSemControlHandle) { - NvBool ret = - nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl); + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankSemControlHandles, + pVblankSemControl, + vblankSemControlHandle) { + NvBool ret = nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl); - if (!ret) { - nvAssert(!"implicit disable of vblank sem control failed."); - } - nvEvoDestroyApiHandle(&pOpenDisp->vblankSemControlHandles, - vblankSemControlHandle); - } + if (!ret) { + nvAssert(!"implicit disable of vblank sem control failed."); + } + nvEvoDestroyApiHandle(&pOpenDisp->vblankSemControlHandles, + vblankSemControlHandle); } + } } static void FreeDeviceReference(struct NvKmsPerOpen *pOpen, - struct NvKmsPerOpenDev *pOpenDev) -{ - /* Disable all client-owned vblank sync objects that still exist. 
*/ - DisableRemainingVblankSyncObjects(pOpen, pOpenDev); + struct NvKmsPerOpenDev *pOpenDev) { + /* Disable all client-owned vblank sync objects that still exist. */ + DisableRemainingVblankSyncObjects(pOpen, pOpenDev); - DisableRemainingVblankSemControls(pOpen, pOpenDev); + DisableRemainingVblankSemControls(pOpen, pOpenDev); - FreeSwapGroups(pOpenDev); + FreeSwapGroups(pOpenDev); - UnregisterDeferredRequestFifos(pOpenDev); + UnregisterDeferredRequestFifos(pOpenDev); - nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev, - &pOpenDev->surfaceHandles); + nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev, + &pOpenDev->surfaceHandles); - if (!nvFreeDevEvo(pOpenDev->pDevEvo)) { - // If this pOpenDev is the modeset owner, implicitly release it. Does - // nothing if this pOpenDev is not the modeset owner. - // - // If nvFreeDevEvo() freed the device, then it also implicitly released - // ownership. - ReleaseModesetOwnership(pOpenDev); + if (!nvFreeDevEvo(pOpenDev->pDevEvo)) { + // If this pOpenDev is the modeset owner, implicitly release it. Does + // nothing if this pOpenDev is not the modeset owner. + // + // If nvFreeDevEvo() freed the device, then it also implicitly released + // ownership. + ReleaseModesetOwnership(pOpenDev); - nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev); + nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev); - // If this pOpenDev is the modeset sub-owner, implicitly release it. - if (pOpenDev->pDevEvo->modesetSubOwner == pOpenDev) { - pOpenDev->pDevEvo->modesetSubOwner = NULL; - } + // If this pOpenDev is the modeset sub-owner, implicitly release it. + if (pOpenDev->pDevEvo->modesetSubOwner == pOpenDev) { + pOpenDev->pDevEvo->modesetSubOwner = NULL; } + } - nvFreePerOpenDev(pOpen, pOpenDev); + nvFreePerOpenDev(pOpen, pOpenDev); } /*! * Free the specified device. */ -static NvBool FreeDevice(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsFreeDeviceParams *pParams = pParamsVoid; - struct NvKmsPerOpenDev *pOpenDev; +static NvBool FreeDevice(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsFreeDeviceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; - pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); - if (pOpenDev == NULL) { - return FALSE; - } + if (pOpenDev == NULL) { + return FALSE; + } - FreeDeviceReference(pOpen, pOpenDev); + FreeDeviceReference(pOpen, pOpenDev); - return TRUE; + return TRUE; } - /*! * Get the disp data. This information should remain static for the * lifetime of the disp. 
*/ -static NvBool QueryDisp(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsQueryDispParams *pParams = pParamsVoid; - struct NvKmsPerOpenDisp *pOpenDisp; - const NVEvoSubDeviceRec *pSubDevice; - NVDispEvoPtr pDispEvo; - NvU32 connector; +static NvBool QueryDisp(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsQueryDispParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + const NVEvoSubDeviceRec *pSubDevice; + NVDispEvoPtr pDispEvo; + NvU32 connector; - pOpenDisp = GetPerOpenDisp(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle); - if (pOpenDisp == NULL) { - return FALSE; - } + pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } - nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); - pDispEvo = pOpenDisp->pDispEvo; + pDispEvo = pOpenDisp->pDispEvo; - // Don't include dynamic displays in validDpys. The data returned here is - // supposed to be static for the lifetime of the pDispEvo. - pParams->reply.validDpys = - nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays, - pDispEvo->dynamicDpyIds); - pParams->reply.bootDpys = pDispEvo->bootDisplays; - pParams->reply.muxDpys = pDispEvo->muxDisplays; - pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle; - pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList); + // Don't include dynamic displays in validDpys. The data returned here is + // supposed to be static for the lifetime of the pDispEvo. + pParams->reply.validDpys = nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays, + pDispEvo->dynamicDpyIds); + pParams->reply.bootDpys = pDispEvo->bootDisplays; + pParams->reply.muxDpys = pDispEvo->muxDisplays; + pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle; + pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList); - ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) == - ARRAY_LEN(pOpenDisp->connector)); + ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) == + ARRAY_LEN(pOpenDisp->connector)); - for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles); - connector++) { - pParams->reply.connectorHandles[connector] = - pOpenDisp->connector[connector].nvKmsApiHandle; - } + for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles); + connector++) { + pParams->reply.connectorHandles[connector] = + pOpenDisp->connector[connector].nvKmsApiHandle; + } - pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]; - if (pSubDevice != NULL) { - ct_assert(sizeof(pParams->reply.gpuString) >= - sizeof(pSubDevice->gpuString)); - nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString, - sizeof(pSubDevice->gpuString)); - } + pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]; + if (pSubDevice != NULL) { + ct_assert(sizeof(pParams->reply.gpuString) >= + sizeof(pSubDevice->gpuString)); + nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString, + sizeof(pSubDevice->gpuString)); + } - return TRUE; + return TRUE; } - /*! * Get the connector static data. This information should remain static for the * lifetime of the connector. 
*/ static NvBool QueryConnectorStaticData(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid; - struct NvKmsPerOpenConnector *pOpenConnector; - NVConnectorEvoPtr pConnectorEvo; + void *pParamsVoid) { + struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid; + struct NvKmsPerOpenConnector *pOpenConnector; + NVConnectorEvoPtr pConnectorEvo; - pOpenConnector = GetPerOpenConnector(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - pParams->request.connectorHandle); - if (pOpenConnector == NULL) { - return FALSE; - } + pOpenConnector = GetPerOpenConnector(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.connectorHandle); + if (pOpenConnector == NULL) { + return FALSE; + } - nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); - pConnectorEvo = pOpenConnector->pConnectorEvo; + pConnectorEvo = pOpenConnector->pConnectorEvo; - pParams->reply.dpyId = pConnectorEvo->displayId; - pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) || - nvConnectorIsDPSerializer(pConnectorEvo); - pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex; - pParams->reply.type = pConnectorEvo->type; - pParams->reply.typeIndex = pConnectorEvo->typeIndex; - pParams->reply.signalFormat = pConnectorEvo->signalFormat; - pParams->reply.physicalIndex = pConnectorEvo->physicalIndex; - pParams->reply.physicalLocation = pConnectorEvo->physicalLocation; + pParams->reply.dpyId = pConnectorEvo->displayId; + pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) || + nvConnectorIsDPSerializer(pConnectorEvo); + pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex; + pParams->reply.type = pConnectorEvo->type; + pParams->reply.typeIndex = pConnectorEvo->typeIndex; + pParams->reply.signalFormat = pConnectorEvo->signalFormat; + pParams->reply.physicalIndex = pConnectorEvo->physicalIndex; + pParams->reply.physicalLocation = pConnectorEvo->physicalLocation; - pParams->reply.isLvds = - (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && - (pConnectorEvo->or.protocol == - NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM); + pParams->reply.isLvds = + (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && + (pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM); - pParams->reply.locationOnChip = (pConnectorEvo->or.location == - NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP); - return TRUE; + pParams->reply.locationOnChip = + (pConnectorEvo->or.location == NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP); + return TRUE; } - /*! * Get the connector dynamic data. This information should reflects changes to * the connector over time (e.g. for DisplayPort MST devices). 
*/ static NvBool QueryConnectorDynamicData(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid; - struct NvKmsPerOpenConnector *pOpenConnector; - NVConnectorEvoPtr pConnectorEvo; - NVDispEvoPtr pDispEvo; - NVDpyEvoPtr pDpyEvo; + void *pParamsVoid) { + struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid; + struct NvKmsPerOpenConnector *pOpenConnector; + NVConnectorEvoPtr pConnectorEvo; + NVDispEvoPtr pDispEvo; + NVDpyEvoPtr pDpyEvo; - pOpenConnector = GetPerOpenConnector(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - pParams->request.connectorHandle); - if (pOpenConnector == NULL) { - return FALSE; + pOpenConnector = GetPerOpenConnector(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.connectorHandle); + if (pOpenConnector == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pConnectorEvo = pOpenConnector->pConnectorEvo; + pDispEvo = pConnectorEvo->pDispEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + pParams->reply.detectComplete = pConnectorEvo->detectComplete; + } else { + pParams->reply.detectComplete = TRUE; + } + + // Find the dynamic dpys on this connector. + pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList(); + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) { + if (pDpyEvo->pConnectorEvo == pConnectorEvo) { + pParams->reply.dynamicDpyIdList = + nvAddDpyIdToDpyIdList(pDpyEvo->id, pParams->reply.dynamicDpyIdList); } + } - nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); - - pConnectorEvo = pOpenConnector->pConnectorEvo; - pDispEvo = pConnectorEvo->pDispEvo; - - if (nvConnectorUsesDPLib(pConnectorEvo)) { - pParams->reply.detectComplete = pConnectorEvo->detectComplete; - } else { - pParams->reply.detectComplete = TRUE; - } - - // Find the dynamic dpys on this connector. - pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList(); - FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) { - if (pDpyEvo->pConnectorEvo == pConnectorEvo) { - pParams->reply.dynamicDpyIdList = - nvAddDpyIdToDpyIdList(pDpyEvo->id, - pParams->reply.dynamicDpyIdList); - } - } - - return TRUE; + return TRUE; } - /*! * Get the static data for the specified dpy. This information should * remain static for the lifetime of the dpy. */ static NvBool QueryDpyStaticData(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid; - NVDpyEvoPtr pDpyEvo; + void *pParamsVoid) { + struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; - pDpyEvo = GetPerOpenDpy(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - pParams->request.dpyId); - if (pDpyEvo == NULL) { - return FALSE; - } + pDpyEvo = GetPerOpenDpy(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle, pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } - nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); - pParams->reply.connectorHandle = - ConnectorEvoToConnectorHandle(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - pDpyEvo->pConnectorEvo); - /* - * All pConnectorEvos should have corresponding pOpenConnectors, - * so we should always be able to find the NvKmsConnectorHandle. 
- */ - nvAssert(pParams->reply.connectorHandle != 0); + pParams->reply.connectorHandle = ConnectorEvoToConnectorHandle( + pOpen, pParams->request.deviceHandle, pParams->request.dispHandle, + pDpyEvo->pConnectorEvo); + /* + * All pConnectorEvos should have corresponding pOpenConnectors, + * so we should always be able to find the NvKmsConnectorHandle. + */ + nvAssert(pParams->reply.connectorHandle != 0); - pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType; + pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType; - if (pDpyEvo->dp.addressString != NULL) { - const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1; - nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString, - NV_MIN(sizeof(pParams->reply.dpAddress), len)); - pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0'; - } + if (pDpyEvo->dp.addressString != NULL) { + const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1; + nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString, + NV_MIN(sizeof(pParams->reply.dpAddress), len)); + pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0'; + } - pParams->reply.mobileInternal = pDpyEvo->internal; - pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo); - pParams->reply.headMask = nvDpyGetPossibleApiHeadsMask(pDpyEvo); + pParams->reply.mobileInternal = pDpyEvo->internal; + pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo); + pParams->reply.headMask = nvDpyGetPossibleApiHeadsMask(pDpyEvo); - return TRUE; + return TRUE; } - /*! * Get the dynamic data for the specified dpy. This information can * change when a hotplug occurs. */ static NvBool QueryDpyDynamicData(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid; - NVDpyEvoPtr pDpyEvo; + void *pParamsVoid) { + struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; - pDpyEvo = GetPerOpenDpy(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - pParams->request.dpyId); - if (pDpyEvo == NULL) { - return FALSE; - } + pDpyEvo = GetPerOpenDpy(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle, pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } - return nvDpyGetDynamicData(pDpyEvo, pParams); + return nvDpyGetDynamicData(pDpyEvo, pParams); } /* Store a copy of the user's infoString pointer, so we can copy out to it when * we're done. */ -struct InfoStringExtraUserStateCommon -{ - NvU64 userInfoString; +struct InfoStringExtraUserStateCommon { + NvU64 userInfoString; }; /* * Allocate a kernel buffer to populate the infoString which will be copied out * to userspace upon completion. 
*/ -static NvBool InfoStringPrepUserCommon( - NvU32 infoStringSize, - NvU64 *ppInfoString, - struct InfoStringExtraUserStateCommon *pExtra) -{ - char *kernelInfoString = NULL; - - if (infoStringSize == 0) { - *ppInfoString = 0; - return TRUE; - } - - if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) { - return FALSE; - } - - if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) { - return FALSE; - } - - kernelInfoString = nvCalloc(1, infoStringSize); - if (kernelInfoString == NULL) { - return FALSE; - } - - pExtra->userInfoString = *ppInfoString; - *ppInfoString = nvKmsPointerToNvU64(kernelInfoString); +static NvBool +InfoStringPrepUserCommon(NvU32 infoStringSize, NvU64 *ppInfoString, + struct InfoStringExtraUserStateCommon *pExtra) { + char *kernelInfoString = NULL; + if (infoStringSize == 0) { + *ppInfoString = 0; return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) { + return FALSE; + } + + if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) { + return FALSE; + } + + kernelInfoString = nvCalloc(1, infoStringSize); + if (kernelInfoString == NULL) { + return FALSE; + } + + pExtra->userInfoString = *ppInfoString; + *ppInfoString = nvKmsPointerToNvU64(kernelInfoString); + + return TRUE; } /* * Copy the infoString out to userspace and free the kernel-internal buffer. */ -static NvBool InfoStringDoneUserCommon( - NvU32 infoStringSize, - NvU64 pInfoString, - NvU32 *infoStringLenWritten, - struct InfoStringExtraUserStateCommon *pExtra) -{ - char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString); - int status; - NvBool ret; +static NvBool +InfoStringDoneUserCommon(NvU32 infoStringSize, NvU64 pInfoString, + NvU32 *infoStringLenWritten, + struct InfoStringExtraUserStateCommon *pExtra) { + char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString); + int status; + NvBool ret; - if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) { - ret = TRUE; - goto done; - } + if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) { + ret = TRUE; + goto done; + } - nvAssert(*infoStringLenWritten <= infoStringSize); + nvAssert(*infoStringLenWritten <= infoStringSize); - status = nvkms_copyout(pExtra->userInfoString, - kernelInfoString, - *infoStringLenWritten); - if (status == 0) { - ret = TRUE; - } else { - ret = FALSE; - *infoStringLenWritten = 0; - } + status = nvkms_copyout(pExtra->userInfoString, kernelInfoString, + *infoStringLenWritten); + if (status == 0) { + ret = TRUE; + } else { + ret = FALSE; + *infoStringLenWritten = 0; + } done: - nvFree(kernelInfoString); + nvFree(kernelInfoString); - return ret; + return ret; } -struct NvKmsValidateModeIndexExtraUserState -{ - struct InfoStringExtraUserStateCommon common; +struct NvKmsValidateModeIndexExtraUserState { + struct InfoStringExtraUserStateCommon common; }; -static NvBool ValidateModeIndexPrepUser( - void *pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; - struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; +static NvBool ValidateModeIndexPrepUser(void *pParamsVoid, + void *pExtraUserStateVoid) { + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; - return InfoStringPrepUserCommon( - pParams->request.infoStringSize, - &pParams->request.pInfoString, - &pExtra->common); + return InfoStringPrepUserCommon(pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); } -static NvBool ValidateModeIndexDoneUser( - void 
*pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; - struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; +static NvBool ValidateModeIndexDoneUser(void *pParamsVoid, + void *pExtraUserStateVoid) { + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; - return InfoStringDoneUserCommon( - pParams->request.infoStringSize, - pParams->request.pInfoString, - &pParams->reply.infoStringLenWritten, - &pExtra->common); + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, &pExtra->common); } /*! * Validate the requested mode. */ -static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; - NVDpyEvoPtr pDpyEvo; +static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; - pDpyEvo = GetPerOpenDpy(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - pParams->request.dpyId); - if (pDpyEvo == NULL) { - return FALSE; - } + pDpyEvo = GetPerOpenDpy(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle, pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } - nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply); + nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply); - return TRUE; + return TRUE; } -struct NvKmsValidateModeExtraUserState -{ - struct InfoStringExtraUserStateCommon common; +struct NvKmsValidateModeExtraUserState { + struct InfoStringExtraUserStateCommon common; }; -static NvBool ValidateModePrepUser( - void *pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsValidateModeParams *pParams = pParamsVoid; - struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; +static NvBool ValidateModePrepUser(void *pParamsVoid, + void *pExtraUserStateVoid) { + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; - return InfoStringPrepUserCommon( - pParams->request.infoStringSize, - &pParams->request.pInfoString, - &pExtra->common); + return InfoStringPrepUserCommon(pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); } -static NvBool ValidateModeDoneUser( - void *pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsValidateModeParams *pParams = pParamsVoid; - struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; +static NvBool ValidateModeDoneUser(void *pParamsVoid, + void *pExtraUserStateVoid) { + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; - return InfoStringDoneUserCommon( - pParams->request.infoStringSize, - pParams->request.pInfoString, - &pParams->reply.infoStringLenWritten, - &pExtra->common); + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, &pExtra->common); } /*! * Validate the requested mode. 
*/ -static NvBool ValidateMode(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsValidateModeParams *pParams = pParamsVoid; - NVDpyEvoPtr pDpyEvo; +static NvBool ValidateMode(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsValidateModeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; - pDpyEvo = GetPerOpenDpy(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - pParams->request.dpyId); - if (pDpyEvo == NULL) { - return FALSE; - } + pDpyEvo = GetPerOpenDpy(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle, pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } - nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply); + nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply); + return TRUE; +} + +static NvBool CopyInOneLut(NvU64 pRampsUser, + struct NvKmsLutRamps **ppRampsKernel) { + struct NvKmsLutRamps *pRampsKernel = NULL; + int status; + + if (pRampsUser == 0) { return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(pRampsUser)) { + return FALSE; + } + + pRampsKernel = nvAlloc(sizeof(*pRampsKernel)); + if (!pRampsKernel) { + return FALSE; + } + + status = + nvkms_copyin((char *)pRampsKernel, pRampsUser, sizeof(*pRampsKernel)); + if (status != 0) { + nvFree(pRampsKernel); + return FALSE; + } + + *ppRampsKernel = pRampsKernel; + + return TRUE; } static NvBool -CopyInOneLut(NvU64 pRampsUser, struct NvKmsLutRamps **ppRampsKernel) -{ - struct NvKmsLutRamps *pRampsKernel = NULL; - int status; +CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) { + struct NvKmsLutRamps *pInputRamps = NULL; + struct NvKmsLutRamps *pOutputRamps = NULL; - if (pRampsUser == 0) { - return TRUE; - } + if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) { + goto fail; + } + if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) { + goto fail; + } - if (!nvKmsNvU64AddressIsSafe(pRampsUser)) { - return FALSE; - } + pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps); + pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps); - pRampsKernel = nvAlloc(sizeof(*pRampsKernel)); - if (!pRampsKernel) { - return FALSE; - } - - status = nvkms_copyin((char *)pRampsKernel, pRampsUser, - sizeof(*pRampsKernel)); - if (status != 0) { - nvFree(pRampsKernel); - return FALSE; - } - - *ppRampsKernel = pRampsKernel; - - return TRUE; -} - -static NvBool -CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) -{ - struct NvKmsLutRamps *pInputRamps = NULL; - struct NvKmsLutRamps *pOutputRamps = NULL; - - if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) { - goto fail; - } - if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) { - goto fail; - } - - pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps); - pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps); - - return TRUE; + return TRUE; fail: - nvFree(pInputRamps); - nvFree(pOutputRamps); - return FALSE; + nvFree(pInputRamps); + nvFree(pOutputRamps); + return FALSE; } static void -FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) -{ - struct NvKmsLutRamps *pInputRamps = - nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps); - struct NvKmsLutRamps *pOutputRamps = - nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps); +FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) { + struct NvKmsLutRamps *pInputRamps = + nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps); + struct NvKmsLutRamps *pOutputRamps = + 
nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps); - nvFree(pInputRamps); - nvFree(pOutputRamps); + nvFree(pInputRamps); + nvFree(pOutputRamps); } /* No extra user state needed for SetMode; although we lose the user pointers * for the LUT ramps after copying them in, that's okay because we don't need * to copy them back out again. */ -struct NvKmsSetModeExtraUserState -{ -}; +struct NvKmsSetModeExtraUserState {}; /*! * Copy in any data referenced by pointer for the SetMode request. Currently * this is only the LUT ramps. */ -static NvBool SetModePrepUser( - void *pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsSetModeParams *pParams = pParamsVoid; - struct NvKmsSetModeRequest *pReq = &pParams->request; - NvU32 disp, apiHead, dispFailed, apiHeadFailed; +static NvBool SetModePrepUser(void *pParamsVoid, void *pExtraUserStateVoid) { + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, apiHead, dispFailed, apiHeadFailed; - /* Iterate over all of the common LUT ramp pointers embedded in the SetMode - * request, and copy in each one. */ - for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { - for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { - struct NvKmsSetLutCommonParams *pCommonLutParams = - &pReq->disp[disp].head[apiHead].flip.lut; + /* Iterate over all of the common LUT ramp pointers embedded in the SetMode + * request, and copy in each one. */ + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[apiHead].flip.lut; - if (!CopyInLutParams(pCommonLutParams)) { - /* Remember how far we got through these loops before we - * failed, so that we can undo everything up to this point. */ - dispFailed = disp; - apiHeadFailed = apiHead; - goto fail; - } - } + if (!CopyInLutParams(pCommonLutParams)) { + /* Remember how far we got through these loops before we + * failed, so that we can undo everything up to this point. */ + dispFailed = disp; + apiHeadFailed = apiHead; + goto fail; + } } + } - return TRUE; + return TRUE; fail: - for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { - for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { - struct NvKmsSetLutCommonParams *pCommonLutParams = - &pReq->disp[disp].head[apiHead].flip.lut; + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[apiHead].flip.lut; - if (disp > dispFailed || - (disp == dispFailed && apiHead >= apiHeadFailed)) { - break; - } + if (disp > dispFailed || + (disp == dispFailed && apiHead >= apiHeadFailed)) { + break; + } - FreeCopiedInLutParams(pCommonLutParams); - } + FreeCopiedInLutParams(pCommonLutParams); } + } - return FALSE; + return FALSE; } /*! * Free buffers allocated in SetModePrepUser. 
*/ -static NvBool SetModeDoneUser( - void *pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsSetModeParams *pParams = pParamsVoid; - struct NvKmsSetModeRequest *pReq = &pParams->request; - NvU32 disp, apiHead; +static NvBool SetModeDoneUser(void *pParamsVoid, void *pExtraUserStateVoid) { + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, apiHead; - for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { - for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { - struct NvKmsSetLutCommonParams *pCommonLutParams = - &pReq->disp[disp].head[apiHead].flip.lut; + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[apiHead].flip.lut; - FreeCopiedInLutParams(pCommonLutParams); - } + FreeCopiedInLutParams(pCommonLutParams); } + } - return TRUE; + return TRUE; } /*! * Perform a modeset on the device. */ -static NvBool SetMode(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsSetModeParams *pParams = pParamsVoid; - struct NvKmsPerOpenDev *pOpenDev; +static NvBool SetMode(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; - pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); - if (pOpenDev == NULL) { - return FALSE; - } + if (pOpenDev == NULL) { + return FALSE; + } - return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev, - &pParams->request, &pParams->reply, - FALSE /* bypassComposition */, - TRUE /* doRasterLock */); + return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev, &pParams->request, + &pParams->reply, FALSE /* bypassComposition */, + TRUE /* doRasterLock */); } /*! * Set the cursor image. */ -static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsSetCursorImageParams *pParams = pParamsVoid; - struct NvKmsPerOpenDev *pOpenDev; - struct NvKmsPerOpenDisp *pOpenDisp; - NVDispEvoPtr pDispEvo; +static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsSetCursorImageParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; - if (!GetPerOpenDevAndDisp(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle, - &pOpenDev, - &pOpenDisp)) { - return FALSE; - } + if (!GetPerOpenDevAndDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle, &pOpenDev, + &pOpenDisp)) { + return FALSE; + } - pDispEvo = pOpenDisp->pDispEvo; + pDispEvo = pOpenDisp->pDispEvo; - if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { - return FALSE; - } + if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } - return nvHsIoctlSetCursorImage(pDispEvo, - pOpenDev, - &pOpenDev->surfaceHandles, - pParams->request.head, - &pParams->request.common); + return nvHsIoctlSetCursorImage(pDispEvo, pOpenDev, &pOpenDev->surfaceHandles, + pParams->request.head, + &pParams->request.common); } /*! * Change the cursor position. 
*/ -static NvBool MoveCursor(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) -{ - struct NvKmsMoveCursorParams *pParams = pParamsVoid; - struct NvKmsPerOpenDisp *pOpenDisp; - NVDispEvoPtr pDispEvo; +static NvBool MoveCursor(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsMoveCursorParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; - pOpenDisp = GetPerOpenDisp(pOpen, - pParams->request.deviceHandle, - pParams->request.dispHandle); - if (pOpenDisp == NULL) { - return FALSE; - } + pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } - pDispEvo = pOpenDisp->pDispEvo; + pDispEvo = pOpenDisp->pDispEvo; - if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { - return FALSE; - } + if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } - return nvHsIoctlMoveCursor(pDispEvo, - pParams->request.head, - &pParams->request.common); + return nvHsIoctlMoveCursor(pDispEvo, pParams->request.head, + &pParams->request.common); } /* No extra user state needed for SetLut; although we lose the user pointers * for the LUT ramps after copying them in, that's okay because we don't need * to copy them back out again. */ -struct NvKmsSetLutExtraUserState -{ -}; +struct NvKmsSetLutExtraUserState {}; /*! * Copy in any data referenced by pointer for the SetLut request. Currently * this is only the LUT ramps. */ -static NvBool SetLutPrepUser( - void *pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsSetLutParams *pParams = pParamsVoid; - struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; +static NvBool SetLutPrepUser(void *pParamsVoid, void *pExtraUserStateVoid) { + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; - return CopyInLutParams(pCommonLutParams); + return CopyInLutParams(pCommonLutParams); } /*! * Free buffers allocated in SetLutPrepUser. */ -static NvBool SetLutDoneUser( - void *pParamsVoid, - void *pExtraUserStateVoid) -{ - struct NvKmsSetLutParams *pParams = pParamsVoid; - struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; +static NvBool SetLutDoneUser(void *pParamsVoid, void *pExtraUserStateVoid) { + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; - FreeCopiedInLutParams(pCommonLutParams); + FreeCopiedInLutParams(pCommonLutParams); - return TRUE; + return TRUE; } /*! * Set the LUT on the specified head. 
*/ -static NvBool SetLut(struct NvKmsPerOpen *pOpen, - void *pParamsVoid) +static NvBool SetLut(struct NvKmsPerOpen *pOpen, void *pParamsVoid) { + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, + &pParams->request.common)) { + return FALSE; + } + + nvEvoSetLut(pDispEvo, pParams->request.head, TRUE /* kickoff */, + &pParams->request.common); + + return TRUE; +} + +static NvBool CheckLutNotifier(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) { - struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsCheckLutNotifierParams *pParams = pParamsVoid; struct NvKmsPerOpenDisp *pOpenDisp; NVDispEvoPtr pDispEvo; @@ -2348,19 +2202,16 @@ static NvBool SetLut(struct NvKmsPerOpen *pOpen, return FALSE; } - if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, - &pParams->request.common)) { - return FALSE; + if (pParams->request.waitForCompletion) { + nvEvoWaitForLUTNotifier(pDispEvo, pParams->request.head); } - nvEvoSetLut(pDispEvo, - pParams->request.head, TRUE /* kickoff */, - &pParams->request.common); + pParams->reply.complete = nvEvoIsLUTNotifierComplete(pDispEvo, + pParams->request.head); return TRUE; } - /*! * Return whether the specified head is idle. */ @@ -5029,6 +4880,7 @@ NvBool nvKmsIoctl( ENTRY(NVKMS_IOCTL_SET_CURSOR_IMAGE, SetCursorImage), ENTRY(NVKMS_IOCTL_MOVE_CURSOR, MoveCursor), ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_LUT, SetLut), + ENTRY(NVKMS_IOCTL_CHECK_LUT_NOTIFIER, CheckLutNotifier), ENTRY(NVKMS_IOCTL_IDLE_BASE_CHANNEL, IdleBaseChannel), ENTRY_CUSTOM_USER(NVKMS_IOCTL_FLIP, Flip), ENTRY(NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, diff --git a/src/nvidia/arch/nvalloc/unix/include/os-interface.h b/src/nvidia/arch/nvalloc/unix/include/os-interface.h index 4e13005e2..d2981a39d 100644 --- a/src/nvidia/arch/nvalloc/unix/include/os-interface.h +++ b/src/nvidia/arch/nvalloc/unix/include/os-interface.h @@ -62,6 +62,11 @@ struct os_work_queue; /* Each OS defines its own version of this opaque type */ typedef struct os_wait_queue os_wait_queue; +/* Flags needed by os_get_current_process_flags */ +#define OS_CURRENT_PROCESS_FLAG_NONE 0x0 +#define OS_CURRENT_PROCESS_FLAG_KERNEL_THREAD 0x1 +#define OS_CURRENT_PROCESS_FLAG_EXITING 0x2 + /* * --------------------------------------------------------------------------- * @@ -190,6 +195,7 @@ NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **); NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64); NvBool NV_API_CALL os_is_nvswitch_present (void); NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16); +NvU32 NV_API_CALL os_get_current_process_flags(void); NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **); void NV_API_CALL os_free_wait_queue (os_wait_queue *); void NV_API_CALL os_wait_uninterruptible (os_wait_queue *); diff --git a/src/nvidia/arch/nvalloc/unix/src/os.c b/src/nvidia/arch/nvalloc/unix/src/os.c index af68bc058..de2324d91 100644 --- a/src/nvidia/arch/nvalloc/unix/src/os.c +++ b/src/nvidia/arch/nvalloc/unix/src/os.c @@ -4947,6 +4947,18 @@ osGetRandomBytes return os_get_random_bytes(pBytes, numBytes); } +/* + * @brief Get current process flags.
+ */ +NvU32 +osGetCurrentProcessFlags +( + void +) +{ + return os_get_current_process_flags(); +} + /* * @brief Allocate wait queue * diff --git a/src/nvidia/generated/g_kernel_mig_manager_nvoc.h b/src/nvidia/generated/g_kernel_mig_manager_nvoc.h index 7718686a4..d385562a5 100644 --- a/src/nvidia/generated/g_kernel_mig_manager_nvoc.h +++ b/src/nvidia/generated/g_kernel_mig_manager_nvoc.h @@ -1834,6 +1834,17 @@ static inline NV_STATUS kmigmgrGetSmallestGpuInstanceSize(OBJGPU *arg1, struct K #define kmigmgrGetSmallestGpuInstanceSize(arg1, arg2, pComputeSizeFlag) kmigmgrGetSmallestGpuInstanceSize_IMPL(arg1, arg2, pComputeSizeFlag) #endif //__nvoc_kernel_mig_manager_h_disabled +NvBool kmigmgrIsGPUInstanceFlagLegal_IMPL(OBJGPU *arg1, struct KernelMIGManager *arg2, NvU32 gpuInstanceFlag); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsGPUInstanceFlagLegal(OBJGPU *arg1, struct KernelMIGManager *arg2, NvU32 gpuInstanceFlag) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsGPUInstanceFlagLegal(arg1, arg2, gpuInstanceFlag) kmigmgrIsGPUInstanceFlagLegal_IMPL(arg1, arg2, gpuInstanceFlag) +#endif //__nvoc_kernel_mig_manager_h_disabled + NV_STATUS kmigmgrGetGPUInstanceScrubberCe_IMPL(OBJGPU *arg1, struct KernelMIGManager *arg2, Device *pDevice, NvU32 *ceInst); #ifdef __nvoc_kernel_mig_manager_h_disabled diff --git a/src/nvidia/generated/g_kernel_vgpu_mgr_nvoc.h b/src/nvidia/generated/g_kernel_vgpu_mgr_nvoc.h index 63cbe6cce..51d3b57a2 100644 --- a/src/nvidia/generated/g_kernel_vgpu_mgr_nvoc.h +++ b/src/nvidia/generated/g_kernel_vgpu_mgr_nvoc.h @@ -234,6 +234,7 @@ typedef struct */ NvU64 assignedSwizzIdMask; NvU32 fractionalMultiVgpu; + NvBool isPlacementIdInfoSet; } KERNEL_PHYS_GPU_INFO; /* vGPU info received from mdev kernel module for KVM */ diff --git a/src/nvidia/generated/g_nv_name_released.h b/src/nvidia/generated/g_nv_name_released.h index 047e88aae..1436919a9 100644 --- a/src/nvidia/generated/g_nv_name_released.h +++ b/src/nvidia/generated/g_nv_name_released.h @@ -5448,6 +5448,7 @@ static const CHIPS_RELEASED sChipsReleased[] = { { 0x2C39, 0x0000, 0x0000, "NVIDIA RTX PRO 4000 Blackwell Generation Laptop GPU" }, { 0x2C58, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090 Laptop GPU" }, { 0x2C59, 0x0000, 0x0000, "NVIDIA GeForce RTX 5080 Laptop GPU" }, + { 0x2C79, 0x0000, 0x0000, "NVIDIA RTX PRO 4000 Blackwell Embedded GPU" }, { 0x2D04, 0x0000, 0x0000, "NVIDIA GeForce RTX 5060 Ti" }, { 0x2D05, 0x0000, 0x0000, "NVIDIA GeForce RTX 5060" }, { 0x2D18, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Laptop GPU" }, @@ -5458,11 +5459,13 @@ static const CHIPS_RELEASED sChipsReleased[] = { { 0x2D39, 0x0000, 0x0000, "NVIDIA RTX PRO 2000 Blackwell Generation Laptop GPU" }, { 0x2D58, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Laptop GPU" }, { 0x2D59, 0x0000, 0x0000, "NVIDIA GeForce RTX 5060 Laptop GPU" }, + { 0x2D79, 0x0000, 0x0000, "NVIDIA RTX PRO 2000 Blackwell Embedded GPU" }, { 0x2D83, 0xc791, 0x17aa, "NVIDIA GeForce RTX 5050" }, { 0x2D98, 0x0000, 0x0000, "NVIDIA GeForce RTX 5050 Laptop GPU" }, { 0x2DB8, 0x0000, 0x0000, "NVIDIA RTX PRO 1000 Blackwell Generation Laptop GPU" }, { 0x2DB9, 0x0000, 0x0000, "NVIDIA RTX PRO 500 Blackwell Generation Laptop GPU" }, { 0x2DD8, 0x0000, 0x0000, "NVIDIA GeForce RTX 5050 Laptop GPU" }, + { 0x2DF9, 0x0000, 0x0000, "NVIDIA RTX PRO 500 Blackwell Embedded GPU" }, { 0x2F04, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070" }, { 0x2F18, 0x0000, 0x0000, "NVIDIA GeForce RTX 
5070 Ti Laptop GPU" }, { 0x2F38, 0x0000, 0x0000, "NVIDIA RTX PRO 3000 Blackwell Generation Laptop GPU" }, diff --git a/src/nvidia/generated/g_os_nvoc.h b/src/nvidia/generated/g_os_nvoc.h index ee7521236..e12a77d91 100644 --- a/src/nvidia/generated/g_os_nvoc.h +++ b/src/nvidia/generated/g_os_nvoc.h @@ -211,6 +211,11 @@ typedef struct RM_PAGEABLE_SECTION { #define OS_ALLOC_PAGES_NODE_NONE 0x0 #define OS_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1 +// Flags needed by osGetCurrentProcessFlags +#define OS_CURRENT_PROCESS_FLAG_NONE 0x0 +#define OS_CURRENT_PROCESS_FLAG_KERNEL_THREAD 0x1 +#define OS_CURRENT_PROCESS_FLAG_EXITING 0x2 + // // Structures for osPackageRegistry and osUnpackageRegistry // @@ -723,6 +728,8 @@ NvS32 osImexChannelCount(void); NV_STATUS osGetRandomBytes(NvU8 *pBytes, NvU16 numBytes); +NvU32 osGetCurrentProcessFlags(void); + NV_STATUS osAllocWaitQueue(OS_WAIT_QUEUE **ppWq); void osFreeWaitQueue(OS_WAIT_QUEUE *pWq); void osWaitUninterruptible(OS_WAIT_QUEUE *pWq); diff --git a/src/nvidia/inc/kernel/core/thread_state.h b/src/nvidia/inc/kernel/core/thread_state.h index 1f721c8ed..01ac1e485 100644 --- a/src/nvidia/inc/kernel/core/thread_state.h +++ b/src/nvidia/inc/kernel/core/thread_state.h @@ -187,6 +187,8 @@ typedef struct THREAD_STATE_DB #define THREAD_STATE_FLAGS_TIMEOUT_INITED NVBIT(5) #define THREAD_STATE_FLAGS_DEVICE_INIT NVBIT(7) #define THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED NVBIT(8) +#define THREAD_STATE_FLAGS_IS_KERNEL_THREAD NVBIT(9) +#define THREAD_STATE_FLAGS_IS_EXITING NVBIT(10) // These Threads run exclusively between a conditional acquire #define THREAD_STATE_FLAGS_EXCLUSIVE_RUNNING (THREAD_STATE_FLAGS_IS_ISR | \ diff --git a/src/nvidia/src/kernel/core/thread_state.c b/src/nvidia/src/kernel/core/thread_state.c index 51759ece2..24dc3a44b 100644 --- a/src/nvidia/src/kernel/core/thread_state.c +++ b/src/nvidia/src/kernel/core/thread_state.c @@ -590,6 +590,8 @@ static NV_STATUS _threadStateInitCommon(THREAD_STATE_NODE *pThreadNode, NvU32 fl */ void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags) { + NvU32 osFlags; + // Isrs should be using threadStateIsrInit().
NV_ASSERT_OR_RETURN_VOID((flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS | THREAD_STATE_FLAGS_IS_ISR | @@ -599,6 +601,14 @@ void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags) if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) return; + osFlags = osGetCurrentProcessFlags(); + + if (osFlags & OS_CURRENT_PROCESS_FLAG_KERNEL_THREAD) + flags |= THREAD_STATE_FLAGS_IS_KERNEL_THREAD; + + if (osFlags & OS_CURRENT_PROCESS_FLAG_EXITING) + flags |= THREAD_STATE_FLAGS_IS_EXITING; + // Use common initialization logic (stack-allocated) // Note: Legacy void API ignores errors for backward compatibility _threadStateInitCommon(pThreadNode, flags, NV_FALSE); diff --git a/src/nvidia/src/kernel/gpu/external_device/arch/kepler/kern_gsync_p2060.c b/src/nvidia/src/kernel/gpu/external_device/arch/kepler/kern_gsync_p2060.c index 5c5e2dba5..e824310f4 100644 --- a/src/nvidia/src/kernel/gpu/external_device/arch/kepler/kern_gsync_p2060.c +++ b/src/nvidia/src/kernel/gpu/external_device/arch/kepler/kern_gsync_p2060.c @@ -1213,6 +1213,8 @@ gsyncReadUniversalFrameCount_P2060 OBJTMR *pTmpTmr = NULL; OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NV_CHECK_OR_RETURN(LEVEL_INFO, gsyncIsFrameLocked_P2060(pThis), NV_ERR_INVALID_STATE); + if (!(pThis->FrameCountData.iface == NV_P2060_MAX_IFACES_PER_GSYNC)) { // @@ -1257,7 +1259,8 @@ gsyncReadUniversalFrameCount_P2060 // P2060 refreshrate is in 0.00001 Hz, so divide by 10000 to get Hz. // divide 1000000 by refreshRate to get the frame time in us. // - pThis->FrameCountData.frameTime = 1000000 / (pThis->RefreshRate/10000); //in us + NV_CHECK_OR_RETURN(LEVEL_INFO, pThis->RefreshRate >= 10, NV_ERR_INVALID_STATE); + pThis->FrameCountData.frameTime = 1000*1000*1000 / (pThis->RefreshRate/10); //in us // // Enable FrameCountTimerService to verify FrameCountData.initialDifference. 
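The kern_gsync_p2060.c frame-time change above does two things: it rejects refresh rates below 10 so the divisor RefreshRate/10 can never truncate to zero, and it divides RefreshRate by 10 instead of 10000 so far less precision is lost to integer truncation before the final division. A standalone sketch of the arithmetic (plain C, not part of the patch), assuming RefreshRate stores the rate as Hz * 10000 as the old divisor of 10000 suggests, e.g. 599400 for a hypothetical 59.94 Hz mode:

/* Illustrative only: compares the old and new frame-time computations. */
#include <stdio.h>

int main(void)
{
    unsigned int refreshRate = 599400;  /* assumed example: 59.94 Hz stored as Hz * 10000 */

    /* Old form: refreshRate/10000 truncates 59.94 Hz down to 59 Hz before dividing. */
    unsigned int oldUs = 1000000 / (refreshRate / 10000);           /* 1000000 / 59    = 16949 us */

    /* New form: divide by 10 first, keeping two more digits of the rate. */
    unsigned int newUs = 1000 * 1000 * 1000 / (refreshRate / 10);   /* 1000000000 / 59940 = 16683 us */

    printf("old = %u us, new = %u us\n", oldUs, newUs);             /* true frame time ~16683.35 us */
    return 0;
}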
diff --git a/src/nvidia/src/kernel/gpu/fsp/kern_fsp.c b/src/nvidia/src/kernel/gpu/fsp/kern_fsp.c index 27a46a6cf..0b9c26cff 100644 --- a/src/nvidia/src/kernel/gpu/fsp/kern_fsp.c +++ b/src/nvidia/src/kernel/gpu/fsp/kern_fsp.c @@ -242,6 +242,7 @@ kfspStateUnload_IMPL NvU32 flags ) { + kfspReleaseProxyImage(pGpu, pKernelFsp); return NV_OK; } diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c b/src/nvidia/src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c index 2e0ee9f2d..838ac39d4 100644 --- a/src/nvidia/src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c +++ b/src/nvidia/src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c @@ -161,7 +161,7 @@ NvBool kmigmgrIsGPUInstanceFlagValid_GA100 ( OBJGPU *pGpu, - KernelMIGManager *pGrMgr, + KernelMIGManager *pKernelMIGManager, NvU32 gpuInstanceFlag ) { @@ -173,6 +173,10 @@ kmigmgrIsGPUInstanceFlagValid_GA100 NvU32 gfxSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _GFX_SIZE, gpuInstanceFlag); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + kmigmgrIsGPUInstanceFlagLegal(pGpu, pKernelMIGManager, gpuInstanceFlag), + NV_FALSE); + if (!FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA, _DEFAULT, gpuInstanceFlag)) { return NV_FALSE; @@ -186,8 +190,6 @@ kmigmgrIsGPUInstanceFlagValid_GA100 case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: break; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU mem partitioning flag 0x%x\n", - memSizeFlag); return NV_FALSE; } @@ -200,12 +202,7 @@ kmigmgrIsGPUInstanceFlagValid_GA100 case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_QUARTER: case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_06: - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_07: - return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU compute partitioning flag 0x%x\n", - computeSizeFlag); return NV_FALSE; } @@ -213,17 +210,7 @@ kmigmgrIsGPUInstanceFlagValid_GA100 { case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_FULL: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_MINI_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_QUARTER: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_06: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_07: - return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU GFX partitioning flag 0x%x\n", - gfxSizeFlag); return NV_FALSE; } diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb100.c b/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb100.c index 87287d944..0638e890b 100644 --- a/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb100.c +++ b/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb100.c @@ -50,6 +50,10 @@ kmigmgrIsGPUInstanceFlagValid_GB100 NvU32 gfxSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _GFX_SIZE, gpuInstanceFlag); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + kmigmgrIsGPUInstanceFlagLegal(pGpu, pKernelMIGManager, gpuInstanceFlag), + NV_FALSE); + if (!FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA, _DEFAULT, gpuInstanceFlag)) { return NV_FALSE; @@ -63,8 +67,6 @@ kmigmgrIsGPUInstanceFlagValid_GB100 case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: break; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU mem partitioning flag 0x%x\n", - memSizeFlag); return NV_FALSE; } @@ -78,8 +80,6 @@ 
kmigmgrIsGPUInstanceFlagValid_GB100 case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH: break; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU compute partitioning flag 0x%x\n", - computeSizeFlag); return NV_FALSE; } @@ -93,15 +93,7 @@ kmigmgrIsGPUInstanceFlagValid_GB100 break; case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_MINI_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_QUARTER: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH: - // Cannot support these sizes since there is only one GFX Capable SYSPIPE - return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU GFX partitioning flag 0x%x\n", - gfxSizeFlag); return NV_FALSE; } diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb10b.c b/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb10b.c index 2f2738d4f..08e888a6c 100644 --- a/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb10b.c +++ b/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb10b.c @@ -145,6 +145,10 @@ kmigmgrIsGPUInstanceFlagValid_GB10B NvU32 gfxSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _GFX_SIZE, gpuInstanceFlag); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + kmigmgrIsGPUInstanceFlagLegal(pGpu, pKernelMIGManager, gpuInstanceFlag), + NV_FALSE); + if (!FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA, _DEFAULT, gpuInstanceFlag)) { return NV_FALSE; @@ -155,8 +159,6 @@ kmigmgrIsGPUInstanceFlagValid_GB10B case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL: break; default: - NV_PRINTF(LEVEL_INFO, "Unrecognized GPU mem partitioning flag 0x%x\n", - memSizeFlag); return NV_FALSE; } @@ -167,8 +169,6 @@ kmigmgrIsGPUInstanceFlagValid_GB10B case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF: break; default: - NV_PRINTF(LEVEL_INFO, "Unrecognized GPU compute partitioning flag 0x%x\n", - computeSizeFlag); return NV_FALSE; } @@ -182,14 +182,7 @@ kmigmgrIsGPUInstanceFlagValid_GB10B break; case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE: break; - // Cannot support these sizes since there is only one GFX Capable SYSPIPE - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_MINI_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_QUARTER: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH: default: - NV_PRINTF(LEVEL_INFO, "Unrecognized GPU GFX partitioning flag 0x%x\n", - gfxSizeFlag); return NV_FALSE; } diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb202.c b/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb202.c index 0cf164471..e993995cc 100644 --- a/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb202.c +++ b/src/nvidia/src/kernel/gpu/mig_mgr/arch/blackwell/kmigmgr_gb202.c @@ -50,6 +50,10 @@ kmigmgrIsGPUInstanceFlagValid_GB202 NvU32 gfxSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _GFX_SIZE, gpuInstanceFlag); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + kmigmgrIsGPUInstanceFlagLegal(pGpu, pKernelMIGManager, gpuInstanceFlag), + NV_FALSE); + // If incorrect all video flag, then fail if (!(FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA, _DEFAULT, gpuInstanceFlag) || FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA, _ENABLE, gpuInstanceFlag) || @@ -64,11 +68,7 @@ kmigmgrIsGPUInstanceFlagValid_GB202 case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF: case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: - 
return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU mem partitioning flag 0x%x\n", - memSizeFlag); return NV_FALSE; } @@ -79,14 +79,7 @@ kmigmgrIsGPUInstanceFlagValid_GB202 case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF: case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_QUARTER: - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH: - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_06: - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_07: - return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU compute partitioning flag 0x%x\n", - computeSizeFlag); return NV_FALSE; } @@ -104,13 +97,7 @@ kmigmgrIsGPUInstanceFlagValid_GB202 // TODO: Move _NONE should not be supported on GB202 because we always expect SMG to enable it. case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_06: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_07: - return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU GFX partitioning flag 0x%x\n", - gfxSizeFlag); return NV_FALSE; } diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/arch/hopper/kmigmgr_gh100.c b/src/nvidia/src/kernel/gpu/mig_mgr/arch/hopper/kmigmgr_gh100.c index 71dff99f9..1ee4d5b93 100644 --- a/src/nvidia/src/kernel/gpu/mig_mgr/arch/hopper/kmigmgr_gh100.c +++ b/src/nvidia/src/kernel/gpu/mig_mgr/arch/hopper/kmigmgr_gh100.c @@ -41,7 +41,7 @@ NvBool kmigmgrIsGPUInstanceFlagValid_GH100 ( OBJGPU *pGpu, - KernelMIGManager *pGrMgr, + KernelMIGManager *pKernelMIGManager, NvU32 gpuInstanceFlag ) { @@ -52,6 +52,10 @@ kmigmgrIsGPUInstanceFlagValid_GH100 NvU32 gfxSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _GFX_SIZE, gpuInstanceFlag); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + kmigmgrIsGPUInstanceFlagLegal(pGpu, pKernelMIGManager, gpuInstanceFlag), + NV_FALSE); + if (!FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA, _DEFAULT, gpuInstanceFlag)) { return NV_FALSE; @@ -65,8 +69,6 @@ kmigmgrIsGPUInstanceFlagValid_GH100 case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: break; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU mem partitioning flag 0x%x\n", - memSizeFlag); return NV_FALSE; } @@ -79,29 +81,14 @@ kmigmgrIsGPUInstanceFlagValid_GH100 case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_QUARTER: case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_06: - case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_07: - return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU compute partitioning flag 0x%x\n", - computeSizeFlag); return NV_FALSE; } switch (gfxSizeFlag) { case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE: break; - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_FULL: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_MINI_HALF: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_QUARTER: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_06: - case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_07: - return NV_FALSE; default: - NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU GFX partitioning flag 0x%x\n", - gfxSizeFlag); return NV_FALSE; } return NV_TRUE; diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c 
b/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c index 17756ea10..82b54b499 100644 --- a/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c +++ b/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c @@ -9731,3 +9731,81 @@ kmigmgrComputeProfileGetCapacity_IMPL return NV_OK; } +/*! + * @brief Verifies that none of gpuInstanceFlag fields have unknown values. + */ +NvBool +kmigmgrIsGPUInstanceFlagLegal_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 gpuInstanceFlag +) +{ + NvU32 memSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, gpuInstanceFlag); + NvU32 computeSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _COMPUTE_SIZE, gpuInstanceFlag); + NvU32 gfxSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _GFX_SIZE, gpuInstanceFlag); + NvU32 allMediaFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _REQ_ALL_MEDIA, gpuInstanceFlag); + + switch (memSizeFlag) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL: + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER: + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU mem partitioning flag 0x%x\n", + memSizeFlag); + return NV_FALSE; + } + + switch (computeSizeFlag) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_FULL: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_QUARTER: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_06: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_07: + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU compute partitioning flag 0x%x\n", + computeSizeFlag); + return NV_FALSE; + } + + switch (gfxSizeFlag) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE: + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_FULL: + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_MINI_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_QUARTER: + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH: + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_06: + case NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_07: + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU GFX partitioning flag 0x%x\n", + gfxSizeFlag); + return NV_FALSE; + } + + switch (allMediaFlag) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA_DEFAULT: + case NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA_DISABLE: + case NV2080_CTRL_GPU_PARTITION_FLAG_REQ_ALL_MEDIA_ENABLE: + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU all media partitioning flag 0x%x\n", + allMediaFlag); + return NV_FALSE; + } + + return NV_TRUE; +} + diff --git a/src/nvidia/src/kernel/mem_mgr/mem_multicast_fabric.c b/src/nvidia/src/kernel/mem_mgr/mem_multicast_fabric.c index 773f3a018..0ed035530 100644 --- a/src/nvidia/src/kernel/mem_mgr/mem_multicast_fabric.c +++ b/src/nvidia/src/kernel/mem_mgr/mem_multicast_fabric.c @@ -145,10 +145,19 @@ typedef struct mem_multicast_fabric_descriptor // // Boolean to be set when an Inband request has been sent to FM - // and is currently in progress + // and is currently in progress. + // + // This flag is only set on the prime object. 
// NvBool bInbandReqInProgress; + // + // Boolean set when an inband request response is received. + // + // This flag is set on both prime and non-prime objects. + // + NvBool bResponseReceived; + // // Request Id associated with the Inband request in progress when // bInbandReqSent is set to true @@ -1094,27 +1103,43 @@ _memMulticastFabricDescriptorFree MEM_MULTICAST_FABRIC_TEAM_RELEASE_REQUEST); } + // + // In the process cleanup path or a deferred cleanup path, skip waiting on + // the clients which are being torn down. The process could be already in + // uninterruptible state at that point, and if for some reason GFM doesn't + // respond, we will be stuck indefinitely in the wait queue. Instead march + // on, and handle the cleanup later (see memorymulticastfabricTeamSetupResponseCallback) + // whenever GFM responds. + // + // This wait is really required for interruptible cases like NvRmFree(), + // to mimic a synchronous op. + // if (pMulticastFabricDesc->bInbandReqInProgress) { - OS_WAIT_QUEUE *pWq; THREAD_STATE_NODE *pThreadNode = NULL; THREAD_STATE_FREE_CALLBACK freeCallback; - NV_ASSERT_OK(osAllocWaitQueue(&pWq)); + NV_ASSERT_OK(threadStateGetCurrent(&pThreadNode, NULL)); - if (pWq != NULL) + if (!((pThreadNode->flags & THREAD_STATE_FLAGS_IS_EXITING) || + (pThreadNode->flags & THREAD_STATE_FLAGS_IS_KERNEL_THREAD))) { - NV_ASSERT_OK(fabricMulticastCleanupCacheInsert(pFabric, - pMulticastFabricDesc->inbandReqId, - pWq)); + OS_WAIT_QUEUE *pWq = NULL; + NV_ASSERT_OK(osAllocWaitQueue(&pWq)); - NV_ASSERT_OK(threadStateGetCurrent(&pThreadNode, NULL)); + if (pWq != NULL) + { + NV_ASSERT_OK(fabricMulticastCleanupCacheInsert(pFabric, + pMulticastFabricDesc->inbandReqId, + pWq)); - freeCallback.pCb = fabricMulticastWaitOnTeamCleanupCallback; - freeCallback.pCbData = (void *)pMulticastFabricDesc->inbandReqId; - NV_ASSERT_OK(threadStateEnqueueCallbackOnFree(pThreadNode, - &freeCallback)); + freeCallback.pCb = fabricMulticastWaitOnTeamCleanupCallback; + freeCallback.pCbData = (void *)pMulticastFabricDesc->inbandReqId; + + NV_ASSERT_OK(threadStateEnqueueCallbackOnFree(pThreadNode, + &freeCallback)); + } } } @@ -1517,8 +1542,30 @@ _memMulticastFabricAttachGpuPostProcessor goto installMemDesc; } - if (mcTeamStatus != NV_OK) + // + // If MSE (or any entity on the other side) replies back with + // NV_ERR_BUSY_RETRY, resubmit the request. + // + if (_memMulticastFabricIsPrime(pMulticastFabricDesc->allocFlags) && + (mcTeamStatus == NV_ERR_BUSY_RETRY)) + { + status = _memMulticastFabricSendInbandRequest(pGpu, pMulticastFabricDesc, + MEM_MULTICAST_FABRIC_TEAM_SETUP_REQUEST); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "MCFLA retry failed %x\n", status); + goto installMemDesc; + } + else + { + // Don't install memdesc as we are successfully retrying... 
+ return; + } + } + else if (mcTeamStatus != NV_OK) + { goto installMemDesc; + } if (!_memMulticastFabricIsPrime(pMulticastFabricDesc->allocFlags)) pMulticastFabricDesc->allocSize = mcAddressSize; @@ -1632,8 +1679,6 @@ memorymulticastfabricTeamSetupResponseCallback if (pMulticastFabricDesc != NULL) { - fabricMulticastSetupCacheDelete(pFabric, requestId); - // // We have now safely acquired pMulticastFabricDesc->lock, which // should block the destructor from removing pMulticastFabricDesc @@ -1647,14 +1692,20 @@ memorymulticastfabricTeamSetupResponseCallback // portSyncRwLockReleaseWrite(pFabric->pMulticastFabricModuleLock); - pMulticastFabricDesc->bInbandReqInProgress = NV_FALSE; + if (!pMulticastFabricDesc->bResponseReceived) + { + pMulticastFabricDesc->bInbandReqInProgress = NV_FALSE; - _memMulticastFabricAttachGpuPostProcessor(pGpu, - pMulticastFabricDesc, - mcTeamStatus, - mcTeamHandle, - mcAddressBase, - mcAddressSize); + // This call sets `bInbandReqInProgress` on a successful retry. + _memMulticastFabricAttachGpuPostProcessor(pGpu, + pMulticastFabricDesc, + mcTeamStatus, + mcTeamHandle, + mcAddressBase, + mcAddressSize); + + pMulticastFabricDesc->bResponseReceived = !pMulticastFabricDesc->bInbandReqInProgress; + } portSyncRwLockReleaseWrite(pMulticastFabricDesc->pLock); } diff --git a/src/nvidia/src/kernel/os/os_init.c b/src/nvidia/src/kernel/os/os_init.c index fd0b40620..6f6e14277 100644 --- a/src/nvidia/src/kernel/os/os_init.c +++ b/src/nvidia/src/kernel/os/os_init.c @@ -295,9 +295,9 @@ NvU32 vgpuDevReadReg032( OBJSYS *pSys = SYS_GET_INSTANCE(); OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys); - if(!pGpu || - !pHypervisor || !pHypervisor->bDetected || !pHypervisor->bIsHVMGuest || - !GPU_GET_KERNEL_BIF(pGpu)) + + if (!pGpu || !GPU_GET_KERNEL_BIF(pGpu) || + (!IS_VIRTUAL(pGpu) && !(pHypervisor && pHypervisor->bDetected && pHypervisor->bIsHVMGuest))) { *vgpuHandled = NV_FALSE; return 0; diff --git a/src/nvidia/src/kernel/rmapi/nv_gpu_ops.c b/src/nvidia/src/kernel/rmapi/nv_gpu_ops.c index 5032c849e..3c519082f 100644 --- a/src/nvidia/src/kernel/rmapi/nv_gpu_ops.c +++ b/src/nvidia/src/kernel/rmapi/nv_gpu_ops.c @@ -280,6 +280,7 @@ struct gpuDevice NvU32 accessCounterBufferClass; NvBool isTccMode; NvBool isWddmMode; + NvBool isMigDevice; struct gpuSession *session; gpuFbInfo fbInfo; gpuInfo info; @@ -1014,7 +1015,7 @@ static NvU64 makeDeviceDescriptorKey(const struct gpuDevice *device) NvU64 key = device->deviceInstance; NvU64 swizzid = device->info.smcSwizzId; - if (device->info.smcEnabled) + if (device->isMigDevice) key |= (swizzid << 32); return key; @@ -2110,6 +2111,7 @@ NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session, device->deviceInstance = gpuIdInfoParams.deviceInstance; device->subdeviceInstance = gpuIdInfoParams.subdeviceInstance; device->gpuId = gpuIdInfoParams.gpuId; + device->isMigDevice = bCreateSmcPartition; portMemCopy(&device->info, sizeof(device->info), pGpuInfo, sizeof(*pGpuInfo)); diff --git a/src/nvidia/src/kernel/virtualization/kernel_vgpu_mgr.c b/src/nvidia/src/kernel/virtualization/kernel_vgpu_mgr.c index 4c68b98fd..730da6a2c 100644 --- a/src/nvidia/src/kernel/virtualization/kernel_vgpu_mgr.c +++ b/src/nvidia/src/kernel/virtualization/kernel_vgpu_mgr.c @@ -3076,6 +3076,10 @@ kvgpumgrSetSupportedPlacementIds(OBJGPU *pGpu) if (!pPgpuInfo->heterogeneousTimesliceSizesSupported && !pPgpuInfo->homogeneousPlacementSupported) return rmStatus; + /* skip placement Id calculation if already set */ + if (pPgpuInfo->isPlacementIdInfoSet) + return rmStatus; + 
hostChannelCount = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pKernelFifo->ppChidMgr[0]); pKernelVgpuTypePlacementInfo = &pPgpuInfo->kernelVgpuTypePlacementInfo; @@ -3230,6 +3234,9 @@ kvgpumgrSetSupportedPlacementIds(OBJGPU *pGpu) } } + if (rmStatus == NV_OK) + pPgpuInfo->isPlacementIdInfoSet = NV_TRUE; + return rmStatus; } diff --git a/version.mk b/version.mk index 4357c3248..2c04091e4 100644 --- a/version.mk +++ b/version.mk @@ -1,4 +1,4 @@ -NVIDIA_VERSION = 570.195.03 +NVIDIA_VERSION = 570.207 # This file. VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))
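The kvgpumgrSetSupportedPlacementIds() change in kernel_vgpu_mgr.c above makes placement-ID setup idempotent: the function now returns early when the placement information was already derived, and records that fact (isPlacementIdInfoSet) only once the derivation finishes with NV_OK. A minimal standalone sketch of that compute-once pattern (hypothetical names and data, not the driver's actual types):

#include <stdbool.h>

struct placement_info {
    bool isSet;     /* plays the role of isPlacementIdInfoSet */
    int  ids[4];    /* placeholder for the derived placement data */
};

/* Returns 0 on success; after the first success, later calls are no-ops. */
int setup_placement_info(struct placement_info *info)
{
    if (info->isSet)
        return 0;               /* already computed: skip recalculation */

    for (int i = 0; i < 4; i++) /* stand-in for the real derivation */
        info->ids[i] = i * 2;

    info->isSet = true;         /* mark as set only after the derivation succeeded */
    return 0;
}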