Compare commits

..

1 Commits

Author SHA1 Message Date
Maneet Singh
4b30f4cde1 570.169 2025-06-16 19:11:43 -07:00
44 changed files with 50196 additions and 49916 deletions

View File

@@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 570.158.01.
version 570.169.
## How to Build
@@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
570.158.01 driver release. This can be achieved by installing
570.169 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@@ -185,7 +185,7 @@ table below).
For details on feature support and limitations, see the NVIDIA GPU driver
end user README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/570.158.01/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/570.169/README/kernel_open.html
For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
Package for more details.
@@ -966,6 +966,14 @@ Subsystem Device ID.
| NVIDIA GeForce RTX 5070 Ti | 2C05 |
| NVIDIA GeForce RTX 5090 Laptop GPU | 2C18 |
| NVIDIA GeForce RTX 5080 Laptop GPU | 2C19 |
| NVIDIA RTX PRO 4500 Blackwell | 2C31 1028 2051 |
| NVIDIA RTX PRO 4500 Blackwell | 2C31 103C 2051 |
| NVIDIA RTX PRO 4500 Blackwell | 2C31 10DE 2051 |
| NVIDIA RTX PRO 4500 Blackwell | 2C31 17AA 2051 |
| NVIDIA RTX PRO 4000 Blackwell | 2C34 1028 2052 |
| NVIDIA RTX PRO 4000 Blackwell | 2C34 103C 2052 |
| NVIDIA RTX PRO 4000 Blackwell | 2C34 10DE 2052 |
| NVIDIA RTX PRO 4000 Blackwell | 2C34 17AA 2052 |
| NVIDIA RTX PRO 5000 Blackwell Generation Laptop GPU | 2C38 |
| NVIDIA RTX PRO 4000 Blackwell Generation Laptop GPU | 2C39 |
| NVIDIA GeForce RTX 5090 Laptop GPU | 2C58 |

View File

@@ -79,7 +79,7 @@ ccflags-y += -I$(src)/common/inc
ccflags-y += -I$(src)
ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
ccflags-y += -D__KERNEL__ -DMODULE -DNVRM
ccflags-y += -DNV_VERSION_STRING=\"570.158.01\"
ccflags-y += -DNV_VERSION_STRING=\"570.169\"
ifneq ($(SYSSRCHOST1X),)
ccflags-y += -I$(SYSSRCHOST1X)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -297,9 +297,21 @@ static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
#endif
}
#define NV_CAN_CALL_VMA_START_WRITE 1
#if !NV_CAN_CALL_VMA_START_WRITE
/*
* Commit 45ad9f5290dc updated vma_start_write() to call __vma_start_write().
*/
void nv_vma_start_write(struct vm_area_struct *);
#endif
static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
#if !NV_CAN_CALL_VMA_START_WRITE
nv_vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) |= flags;
#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_set(vma, flags);
#else
vma->vm_flags |= flags;
@@ -308,7 +320,10 @@ static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
#if !NV_CAN_CALL_VMA_START_WRITE
nv_vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_clear(vma, flags);
#else
vma->vm_flags &= ~flags;

View File

@@ -62,6 +62,20 @@
#undef NV_DRM_FENCE_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif
#endif
#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
(defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))

View File

@@ -58,16 +58,6 @@ typedef struct nv_timer nv_drm_timer;
#error "Need to define kernel timer callback primitives for this OS"
#endif
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif
struct page;
/* Set to true when the atomic modeset feature is enabled. */

View File

@@ -38,6 +38,7 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group

View File

@@ -308,12 +308,13 @@ void uvm_ats_smmu_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space, NvU64 addr,
NV_STATUS uvm_ats_sva_add_gpu(uvm_parent_gpu_t *parent_gpu)
{
#if NV_IS_EXPORT_SYMBOL_GPL_iommu_dev_enable_feature
int ret;
ret = iommu_dev_enable_feature(&parent_gpu->pci_dev->dev, IOMMU_DEV_FEAT_SVA);
if (ret)
return errno_to_nv_status(ret);
#endif
if (UVM_ATS_SMMU_WAR_REQUIRED())
return uvm_ats_smmu_war_init(parent_gpu);
else
@@ -325,7 +326,9 @@ void uvm_ats_sva_remove_gpu(uvm_parent_gpu_t *parent_gpu)
if (UVM_ATS_SMMU_WAR_REQUIRED())
uvm_ats_smmu_war_deinit(parent_gpu);
#if NV_IS_EXPORT_SYMBOL_GPL_iommu_dev_disable_feature
iommu_dev_disable_feature(&parent_gpu->pci_dev->dev, IOMMU_DEV_FEAT_SVA);
#endif
}
NV_STATUS uvm_ats_sva_bind_gpu(uvm_gpu_va_space_t *gpu_va_space)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -867,3 +867,75 @@ void NV_API_CALL nv_set_safe_to_mmap_locked(
nvl->safe_to_mmap = safe_to_mmap;
}
#if !NV_CAN_CALL_VMA_START_WRITE
/*
 * Acquire the per-VMA write lock by open-coding the kernel's
 * __vma_enter_locked() path, for kernels where __vma_start_write() is
 * GPL-only and not callable from this module (see the
 * is_export_symbol_gpl___vma_start_write conftest).
 *
 * Adds VMA_LOCK_OFFSET to vma->vm_refcnt to mark a pending writer, then
 * sleeps (TASK_UNINTERRUPTIBLE) until the refcount drains to the target
 * value, i.e. until existing readers have left the VMA.
 *
 * Returns NV_TRUE on success; NV_FALSE if the VMA was already detached
 * (vm_refcnt was zero) or the wait was interrupted by a pending signal.
 *
 * NOTE(review): mirrors the upstream implementation after kernel commit
 * 45ad9f5290dc — keep in sync with the kernel's version.
 */
static NvBool nv_vma_enter_locked(struct vm_area_struct *vma, NvBool detaching)
{
NvU32 tgt_refcnt = VMA_LOCK_OFFSET;
NvBool interrupted = NV_FALSE;
// Non-detach callers expect one extra reference to remain — presumably
// the VMA's own reference; matches upstream behavior (TODO confirm).
if (!detaching)
{
tgt_refcnt++;
}
// refcount_add_not_zero() fails when vm_refcnt is already zero, i.e.
// the VMA is being torn down — bail out without touching it.
if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt))
{
return NV_FALSE;
}
// Lockdep annotation only; the real "lock" is the refcount bias above.
rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
prepare_to_rcuwait(&vma->vm_mm->vma_writer_wait);
// Sleep until readers drain and vm_refcnt reaches the target value.
for (;;)
{
set_current_state(TASK_UNINTERRUPTIBLE);
if (refcount_read(&vma->vm_refcnt) == tgt_refcnt)
break;
if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
{
interrupted = NV_TRUE;
break;
}
schedule();
}
// This is an open-coded version of finish_rcuwait().
rcu_assign_pointer(vma->vm_mm->vma_writer_wait.task, NULL);
__set_current_state(TASK_RUNNING);
if (interrupted)
{
// Clean up on error: release refcount and dep_map
refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
return NV_FALSE;
}
lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
return NV_TRUE;
}
/*
 * Open-coded replacement for the kernel's vma_start_write(), used when
 * __vma_start_write() cannot be called from this module (see
 * NV_CAN_CALL_VMA_START_WRITE).  Write-locks @vma before its flags are
 * modified by nv_vm_flags_set()/nv_vm_flags_clear().
 */
void nv_vma_start_write(struct vm_area_struct *vma)
{
NvU32 mm_lock_seq;
NvBool locked;
// Already write-locked under this mm's lock sequence: nothing to do.
if (__is_vma_write_locked(vma, &mm_lock_seq))
return;
locked = nv_vma_enter_locked(vma, NV_FALSE);
// Publish the write-lock sequence; done even when the lock attempt was
// interrupted, matching the upstream behavior (TODO confirm).
WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
if (locked)
{
// Drop the writer bias taken by nv_vma_enter_locked().  The VMA must
// not become detached here, hence the WARN if the refcount hits zero.
NvBool detached;
detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
WARN_ON_ONCE(detached);
}
}
EXPORT_SYMBOL(nv_vma_start_write);
#endif // !NV_CAN_CALL_VMA_START_WRITE

View File

@@ -236,6 +236,9 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_pci_ats_supported
NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_hrtimer_setup
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_iommu_dev_enable_feature
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_iommu_dev_disable_feature
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops

View File

@@ -93,6 +93,10 @@ namespace DisplayPort
bool bMstTimeslotBug4968411;
bool bApplyManualTimeslotBug4968411;
bool bApplyStuffDummySymbolsWAR;
bool bStuffDummySymbolsFor128b132b;
bool bStuffDummySymbolsFor8b10b;
// Do not enable downspread while link training.
bool bDisableDownspread;

View File

@@ -391,6 +391,10 @@ namespace DisplayPort
return maxModeBwRequired;
}
bool getStuffDummySymbolsFor128b132b() const { return processedEdid.WARData.bStuffDummySymbolsFor128b132b; }
bool getStuffDummySymbolsFor8b10b() const { return processedEdid.WARData.bStuffDummySymbolsFor8b10b; }
bool getApplyStuffDummySymbolsWAR() const { return processedEdid.WARFlags.bApplyStuffDummySymbolsWAR; }
virtual void queryGUID2();
virtual bool getSDPExtnForColorimetrySupported();

View File

@@ -169,6 +169,7 @@ namespace DisplayPort
bool bDisableDownspread;
bool bForceHeadShutdown;
bool bDisableDscMaxBppLimit;
bool bApplyStuffDummySymbolsWAR;
bool bSkipCableIdCheck;
bool bAllocateManualTimeslots;
}_WARFlags;
@@ -182,6 +183,8 @@ namespace DisplayPort
unsigned maxLaneAtLowRate; // Max lane count supported at RBR
unsigned optimalLinkRate; // Optimal link rate value to override
unsigned optimalLaneCount; // Optimal lane count value to override
bool bStuffDummySymbolsFor128b132b;
bool bStuffDummySymbolsFor8b10b;
}_WARData;
_WARData WARData;

View File

@@ -434,6 +434,7 @@ namespace DisplayPort
void configureTriggerAll(NvU32 head, bool enable);
virtual bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates);
bool configureFec(const bool bEnableFec);
virtual void applyStuffDummySymbolWAR(NvU32 head, bool enable);
};
}

View File

@@ -298,6 +298,7 @@ namespace DisplayPort
virtual bool dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex){ return false; }
virtual bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates) = 0;
virtual bool configureFec(const bool bEnableFec) = 0;
virtual void applyStuffDummySymbolWAR(NvU32 head, bool enable) = 0;
};
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -133,8 +133,9 @@ namespace DisplayPort
const ModesetInfo &modesetInfo,
Watermark * dpInfo
);
// Return the Payload Bandwidth Number (PBN) for the requested mode
unsigned pbnForMode(const ModesetInfo &modesetInfo);
unsigned pbnForMode(const ModesetInfo &modesetInfo, bool bAccountSpread = true);
}
#endif //INCLUDED_DP_WATERMARK_H

View File

@@ -6057,7 +6057,11 @@ bool ConnectorImpl::allocateTimeslice(GroupImpl * targetGroup)
// Check for available timeslots
if (slot_count > freeSlots)
{
DP_PRINTF(DP_ERROR, "DP-TS> Failed to allocate timeslot!! Not enough free slots. slot_count: %d, freeSlots: %d",
slot_count, freeSlots);
return false;
}
for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
{

View File

@@ -547,20 +547,23 @@ bool ConnectorImpl2x::compoundQueryAttachMSTGeneric(Group * target,
*/
bool ConnectorImpl2x::notifyAttachBegin(Group *target, const DpModesetParams &modesetParams)
{
unsigned twoChannelAudioHz = modesetParams.modesetInfo.twoChannelAudioHz;
unsigned eightChannelAudioHz = modesetParams.modesetInfo.eightChannelAudioHz;
NvU64 pixelClockHz = modesetParams.modesetInfo.pixelClockHz;
unsigned rasterWidth = modesetParams.modesetInfo.rasterWidth;
unsigned rasterHeight = modesetParams.modesetInfo.rasterHeight;
unsigned rasterBlankStartX = modesetParams.modesetInfo.rasterBlankStartX;
unsigned rasterBlankEndX = modesetParams.modesetInfo.rasterBlankEndX;
unsigned depth = modesetParams.modesetInfo.depth;
bool bLinkTrainingStatus = true;
bool bEnableFEC = true;
bool bEnableDsc = modesetParams.modesetInfo.bEnableDsc;
bool bEnablePassThroughForPCON = modesetParams.modesetInfo.bEnablePassThroughForPCON;
Device *newDev = target->enumDevices(0);
DeviceImpl *dev = (DeviceImpl *)newDev;
unsigned twoChannelAudioHz = modesetParams.modesetInfo.twoChannelAudioHz;
unsigned eightChannelAudioHz = modesetParams.modesetInfo.eightChannelAudioHz;
NvU64 pixelClockHz = modesetParams.modesetInfo.pixelClockHz;
unsigned rasterWidth = modesetParams.modesetInfo.rasterWidth;
unsigned rasterHeight = modesetParams.modesetInfo.rasterHeight;
unsigned rasterBlankStartX = modesetParams.modesetInfo.rasterBlankStartX;
unsigned rasterBlankEndX = modesetParams.modesetInfo.rasterBlankEndX;
unsigned depth = modesetParams.modesetInfo.depth;
bool bLinkTrainingStatus = true;
bool bEnableFEC = true;
bool bEnableDsc = modesetParams.modesetInfo.bEnableDsc;
bool bEnablePassThroughForPCON = modesetParams.modesetInfo.bEnablePassThroughForPCON;
Device *newDev = target->enumDevices(0);
DeviceImpl *dev = (DeviceImpl *)newDev;
bool bApplyStuffDummySymbolsWAR = this->bApplyStuffDummySymbolsWAR;
bool bStuffDummySymbolsFor128b132b = this->bStuffDummySymbolsFor128b132b;
bool bStuffDummySymbolsFor8b10b = this->bStuffDummySymbolsFor8b10b;
if(preferredLinkConfig.isValid())
{
@@ -597,6 +600,14 @@ bool ConnectorImpl2x::notifyAttachBegin(Group *target, const DpModesetParams &mo
DP_USED(buffer);
DP_PRINTF(DP_NOTICE, "DP2xCONN> | %s (%s) |", dev->getTopologyAddress().toString(buffer),
dev->isVideoSink() ? "VIDEO" : "BRANCH");
//
// Note: This makes an assumption that all devices in the group have the same values for
// bApplyStuffDummySymbolsWAR, bStuffDummySymbolsFor8b10b and bStuffDummySymbolsFor128b132b
//
bApplyStuffDummySymbolsWAR |= ((DeviceImpl *)dev)->getApplyStuffDummySymbolsWAR();
bStuffDummySymbolsFor128b132b |= ((DeviceImpl *)dev)->getStuffDummySymbolsFor128b132b();
bStuffDummySymbolsFor8b10b |= ((DeviceImpl *)dev)->getStuffDummySymbolsFor8b10b();
}
if (firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware)
@@ -767,6 +778,16 @@ bool ConnectorImpl2x::notifyAttachBegin(Group *target, const DpModesetParams &mo
}
}
bFromResumeToNAB = false;
// Apply dummy symbol WAR if link training succeeded and device requires dummy symbols
// for the channel coding mode as per the device's WAR flags
if (bLinkTrainingStatus &&
bApplyStuffDummySymbolsWAR &&
((activeLinkConfig.bIs128b132bChannelCoding && bStuffDummySymbolsFor128b132b) ||
((!activeLinkConfig.bIs128b132bChannelCoding) && bStuffDummySymbolsFor8b10b)))
{
main->applyStuffDummySymbolWAR(targetImpl->headIndex, true);
}
return bLinkTrainingStatus;
}
@@ -1175,13 +1196,25 @@ bool ConnectorImpl2x::train(const LinkConfiguration &lConfig, bool force, LinkTr
*/
void ConnectorImpl2x::notifyDetachBegin(Group *target)
{
bool bApplyStuffDummySymbolsWAR = this->bApplyStuffDummySymbolsWAR;
if (!target)
target = firmwareGroup;
Device *newDev = target->enumDevices(0);
DeviceImpl *dev = (DeviceImpl *)newDev;
GroupImpl *group = (GroupImpl*)target;
for (Device * d = target->enumDevices(0); d; d = target->enumDevices(d))
{
DeviceImpl * dev = (DeviceImpl *)d;
bApplyStuffDummySymbolsWAR |= dev->getApplyStuffDummySymbolsWAR();
}
if (bApplyStuffDummySymbolsWAR)
{
main->applyStuffDummySymbolWAR(group->headIndex, false);
}
if (dev != NULL && dev->bApplyPclkWarBug4949066 == true)
{
EvoInterface *provider = ((EvoMainLink *)main)->getProvider();
@@ -1719,6 +1752,9 @@ void ConnectorImpl2x::configInit()
bMstTimeslotBug4968411 = false;
bApplyManualTimeslotBug4968411 = false;
bDisableDownspread = false;
bApplyStuffDummySymbolsWAR = false;
bStuffDummySymbolsFor128b132b = false;
bStuffDummySymbolsFor8b10b = false;
applyDP2xRegkeyOverrides();
}

View File

@@ -1901,3 +1901,16 @@ bool EvoMainLink::configureFec
return false;
}
/*
 * Program (or clear) the "stuff dummy symbols" sink workaround on the
 * given head via the NV0073_CTRL_CMD_STUFF_DUMMY_SYMBOL_WAR RM control.
 * Failures are logged but not propagated.
 *
 * NOTE(review): params.displayId is left zero-initialized — presumably
 * RM resolves the target from the head/subdevice; confirm against RM.
 */
void EvoMainLink::applyStuffDummySymbolWAR(NvU32 head, bool enable)
{
    NV0073_CTRL_STUFF_DUMMY_SYMBOL_WAR_PARAMS params = {0};

    params.subDeviceInstance = provider->getSubdeviceIndex();
    params.head              = head;
    params.bEnable           = enable ? NV_TRUE : NV_FALSE;

    NvU32 status = provider->rmControl0073(NV0073_CTRL_CMD_STUFF_DUMMY_SYMBOL_WAR,
                                           &params, sizeof(params));
    if (status != NVOS_STATUS_SUCCESS)
    {
        DP_PRINTF(DP_ERROR, "Failed to program the dummy symbol WAR!: %d", enable);
    }
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -170,7 +170,14 @@ NvU32 LinkConfiguration::slotsForPBN(NvU32 allocatedPBN, bool usable)
void LinkConfiguration::pbnRequired(const ModesetInfo & modesetInfo, unsigned & base_pbn, unsigned & slots, unsigned & slots_pbn)
{
base_pbn = pbnForMode(modesetInfo);
if (bIs128b132bChannelCoding)
{
base_pbn = pbnForMode(modesetInfo, false);
}
else
{
base_pbn = pbnForMode(modesetInfo, true);
}
if (!bIs128b132bChannelCoding)
{

View File

@@ -112,6 +112,20 @@ void ConnectorImpl2x::applyOuiWARs()
}
}
break;
case 0xAD6000:
if ((modelName[0] == 'M') &&
(modelName[1] == 'C') &&
(modelName[2] == '2') &&
(modelName[3] == '9') &&
(modelName[4] == '0') &&
(modelName[5] == 0x04U))
{
bApplyStuffDummySymbolsWAR = true;
bStuffDummySymbolsFor128b132b = false;
bStuffDummySymbolsFor8b10b = true;
}
break;
}
}
@@ -637,6 +651,14 @@ void Edid::applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDen
}
break;
case 0xAC10:
if (ProductID == 0x42AD || ProductID == 0x42AC)
{
this->WARFlags.bApplyStuffDummySymbolsWAR = true;
this->WARData.bStuffDummySymbolsFor128b132b = true;
this->WARData.bStuffDummySymbolsFor8b10b = false;
}
break;
default:
break;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -838,8 +838,10 @@ bool DisplayPort::isModePossibleMSTWithFEC
return true;
}
unsigned DisplayPort::pbnForMode(const ModesetInfo & modesetInfo)
unsigned DisplayPort::pbnForMode(const ModesetInfo & modesetInfo, bool bAccountSpread)
{
NvU64 pbn_numerator, pbn_denominator;
// When DSC is enabled consider depth will multiplied by 16
unsigned dsc_factor = 1;
@@ -855,8 +857,14 @@ unsigned DisplayPort::pbnForMode(const ModesetInfo & modesetInfo)
}
}
unsigned pbnForMode = (NvU32)(divide_ceil(modesetInfo.pixelClockHz * modesetInfo.depth * 1006 * 64 / 8,
(NvU64)54000000 * 1000 * dsc_factor));
pbn_numerator = modesetInfo.pixelClockHz * modesetInfo.depth * 64 / 8;
pbn_denominator = 54000000ULL * dsc_factor;
return pbnForMode;
if (bAccountSpread)
{
pbn_numerator *= 1006;
pbn_denominator *= 1000;
}
return (NvU32)(divide_ceil(pbn_numerator, pbn_denominator));
}

View File

@@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r573_30
#define NV_BUILD_BRANCH r570_00
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r573_30
#define NV_PUBLIC_BRANCH r570_00
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r570/r573_30-464"
#define NV_BUILD_CHANGELIST_NUM (36065453)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r570/r570_00-486"
#define NV_BUILD_CHANGELIST_NUM (36118394)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r570/r573_30-464"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36065453)
#define NV_BUILD_NAME "rel/gpu_drv/r570/r570_00-486"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36118394)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r573_30-1"
#define NV_BUILD_CHANGELIST_NUM (36065453)
#define NV_BUILD_BRANCH_VERSION "r570_00-483"
#define NV_BUILD_CHANGELIST_NUM (36118394)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "573.32"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36065453)
#define NV_BUILD_NAME "573.42"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36118394)
#define NV_BUILD_BRANCH_BASE_VERSION R570
#endif
// End buildmeister python edited section

View File

@@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "570.158.01"
#define NV_VERSION_STRING "570.169"
#else

View File

@@ -3539,4 +3539,39 @@ typedef struct NV0073_CTRL_CMD_CALCULATE_DP_IMP_PARAMS {
NV0073_CTRL_DP_IMP_WATERMARK watermark;
} NV0073_CTRL_CMD_CALCULATE_DP_IMP_PARAMS;
/*
* NV0073_CTRL_CMD_STUFF_DUMMY_SYMBOL_WAR
*
* Some sink devices require extra padding between SDPs. This is programmed for GB20x+ GPUs.
*
* subDeviceInstance
* This parameter specifies the subdevice instance within the
* NV04_DISPLAY_COMMON parent device to which the operation should be
* directed. This parameter must specify a value between zero and the
* total number of subdevices within the parent device. This parameter
* should be set to zero for default behavior.
* displayId
* This parameter specifies the ID of the display for which the control
* is being issued. The display ID must be valid.
* head
* This parameter specifies the head index for the operation.
* bEnable
* Boolean to enable or disable the WAR.
*
* Possible status values returned are:
* NV_OK
* NV_ERR_INVALID_PARAM_STRUCT
* NV_ERR_INVALID_ARGUMENT
*/
/*
 * Parameter struct for NV0073_CTRL_CMD_STUFF_DUMMY_SYMBOL_WAR; field
 * semantics are documented in the command description above.
 */
#define NV0073_CTRL_STUFF_DUMMY_SYMBOL_WAR_PARAMS_MESSAGE_ID (0x8DU)
typedef struct NV0073_CTRL_STUFF_DUMMY_SYMBOL_WAR_PARAMS {
NvU32 subDeviceInstance;  /* [in] subdevice index within NV04_DISPLAY_COMMON; 0 for default */
NvU32 displayId;          /* [in] ID of the display being targeted; must be valid */
NvU32 head;               /* [in] head index for the operation */
NvBool bEnable;           /* [in] NV_TRUE to enable the WAR, NV_FALSE to disable it */
} NV0073_CTRL_STUFF_DUMMY_SYMBOL_WAR_PARAMS;
#define NV0073_CTRL_CMD_STUFF_DUMMY_SYMBOL_WAR (0x73138dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_STUFF_DUMMY_SYMBOL_WAR_PARAMS_MESSAGE_ID" */
/* _ctrl0073dp_h_ */

View File

@@ -94,7 +94,8 @@ void nvIdleLayerChannels(NVDevEvoRec *pDevEvo,
NvU32 layerMaskPerSdApiHead[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]);
void nvEvoClearSurfaceUsage(NVDevEvoRec *pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo);
NVSurfaceEvoPtr pSurfaceEvo,
const NvBool skipSync);
NvBool nvIdleBaseChannelOneApiHead(NVDispEvoRec *pDispEvo, NvU32 apiHead,
NvBool *pStoppedBase);

View File

@@ -1197,13 +1197,14 @@ void nvIdleLayerChannels(NVDevEvoRec *pDevEvo,
* in-flight methods flip away from this surface.
*/
void nvEvoClearSurfaceUsage(NVDevEvoRec *pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo)
NVSurfaceEvoPtr pSurfaceEvo,
const NvBool skipSync)
{
NvU32 head;
/*
* If the core channel is no longer allocated, we don't need to
* sync. This assumes the channels are allocated/deallocated
* clear usage/sync. This assumes the channels are allocated/deallocated
* together.
*/
if (pDevEvo->core) {
@@ -1212,16 +1213,20 @@ void nvEvoClearSurfaceUsage(NVDevEvoRec *pDevEvo,
pDevEvo->hal->ClearSurfaceUsage(pDevEvo, pSurfaceEvo);
}
nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
/* HALs with ClearSurfaceUsage() require sync to ensure completion. */
if (!skipSync ||
(pDevEvo->hal->ClearSurfaceUsage != NULL)) {
nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
for (head = 0; head < pDevEvo->numHeads; head++) {
NvU32 layer;
for (head = 0; head < pDevEvo->numHeads; head++) {
NvU32 layer;
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
NVEvoChannelPtr pChannel =
pDevEvo->head[head].layer[layer];
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
NVEvoChannelPtr pChannel =
pDevEvo->head[head].layer[layer];
nvRMSyncEvoChannel(pDevEvo, pChannel, __LINE__);
nvRMSyncEvoChannel(pDevEvo, pChannel, __LINE__);
}
}
}
}

View File

@@ -1156,14 +1156,14 @@ void nvEvoDecrementSurfaceRefCntsWithSync(NVDevEvoPtr pDevEvo,
if (pSurfaceEvo->rmRefCnt == 0) {
/*
* Don't sync if this surface was registered as not requiring display
* hardware access, to WAR timeouts that result from OGL unregistering
* a deferred request fifo causing a sync here that may timeout if
* GLS hasn't had the opportunity to release semaphores with pending
* flips. (Bug 2050970)
* Don't clear usage/sync if this surface was registered as not
* requiring display hardware access, to WAR timeouts that result from
* OGL unregistering a deferred request fifo causing a sync here that
* may timeout if GLS hasn't had the opportunity to release semaphores
* with pending flips. (Bug 2050970)
*/
if (!skipSync && pSurfaceEvo->requireDisplayHardwareAccess) {
nvEvoClearSurfaceUsage(pDevEvo, pSurfaceEvo);
if (pSurfaceEvo->requireDisplayHardwareAccess) {
nvEvoClearSurfaceUsage(pDevEvo, pSurfaceEvo, skipSync);
}
FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo);

View File

@@ -65,8 +65,6 @@ CHIPSET_SETUP_FUNC(Intel_0685_setupFunc)
CHIPSET_SETUP_FUNC(Intel_4381_setupFunc)
CHIPSET_SETUP_FUNC(Intel_7A82_setupFunc)
CHIPSET_SETUP_FUNC(Intel_7A04_setupFunc)
CHIPSET_SETUP_FUNC(Intel_5795_setupFunc)
CHIPSET_SETUP_FUNC(Intel_1B81_setupFunc)
CHIPSET_SETUP_FUNC(SiS_656_setupFunc)
CHIPSET_SETUP_FUNC(ATI_RS400_setupFunc)
CHIPSET_SETUP_FUNC(ATI_RS480_setupFunc)
@@ -192,11 +190,11 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_INTEL, 0x4385, CS_INTEL_4381, "Intel-RocketLake", Intel_4381_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x7A82, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x7A84, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1B81, CS_INTEL_1B81, "Intel-SapphireRapids", Intel_1B81_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x7A8A, CS_INTEL_1B81, "Intel-SapphireRapids", Intel_1B81_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x1B81, CS_INTEL_1B81, "Intel-SapphireRapids", NULL},
{PCI_VENDOR_ID_INTEL, 0x7A8A, CS_INTEL_1B81, "Intel-SapphireRapids", NULL},
{PCI_VENDOR_ID_INTEL, 0x18DC, CS_INTEL_18DC, "Intel-IceLake", NULL},
{PCI_VENDOR_ID_INTEL, 0x7A04, CS_INTEL_7A04, "Intel-RaptorLake", Intel_7A04_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x5795, CS_INTEL_5795, "Intel-GraniteRapids", Intel_5795_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x5795, CS_INTEL_5795, "Intel-GraniteRapids", NULL},
{PCI_VENDOR_ID_INTEL, 0xA70D, CS_INTEL_B660, "Intel-B660", Intel_A70D_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2010-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2010-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -47,33 +47,13 @@
#define NV_MSGBOX_CMD_ERR_MORE_PROCESSING_REQUIRED 0x000000F0
//
// Alternative encodings of the command word
// These are distinguished by a non-zero value in the 29:29 bit,
// previously known as _RSVD.
// Alternative encodings of the command word.
// These were distinguished by a non-zero value in the 29:29 bit.
// Bit 29 is now reserved and must be 0, i.e., only standard requests will be
// processed; debug requests will fail.
//
#define NV_MSGBOX_CMD_ENCODING 29:29
#define NV_MSGBOX_CMD_ENCODING_STANDARD 0x00000000
#define NV_MSGBOX_CMD_ENCODING_DEBUG 0x00000001
// Debug command structure
#define NV_MSGBOX_DEBUG_CMD_OPCODE 1:0
#define NV_MSGBOX_DEBUG_CMD_OPCODE_READ_PRIV 0x00000000
#define NV_MSGBOX_DEBUG_CMD_ARG 23:2
/* Utility command constructor macros */
#define NV_MSGBOX_DEBUG_CMD(opcode, arg) \
( \
DRF_DEF(_MSGBOX, _DEBUG_CMD, _OPCODE, opcode) | \
DRF_NUM(_MSGBOX, _DEBUG_CMD, _ARG, (arg)) | \
DRF_DEF(_MSGBOX, _CMD, _STATUS, _NULL) | \
DRF_DEF(_MSGBOX, _CMD, _ENCODING, _DEBUG) | \
DRF_DEF(_MSGBOX, _CMD, _INTR, _PENDING) \
)
#define NV_MSGBOX_DEBUG_CMD_READ_PRIV(offset) \
NV_MSGBOX_DEBUG_CMD(_READ_PRIV, (offset) >> 2)
#endif // _SMBPBI_PRIV_H_

View File

@@ -1218,6 +1218,7 @@ struct OBJGPU {
NvBool gspRmInitialized;
NV_PM_DEPTH powerManagementDepth;
_GPU_PCIE_PEER_CLIQUE pciePeerClique;
NvBool bGspNocatEnabled;
NvU32 i2cPortForExtdev;
GPUIDINFO idInfo;
_GPU_CHIP_INFO chipInfo;

View File

@@ -638,6 +638,18 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
pThis->__kmemsysGetMaximumBlacklistPages__ = &kmemsysGetMaximumBlacklistPages_GA100;
}
// kmemsysIsSwizzIdRejectedByHW -- halified (2 hals)
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0xd0000000UL) ) ||
( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000ec0UL) )) /* ChipHal: GH100 | GB100 | GB102 | GB202 | GB203 | GB205 | GB206 | GB207 */
{
pThis->__kmemsysIsSwizzIdRejectedByHW__ = &kmemsysIsSwizzIdRejectedByHW_GH100;
}
// default
else
{
pThis->__kmemsysIsSwizzIdRejectedByHW__ = &kmemsysIsSwizzIdRejectedByHW_3dd2c9;
}
// kmemsysGetFbInfos -- halified (2 hals)
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
@@ -659,10 +671,10 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
{
pThis->__kmemsysIsNumaPartitionInUse__ = &kmemsysIsNumaPartitionInUse_3dd2c9;
}
} // End __nvoc_init_funcTable_KernelMemorySystem_1 with approximately 73 basic block(s).
} // End __nvoc_init_funcTable_KernelMemorySystem_1 with approximately 75 basic block(s).
// Initialize vtable(s) for 43 virtual method(s).
// Initialize vtable(s) for 44 virtual method(s).
void __nvoc_init_funcTable_KernelMemorySystem(KernelMemorySystem *pThis, RmHalspecOwner *pRmhalspecowner) {
// Per-class vtable definition
@@ -701,7 +713,7 @@ void __nvoc_init_funcTable_KernelMemorySystem(KernelMemorySystem *pThis, RmHalsp
pThis->__nvoc_base_OBJENGSTATE.__nvoc_vtable = &vtable.OBJENGSTATE; // (engstate) super
pThis->__nvoc_vtable = &vtable; // (kmemsys) this
// Initialize vtable(s) with 29 per-object function pointer(s).
// Initialize vtable(s) with 30 per-object function pointer(s).
__nvoc_init_funcTable_KernelMemorySystem_1(pThis, pRmhalspecowner);
}

View File

@@ -229,7 +229,7 @@ struct KernelMemorySystem {
struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate super
struct KernelMemorySystem *__nvoc_pbase_KernelMemorySystem; // kmemsys
// Vtable with 29 per-object function pointers
// Vtable with 30 per-object function pointers
NV_STATUS (*__kmemsysGetFbNumaInfo__)(OBJGPU *, struct KernelMemorySystem * /*this*/, NvU64 *, NvU64 *, NvS32 *); // halified (2 hals) body
NV_STATUS (*__kmemsysReadUsableFbSize__)(OBJGPU *, struct KernelMemorySystem * /*this*/, NvU64 *); // halified (2 hals) body
NV_STATUS (*__kmemsysGetUsableFbSize__)(OBJGPU *, struct KernelMemorySystem * /*this*/, NvU64 *); // halified (2 hals) body
@@ -257,6 +257,7 @@ struct KernelMemorySystem {
NvU32 (*__kmemsysGetEccDedCountSize__)(OBJGPU *, struct KernelMemorySystem * /*this*/); // halified (3 hals) body
NvU32 (*__kmemsysGetEccDedCountRegAddr__)(OBJGPU *, struct KernelMemorySystem * /*this*/, NvU32, NvU32); // halified (3 hals) body
NvU16 (*__kmemsysGetMaximumBlacklistPages__)(OBJGPU *, struct KernelMemorySystem * /*this*/); // halified (2 hals) body
NvBool (*__kmemsysIsSwizzIdRejectedByHW__)(OBJGPU *, struct KernelMemorySystem * /*this*/, NvU32); // halified (2 hals)
NV_STATUS (*__kmemsysGetFbInfos__)(OBJGPU *, struct KernelMemorySystem * /*this*/, struct RsClient *, Device *, NvHandle, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *, NvU64 *); // halified (2 hals)
NvBool (*__kmemsysIsNumaPartitionInUse__)(OBJGPU *, struct KernelMemorySystem * /*this*/, NvU32); // halified (2 hals) body
@@ -441,6 +442,9 @@ NV_STATUS __nvoc_objCreate_KernelMemorySystem(KernelMemorySystem**, Dynamic*, Nv
#define kmemsysGetMaximumBlacklistPages_FNPTR(pKernelMemorySystem) pKernelMemorySystem->__kmemsysGetMaximumBlacklistPages__
#define kmemsysGetMaximumBlacklistPages(pGpu, pKernelMemorySystem) kmemsysGetMaximumBlacklistPages_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysGetMaximumBlacklistPages_HAL(pGpu, pKernelMemorySystem) kmemsysGetMaximumBlacklistPages_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysIsSwizzIdRejectedByHW_FNPTR(arg_this) arg_this->__kmemsysIsSwizzIdRejectedByHW__
#define kmemsysIsSwizzIdRejectedByHW(arg1, arg_this, swizzId) kmemsysIsSwizzIdRejectedByHW_DISPATCH(arg1, arg_this, swizzId)
#define kmemsysIsSwizzIdRejectedByHW_HAL(arg1, arg_this, swizzId) kmemsysIsSwizzIdRejectedByHW_DISPATCH(arg1, arg_this, swizzId)
#define kmemsysGetFbInfos_FNPTR(arg_this) arg_this->__kmemsysGetFbInfos__
#define kmemsysGetFbInfos(arg1, arg_this, arg3, arg4, hSubdevice, pParams, pFbInfoListIndicesUnset) kmemsysGetFbInfos_DISPATCH(arg1, arg_this, arg3, arg4, hSubdevice, pParams, pFbInfoListIndicesUnset)
#define kmemsysGetFbInfos_HAL(arg1, arg_this, arg3, arg4, hSubdevice, pParams, pFbInfoListIndicesUnset) kmemsysGetFbInfos_DISPATCH(arg1, arg_this, arg3, arg4, hSubdevice, pParams, pFbInfoListIndicesUnset)
@@ -603,6 +607,10 @@ static inline NvU16 kmemsysGetMaximumBlacklistPages_DISPATCH(OBJGPU *pGpu, struc
return pKernelMemorySystem->__kmemsysGetMaximumBlacklistPages__(pGpu, pKernelMemorySystem);
}
// NVOC dispatch thunk: forwards the call to the per-object halified
// implementation installed in the KernelMemorySystem function table.
static inline NvBool kmemsysIsSwizzIdRejectedByHW_DISPATCH(OBJGPU *arg1, struct KernelMemorySystem *arg_this, NvU32 swizzId) {
    NvBool (*pHalFunc)(OBJGPU *, struct KernelMemorySystem *, NvU32) = arg_this->__kmemsysIsSwizzIdRejectedByHW__;
    return pHalFunc(arg1, arg_this, swizzId);
}
// NVOC dispatch thunk: forwards the call to the per-object halified
// implementation installed in the KernelMemorySystem function table.
static inline NV_STATUS kmemsysGetFbInfos_DISPATCH(OBJGPU *arg1, struct KernelMemorySystem *arg_this, struct RsClient *arg3, Device *arg4, NvHandle hSubdevice, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, NvU64 *pFbInfoListIndicesUnset) {
    NV_STATUS (*pHalFunc)(OBJGPU *, struct KernelMemorySystem *, struct RsClient *, Device *, NvHandle, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *, NvU64 *) = arg_this->__kmemsysGetFbInfos__;
    return pHalFunc(arg1, arg_this, arg3, arg4, hSubdevice, pParams, pFbInfoListIndicesUnset);
}
@@ -1034,6 +1042,12 @@ NvU16 kmemsysGetMaximumBlacklistPages_GM107(OBJGPU *pGpu, struct KernelMemorySys
NvU16 kmemsysGetMaximumBlacklistPages_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
NvBool kmemsysIsSwizzIdRejectedByHW_GH100(OBJGPU *arg1, struct KernelMemorySystem *arg2, NvU32 swizzId);
// Halified stub body ("3dd2c9"): on chips without the GH100-style rejection
// check, report every swizzId as not rejected by HW.
static inline NvBool kmemsysIsSwizzIdRejectedByHW_3dd2c9(OBJGPU *arg1, struct KernelMemorySystem *arg2, NvU32 swizzId) {
    (void)arg1;
    (void)arg2;
    (void)swizzId;
    return NV_FALSE;
}
// Halified stub body ("ac1694"): no FB-info work to do on this hal variant;
// succeed unconditionally without touching the output parameters.
static inline NV_STATUS kmemsysGetFbInfos_ac1694(OBJGPU *arg1, struct KernelMemorySystem *arg2, struct RsClient *arg3, Device *arg4, NvHandle hSubdevice, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, NvU64 *pFbInfoListIndicesUnset) {
    (void)arg1;
    (void)arg2;
    (void)arg3;
    (void)arg4;
    (void)hSubdevice;
    (void)pParams;
    (void)pFbInfoListIndicesUnset;
    return NV_OK;
}

View File

@@ -819,7 +819,7 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
{
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000ec0UL) )) /* ChipHal: GB202 | GB203 | GB205 | GB206 | GB207 */
{
pThis->__kgspGetNonWprHeapSize__ = &kgspGetNonWprHeapSize_1bb8e3;
pThis->__kgspGetNonWprHeapSize__ = &kgspGetNonWprHeapSize_ad951d;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{

View File

@@ -1318,8 +1318,8 @@ static inline NvU32 kgspGetNonWprHeapSize_d505ea(struct OBJGPU *pGpu, struct Ker
return 2097152;
}
static inline NvU32 kgspGetNonWprHeapSize_1bb8e3(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 2228224;
static inline NvU32 kgspGetNonWprHeapSize_ad951d(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 2293760;
}
static inline NvU32 kgspGetNonWprHeapSize_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {

View File

@@ -5432,6 +5432,14 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2C05, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Ti" },
{ 0x2C18, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090 Laptop GPU" },
{ 0x2C19, 0x0000, 0x0000, "NVIDIA GeForce RTX 5080 Laptop GPU" },
{ 0x2C31, 0x2051, 0x1028, "NVIDIA RTX PRO 4500 Blackwell" },
{ 0x2C31, 0x2051, 0x103c, "NVIDIA RTX PRO 4500 Blackwell" },
{ 0x2C31, 0x2051, 0x10de, "NVIDIA RTX PRO 4500 Blackwell" },
{ 0x2C31, 0x2051, 0x17aa, "NVIDIA RTX PRO 4500 Blackwell" },
{ 0x2C34, 0x2052, 0x1028, "NVIDIA RTX PRO 4000 Blackwell" },
{ 0x2C34, 0x2052, 0x103c, "NVIDIA RTX PRO 4000 Blackwell" },
{ 0x2C34, 0x2052, 0x10de, "NVIDIA RTX PRO 4000 Blackwell" },
{ 0x2C34, 0x2052, 0x17aa, "NVIDIA RTX PRO 4000 Blackwell" },
{ 0x2C38, 0x0000, 0x0000, "NVIDIA RTX PRO 5000 Blackwell Generation Laptop GPU" },
{ 0x2C39, 0x0000, 0x0000, "NVIDIA RTX PRO 4000 Blackwell Generation Laptop GPU" },
{ 0x2C58, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090 Laptop GPU" },

View File

@@ -217,6 +217,7 @@ typedef struct GspSystemInfo
NvBool bClockBoostSupported;
NvBool bRouteDispIntrsToCPU;
NvU64 hostPageSize;
NvBool bGspNocatEnabled;
} GspSystemInfo;

View File

@@ -549,6 +549,38 @@ kmemsysNumaRemoveAllMemory_GH100
return;
}
/*!
 * @brief Check whether HW rejects the given GPU instance swizzId.
 *
 * An empty FB memory page range reported by AMAP for the swizzId means HW
 * does not support a GPU instance with that swizzId.
 *
 * @param[in] pGpu                 OBJGPU pointer
 * @param[in] pKernelMemorySystem  KernelMemorySystem pointer
 * @param[in] swizzId              swizzId to validate
 *
 * @return NV_TRUE if the swizzId is rejected by HW (or if the swizzId is
 *         out of range / static info is unavailable — fail safe),
 *         NV_FALSE otherwise.
 */
NvBool
kmemsysIsSwizzIdRejectedByHW_GH100
(
    OBJGPU *pGpu,
    KernelMemorySystem *pKernelMemorySystem,
    NvU32 swizzId
)
{
    KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
    const KERNEL_MIG_MANAGER_STATIC_INFO *pStaticInfo =
        kmigmgrGetStaticInfo(pGpu, pKernelMIGManager);

    // Invalid swizzId or missing static info is treated as "rejected".
    NV_ASSERT_OR_RETURN(swizzId < KMIGMGR_MAX_GPU_SWIZZID, NV_TRUE);
    NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_TRUE);
    NV_ASSERT_OR_RETURN(pStaticInfo->pSwizzIdFbMemPageRanges != NULL, NV_TRUE);

    // A non-empty range returned by AMAP means the swizzId is usable.
    if (!rangeIsEmpty(rangeMake(pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].lo,
                                pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].hi)))
    {
        return NV_FALSE;
    }

    // Empty range: HW rejects this swizzId.
    NV_PRINTF(LEVEL_INFO,
              "GPU Instance Mem Config for swizzId = 0x%x is rejected by HW\n",
              swizzId);
    return NV_TRUE;
}
/*!
 * @brief Map a swizzId to its VMMU segments range
 */
@@ -571,9 +603,17 @@ kmemsysSwizzIdToVmmuSegmentsRange_GH100
NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE);
NV_ASSERT_OR_RETURN(pStaticInfo->pSwizzIdFbMemPageRanges != NULL, NV_ERR_INVALID_STATE);
startingVmmuSegment = pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].lo;
memSizeInVmmuSegment = (pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].hi -
pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].lo + 1);
if (kmemsysIsSwizzIdRejectedByHW_HAL(pGpu, pKernelMemorySystem, swizzId))
{
startingVmmuSegment = 0;
memSizeInVmmuSegment = 0;
}
else
{
startingVmmuSegment = pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].lo;
memSizeInVmmuSegment = (pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].hi -
pStaticInfo->pSwizzIdFbMemPageRanges->fbMemPageRanges[swizzId].lo + 1);
}
if (memSizeInVmmuSegment > totalVmmuSegments)
{

View File

@@ -1029,13 +1029,6 @@ _kmigmgrHandlePostSchedulingEnableCallback
// Initialize static info derived from physical RM
NV_ASSERT_OK_OR_RETURN(kmigmgrLoadStaticInfo_HAL(pGpu, pKernelMIGManager));
//
// Populate static GPU instance memory config which will be used to manage
// GPU instance memory
//
KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
NV_ASSERT_OK_OR_RETURN(kmemsysPopulateMIGGPUInstanceMemConfig_HAL(pGpu, pKernelMemorySystem));
// KERNEL_ONLY variants require static info to detect reduced configs
kmigmgrDetectReducedConfig_HAL(pGpu, pKernelMIGManager);
}
@@ -1557,6 +1550,7 @@ kmigmgrLoadStaticInfo_KERNEL
NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS params = {0};
ENGTYPE_BIT_VECTOR partitionableNv2080Engines;
NvU32 nv2080EngineType;
KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
NV_ASSERT_OR_RETURN(pPrivate != NULL, NV_ERR_INVALID_STATE);
@@ -1617,6 +1611,38 @@ kmigmgrLoadStaticInfo_KERNEL
sizeof(*pPrivate->staticInfo.pSkylineInfo)),
failed);
pPrivate->staticInfo.pSwizzIdFbMemPageRanges = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges));
NV_CHECK_OR_ELSE(LEVEL_ERROR,
pPrivate->staticInfo.pSwizzIdFbMemPageRanges != NULL,
status = NV_ERR_NO_MEMORY;
goto failed;);
portMemSet(pPrivate->staticInfo.pSwizzIdFbMemPageRanges, 0x0, sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges));
status = pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES,
pPrivate->staticInfo.pSwizzIdFbMemPageRanges,
sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges));
if (status == NV_ERR_NOT_SUPPORTED)
{
// Only supported on specific GPUs
status = NV_OK;
portMemFree(pPrivate->staticInfo.pSwizzIdFbMemPageRanges);
pPrivate->staticInfo.pSwizzIdFbMemPageRanges = NULL;
}
else if (status != NV_OK)
{
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, status, failed);
}
//
// Populate static GPU instance memory config which will be used to manage
// GPU instance memory
//
NV_ASSERT_OK_OR_RETURN(kmemsysPopulateMIGGPUInstanceMemConfig_HAL(pGpu, pKernelMemorySystem));
pPrivate->staticInfo.pCIProfiles = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pCIProfiles));
NV_CHECK_OR_ELSE(LEVEL_ERROR,
pPrivate->staticInfo.pCIProfiles != NULL,
@@ -1654,32 +1680,6 @@ kmigmgrLoadStaticInfo_KERNEL
NV_CHECK(LEVEL_ERROR, kmigmgrEnableAllLCEs(pGpu, pKernelMIGManager, NV_FALSE) == NV_OK);
}
pPrivate->staticInfo.pSwizzIdFbMemPageRanges = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges));
NV_CHECK_OR_ELSE(LEVEL_ERROR,
pPrivate->staticInfo.pSwizzIdFbMemPageRanges != NULL,
status = NV_ERR_NO_MEMORY;
goto failed;);
portMemSet(pPrivate->staticInfo.pSwizzIdFbMemPageRanges, 0x0, sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges));
status = pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES,
pPrivate->staticInfo.pSwizzIdFbMemPageRanges,
sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges));
if (status == NV_ERR_NOT_SUPPORTED)
{
// Only supported on specific GPUs
status = NV_OK;
portMemFree(pPrivate->staticInfo.pSwizzIdFbMemPageRanges);
pPrivate->staticInfo.pSwizzIdFbMemPageRanges = NULL;
}
else if (status != NV_OK)
{
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, status, failed);
}
return status;
failed:
@@ -3933,13 +3933,6 @@ kmigmgrSetPartitioningMode_IMPL
// Initialize static info derived from physical RM
NV_ASSERT_OK_OR_RETURN(kmigmgrLoadStaticInfo_HAL(pGpu, pKernelMIGManager));
//
// Populate static GPU instance memory config which will be used to manage
// GPU instance memory
//
KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
NV_ASSERT_OK_OR_RETURN(kmemsysPopulateMIGGPUInstanceMemConfig_HAL(pGpu, pKernelMemorySystem));
NV_ASSERT_OK(gpuDisableAccounting(pGpu, NV_TRUE));
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -916,29 +916,6 @@ Intel_7A04_setupFunc
return NV_OK;
}
// Chipset setup for Intel device 0x5795: sets the PDB property marking the
// chipset as not capable of PCIe relaxed ordering.
static NV_STATUS
Intel_5795_setupFunc
(
    OBJCL *pCl
)
{
    OBJCL *pChipsetLayer = pCl;

    pChipsetLayer->setProperty(pChipsetLayer, PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE, NV_TRUE);

    return NV_OK;
}
// Chipset setup for Intel device 0x1B81: sets the PDB property marking the
// chipset as not capable of PCIe relaxed ordering.
static NV_STATUS
Intel_1B81_setupFunc
(
    OBJCL *pCl
)
{
    OBJCL *pChipsetLayer = pCl;

    pChipsetLayer->setProperty(pChipsetLayer, PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE, NV_TRUE);

    return NV_OK;
}
static NV_STATUS
Nvidia_T210_setupFunc
(

View File

@@ -9597,6 +9597,11 @@ NV_STATUS rpcGspSetSystemInfo_v17_00
rpcInfo->bClockBoostSupported = pKernelFsp->bClockBoostSupported;
}
if (RMCFG_FEATURE_PLATFORM_WINDOWS)
{
rpcInfo->bGspNocatEnabled = NV_TRUE;
}
status = _issueRpcAsync(pGpu, pRpc);
}

View File

@@ -1,4 +1,4 @@
NVIDIA_VERSION = 570.158.01
NVIDIA_VERSION = 570.169
# This file.
VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))