565.57.01

This commit is contained in:
Bernhard Stoeckner
2024-10-22 17:38:58 +02:00
parent ed4be64962
commit d5a0858f90
1049 changed files with 209491 additions and 167508 deletions

View File

@@ -30,7 +30,6 @@
extern "C" {
#endif
NvBool nvDPTimersPending(void);
NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo);
void nvDPFreeTimer(NVDPLibTimerPtr pTimer);
void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo);

View File

@@ -90,6 +90,10 @@ NvU32 nvDpyGetPossibleApiHeadsMask(const NVDpyEvoRec *pDpyEvo);
NvBool nvDpyIsHDRCapable(const NVDpyEvoRec *pDpyEvo);
void nvConstructDpVscSdp(const NVDispHeadInfoFrameStateEvoRec *pInfoFrame,
const NVDpyAttributeColor *pDpyColor,
DPSDP_DP_VSC_SDP_DESCRIPTOR *sdp);
#ifdef __cplusplus
};
#endif

View File

@@ -118,7 +118,9 @@ void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo,
NVEvoUpdateState *pUpdateState);
void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo,
const NvU32 head, NVEvoUpdateState *pUpdateState);
const NvU32 head,
const NVDpyAttributeColor *pDpyColor,
NVEvoUpdateState *pUpdateState);
void nvChooseDitheringEvo(
const NVConnectorEvoRec *pConnectorEvo,
@@ -185,6 +187,7 @@ NvBool nvDowngradeColorBpc(
NVDpyAttributeColor *pDpyColor);
NvBool nvDowngradeColorSpaceAndBpc(
const NVDpyEvoRec *pDpyEvo,
const NvKmsDpyOutputColorFormatInfo *pSupportedColorFormats,
NVDpyAttributeColor *pDpyColor);
@@ -250,7 +253,6 @@ NvBool nvValidateSetLutCommonParams(
const struct NvKmsSetLutCommonParams *pParams);
NvBool nvChooseColorRangeEvo(
enum NvKmsOutputColorimetry colorimetry,
const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
const enum NvKmsDpyAttributeColorBpcValue colorBpc,
@@ -393,9 +395,10 @@ void nvEvoDisableHwYUV420Packer(const NVDispEvoRec *pDispEvo,
const NvU32 head,
NVEvoUpdateState *pUpdateState);
NvBool nvEvoGetSingleTileHwModeTimings(const NVHwModeTimingsEvo *pSrc,
const NvU32 numTiles,
NVHwModeTimingsEvo *pDst);
NvBool nvEvoGetSingleMergeHeadSectionHwModeTimings(
const NVHwModeTimingsEvo *pSrc,
const NvU32 numSections,
NVHwModeTimingsEvo *pDst);
NvBool nvEvoUse2Heads1OR(const NVDpyEvoRec *pDpyEvo,
const NVHwModeTimingsEvo *pTimings,

View File

@@ -71,6 +71,12 @@ struct EvoClampRangeC5 {
NvU32 green, red_blue;
};
typedef void (NVEvoParseCapabilityNotifierFunc3)(NVDevEvoPtr pDevEvo,
NVEvoSubDevPtr pEvoSubDev, volatile const NvU32 *pCaps);
typedef NvU32 (NVEvoHwFormatFromKmsFormatFunc3)(
const enum NvKmsSurfaceMemoryFormat format);
/*
* Converts FP32 to fixed point S5.14 coefficient format
*/
@@ -130,6 +136,23 @@ void nvEvoSetNotifierC3(NVDevEvoRec *pDevEvo,
const NvU32 notifier,
NVEvoUpdateState *updateState);
static inline NvU32 nvEvoReadCapReg3(volatile const NvU32 *pCaps, NvU32 offset)
{
/* Offsets are in bytes, but the array has dword-sized elements. */
return pCaps[offset / sizeof(NvU32)];
}
NvBool nvEvoGetCapabilities3(NVDevEvoPtr pDevEvo,
NVEvoParseCapabilityNotifierFunc3 *pParse,
NVEvoHwFormatFromKmsFormatFunc3 *pGetHwFmt,
NvU32 hwclass, size_t length);
void nvEvoParseCapabilityNotifier6(NVDevEvoPtr pDevEvo,
NVEvoSubDevPtr pEvoSubDev,
volatile const NvU32 *pCaps);
NvU32 nvHwFormatFromKmsFormatC6(const enum NvKmsSurfaceMemoryFormat format);
NvBool nvEvoGetCapabilitiesC6(NVDevEvoPtr pDevEvo);
void
@@ -153,14 +176,11 @@ nvEvoFillLUTSurfaceC5(NVEvoLutEntryRec *pLUTBuffer,
void nvSetupOutputLUT5(NVDevEvoPtr pDevEvo,
const NVDispHeadStateEvoRec *pHeadState,
const int head,
NVLutSurfaceEvoPtr pLutSurfEvo,
NvBool enableBaseLut,
NvBool enableOutputLut,
NVEvoUpdateState *updateState,
NvBool bypassComposition,
NVSurfaceDescriptor **pSurfaceDesc,
NvU32 *lutSize,
NvU64 *offset,
NvBool *disableOcsc0,
NvU32 *fpNormScale,
NvBool *isLutModeVss);
@@ -257,7 +277,8 @@ const struct NvKmsCscMatrix* nvEvoGetOCsc1MatrixC5(const NVDispHeadStateEvoRec *
struct EvoClampRangeC5 nvEvoGetOCsc1ClampRange(const NVDispHeadStateEvoRec *pHeadState);
void nvEvo3PickOCsc0(NVDispEvoPtr pDispEvo, const NvU32 head, struct NvKms3x4MatrixF32 *ocsc0Matrix);
void nvEvo3PickOCsc0(const NVDispEvoRec* pDispEvo, const NvU32 head,
struct NvKms3x4MatrixF32 *ocsc0Matrix, NvBool *pOutputRoundingFix);
static inline const NVEvoScalerCaps*
nvEvoGetWindowScalingCapsC3(const NVDevEvoRec *pDevEvo)

View File

@@ -42,7 +42,7 @@ void nvInitFlipEvoHwState(
NvBool nvUpdateFlipEvoHwState(
const struct NvKmsPerOpenDev *pOpenDev,
const NVDevEvoRec *pDevEvo,
NVDevEvoRec *pDevEvo,
const NvU32 sd,
const NvU32 head,
const struct NvKmsFlipCommonParams *pParams,

View File

@@ -30,21 +30,20 @@ extern "C" {
#include "nvkms-types.h"
NvBool nvSetTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head);
void nvRefTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head);
void nvUnrefTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head);
NvBool nvSetTmoLutSurfaceEvo(NVDevEvoPtr pDevEvo,
NVFlipChannelEvoHwState *pHwState);
void nvFreeUnrefedTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head);
void nvInvalidateDefaultLut(NVDevEvoPtr pDevEvo);
NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo);
void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo);
void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo,
void nvUploadDataToLutSurfaceEvo(NVSurfaceEvoPtr pSurfEvo,
const NVEvoLutDataRec *pLUTBuffer,
NVDispEvoPtr pDispEvo);

View File

@@ -51,7 +51,7 @@ typedef struct {
} NVProposedModeSetStateOneApiHead;
typedef struct {
NvU8 tilePosition;
NvU8 mergeHeadSection;
NVHwModeTimingsEvo timings;
NVConnectorEvoRec *pConnectorEvo;
HDMI_FRL_CONFIG hdmiFrlConfig;

View File

@@ -65,7 +65,8 @@ NVVBlankCallbackPtr
nvApiHeadRegisterVBlankCallback(NVDispEvoPtr pDispEvo,
const NvU32 apiHead,
NVVBlankCallbackProc pCallback,
void *pUserData);
void *pUserData,
NvU8 listIndex);
void nvApiHeadUnregisterVBlankCallback(NVDispEvoPtr pDispEvo,
NVVBlankCallbackPtr pCallback);

View File

@@ -43,6 +43,7 @@ enum NVPreallocType {
PREALLOC_TYPE_VALIDATE_MODE_IMP_OUT_HW_MODE_TIMINGS,
PREALLOC_TYPE_VALIDATE_MODE_TMP_USAGE_BOUNDS,
PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS,
PREALLOC_TYPE_SET_LUT_WORK_AREA,
PREALLOC_TYPE_MAX
};

View File

@@ -33,6 +33,8 @@ extern "C" {
struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen,
NVDevEvoPtr pDevEvo, NvBool isPrivileged);
void nvRevokeDevice(NVDevEvoPtr pDevEvo);
void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen,
struct NvKmsPerOpenDev *pOpenDev);
@@ -71,6 +73,8 @@ const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst(
void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32);
void nvKmsOrphanVblankSemControlForAllOpens(NVDispEvoRec *pDispEvo);
#ifdef __cplusplus
};
#endif

View File

@@ -112,6 +112,7 @@ NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo,
NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd);
NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo);
void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo);
void nvRmUnmapFbConsoleMemory(NVDevEvoPtr pDevEvo);
NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo,
NVEvoDmaPtr pDma,
NvU64 limit,

View File

@@ -0,0 +1,36 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_SETLUT_WORKAREA_H__
#define __NVKMS_SETLUT_WORKAREA_H__
#include "nvkms-types.h"
struct NvKmsSetLutWorkArea {
struct {
NVFlipEvoHwState newState;
NVFlipEvoHwState oldState;
} head[NVKMS_MAX_HEADS_PER_DISP];
};
#endif

View File

@@ -77,21 +77,9 @@ void nvEvoUnregisterDeferredRequestFifo(
NVDevEvoPtr pDevEvo,
NVDeferredRequestFifoRec *pDeferredRequestFifo);
NVVblankSemControl *nvEvoEnableVblankSemControl(
NVDevEvoRec *pDevEvo,
NVDispEvoRec *pDispEvo,
NvU32 apiHeadMask,
NVSurfaceEvoRec *pSurfaceEvo,
NvU64 surfaceOffset);
NvBool nvEvoDisableVblankSemControl(
NVDevEvoRec *pDevEvo,
NVVblankSemControl *pVblankSemControl);
NvBool nvEvoAccelVblankSemControls(
NvBool nvEvoCpuMapSurface(
NVDevEvoPtr pDevEvo,
NvU32 dispIndex,
NvU32 hwHeadMask);
NVSurfaceEvoPtr pSurfaceEvo);
static inline NvBool nvEvoIsSurfaceOwner(const NVSurfaceEvoRec *pSurfaceEvo,
const struct NvKmsPerOpenDev *pOpenDev,

View File

@@ -56,6 +56,7 @@ extern "C" {
#include "nvidia-push-init.h"
#include "timing/nvtiming.h"
#include "timing/dpsdp.h"
#include "timing/nvt_dsc_pps.h"
#include "hdmipacket/nvhdmi_frlInterface.h" // HDMI_{SRC,SINK}_CAPS
@@ -143,7 +144,6 @@ typedef struct _NVParsedEdidEvoRec *NVParsedEdidEvoPtr;
typedef struct _NVVBlankCallbackRec *NVVBlankCallbackPtr;
typedef struct _NVRgLine1CallbackRec *NVRgLine1CallbackPtr;
typedef struct _NVDpyEvoRec *NVDpyEvoPtr;
typedef struct _NVLutSurfaceEvo *NVLutSurfaceEvoPtr;
typedef struct _NVFrameLockEvo *NVFrameLockEvoPtr;
typedef struct _NVEvoInfoString *NVEvoInfoStringPtr;
typedef struct _NVSurfaceEvoRec NVSurfaceEvoRec, *NVSurfaceEvoPtr;
@@ -292,12 +292,14 @@ typedef struct {
NvBool usable;
NvBool csc0MatricesPresent;
NvBool cscLUTsPresent;
NvBool csc1MatricesPresent;
NvBool csc10MatrixPresent;
NvBool csc11MatrixPresent;
NvBool tmoPresent;
NVEvoScalerCaps scalerCaps;
} NVEvoWindowCaps;
#define NV_EVO_NUM_WINDOW_CAPS 32
typedef NvU64 NVEvoChannelMask;
#define NV_EVO_CHANNEL_MASK_CORE 0:0
@@ -511,6 +513,7 @@ typedef struct _NVEvoChannel {
struct {
NvBool enabled;
NvBool clientSpecified;
NvU32 srcMaxLum;
NvU32 targetMaxLums[NVKMS_MAX_SUBDEVICES];
} tmoParams;
@@ -609,7 +612,11 @@ typedef struct {
} NVFlipSyncObjectEvoHwState;
typedef struct {
NVLutSurfaceEvoPtr pLutSurfaceEvo;
NVSurfaceEvoPtr pLutSurfaceEvo;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
NvBool fromOverride;
} NVFlipLutHwState;
typedef struct {
@@ -678,6 +685,26 @@ typedef struct {
*/
NvU16 horizontal;
} maxDownscaleFactors;
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
} csc00Override;
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
} csc01Override;
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
} csc10Override;
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
} csc11Override;
} NVFlipChannelEvoHwState;
typedef struct {
@@ -694,6 +721,9 @@ typedef struct {
struct NvKmsHDRStaticMetadata staticMetadata;
} hdrInfoFrame;
NVFlipLutHwState outputLut;
NvU32 olutFpNormScale;
NvBool skipLayerPendingFlips[NVKMS_MAX_LAYERS_PER_HEAD];
struct {
@@ -702,6 +732,7 @@ typedef struct {
NvBool cursorPosition : 1;
NvBool tf : 1;
NvBool hdrStaticMetadata : 1;
NvBool olut : 1;
NvBool layerPosition[NVKMS_MAX_LAYERS_PER_HEAD];
NvBool layerSyncObjects[NVKMS_MAX_LAYERS_PER_HEAD];
@@ -724,6 +755,8 @@ typedef struct _NVEvoSubDevHeadStateRec {
struct NvKmsPoint viewPortPointIn;
NVFlipCursorEvoHwState cursor;
NVFlipChannelEvoHwState layer[NVKMS_MAX_LAYERS_PER_HEAD];
NVFlipLutHwState outputLut;
NvU32 olutFpNormScale;
// Current usage bounds programmed into the hardware.
struct NvKmsUsageBounds usage;
// Usage bounds required after the last scheduled flip completes.
@@ -867,6 +900,7 @@ typedef struct {
typedef struct {
NvBool supportsDP13 :1;
NvBool supportsHDMI20 :1;
NvBool supportsYUV2020 :1;
NvBool inputLutAppliesToBase :1;
NvU8 validNIsoFormatMask;
NvU32 maxPitchValue;
@@ -878,6 +912,7 @@ typedef struct {
struct NvKmsCompositionCapabilities cursorCompositionCaps;
NvU16 validLayerRRTransforms;
struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD];
struct NvKmsLUTCaps olut;
NvU8 legacyNotifierFormatSizeBytes[NVKMS_MAX_LAYERS_PER_HEAD];
NvU8 dpYCbCr422MaxBpc;
NvU8 hdmiYCbCr422MaxBpc;
@@ -914,6 +949,12 @@ typedef struct _NVEvoSubDeviceRec {
NvBool isBaseSurfSpecified[NVKMS_MAX_HEADS_PER_DISP];
enum NvKmsSurfaceMemoryFormat baseSurfFormat[NVKMS_MAX_HEADS_PER_DISP];
/* EVO2 only, base and output LUT state - prevents unnecessary flip interlocking */
const NVSurfaceEvoRec *pBaseLutSurface[NVKMS_MAX_HEADS_PER_DISP];
NvU64 baseLutOffset[NVKMS_MAX_HEADS_PER_DISP];
const NVSurfaceEvoRec *pOutputLutSurface[NVKMS_MAX_HEADS_PER_DISP];
NvU64 outputLutOffset[NVKMS_MAX_HEADS_PER_DISP];
/* Composition parameters considered for hardware programming by EVO2 hal */
struct {
NvBool initialized;
@@ -1202,7 +1243,7 @@ typedef struct _NVEvoDevRec {
*/
struct {
struct {
NVLutSurfaceEvoPtr LUT[3];
NVSurfaceEvoPtr LUT[3];
struct {
NvBool waitForPreviousUpdate;
NvBool curBaseLutEnabled;
@@ -1211,7 +1252,7 @@ typedef struct _NVEvoDevRec {
nvkms_timer_handle_t *updateTimer;
} disp[NVKMS_MAX_SUBDEVICES];
} apiHead[NVKMS_MAX_HEADS_PER_DISP];
NVLutSurfaceEvoPtr defaultLut;
NVSurfaceEvoPtr defaultLut;
enum NvKmsLUTState defaultBaseLUTState[NVKMS_MAX_SUBDEVICES];
enum NvKmsLUTState defaultOutputLUTState[NVKMS_MAX_SUBDEVICES];
@@ -1820,17 +1861,18 @@ typedef struct _NVDispHeadStateEvoRec {
} hdrInfoFrame;
struct {
NVLutSurfaceEvoPtr pCurrSurface;
NVSurfaceEvoPtr pCurrSurface;
NvBool outputLutEnabled : 1;
NvBool baseLutEnabled : 1;
} lut;
/*
* The api head can be mapped onto the N harware heads, a frame presented
* by the api head gets split horizontally into N tiles, 'tilePosition'
* describe the tile presented by this hardware head.
* by the api head gets split horizontally into N sections,
* 'mergeHeadSection' describe the section presented by this hardware
* head.
*/
NvU8 tilePosition;
NvU8 mergeHeadSection;
NVEvoMergeMode mergeMode;
/*
@@ -1897,8 +1939,20 @@ typedef struct _NVDispApiHeadStateEvoRec {
NVDispFlipOccurredEventDataEvoRec data;
} flipOccurredEvent[NVKMS_MAX_LAYERS_PER_HEAD];
NvU64 vblankCount;
NvU32 rmVBlankCallbackHandle;
NVListRec vblankCallbackList;
/*
* All entries in vblankCallbackList[0] get called before any entries in
* vblankCallbackList[1].
*/
NVListRec vblankCallbackList[2];
struct {
NVListRec list;
NVVBlankCallbackPtr pCallbackPtr;
} vblankSemControl;
NvBool hs10bpcHint : 1;
} NVDispApiHeadStateEvoRec;
@@ -2267,6 +2321,9 @@ static inline NvBool NV_EVO_LOCK_PIN_IS_INTERNAL(NvU32 n)
#define FOR_ALL_EVO_DEVS(_pDevEvo) \
nvListForEachEntry(_pDevEvo, &nvEvoGlobal.devList, devListEntry)
#define FOR_ALL_EVO_DEVS_SAFE(_pDevEvo, _pDevEvo_tmp) \
nvListForEachEntry_safe(_pDevEvo, _pDevEvo_tmp, &nvEvoGlobal.devList, devListEntry)
#define FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP( \
_pSwapGroup, _pDeferredRequestFifo) \
nvListForEachEntry((_pDeferredRequestFifo), \
@@ -2444,24 +2501,6 @@ static inline NvBool nvIs3DVisionStereoEvo(const enum NvKmsStereoMode stereo)
(_head)++) \
if ((_headMask) & (1 << (_head)))
typedef struct _NVLutSurfaceEvo {
NVDevEvoPtr pDevEvo;
NvU32 handle;
NvU32 size;
NVSurfaceDescriptor surfaceDesc;
NvU32 allocRefCnt; /* Only used for dynamically allocated LUTs */
NvU64 gpuAddress;
void *subDeviceAddress[NVKMS_MAX_SUBDEVICES];
/* Keep track of prefetched surfaces. */
NvU32 difrLastPrefetchPass;
} NVLutSurfaceEvoRec;
typedef struct _NVFrameLockEvo {
NVListRec frameLockListEntry;
@@ -2919,11 +2958,9 @@ typedef const struct _nv_evo_hal {
const NvU16 *green,
const NvU16 *blue,
int nColorMapEntries, int depth);
void (*SetLUTContextDma) (const NVDispEvoRec *pDispEvo,
const int head,
NVLutSurfaceEvoPtr pLutSurfEvo,
NvBool enableBaseLut,
NvBool enableOutputLut,
void (*SetOutputLut) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
const NVFlipLutHwState *pOutputLut,
NvU32 fpNormScale,
NVEvoUpdateState *updateState,
NvBool bypassComposition);
void (*SetOutputScaler) (const NVDispEvoRec *pDispEvo, const NvU32 head,
@@ -3179,11 +3216,18 @@ static inline void nvAssignHwHeadsMaskApiHeadState(
nvPopCount32(hwHeadsMask);
}
typedef struct _NVVblankSemControlHeadEntryRec {
NVListRec listEntry;
NvU32 previousRequestCounter;
NvU64 previousVblankCount;
struct NvKmsVblankSemControlDataOneHead *pDataOneHead;
} NVVblankSemControlHeadEntry;
typedef struct _NVVblankSemControl {
NvU32 dispIndex;
NvU32 hwHeadMask;
NvU64 surfaceOffset;
NvU32 apiHeadMask;
NVSurfaceEvoRec *pSurfaceEvo;
NVVblankSemControlHeadEntry headEntry[NV_MAX_HEADS];
} NVVblankSemControl;
#ifdef __cplusplus

View File

@@ -0,0 +1,49 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_VBLANK_SEM_CONTROL_H__
#define __NVKMS_VBLANK_SEM_CONTROL_H__
#include "nvkms-types.h"
NVVblankSemControl *nvEvoEnableVblankSemControl(
NVDevEvoRec *pDevEvo,
NVDispEvoRec *pDispEvo,
NvU32 apiHeadMask,
NVSurfaceEvoRec *pSurfaceEvo,
NvU64 surfaceOffset);
NvBool nvEvoDisableVblankSemControl(
NVDevEvoRec *pDevEvo,
NVVblankSemControl *pVblankSemControl);
NvBool nvEvoAccelVblankSemControls(
NVDevEvoPtr pDevEvo,
NVDispEvoRec *pDispEvo,
NvU32 apiHeadMask);
void nvEvoOrphanVblankSemControl(
NVDispEvoRec *pDispEvo,
NVVblankSemControl *pVblankSemControl);
#endif /* __NVKMS_VBLANK_SEM_CONTROL_H__ */

View File

@@ -50,6 +50,8 @@
#define NVKMS_LOG2_LUT_ARRAY_SIZE 10
#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE)
#define NVKMS_OLUT_FP_NORM_SCALE_DEFAULT 0xffffffff
typedef NvU32 NvKmsDeviceHandle;
typedef NvU32 NvKmsDispHandle;
typedef NvU32 NvKmsConnectorHandle;
@@ -245,6 +247,80 @@ struct NvKmsLutRamps {
NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */
};
/* Datatypes for LUT capabilities */
enum NvKmsLUTFormat {
/*
* Normalized fixed-point format mapping [0, 1] to [0x0, 0xFFFF].
*/
NVKMS_LUT_FORMAT_UNORM16,
/*
* Half-precision floating point.
*/
NVKMS_LUT_FORMAT_FP16,
/*
* 14-bit fixed-point format required to work around hardware bug 813188.
*
* To convert from UNORM16 to UNORM14_WAR_813188:
* unorm14_war_813188 = ((unorm16 >> 2) & ~7) + 0x6000
*/
NVKMS_LUT_FORMAT_UNORM14_WAR_813188
};
enum NvKmsLUTVssSupport {
NVKMS_LUT_VSS_NOT_SUPPORTED,
NVKMS_LUT_VSS_SUPPORTED,
NVKMS_LUT_VSS_REQUIRED,
};
enum NvKmsLUTVssType {
NVKMS_LUT_VSS_TYPE_NONE,
NVKMS_LUT_VSS_TYPE_LINEAR,
NVKMS_LUT_VSS_TYPE_LOGARITHMIC,
};
struct NvKmsLUTCaps {
/*! Whether this layer or head on this device supports this LUT stage. */
NvBool supported;
/*! Whether this LUT supports VSS. */
enum NvKmsLUTVssSupport vssSupport;
/*!
* The type of VSS segmenting this LUT uses.
*/
enum NvKmsLUTVssType vssType;
/*!
* Expected number of VSS segments.
*/
NvU32 vssSegments;
/*!
* Expected number of LUT entries.
*/
NvU32 lutEntries;
/*!
* Format for each of the LUT entries.
*/
enum NvKmsLUTFormat entryFormat;
};
/* each LUT entry uses this many bytes */
#define NVKMS_LUT_CAPS_LUT_ENTRY_SIZE (4 * sizeof(NvU16))
/* if the LUT surface uses VSS, size of the VSS header */
#define NVKMS_LUT_VSS_HEADER_SIZE (4 * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE)
struct NvKmsLUTSurfaceParams {
NvKmsSurfaceHandle surfaceHandle;
NvU64 offset NV_ALIGN_BYTES(8);
NvU32 vssSegments;
NvU32 lutEntries;
};
/*
* A 3x4 row-major colorspace conversion matrix.
*
@@ -463,6 +539,10 @@ struct NvKmsLayerCapabilities {
* still expected to honor the NvKmsUsageBounds for each head.
*/
NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
/* Capabilities for each LUT stage in the EVO3 precomp pipeline. */
struct NvKmsLUTCaps ilut;
struct NvKmsLUTCaps tmo;
};
/*!
@@ -683,4 +763,20 @@ struct NvKmsSuperframeInfo {
} view[NVKMS_MAX_SUPERFRAME_VIEWS];
};
/* Fields within NvKmsVblankSemControlDataOneHead::flags */
#define NVKMS_VBLANK_SEM_CONTROL_SWAP_INTERVAL 15:0
struct NvKmsVblankSemControlDataOneHead {
NvU32 requestCounterAccel;
NvU32 requestCounter;
NvU32 flags;
NvU32 semaphore;
NvU64 vblankCount NV_ALIGN_BYTES(8);
};
struct NvKmsVblankSemControlData {
struct NvKmsVblankSemControlDataOneHead head[NV_MAX_HEADS];
};
#endif /* NVKMS_API_TYPES_H */

View File

@@ -272,6 +272,7 @@ enum NvKmsIoctlCommand {
NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL,
NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS,
NVKMS_IOCTL_VRR_SIGNAL_SEMAPHORE,
NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED,
};
@@ -751,6 +752,17 @@ struct NvKmsFlipCommonParams {
*/
struct NvKmsSetLutCommonParams lut;
struct {
NvBool specified;
NvBool enabled;
struct NvKmsLUTSurfaceParams lut;
} olut;
struct {
NvBool specified;
NvU32 val;
} olutFpNormScale;
struct {
NvBool specified;
/*!
@@ -886,6 +898,18 @@ struct NvKmsFlipCommonParams {
NvBool specified;
} compositionParams;
struct {
NvBool specified;
NvBool enabled;
struct NvKmsLUTSurfaceParams lut;
} ilut;
struct {
NvBool specified;
NvBool enabled;
struct NvKmsLUTSurfaceParams lut;
} tmo;
/*
* Color-space conversion matrix applied to the layer before
* compositing.
@@ -946,6 +970,34 @@ struct NvKmsFlipCommonParams {
enum NvKmsInputColorSpace val;
NvBool specified;
} colorSpace;
/* When enabled, explicitly set CSC00 with provided matrix */
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
NvBool specified;
} csc00Override;
/* When enabled, explicitly set CSC01 with provided matrix */
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
NvBool specified;
} csc01Override;
/* When enabled, explicitly set CSC10 with provided matrix */
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
NvBool specified;
} csc10Override;
/* When enabled, explicitly set CSC11 with provided matrix */
struct {
struct NvKmsCscMatrix matrix;
NvBool enabled;
NvBool specified;
} csc11Override;
} layer[NVKMS_MAX_LAYERS_PER_HEAD];
};
@@ -1146,6 +1198,11 @@ struct NvKmsAllocDeviceReply {
*/
struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD];
/*!
* Describes supported functionalities for the output LUT on each head
*/
struct NvKmsLUTCaps olutCaps;
/*!
* This bitmask specifies all of the (rotation, reflectionX, reflectionY)
* combinations that are supported for the main and overlay layers.
@@ -4133,11 +4190,74 @@ struct NvKmsSetFlipLockGroupParams {
* NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL
* NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS
*
* Enable or disable vblank semaphore control for the given heads using the
* specified surface and surface offset. The memory at that location is
* interpreted as an NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_DATA. See the
* RMAPI documentation for NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_DATA for
* details of the semantics of that interface.
* The VBlank Semaphore Control API ("VBlank Sem Control") allows clients to
* register for a semaphore release to be performed on the specified system
* memory.
*
* One or more clients may register a memory allocation + offset by specifying
* an NvKmsSurfaceHandle and offset within that surface. Until the
* vblank_sem_control is disabled, during each vblank on the specified heads,
* nvkms will interpret the specified memory location as an
* NvKmsVblankSemControlData data structure. Each enabled head will inspect the
* corresponding NvKmsVblankSemControlDataOneHead at
* NvKmsVblankSemControlData::head[head].
*
* NvKmsEnableVblankSemControlRequest::surfaceOffset must be a multiple of 8, so
* that GPU semaphore releases can write to 8-byte fields within
* NvKmsVblankSemControlDataOneHead with natural alignment.
*
* During vblank, the NvKmsVblankSemControlDataOneHead::requestCounter field
* will be read, and the following pseudocode will be performed:
*
* swapInterval = DRF_VAL(data->flags)
*
* if (data->requestCounter == prevRequestCounter)
* return
*
* if (currentVblankCount < (prevVBlankCount + swapInterval))
* return
*
* data->vblankCount = currentVblankCount
* data->semaphore = data->requestCounter
*
* prevRequestCounter = data->requestCounter
* previousVblankCount = currentVblankCount
*
* I.e., if the client-described conditions are met, nvkms will write
* NvKmsVblankSemControlDataOneHead::semaphore to the client-requested
* 'requestCounter' along with the vblankCount.
*
* The intent is for clients to use semaphore releases to write:
*
* NvKmsVblankSemControlDataOneHead::swapInterval
* NvKmsVblankSemControlDataOneHead::requestCounter
*
* and then perform a semaphore acquire on
* NvKmsVblankSemControlDataOneHead::semaphore >= requestCounter (using the
* ACQ_GEQ semaphore operation). This will block any following methods in the
* client's channel (e.g., a blit) until the requested conditions are met. Note
* the ::requestCounter should be written last, because the change in value of
* ::requestCounter is what causes nvkms, during a vblank callback, to inspect
* the other fields.
*
* Additionally, clients should use the CPU (not semaphore releases in their
* channel) to write the field
* NvKmsVblankSemControlDataOneHead::requestCounterAccel at the same time that
* they enqueue the semaphore release to write to
* NvKmsVblankSemControlDataOneHead::requestCounter. ::requestCounterAccel will
* be used by nvkms to "accelerate" the vblank sem control by copying the value
* from ::requestCounterAccel to ::semaphore. This will be done when the vblank
* sem control is disabled, and when a client calls
* NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS. It is important for nvkms to have
* access to the value in ::requestCounterAccel, and not just ::requestCounter.
* The latter is only the last value released so far by the client's channel
* (further releases to ::requestCounter may still be inflight, perhaps blocked
* on pending semaphore acquires). The former should be the most recent value
* enqueued in the channel. This is also why it is important for clients to
* acquire with ACQ_GEQ (greater-than-or-equal-to), rather than just ACQUIRE.
*
* The same NvKmsSurfaceHandle (with different surfaceOffsets) may be used by
* multiple VBlank Sem Controls.
*
* It is the responsibility of the nvkms client(s) to coordinate at modeset
* time: the mapping of nvkms apiHeads to underlying hwHeads may change during a
@@ -4150,7 +4270,7 @@ struct NvKmsSetFlipLockGroupParams {
* NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS can be used, specifying a particular
* set of heads, to set all vblank sem controls on those heads to have their
* semaphore set to the value in their respective
* NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_DATA::requestCounterAccel fields.
* NvKmsVblankSemControlDataOneHead::requestCounterAccel fields.
*
* These ioctls are only available when
* NvKmsAllocDeviceReply::supportsVblankSemControl is true.
@@ -4224,4 +4344,27 @@ struct NvKmsVrrSignalSemaphoreParams {
struct NvKmsVrrSignalSemaphoreReply reply; /*! out */
};
/*
* NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED
*
* Notify NVKMS that the calling client has disabled the framebuffer console.
* NVKMS will free the framebuffer console reserved memory and disable
* NVKMS-based console restore.
*
* This IOCTL can only be used by kernel-mode clients.
*/
struct NvKmsFramebufferConsoleDisabledRequest {
NvKmsDeviceHandle deviceHandle;
};
struct NvKmsFramebufferConsoleDisabledReply {
NvU32 padding;
};
struct NvKmsFramebufferConsoleDisabledParams {
struct NvKmsFramebufferConsoleDisabledRequest request;
struct NvKmsFramebufferConsoleDisabledReply reply;
};
#endif /* NVKMS_API_H */

View File

@@ -124,6 +124,8 @@ struct NvKmsKapiDevice {
NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX];
NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX];
struct NvKmsKapiLutCaps lutCaps;
NvU32 numHeads;
NvU32 numLayers[NVKMS_KAPI_MAX_HEADS];

View File

@@ -124,6 +124,14 @@ struct NvKmsKapiDisplayMode {
#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff
#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0
struct NvKmsKapiLutCaps {
struct {
struct NvKmsLUTCaps ilut;
struct NvKmsLUTCaps tmo;
} layer[NVKMS_KAPI_LAYER_MAX];
struct NvKmsLUTCaps olut;
};
struct NvKmsKapiDeviceResourcesInfo {
NvU32 numHeads;
@@ -169,6 +177,8 @@ struct NvKmsKapiDeviceResourcesInfo {
NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX];
NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX];
struct NvKmsKapiLutCaps lutCaps;
};
#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType))
@@ -262,21 +272,54 @@ struct NvKmsKapiLayerConfig {
NvU16 dstWidth, dstHeight;
enum NvKmsInputColorSpace inputColorSpace;
struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} ilut;
struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} tmo;
struct NvKmsCscMatrix csc;
NvBool cscUseMain;
struct {
struct NvKmsCscMatrix lmsCtm;
struct NvKmsCscMatrix lmsToItpCtm;
struct NvKmsCscMatrix itpToLmsCtm;
struct NvKmsCscMatrix blendCtm;
struct {
NvBool lmsCtm : 1;
NvBool lmsToItpCtm : 1;
NvBool itpToLmsCtm : 1;
NvBool blendCtm : 1;
} enabled;
} matrixOverrides;
};
struct NvKmsKapiLayerRequestedConfig {
struct NvKmsKapiLayerConfig config;
struct {
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool cscChanged : 1;
NvBool tfChanged : 1;
NvBool hdrMetadataChanged : 1;
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool cscChanged : 1;
NvBool tfChanged : 1;
NvBool hdrMetadataChanged : 1;
NvBool matrixOverridesChanged : 1;
NvBool ilutChanged : 1;
NvBool tmoChanged : 1;
} flags;
};
@@ -342,18 +385,30 @@ struct NvKmsKapiHeadModeSetConfig {
struct NvKmsLutRamps *pRamps;
} output;
} lut;
struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} olut;
NvU32 olutFpNormScale;
};
struct NvKmsKapiHeadRequestedConfig {
struct NvKmsKapiHeadModeSetConfig modeSetConfig;
struct {
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool hdrInfoFrameChanged : 1;
NvBool colorimetryChanged : 1;
NvBool ilutChanged : 1;
NvBool olutChanged : 1;
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool hdrInfoFrameChanged : 1;
NvBool colorimetryChanged : 1;
NvBool legacyIlutChanged : 1;
NvBool legacyOlutChanged : 1;
NvBool olutChanged : 1;
NvBool olutFpNormScaleChanged : 1;
} flags;
struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig;
@@ -1172,21 +1227,6 @@ struct NvKmsKapiFunctionsTable {
NvU64 *pPages
);
/*!
* Check if this memory object can be scanned out for display.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] memory The memory object to check for display support.
*
* \return NV_TRUE if this memory can be displayed, NV_FALSE if not.
*/
NvBool (*isMemoryValidForDisplay)
(
const struct NvKmsKapiDevice *device,
const struct NvKmsKapiMemory *memory
);
/*
* Import SGT as a memory handle.
*
@@ -1504,6 +1544,16 @@ struct NvKmsKapiFunctionsTable {
struct NvKmsKapiDevice *device,
NvS32 index
);
/*
* Notify NVKMS that the system's framebuffer console has been disabled and
* the reserved allocation for the old framebuffer console can be unmapped.
*/
void
(*framebufferConsoleDisabled)
(
struct NvKmsKapiDevice *device
);
};
/** @} */
@@ -1518,6 +1568,20 @@ NvBool nvKmsKapiGetFunctionsTable
struct NvKmsKapiFunctionsTable *funcsTable
);
NvU32 nvKmsKapiF16ToF32(NvU16 a);
NvU16 nvKmsKapiF32ToF16(NvU32 a);
NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact);
NvU32 nvKmsKapiUI32ToF32(NvU32 a);
/** @} */
#endif /* defined(__NVKMS_KAPI_H__) */

View File

@@ -30,6 +30,9 @@
#include "nvkms-rmapi.h"
#include "nvkms-vrr.h"
#include "nvkms-softfloat.h"
#include "nv-float.h"
#include "nvkms-kapi.h"
#include "nvkms-kapi-private.h"
#include "nvkms-kapi-internal.h"
@@ -401,7 +404,13 @@ static NvBool KmsAllocateDevice(struct NvKmsKapiDevice *device)
device->supportedSurfaceMemoryFormats[layer] =
paramsAlloc->reply.layerCaps[layer].supportedSurfaceMemoryFormats;
device->supportsICtCp[layer] = paramsAlloc->reply.layerCaps[layer].supportsICtCp;
device->lutCaps.layer[layer].ilut =
paramsAlloc->reply.layerCaps[layer].ilut;
device->lutCaps.layer[layer].tmo =
paramsAlloc->reply.layerCaps[layer].tmo;
}
device->lutCaps.olut = paramsAlloc->reply.olutCaps;
if (paramsAlloc->reply.validNIsoFormatMask &
(1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
@@ -1120,6 +1129,9 @@ static NvBool GetDeviceResourcesInfo
nvkms_memcpy(info->supportsICtCp,
device->supportsICtCp,
sizeof(device->supportsICtCp));
info->lutCaps = device->lutCaps;
done:
return status;
@@ -1909,57 +1921,6 @@ static NvBool GetMemoryPages
return NV_TRUE;
}
/*
* Check if the memory we are creating this framebuffer with is valid. We
* cannot scan out sysmem or compressed buffers.
*
* If we cannot use this memory for display it may be resident in sysmem
* or may belong to another GPU.
*/
static NvBool IsMemoryValidForDisplay
(
const struct NvKmsKapiDevice *device,
const struct NvKmsKapiMemory *memory
)
{
NV_STATUS status;
NV0041_CTRL_SURFACE_INFO surfaceInfo = {};
NV0041_CTRL_GET_SURFACE_INFO_PARAMS surfaceInfoParams = {};
if (device == NULL || memory == NULL) {
return NV_FALSE;
}
/*
* Don't do these checks on tegra. Tegra has different capabilities.
* Here we always say display is possible so we never fail framebuffer
* creation.
*/
if (device->isSOC) {
return NV_TRUE;
}
/* Get the type of address space this memory is in, i.e. vidmem or sysmem */
surfaceInfo.index = NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE;
surfaceInfoParams.surfaceInfoListSize = 1;
surfaceInfoParams.surfaceInfoList = (NvP64)&surfaceInfo;
status = nvRmApiControl(device->hRmClient,
memory->hRmHandle,
NV0041_CTRL_CMD_GET_SURFACE_INFO,
&surfaceInfoParams,
sizeof(surfaceInfoParams));
if (status != NV_OK) {
nvKmsKapiLogDeviceDebug(device,
"Failed to get memory location of RM memory object 0x%x",
memory->hRmHandle);
return NV_FALSE;
}
return surfaceInfo.data == NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM;
}
static void FreeMemoryPages
(
NvU64 *pPages
@@ -2557,6 +2518,53 @@ static void AssignHDRMetadataConfig(
}
}
static void AssignLayerLutConfig(
const struct NvKmsKapiDevice *device,
const struct NvKmsKapiLayerConfig *layerConfig,
const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
const NvU32 layer,
struct NvKmsFlipCommonParams *params,
NvBool bFromKmsSetMode)
{
if ((device->lutCaps.layer[layer].ilut.supported) &&
(layerRequestedConfig->flags.ilutChanged || bFromKmsSetMode)) {
params->layer[layer].ilut.specified = TRUE;
params->layer[layer].ilut.enabled = layerConfig->ilut.enabled;
if (layerConfig->ilut.lutSurface != NULL) {
params->layer[layer].ilut.lut.surfaceHandle =
layerConfig->ilut.lutSurface->hKmsHandle;
} else {
params->layer[layer].ilut.lut.surfaceHandle = 0;
}
params->layer[layer].ilut.lut.offset = layerConfig->ilut.offset;
params->layer[layer].ilut.lut.vssSegments =
layerConfig->ilut.vssSegments;
params->layer[layer].ilut.lut.lutEntries =
layerConfig->ilut.lutEntries;
}
if ((device->lutCaps.layer[layer].tmo.supported) &&
(layerRequestedConfig->flags.tmoChanged || bFromKmsSetMode)) {
params->layer[layer].tmo.specified = TRUE;
params->layer[layer].tmo.enabled = layerConfig->tmo.enabled;
if (layerConfig->tmo.lutSurface != NULL) {
params->layer[layer].tmo.lut.surfaceHandle =
layerConfig->tmo.lutSurface->hKmsHandle;
} else {
params->layer[layer].tmo.lut.surfaceHandle = 0;
}
params->layer[layer].tmo.lut.offset = layerConfig->tmo.offset;
params->layer[layer].tmo.lut.vssSegments =
layerConfig->tmo.vssSegments;
params->layer[layer].tmo.lut.lutEntries =
layerConfig->tmo.lutEntries;
}
}
static void NvKmsKapiCursorConfigToKms(
const struct NvKmsKapiCursorRequestedConfig *requestedConfig,
struct NvKmsFlipCommonParams *params,
@@ -2626,12 +2634,21 @@ static NvBool NvKmsKapiOverlayLayerConfigToKms(
params->layer[layer].colorSpace.specified = TRUE;
}
if (layerRequestedConfig->flags.cscChanged || bFromKmsSetMode) {
if (layerRequestedConfig->flags.cscChanged ||
layerRequestedConfig->flags.matrixOverridesChanged ||
bFromKmsSetMode) {
params->layer[layer].csc.specified = NV_TRUE;
params->layer[layer].csc.useMain = layerConfig->cscUseMain;
if (!layerConfig->cscUseMain) {
params->layer[layer].csc.matrix = layerConfig->csc;
}
// 'blendCtm' overrides 'csc', but provides a 3x4 matrix.
if (layerConfig->matrixOverrides.enabled.blendCtm) {
params->layer[layer].csc.useMain = FALSE;
params->layer[layer].csc.matrix =
layerConfig->matrixOverrides.blendCtm;
}
}
if (layerRequestedConfig->flags.srcWHChanged || bFromKmsSetMode) {
@@ -2656,6 +2673,51 @@ static NvBool NvKmsKapiOverlayLayerConfigToKms(
AssignHDRMetadataConfig(layerConfig, layerRequestedConfig, layer,
params, bFromKmsSetMode);
if (layerRequestedConfig->flags.matrixOverridesChanged || bFromKmsSetMode) {
// 'lmsCtm' explicitly provides a matrix to program CSC00.
if (layerConfig->matrixOverrides.enabled.lmsCtm) {
params->layer[layer].csc00Override.matrix =
layerConfig->matrixOverrides.lmsCtm;
params->layer[layer].csc00Override.enabled = TRUE;
} else {
params->layer[layer].csc00Override.enabled = FALSE;
}
params->layer[layer].csc00Override.specified = TRUE;
// 'lmsToItpCtm' explicitly provides a matrix to program CSC01.
if (layerConfig->matrixOverrides.enabled.lmsToItpCtm) {
params->layer[layer].csc01Override.matrix =
layerConfig->matrixOverrides.lmsToItpCtm;
params->layer[layer].csc01Override.enabled = TRUE;
} else {
params->layer[layer].csc01Override.enabled = FALSE;
}
params->layer[layer].csc01Override.specified = TRUE;
// 'itpToLmsCtm' explicitly provides a matrix to program CSC10.
if (layerConfig->matrixOverrides.enabled.itpToLmsCtm) {
params->layer[layer].csc10Override.matrix =
layerConfig->matrixOverrides.itpToLmsCtm;
params->layer[layer].csc10Override.enabled = TRUE;
} else {
params->layer[layer].csc10Override.enabled = FALSE;
}
params->layer[layer].csc10Override.specified = TRUE;
// 'blendCtm' explicitly provides a matrix to program CSC11.
if (layerConfig->matrixOverrides.enabled.blendCtm) {
params->layer[layer].csc11Override.matrix =
layerConfig->matrixOverrides.blendCtm;
params->layer[layer].csc11Override.enabled = TRUE;
} else {
params->layer[layer].csc11Override.enabled = FALSE;
}
params->layer[layer].csc11Override.specified = TRUE;
}
AssignLayerLutConfig(device, layerConfig, layerRequestedConfig, layer,
params, bFromKmsSetMode);
if (commit) {
NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX(
device->layerState[head][layer].
@@ -2754,19 +2816,74 @@ static NvBool NvKmsKapiPrimaryLayerConfigToKms(
changed = TRUE;
}
if (layerRequestedConfig->flags.cscChanged || bFromKmsSetMode) {
if (layerRequestedConfig->flags.cscChanged ||
layerRequestedConfig->flags.matrixOverridesChanged ||
bFromKmsSetMode) {
nvAssert(!layerConfig->cscUseMain);
params->layer[NVKMS_MAIN_LAYER].csc.specified = NV_TRUE;
params->layer[NVKMS_MAIN_LAYER].csc.useMain = FALSE;
params->layer[NVKMS_MAIN_LAYER].csc.matrix = layerConfig->csc;
// 'blendCtm' overrides 'csc', but provides a 3x4 matrix.
if (layerConfig->matrixOverrides.enabled.blendCtm) {
params->layer[NVKMS_MAIN_LAYER].csc.matrix =
layerConfig->matrixOverrides.blendCtm;
}
changed = TRUE;
}
AssignHDRMetadataConfig(layerConfig, layerRequestedConfig, NVKMS_MAIN_LAYER,
params, bFromKmsSetMode);
if (layerRequestedConfig->flags.matrixOverridesChanged || bFromKmsSetMode) {
// 'lmsCtm' explicitly provides a matrix to program CSC00.
if (layerConfig->matrixOverrides.enabled.lmsCtm) {
params->layer[NVKMS_MAIN_LAYER].csc00Override.matrix =
layerConfig->matrixOverrides.lmsCtm;
params->layer[NVKMS_MAIN_LAYER].csc00Override.enabled = TRUE;
} else {
params->layer[NVKMS_MAIN_LAYER].csc00Override.enabled = FALSE;
}
params->layer[NVKMS_MAIN_LAYER].csc00Override.specified = TRUE;
// 'lmsToItpCtm' explicitly provides a matrix to program CSC01.
if (layerConfig->matrixOverrides.enabled.lmsToItpCtm) {
params->layer[NVKMS_MAIN_LAYER].csc01Override.matrix =
layerConfig->matrixOverrides.lmsToItpCtm;
params->layer[NVKMS_MAIN_LAYER].csc01Override.enabled = TRUE;
} else {
params->layer[NVKMS_MAIN_LAYER].csc01Override.enabled = FALSE;
}
params->layer[NVKMS_MAIN_LAYER].csc01Override.specified = TRUE;
// 'itpToLmsCtm' explicitly provides a matrix to program CSC10.
if (layerConfig->matrixOverrides.enabled.itpToLmsCtm) {
params->layer[NVKMS_MAIN_LAYER].csc10Override.matrix =
layerConfig->matrixOverrides.itpToLmsCtm;
params->layer[NVKMS_MAIN_LAYER].csc10Override.enabled = TRUE;
} else {
params->layer[NVKMS_MAIN_LAYER].csc10Override.enabled = FALSE;
}
params->layer[NVKMS_MAIN_LAYER].csc10Override.specified = TRUE;
// 'blendCtm' explicitly provides a matrix to program CSC11.
if (layerConfig->matrixOverrides.enabled.blendCtm) {
params->layer[NVKMS_MAIN_LAYER].csc11Override.matrix =
layerConfig->matrixOverrides.blendCtm;
params->layer[NVKMS_MAIN_LAYER].csc11Override.enabled = TRUE;
} else {
params->layer[NVKMS_MAIN_LAYER].csc11Override.enabled = FALSE;
}
params->layer[NVKMS_MAIN_LAYER].csc11Override.specified = TRUE;
changed = TRUE;
}
AssignLayerLutConfig(device, layerConfig, layerRequestedConfig, NVKMS_MAIN_LAYER,
params, bFromKmsSetMode);
if (commit && changed) {
NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX(
device->layerState[head][NVKMS_MAIN_LAYER].
@@ -2850,7 +2967,7 @@ static void NvKmsKapiHeadLutConfigToKms(
struct NvKmsSetOutputLutParams *output = &lutParams->output;
/* input LUT */
if (headRequestedConfig->flags.ilutChanged || bFromKmsSetMode) {
if (headRequestedConfig->flags.legacyIlutChanged || bFromKmsSetMode) {
input->specified = NV_TRUE;
input->depth = modeSetConfig->lut.input.depth;
input->start = modeSetConfig->lut.input.start;
@@ -2860,7 +2977,7 @@ static void NvKmsKapiHeadLutConfigToKms(
}
/* output LUT */
if (headRequestedConfig->flags.olutChanged || bFromKmsSetMode) {
if (headRequestedConfig->flags.legacyOlutChanged || bFromKmsSetMode) {
output->specified = NV_TRUE;
output->enabled = modeSetConfig->lut.output.enabled;
@@ -2981,6 +3098,27 @@ static NvBool NvKmsKapiRequestedModeSetConfigToKms(
&paramsHead->flip.lut,
NV_TRUE /* bFromKmsSetMode */);
if (device->lutCaps.olut.supported) {
paramsHead->flip.olut.specified = TRUE;
paramsHead->flip.olut.enabled = headModeSetConfig->olut.enabled;
if (headModeSetConfig->olut.lutSurface != NULL) {
paramsHead->flip.olut.lut.surfaceHandle =
headModeSetConfig->olut.lutSurface->hKmsHandle;
} else {
paramsHead->flip.olut.lut.surfaceHandle = 0;
}
paramsHead->flip.olut.lut.offset = headModeSetConfig->olut.offset;
paramsHead->flip.olut.lut.vssSegments =
headModeSetConfig->olut.vssSegments;
paramsHead->flip.olut.lut.lutEntries =
headModeSetConfig->olut.lutEntries;
paramsHead->flip.olutFpNormScale.specified = TRUE;
paramsHead->flip.olutFpNormScale.val =
headModeSetConfig->olutFpNormScale;
}
NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig,
&paramsHead->flip,
NV_TRUE /* bFromKmsSetMode */);
@@ -3240,6 +3378,30 @@ static NvBool KmsFlip(
NvKmsKapiHeadLutConfigToKms(headRequestedConfig,
&flipParams->lut,
NV_FALSE /* bFromKmsSetMode */);
if (device->lutCaps.olut.supported && headRequestedConfig->flags.olutChanged) {
flipParams->olut.specified = TRUE;
flipParams->olut.enabled = headModeSetConfig->olut.enabled;
if (headModeSetConfig->olut.lutSurface != NULL) {
flipParams->olut.lut.surfaceHandle =
headModeSetConfig->olut.lutSurface->hKmsHandle;
} else {
flipParams->olut.lut.surfaceHandle = 0;
}
flipParams->olut.lut.offset = headModeSetConfig->olut.offset;
flipParams->olut.lut.vssSegments =
headModeSetConfig->olut.vssSegments;
flipParams->olut.lut.lutEntries =
headModeSetConfig->olut.lutEntries;
}
if (device->lutCaps.olut.supported &&
headRequestedConfig->flags.olutFpNormScaleChanged) {
flipParams->olutFpNormScale.specified = TRUE;
flipParams->olutFpNormScale.val = headModeSetConfig->olutFpNormScale;
}
}
if (params->request.numFlipHeads == 0) {
@@ -3337,9 +3499,11 @@ static NvBool ApplyModeSetConfig(
}
bRequiredModeset =
headRequestedConfig->flags.activeChanged ||
headRequestedConfig->flags.displaysChanged ||
headRequestedConfig->flags.modeChanged;
headRequestedConfig->flags.activeChanged ||
headRequestedConfig->flags.displaysChanged ||
headRequestedConfig->flags.modeChanged ||
headRequestedConfig->flags.hdrInfoFrameChanged ||
headRequestedConfig->flags.colorimetryChanged;
/*
* NVKMS flip ioctl could not validate flip configuration for an
@@ -3527,6 +3691,28 @@ static NvBool SignalVrrSemaphore
return status;
}
static void FramebufferConsoleDisabled
(
struct NvKmsKapiDevice *device
)
{
struct NvKmsFramebufferConsoleDisabledParams params = { };
NvBool status;
if (device->hKmsDevice == 0x0) {
return;
}
params.request.deviceHandle = device->hKmsDevice;
status = nvkms_ioctl_from_kapi(device->pKmsOpen,
NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED,
&params, sizeof(params));
if (!status) {
nvKmsKapiLogDeviceDebug(device, "NVKMS FramebufferConsoleDisabled failed");
}
}
NvBool nvKmsKapiGetFunctionsTableInternal
(
struct NvKmsKapiFunctionsTable *funcsTable
@@ -3595,8 +3781,6 @@ NvBool nvKmsKapiGetFunctionsTableInternal
funcsTable->getMemoryPages = GetMemoryPages;
funcsTable->freeMemoryPages = FreeMemoryPages;
funcsTable->isMemoryValidForDisplay = IsMemoryValidForDisplay;
funcsTable->importSemaphoreSurface = nvKmsKapiImportSemaphoreSurface;
funcsTable->freeSemaphoreSurface = nvKmsKapiFreeSemaphoreSurface;
funcsTable->registerSemaphoreSurfaceCallback =
@@ -3606,6 +3790,7 @@ NvBool nvKmsKapiGetFunctionsTableInternal
funcsTable->setSemaphoreSurfaceValue =
nvKmsKapiSetSemaphoreSurfaceValue;
funcsTable->setSuspendResumeCallback = nvKmsKapiSetSuspendResumeCallback;
funcsTable->framebufferConsoleDisabled = FramebufferConsoleDisabled;
funcsTable->tryInitDisplaySemaphore = nvKmsKapiTryInitDisplaySemaphore;
funcsTable->signalDisplaySemaphore = nvKmsKapiSignalDisplaySemaphore;
@@ -3614,3 +3799,47 @@ NvBool nvKmsKapiGetFunctionsTableInternal
return NV_TRUE;
}
NvU32 nvKmsKapiF16ToF32Internal(NvU16 a)
{
float16_t fa = { .v = a };
return f16_to_f32(fa).v;
}
NvU16 nvKmsKapiF32ToF16Internal(NvU32 a)
{
float32_t fa = { .v = a };
return f32_to_f16(fa).v;
}
NvU32 nvKmsKapiF32MulInternal(NvU32 a, NvU32 b)
{
float32_t fa = { .v = a };
float32_t fb = { .v = b };
return f32_mul(fa, fb).v;
}
NvU32 nvKmsKapiF32DivInternal(NvU32 a, NvU32 b)
{
float32_t fa = { .v = a };
float32_t fb = { .v = b };
return f32_div(fa, fb).v;
}
NvU32 nvKmsKapiF32AddInternal(NvU32 a, NvU32 b)
{
float32_t fa = { .v = a };
float32_t fb = { .v = b };
return f32_add(fa, fb).v;
}
NvU32 nvKmsKapiF32ToUI32RMinMagInternal(NvU32 a, NvBool exact)
{
float32_t fa = { .v = a };
return f32_to_ui32_r_minMag(fa, exact);
}
NvU32 nvKmsKapiUI32ToF32Internal(NvU32 a)
{
return ui32_to_f32(a).v;
}

View File

@@ -67,6 +67,14 @@ enum NvKmsSyncPtOp {
NVKMS_SYNCPT_OP_READ_MINVAL,
};
enum NvKmsDebugForceColorSpace {
NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE,
NVKMS_DEBUG_FORCE_COLOR_SPACE_RGB,
NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV444,
NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV422,
NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX,
};
typedef struct {
struct {
@@ -102,6 +110,7 @@ NvBool nvkms_disable_vrr_memclk_switch(void);
NvBool nvkms_hdmi_deepcolor(void);
NvBool nvkms_vblank_sem_control(void);
NvBool nvkms_opportunistic_display_sync(void);
enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void);
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,

View File

@@ -110,4 +110,18 @@ NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev);
NvU32 nvKmsKapiF16ToF32Internal(NvU16 a);
NvU16 nvKmsKapiF32ToF16Internal(NvU32 a);
NvU32 nvKmsKapiF32MulInternal(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32DivInternal(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32AddInternal(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32ToUI32RMinMagInternal(NvU32 a, NvBool exact);
NvU32 nvKmsKapiUI32ToF32Internal(NvU32 a);
#endif /* __NV_KMS_H__ */

View File

@@ -43,13 +43,6 @@ public:
virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize);
virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize);
virtual void disconnectHead(unsigned head) {
nvAssert(!"disconnectHead should never be called");
}
virtual void reattachHead(unsigned head) {
nvAssert(!"reattachHead should never be called");
}
virtual NvU32 getSubdeviceIndex();
virtual NvU32 getDisplayId();
virtual NvU32 getSorIndex();

View File

@@ -124,11 +124,6 @@ namespace nvkmsDisplayPort {
}; // namespace nvkmsDisplayPort
NvBool nvDPTimersPending(void)
{
return FALSE;
}
NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo)
{
NVDPLibTimerPtr pTimer = new _nv_dplibtimer(pDevEvo);

View File

@@ -611,6 +611,8 @@ static void DpyPostColorSpaceOrRangeSetEvo(NVDpyEvoPtr pDpyEvo)
NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
NVDispApiHeadStateEvoRec *pApiHeadState;
NvU32 head;
NvBool colorSpaceChanged = FALSE;
NvBool colorBpcChanged = FALSE;
if (pDpyEvo->apiHead == NV_INVALID_HEAD) {
return;
@@ -637,10 +639,12 @@ static void DpyPostColorSpaceOrRangeSetEvo(NVDpyEvoPtr pDpyEvo)
return;
}
colorSpaceChanged = (pApiHeadState->attributes.color.format != colorSpace);
colorBpcChanged = (pApiHeadState->attributes.color.bpc != colorBpc);
/* For DP, neither color space nor bpc can be changed without a modeset */
if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) &&
((pApiHeadState->attributes.color.format != colorSpace) ||
(pApiHeadState->attributes.color.bpc != colorBpc))) {
(colorSpaceChanged || colorBpcChanged)) {
return;
}
@@ -677,7 +681,8 @@ static void DpyPostColorSpaceOrRangeSetEvo(NVDpyEvoPtr pDpyEvo)
&tmpDpyColor) >
pDpyEvo->maxSingleLinkPixelClockKHz) {
if(!nvDowngradeColorSpaceAndBpc(&colorFormatsInfo,
if(!nvDowngradeColorSpaceAndBpc(pDpyEvo,
&colorFormatsInfo,
&tmpDpyColor)) {
return;
}
@@ -703,9 +708,13 @@ static void DpyPostColorSpaceOrRangeSetEvo(NVDpyEvoPtr pDpyEvo)
&pApiHeadState->attributes.color,
&updateState);
if (newPixelDepth != pDispEvo->headState[head].pixelDepth) {
if ((newPixelDepth != pDispEvo->headState[head].pixelDepth) ||
colorSpaceChanged) {
pDispEvo->headState[head].pixelDepth = newPixelDepth;
nvEvoHeadSetControlOR(pDispEvo, head, &updateState);
nvEvoHeadSetControlOR(pDispEvo,
head,
&pApiHeadState->attributes.color,
&updateState);
}
}

View File

@@ -305,7 +305,7 @@ void nvMoveCursor(NVDispEvoPtr pDispEvo, const NvU32 apiHead,
}
pDevEvo->gpus[sd].headState[head].cursor.x =
pParams->x - (hwViewportInWidth * pHeadState->tilePosition);
pParams->x - (hwViewportInWidth * pHeadState->mergeHeadSection);
pDevEvo->gpus[sd].headState[head].cursor.y = pParams->y;
nvEvoMoveCursorInternal(pDispEvo,

View File

@@ -168,10 +168,6 @@ static NvBool PrefetchHelperSurfaceEvo(NVDIFRStateEvoPtr pDifr,
size_t *cacheRemaining,
NVSurfaceEvoPtr pSurfaceEvo,
NvU32 *status);
static NvBool PrefetchHelperLutSurface(NVDIFRStateEvoPtr pDifr,
size_t *cacheRemaining,
NVLutSurfaceEvoPtr pLutSurface,
NvU32 *status);
static NvBool SetDisabledState(NVDIFRStateEvoPtr pDifr,
NvBool shouldDisable);
@@ -332,14 +328,14 @@ NvU32 nvDIFRPrefetchSurfaces(NVDIFRStateEvoPtr pDifr, size_t l2CacheSize)
* Prefetch per-layer LUTs, if any, but skip null LUTs and
* duplicates already prefetched.
*/
if (!PrefetchHelperLutSurface(pDifr,
if (!PrefetchHelperSurfaceEvo(pDifr,
&cacheRemaining,
pHeadState->layer[layer].inputLut.pLutSurfaceEvo,
&status)) {
goto out;
}
if (!PrefetchHelperLutSurface(pDifr,
if (!PrefetchHelperSurfaceEvo(pDifr,
&cacheRemaining,
pHeadState->layer[layer].tmoLut.pLutSurfaceEvo,
&status)) {
@@ -351,7 +347,7 @@ NvU32 nvDIFRPrefetchSurfaces(NVDIFRStateEvoPtr pDifr, size_t l2CacheSize)
/*
* Finally prefetch the known main LUTs.
*/
if (!PrefetchHelperLutSurface(pDifr,
if (!PrefetchHelperSurfaceEvo(pDifr,
&cacheRemaining,
pDevEvo->lut.defaultLut,
&status)) {
@@ -360,7 +356,7 @@ NvU32 nvDIFRPrefetchSurfaces(NVDIFRStateEvoPtr pDifr, size_t l2CacheSize)
for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) {
if (!PrefetchHelperLutSurface(pDifr,
if (!PrefetchHelperSurfaceEvo(pDifr,
&cacheRemaining,
pDevEvo->lut.apiHead[apiHead].LUT[i],
&status)) {
@@ -713,40 +709,6 @@ static NvBool PrefetchHelperSurfaceEvo(NVDIFRStateEvoPtr pDifr,
return *status == NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS;
}
static NvBool PrefetchHelperLutSurface(NVDIFRStateEvoPtr pDifr,
size_t *cacheRemaining,
NVLutSurfaceEvoPtr pLutSurface,
NvU32 *status)
{
NVDIFRPrefetchParams params;
nvAssert(*status == NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS);
if (!pLutSurface) {
return TRUE;
}
/*
* LUTs are often shared so we only want to prefetch (or consider) each
* LUT at most once during the prefetch process.
*/
if (pLutSurface->difrLastPrefetchPass == pDifr->prefetchPass) {
return TRUE;
}
pLutSurface->difrLastPrefetchPass = pDifr->prefetchPass;
/* Collect copy parameters and do the prefetch. */
params.surfGpuAddress = (NvUPtr)pLutSurface->gpuAddress;
params.surfSizeBytes = pLutSurface->size;
params.surfPitchBytes = pLutSurface->size;
params.surfFormat = NvKmsSurfaceMemoryFormatI8;
*status = PrefetchSingleSurface(pDifr, &params, cacheRemaining);
return *status == NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS;
}
/*
* Set DIFR disabled state in H/W. Return true if state was changed and it
* was successfully signalled downstream.

View File

@@ -2542,13 +2542,123 @@ static void UpdateDpHDRInfoFrame(const NVDispEvoRec *pDispEvo, const NvU32 head)
}
}
void nvConstructDpVscSdp(const NVDispHeadInfoFrameStateEvoRec *pInfoFrame,
const NVDpyAttributeColor *pDpyColor,
DPSDP_DP_VSC_SDP_DESCRIPTOR *sdp)
{
nvkms_memset(sdp, 0, sizeof(*sdp));
// Header
// Per DP1.3 spec
sdp->hb.hb0 = 0;
sdp->hb.hb1 = SDP_PACKET_TYPE_VSC;
sdp->hb.revisionNumber = SDP_VSC_REVNUM_STEREO_PSR2_COLOR;
sdp->hb.numValidDataBytes = SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR;
sdp->db.stereoInterface = 0;
sdp->db.psrState = 0;
sdp->db.contentType = SDP_VSC_CONTENT_TYPE_GRAPHICS;
switch (pDpyColor->format) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
sdp->db.pixEncoding = SDP_VSC_PIX_ENC_RGB;
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR444;
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR422;
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR420;
break;
default:
nvAssert(!"unrecognized color format");
break;
}
switch (pDpyColor->format) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
switch (pDpyColor->colorimetry) {
case NVKMS_OUTPUT_COLORIMETRY_BT2100:
sdp->db.colorimetryFormat =
SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ITU_R_BT2020_RGB;
break;
case NVKMS_OUTPUT_COLORIMETRY_DEFAULT:
sdp->db.colorimetryFormat =
SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_SRGB;
break;
}
switch (pDpyColor->bpc) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_RGB_10BPC;
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_RGB_8BPC;
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6:
sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_RGB_6BPC;
break;
default:
nvAssert(!"Invalid bpc value for RBG format");
break;
}
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
switch (pDpyColor->colorimetry) {
case NVKMS_OUTPUT_COLORIMETRY_BT2100:
sdp->db.colorimetryFormat =
SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCBCR;
break;
case NVKMS_OUTPUT_COLORIMETRY_DEFAULT:
sdp->db.colorimetryFormat =
(pInfoFrame->hdTimings ?
SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709 :
SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT601);
break;
}
switch (pDpyColor->bpc) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_10BPC;
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_8BPC;
break;
default:
nvAssert(!"Invalid bpc value for YUV color format");
break;
}
break;
default:
nvAssert(!"unrecognized color format");
break;
}
switch (pDpyColor->range) {
case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL:
sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_VESA;
break;
case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED:
sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_CEA;
break;
default:
nvAssert(!"Invalid colorRange value");
break;
}
}
/*
* Construct the DP 1.3 YUV420 infoframe, and toggle it on or off based on
* whether or not YUV420 mode is in use.
* Construct the DP 1.3 VSC SDP infoframe, and toggle it on or off based on
* whether or not YUV420 mode or BT2100 colorimetry is in use.
*/
static void UpdateDpYUV420InfoFrame(const NVDispEvoRec *pDispEvo,
const NvU32 head,
const NVDpyAttributeColor *pDpyColor)
static void UpdateDpVscSdpInfoFrame(
const NVDispEvoRec *pDispEvo,
const NvU32 head,
const NVDpyAttributeColor *pDpyColor,
const NVDispHeadInfoFrameStateEvoRec *pInfoFrame)
{
const NVDispHeadStateEvoRec *pHeadState =
&pDispEvo->headState[head];
@@ -2559,7 +2669,8 @@ static void UpdateDpYUV420InfoFrame(const NVDispEvoRec *pDispEvo,
params.subDeviceInstance = pDispEvo->displayOwner;
params.displayId = pHeadState->activeRmId;
if (pDpyColor->format == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) {
if ((pDpyColor->format == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) ||
(pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) {
// DPSDP_DP_VSC_SDP_DESCRIPTOR has a (dataSize, hb, db) layout, while
// NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS.aPacket needs to contain
@@ -2570,42 +2681,7 @@ static void UpdateDpYUV420InfoFrame(const NVDispEvoRec *pDispEvo,
nvAssert((void *)&sdp->hb == (void *)params.aPacket);
// Header
// Per DP1.3 spec
sdp->hb.hb0 = 0;
sdp->hb.hb1 = SDP_PACKET_TYPE_VSC;
sdp->hb.revisionNumber = SDP_VSC_REVNUM_STEREO_PSR2_COLOR;
sdp->hb.numValidDataBytes = SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR;
sdp->db.stereoInterface = 0;
sdp->db.psrState = 0;
sdp->db.contentType = SDP_VSC_CONTENT_TYPE_GRAPHICS;
sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR420;
sdp->db.colorimetryFormat = SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709;
switch (pDpyColor->bpc) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_10BPC;
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_8BPC;
break;
default:
nvAssert(!"Invalid pixelDepth value");
break;
}
switch (pDpyColor->range) {
case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL:
sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_VESA;
break;
case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED:
sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_CEA;
break;
default:
nvAssert(!"Invalid colorRange value");
break;
}
nvConstructDpVscSdp(pInfoFrame, pDpyColor, sdp);
params.packetSize = sizeof(sdp->hb) + sdp->hb.numValidDataBytes;
@@ -2632,11 +2708,12 @@ static void UpdateDpYUV420InfoFrame(const NVDispEvoRec *pDispEvo,
static void UpdateDpInfoFrames(const NVDispEvoRec *pDispEvo,
const NvU32 head,
const NVDpyAttributeColor *pDpyColor)
const NVDpyAttributeColor *pDpyColor,
const NVDispHeadInfoFrameStateEvoRec *pInfoFrame)
{
UpdateDpHDRInfoFrame(pDispEvo, head);
UpdateDpYUV420InfoFrame(pDispEvo, head, pDpyColor);
UpdateDpVscSdpInfoFrame(pDispEvo, head, pDpyColor, pInfoFrame);
}
void nvCancelSDRTransitionTimer(NVDpyEvoRec *pDpyEvo)
@@ -2704,7 +2781,10 @@ void nvUpdateInfoFrames(NVDpyEvoRec *pDpyEvo)
pHeadState = &pDispEvo->headState[head];
if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) {
UpdateDpInfoFrames(pDispEvo, head, &pApiHeadState->attributes.color);
UpdateDpInfoFrames(pDispEvo,
head,
&pApiHeadState->attributes.color,
&pApiHeadState->infoFrame);
} else {
nvUpdateHdmiInfoFrames(pDispEvo,
head,
@@ -3320,9 +3400,10 @@ NvKmsDpyOutputColorFormatInfo nvDpyGetOutputColorFormatInfo(
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
if (nvDpyIsHdmiEvo(pDpyEvo)) {
// TODO: Handle depth 30 YUV
colorFormatsInfo.yuv444.maxBpc =
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
nvDpyIsHdmiDepth30Evo(pDpyEvo) ?
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10 :
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
colorFormatsInfo.yuv444.minBpc =
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
@@ -3339,6 +3420,35 @@ NvKmsDpyOutputColorFormatInfo nvDpyGetOutputColorFormatInfo(
}
}
switch (nvkms_debug_force_color_space()) {
case NVKMS_DEBUG_FORCE_COLOR_SPACE_RGB:
colorFormatsInfo.yuv444.maxBpc =
colorFormatsInfo.yuv444.minBpc =
colorFormatsInfo.yuv422.maxBpc =
colorFormatsInfo.yuv422.minBpc =
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
break;
case NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV444:
colorFormatsInfo.rgb444.maxBpc =
colorFormatsInfo.rgb444.minBpc =
colorFormatsInfo.yuv422.maxBpc =
colorFormatsInfo.yuv422.minBpc =
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
break;
case NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV422:
colorFormatsInfo.rgb444.maxBpc =
colorFormatsInfo.rgb444.minBpc =
colorFormatsInfo.yuv444.maxBpc =
colorFormatsInfo.yuv444.minBpc =
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
break;
default:
nvAssert(!"Unrecognzed debug_force_color_space value");
// fallthrough
case NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE:
break;
}
return colorFormatsInfo;
}

View File

@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-api-types.h"
#include "nvkms-types.h"
#include "nvkms-evo-states.h"
@@ -48,6 +49,7 @@
#include "nvkms-difr.h"
#include "nvkms-vrr.h"
#include "nvkms-ioctl.h"
#include "nvkms-setlut-workarea.h"
#include "nvctassert.h"
@@ -238,6 +240,14 @@ static NvBool HasActiveHeads(NVDispEvoPtr pDispEvo)
return nvGetActiveHeadMask(pDispEvo) != 0;
}
static void EvoSetLUTContextDmaHelper(const NVDispEvoRec *pDispEvo,
const NvU32 head,
NVSurfaceEvoPtr pLutSurfEvo,
NvBool enableBaseLut,
NvBool enableOutputLut,
NVEvoUpdateState *pUpdateState,
NvBool bypassComposition);
static void BlankHeadEvo(NVDispEvoPtr pDispEvo, const NvU32 head,
NVEvoUpdateState *updateState)
{
@@ -252,13 +262,13 @@ static void BlankHeadEvo(NVDispEvoPtr pDispEvo, const NvU32 head,
* Lut explicitly.
*/
if (!pDevEvo->hal->caps.supportsCoreChannelSurface) {
pDevEvo->hal->SetLUTContextDma(pDispEvo,
head,
NULL /* pSurfEvo */,
FALSE /* baseLutEnabled */,
FALSE /* outputLutEnabled */,
updateState,
pHeadState->bypassComposition);
EvoSetLUTContextDmaHelper(pDispEvo,
head,
NULL /* pLutSurfEvo */,
FALSE /* baseLutEnabled */,
FALSE /* outputLutEnabled */,
updateState,
pHeadState->bypassComposition);
}
nvPushEvoSubDevMaskDisp(pDispEvo);
@@ -965,7 +975,7 @@ void nvEvoSetTimings(NVDispEvoPtr pDispEvo,
NV_YUV420_MODE_SW));
pDevEvo->hal->SetRasterParams(pDevEvo, head,
pTimings, pHeadState->tilePosition,
pTimings, pHeadState->mergeHeadSection,
pDscInfo, &overscanColor, updateState);
// Set the head parameters
@@ -2724,27 +2734,38 @@ void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo,
pUpdateState);
}
/*
 * Return the minimum output color depth (bits per component) that the
 * given output colorimetry may be driven with.
 *
 * BT2100 (HDR) output requires at least 10 BPC; every other colorimetry
 * is allowed down to 6 BPC.
 *
 * XXX HDR TODO: Handle other colorimetries
 */
static enum NvKmsDpyAttributeColorBpcValue GetMinRequiredBpc(
    enum NvKmsOutputColorimetry colorimetry)
{
    if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
        return NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
    }

    return NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6;
}
static NvBool GetDefaultColorSpace(
const NvKmsDpyOutputColorFormatInfo *pColorFormatsInfo,
enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
enum NvKmsDpyAttributeColorBpcValue *pColorBpc)
enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
const enum NvKmsDpyAttributeColorBpcValue minRequiredBpc)
{
if (pColorFormatsInfo->rgb444.maxBpc !=
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
nvAssert(minRequiredBpc !=
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN);
if (pColorFormatsInfo->rgb444.maxBpc >= minRequiredBpc) {
*pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
*pColorBpc = pColorFormatsInfo->rgb444.maxBpc;
return TRUE;
}
if (pColorFormatsInfo->yuv444.maxBpc !=
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
if (pColorFormatsInfo->yuv444.maxBpc >= minRequiredBpc) {
*pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444;
*pColorBpc = pColorFormatsInfo->yuv444.maxBpc;
return TRUE;
}
if (pColorFormatsInfo->yuv422.maxBpc !=
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
if (pColorFormatsInfo->yuv422.maxBpc >= minRequiredBpc) {
*pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
*pColorBpc = pColorFormatsInfo->yuv422.maxBpc;
return TRUE;
@@ -2759,8 +2780,10 @@ NvBool nvGetDefaultDpyColor(
{
nvkms_memset(pDpyColor, 0, sizeof(*pDpyColor));
if (!GetDefaultColorSpace(pColorFormatsInfo, &pDpyColor->format,
&pDpyColor->bpc)) {
if (!GetDefaultColorSpace(pColorFormatsInfo,
&pDpyColor->format,
&pDpyColor->bpc,
GetMinRequiredBpc(pDpyColor->colorimetry))) {
return FALSE;
}
@@ -2776,7 +2799,6 @@ NvBool nvGetDefaultDpyColor(
}
NvBool nvChooseColorRangeEvo(
enum NvKmsOutputColorimetry colorimetry,
const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
const enum NvKmsDpyAttributeColorBpcValue colorBpc,
@@ -2789,16 +2811,11 @@ NvBool nvChooseColorRangeEvo(
if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) &&
(colorBpc == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6)) {
/* At depth 18 only RGB and full range are allowed */
if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
/* BT2100 requires limited color range */
return FALSE;
}
*pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
} else if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) ||
(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) ||
(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) ||
(colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) {
/* Both YUV and BT2100 colorimetry require limited color range. */
(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) {
/* YUV requires limited color range. */
*pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
} else {
*pColorRange = requestedColorRange;
@@ -2855,22 +2872,16 @@ NvBool nvChooseCurrentColorSpaceAndRangeEvo(
NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
const NvKmsDpyOutputColorFormatInfo colorFormatsInfo =
nvDpyGetOutputColorFormatInfo(pDpyEvo);
const enum NvKmsDpyAttributeColorBpcValue minRequiredBpc =
GetMinRequiredBpc(colorimetry);
// XXX HDR TODO: Handle other colorimetries
// XXX HDR TODO: Handle YUV
if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
/*
* If the head currently has BT2100 colorimetry, we override the
* requested color space with RGB. We cannot support yuv420Mode in
* that configuration, so fail in that case.
*/
if (yuv420Mode != NV_YUV420_MODE_NONE) {
if (yuv420Mode != NV_YUV420_MODE_NONE) {
// XXX HDR TODO: Support YUV420 + HDR
// XXX HDR TODO: Handle other colorimetries
if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
return FALSE;
}
newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
newColorBpc = colorFormatsInfo.rgb444.maxBpc;
} else if (yuv420Mode != NV_YUV420_MODE_NONE) {
/*
* If the current mode timing requires YUV420 compression, we override the
* requested color space with YUV420.
@@ -2880,6 +2891,12 @@ NvBool nvChooseCurrentColorSpaceAndRangeEvo(
nvAssert(colorFormatsInfo.rgb444.maxBpc >=
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8);
} else if ((colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
!pDpyEvo->pDispEvo->pDevEvo->caps.supportsYUV2020) {
newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
newColorBpc = ChooseColorBpc(requestedColorBpc,
colorFormatsInfo.rgb444.maxBpc,
colorFormatsInfo.rgb444.minBpc);
} else {
/*
* Note this is an assignment between different enum types. Checking the
@@ -2909,23 +2926,18 @@ NvBool nvChooseCurrentColorSpaceAndRangeEvo(
nvAssert(!"Invalid Requested ColorSpace");
}
if ((newColorBpc ==
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) &&
if ((newColorBpc < minRequiredBpc) &&
!GetDefaultColorSpace(&colorFormatsInfo, &newColorSpace,
&newColorBpc)) {
&newColorBpc, minRequiredBpc)) {
return FALSE;
}
}
// 10 BPC required for HDR
// XXX HDR TODO: Handle other colorimetries
// XXX HDR TODO: Handle YUV
if ((colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
(newColorBpc < NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10)) {
if (newColorBpc < minRequiredBpc) {
return FALSE;
}
if (!nvChooseColorRangeEvo(colorimetry, requestedColorRange, newColorSpace,
if (!nvChooseColorRangeEvo(requestedColorRange, newColorSpace,
newColorBpc, &newColorRange)) {
}
@@ -2948,18 +2960,12 @@ void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
nvAssert(pConnectorEvo != NULL);
// XXX HDR TODO: Support more output colorimetries
if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
nvAssert(pHeadState->timings.yuv420Mode == NV_YUV420_MODE_NONE);
nvAssert(pDpyColor->format == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
nvAssert(pDpyColor->range == NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED);
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_BT2020RGB;
pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED;
pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
} else if ((pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) &&
if ((pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) &&
(pDpyColor->format ==
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) {
// XXX HDR TODO: Support SW YUV420 + HDR Output
nvAssert(pDpyColor->colorimetry != NVKMS_OUTPUT_COLORIMETRY_BT2100);
/*
* In SW YUV420 mode, HW is programmed with RGB color space and full
* color range. The color space conversion and color range compression
@@ -2997,12 +3003,18 @@ void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
// program HW with RGB/YCbCr
switch (pDpyColor->format) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_BT2020RGB;
} else {
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
}
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
if (nvEvoIsHDQualityVideoTimings(&pHeadState->timings)) {
if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_BT2020YCC;
} else if (nvEvoIsHDQualityVideoTimings(&pHeadState->timings)) {
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_709;
} else {
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_601;
@@ -3018,6 +3030,7 @@ void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
// (i.e. the default - RGB)
nvAssert(pDpyColor->format ==
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
nvAssert(pDpyColor->colorimetry != NVKMS_OUTPUT_COLORIMETRY_BT2100);
// program HW with RGB only
pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
@@ -3053,9 +3066,9 @@ void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
}
}
// In YUV colorimetry, only limited color range is allowed.
nvAssert(!((pHeadState->procAmp.colorimetry != NVT_COLORIMETRY_RGB) &&
(pHeadState->procAmp.colorRange != NVT_COLOR_RANGE_LIMITED)));
// Full color range is only allowed with RGB color format.
nvAssert((pHeadState->procAmp.colorFormat == NVT_COLOR_FORMAT_RGB) ||
(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED));
// Limited color range is not allowed with 18bpp mode
nvAssert(!((pHeadState->pixelDepth == NVKMS_PIXEL_DEPTH_18_444) &&
@@ -3071,7 +3084,9 @@ void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
}
void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo,
const NvU32 head, NVEvoUpdateState *pUpdateState)
const NvU32 head,
const NVDpyAttributeColor *pDpyColor,
NVEvoUpdateState *pUpdateState)
{
NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
const NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head];
@@ -3079,16 +3094,19 @@ void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo,
const enum nvKmsPixelDepth pixelDepth = pHeadState->pixelDepth;
NvBool colorSpaceOverride = FALSE;
nvAssert(pHeadState->pixelDepth == nvEvoDpyColorToPixelDepth(pDpyColor));
/*
* Determine whether or not this dpy will need its color space
* overridden.
*
* This is currently only used for DP 1.3 YUV420 mode, where the
* HW's normal support for carrying color space information
* This is currently only used for DP 1.3 YUV420 mode or BT2100 colorimetry,
* where the HW's normal support for carrying color space information
* together with the frame is insufficient.
*/
if ((pTimings->yuv420Mode == NV_YUV420_MODE_SW) &&
nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) {
if (((pTimings->yuv420Mode == NV_YUV420_MODE_SW) ||
(pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) &&
nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) {
nvAssert(pDispEvo->pDevEvo->caps.supportsDP13);
colorSpaceOverride = TRUE;
@@ -4660,19 +4678,93 @@ static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, const NvU32 head,
nvPopEvoSubDevMask(pDevEvo);
}
/*
 * Program the base (input) and output LUTs for one head, using the
 * per-subdevice flip state cache to skip redundant hardware updates.
 *
 * pLutSurfEvo holds both LUT segments in a single NVEvoLutDataRec-shaped
 * surface: the base LUT at offsetof(NVEvoLutDataRec, base) and the output
 * LUT at offsetof(NVEvoLutDataRec, output).  enableBaseLut and
 * enableOutputLut select which segments are bound; a disabled segment is
 * programmed with a NULL surface and zero offset/entry count.
 *
 * The output LUT is pushed through pDevEvo->hal->SetOutputLut(); the
 * input LUT is applied to every layer of the head via pDevEvo->hal->Flip().
 * In both cases the cached state in pSdHeadState is compared first so
 * methods are only emitted when something actually changed, and tearing
 * is forced off on any layer whose LUT state is being updated.
 */
static void EvoSetLUTContextDmaHelper(const NVDispEvoRec *pDispEvo,
                                      const NvU32 head,
                                      NVSurfaceEvoPtr pLutSurfEvo,
                                      NvBool enableBaseLut,
                                      NvBool enableOutputLut,
                                      NVEvoUpdateState *pUpdateState,
                                      NvBool bypassComposition)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NvU32 layer, sd = pDispEvo->displayOwner;
    NVEvoSubDevHeadStateRec *pSdHeadState = &pDevEvo->gpus[sd].headState[head];
    NVFlipLutHwState inputLut, outputLut;

    /* Describe the base (input) LUT segment of the surface, if enabled. */
    if (enableBaseLut) {
        inputLut.pLutSurfaceEvo = pLutSurfEvo;
        inputLut.offset = offsetof(NVEvoLutDataRec, base);
        inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
    } else {
        inputLut.pLutSurfaceEvo = NULL;
        inputLut.offset = 0;
        inputLut.lutEntries = 0;
    }
    inputLut.vssSegments = 0;
    inputLut.fromOverride = FALSE;

    /* Describe the output LUT segment of the surface, if enabled. */
    if (enableOutputLut) {
        outputLut.pLutSurfaceEvo = pLutSurfEvo;
        outputLut.offset = offsetof(NVEvoLutDataRec, output);
        outputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
    } else {
        outputLut.pLutSurfaceEvo = NULL;
        outputLut.offset = 0;
        outputLut.lutEntries = 0;
    }
    outputLut.vssSegments = 0;
    outputLut.fromOverride = FALSE;

    /* Restrict the methods below to the display-owning subdevice. */
    nvPushEvoSubDevMask(pDevEvo, NVBIT(pDispEvo->displayOwner));

    /* Only reprogram the output LUT if the cached state differs. */
    if ((pSdHeadState->outputLut.pLutSurfaceEvo != outputLut.pLutSurfaceEvo) ||
        (pSdHeadState->outputLut.offset != outputLut.offset) ||
        (pSdHeadState->olutFpNormScale != NVKMS_OLUT_FP_NORM_SCALE_DEFAULT)) {

        pSdHeadState->outputLut = outputLut;
        pSdHeadState->olutFpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT;
        /* LUT updates must not tear. */
        pSdHeadState->layer[NVKMS_MAIN_LAYER].tearing = FALSE;

        pDevEvo->hal->SetOutputLut(pDevEvo, sd, head,
                                   &outputLut,
                                   NVKMS_OLUT_FP_NORM_SCALE_DEFAULT,
                                   pUpdateState,
                                   bypassComposition);
    }

    /* Apply the input LUT to each layer of this head that needs it. */
    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer];
        NVFlipChannelEvoHwState *pFlipState = &pSdHeadState->layer[layer];

        /* Skip layers whose cached input LUT already matches. */
        if ((pFlipState->inputLut.pLutSurfaceEvo == inputLut.pLutSurfaceEvo) &&
            (pFlipState->inputLut.offset == inputLut.offset)) {
            continue;
        }

        /* LUT updates must not tear. */
        pFlipState->tearing = FALSE;
        pFlipState->inputLut = inputLut;

        pDevEvo->hal->Flip(pDevEvo,
                           pChannel,
                           pFlipState,
                           pUpdateState,
                           bypassComposition);
    }

    nvPopEvoSubDevMask(pDevEvo);
}
/*
 * Program the base (input) and output LUTs for one head from the current
 * disp head LUT state.
 *
 * Thin wrapper around EvoSetLUTContextDmaHelper(): passes the currently
 * bound LUT surface and the base/output enable flags tracked in
 * pDispEvo->headState[head].lut.
 *
 * Note: this span previously contained merged diff residue -- both the
 * removed hal->SetLUTContextDma() call path and the replacement helper
 * call were present, with duplicate local declarations.  Only the
 * helper-based version is kept.
 */
void nvEvoSetLUTContextDma(NVDispEvoPtr pDispEvo,
                           const NvU32 head, NVEvoUpdateState *pUpdateState)
{
    const NVDispHeadStateEvoRec *pDispHeadState = &pDispEvo->headState[head];

    EvoSetLUTContextDmaHelper(pDispEvo,
                              head,
                              pDispHeadState->lut.pCurrSurface,
                              pDispHeadState->lut.baseLutEnabled,
                              pDispHeadState->lut.outputLutEnabled,
                              pUpdateState,
                              pDispHeadState->bypassComposition);
}
static void EvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, const NvU32 apiHead)
@@ -4684,8 +4776,16 @@ static void EvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, const NvU32 apiHead)
NvU32 head;
NVEvoUpdateState updateState = { };
struct NvKmsSetLutWorkArea *workarea =
nvPreallocGet(pDevEvo, PREALLOC_TYPE_SET_LUT_WORK_AREA, sizeof(*workarea));
FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
nvInitFlipEvoHwState(pDevEvo, dispIndex, head, &workarea->head[head].oldState);
nvEvoSetLUTContextDma(pDispEvo, head, &updateState);
nvInitFlipEvoHwState(pDevEvo, dispIndex, head, &workarea->head[head].newState);
nvUpdateSurfacesFlipRefCount(pDevEvo, head, &workarea->head[head].newState, TRUE);
}
/*
@@ -4728,6 +4828,12 @@ static void EvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, const NvU32 apiHead)
TRUE /* releaseElv */);
pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate |= notify;
}
FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
nvUpdateSurfacesFlipRefCount(pDevEvo, head, &workarea->head[head].oldState, FALSE);
}
nvPreallocRelease(pDevEvo, PREALLOC_TYPE_SET_LUT_WORK_AREA);
}
static void UpdateMaxPixelClock(NVDevEvoPtr pDevEvo)
@@ -4782,6 +4888,7 @@ static NvBool AllocEvoSubDevs(NVDevEvoPtr pDevEvo)
pSdHeadState->cursor.cursorCompParams =
nvDefaultCursorCompositionParams(pDevEvo);
pSdHeadState->olutFpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT;
}
}
@@ -4864,6 +4971,8 @@ static void ClearApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
{
NvU32 apiHead;
nvKmsOrphanVblankSemControlForAllOpens(pDispEvo);
/*
* Unregister all the flip-occurred event callbacks which are
* registered with the (api-head, layer) pair event data,
@@ -4877,7 +4986,11 @@ static void ClearApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
NvU32 layer;
NVDispApiHeadStateEvoRec *pApiHeadState =
&pDispEvo->apiHeadState[apiHead];
nvAssert(nvListIsEmpty(&pApiHeadState->vblankCallbackList));
for (NvU32 i = 0; i < ARRAY_LEN(pApiHeadState->vblankCallbackList); i++) {
nvAssert(nvListIsEmpty(&pApiHeadState->vblankCallbackList[i]));
}
nvAssert(nvListIsEmpty(&pApiHeadState->vblankSemControl.list));
for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
if (pApiHeadState->flipOccurredEvent[layer].ref_ptr != NULL) {
nvkms_free_ref_ptr(pApiHeadState->flipOccurredEvent[layer].ref_ptr);
@@ -4916,7 +5029,10 @@ static NvBool InitApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
pApiHeadState->activeDpys = nvEmptyDpyIdList();
pApiHeadState->attributes = NV_EVO_DEFAULT_ATTRIBUTES_SET;
nvListInit(&pApiHeadState->vblankCallbackList);
for (NvU32 i = 0; i < ARRAY_LEN(pApiHeadState->vblankCallbackList); i++) {
nvListInit(&pApiHeadState->vblankCallbackList[i]);
}
nvListInit(&pApiHeadState->vblankSemControl.list);
for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
pApiHeadState->flipOccurredEvent[layer].ref_ptr =
@@ -5271,12 +5387,6 @@ NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo)
goto failed;
}
if (!nvAllocLutSurfacesEvo(pDevEvo)) {
nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
"Failed to allocate memory for the display color lookup table.");
goto failed;
}
// Resume the DisplayPort library's control of the device.
if (!nvRmResumeDP(pDevEvo)) {
nvEvoLogDev(
@@ -5530,7 +5640,11 @@ NvBool nvResumeDevEvo(NVDevEvoRec *pDevEvo)
}
}
nvInvalidateDefaultLut(pDevEvo);
if (!nvAllocCoreChannelEvo(pDevEvo)) {
// free the device if core channel allocation fails
nvRevokeDevice(pDevEvo);
return FALSE;
}
@@ -5600,8 +5714,6 @@ void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo)
// Pause the DisplayPort library's control of the device.
nvRmPauseDP(pDevEvo);
nvFreeLutSurfacesEvo(pDevEvo);
// Unmap and free the cursor controls for all heads
nvFreeCursorEvo(pDevEvo);
@@ -5978,8 +6090,8 @@ NvBool nvConstructHwModeTimingsImpCheckEvo(
timingsParams[head].pConnectorEvo = pConnectorEvo;
timingsParams[head].activeRmId = activeRmId;
timingsParams[head].pixelDepth = nvEvoDpyColorToPixelDepth(pColor);
if (!nvEvoGetSingleTileHwModeTimings(pTimings, numHeads,
&timings[head])) {
if (!nvEvoGetSingleMergeHeadSectionHwModeTimings(pTimings, numHeads,
&timings[head])) {
ret = FALSE;
goto done;
}
@@ -6716,8 +6828,7 @@ static NvBool GetDfpHdmiProtocol(const NVDpyEvoRec *pDpyEvo,
nvDpyGetOutputColorFormatInfo(pDpyEvo);
const NvBool forceHdmiFrlIsSupported = FALSE;
nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS ||
rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A ||
nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A ||
rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B);
/* Override protocol if this mode requires HDMI FRL. */
@@ -6744,28 +6855,14 @@ static NvBool GetDfpHdmiProtocol(const NVDpyEvoRec *pDpyEvo,
if (nvHdmiGetEffectivePixelClockKHz(pDpyEvo, pTimings, pDpyColor) <=
pDpyEvo->maxSingleLinkPixelClockKHz) {
switch (rmProtocol) {
case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
/*
* Force single link TMDS protocol. HDMI does not support
* physically support dual link TMDS.
*
* TMDS_A: "use A side of the link"
*/
*pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
break;
case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
*pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
break;
case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
*pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
break;
default:
return FALSE;
}
*pTimingsProtocol = (rmProtocol ==
NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A) ?
NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A :
NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
return TRUE;
}
} while (nvDowngradeColorSpaceAndBpc(&colorFormatsInfo,
} while (nvDowngradeColorSpaceAndBpc(pDpyEvo,
&colorFormatsInfo,
pDpyColor));
return FALSE;
}
@@ -6994,6 +7091,7 @@ NvBool nvDowngradeColorBpc(
}
NvBool nvDowngradeColorSpaceAndBpc(
const NVDpyEvoRec *pDpyEvo,
const NvKmsDpyOutputColorFormatInfo *pSupportedColorFormats,
NVDpyAttributeColor *pDpyColor)
{
@@ -7003,9 +7101,9 @@ NvBool nvDowngradeColorSpaceAndBpc(
switch (pDpyColor->format) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
/* XXX Add support for downgrading to YUV with BT2100 */
if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
return FALSE;
if ((pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
!pDpyEvo->pDispEvo->pDevEvo->caps.supportsYUV2020) {
break;
}
/* fallthrough */
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
@@ -7060,7 +7158,7 @@ NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
if (!nvDPValidateModeForDpyEvo(pDpyEvo, &dpyColor, pParams, pTimings,
b2Heads1Or, pDscInfo)) {
if (nvDowngradeColorSpaceAndBpc(&supportedColorFormats, &dpyColor)) {
if (nvDowngradeColorSpaceAndBpc(pDpyEvo, &supportedColorFormats, &dpyColor)) {
goto tryAgain;
}
/*
@@ -8460,7 +8558,7 @@ void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 apiHead, NvBool kickoff,
pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate;
const int numLUTs = ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT);
const int lutToFill = (curLUT + 1) % numLUTs;
NVLutSurfaceEvoPtr pSurfEvo = pDevEvo->lut.apiHead[apiHead].LUT[lutToFill];
NVSurfaceEvoPtr pSurfEvo = pDevEvo->lut.apiHead[apiHead].LUT[lutToFill];
NvBool baseLutEnabled =
pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled ;
NvBool outputLutEnabled =
@@ -8747,6 +8845,8 @@ NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo)
pDevEvo->fbConsoleSurfaceHandle = 0;
}
nvFreeLutSurfacesEvo(pDevEvo);
nvFreeCoreChannelEvo(pDevEvo);
nvTeardownHdmiLibrary(pDevEvo);
@@ -8905,6 +9005,13 @@ NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest,
goto done;
}
if (!nvAllocLutSurfacesEvo(pDevEvo)) {
nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
"Failed to allocate memory for the display color lookup table.");
status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
goto done;
}
if (!nvInitHdmiLibrary(pDevEvo)) {
status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
goto done;
@@ -9426,6 +9533,11 @@ NvBool nvNeedsTmoLut(NVDevEvoPtr pDevEvo,
return FALSE;
}
// If the TMO is set directly by the client, honor the client's request.
if (pHwState->tmoLut.fromOverride) {
return (pHwState->tmoLut.pLutSurfaceEvo != NULL);
}
// Don't tone map if layer doesn't have static metadata.
// XXX HDR TODO: Support tone mapping SDR surfaces to HDR
if (!pHwState->hdrStaticMetadata.enabled) {
@@ -9650,41 +9762,42 @@ void nvEvoDisableMergeMode(NVDispEvoRec *pDispEvo,
}
}
/*
 * Derive the hardware mode timings of a single merge-head section from
 * the full (merged) mode timings.
 *
 * pSrc describes the complete mode; numMergeHeadSections is the number
 * of equal horizontal sections the mode is split into.  On success, pDst
 * receives a copy of pSrc with the horizontal raster parameters, the
 * pixel clock, and the viewport widths each divided by
 * numMergeHeadSections.
 *
 * Returns FALSE when numMergeHeadSections is 0, when the output viewport
 * is horizontally shifted or scaled relative to the visible width, or
 * when any horizontal parameter does not divide evenly across the
 * sections.
 *
 * Note: this span previously contained merged diff residue -- both the
 * old "numTiles" lines and the renamed "numMergeHeadSections" lines were
 * present.  Only the renamed version is kept.
 */
NvBool nvEvoGetSingleMergeHeadSectionHwModeTimings(
    const NVHwModeTimingsEvo *pSrc,
    const NvU32 numMergeHeadSections,
    NVHwModeTimingsEvo *pDst)
{
    if (numMergeHeadSections == 1) {
        *pDst = *pSrc;
        return TRUE;
    }

    /* Reject a zero section count and shifted/scaled output viewports. */
    if ((numMergeHeadSections == 0) ||
        (pSrc->viewPort.out.xAdjust != 0) ||
        (pSrc->viewPort.out.width != nvEvoVisibleWidth(pSrc))) {
        return FALSE;
    }

    /*
     * Everything horizontal must divide evenly.  The sync/blank edges are
     * validated as (value + 1) -- presumably those fields are stored
     * minus one; the division below is on the raw field value.
     */
    if (((pSrc->rasterSize.x % numMergeHeadSections) != 0) ||
        (((pSrc->rasterSyncEnd.x + 1) % numMergeHeadSections) != 0) ||
        (((pSrc->rasterBlankEnd.x + 1) % numMergeHeadSections) != 0) ||
        (((pSrc->rasterBlankStart.x + 1) % numMergeHeadSections) != 0) ||
        ((pSrc->pixelClock % numMergeHeadSections) != 0) ||
        ((pSrc->viewPort.in.width % numMergeHeadSections) != 0)) {
        return FALSE;
    }

    *pDst = *pSrc;

    pDst->rasterSize.x       /= numMergeHeadSections;
    pDst->rasterSyncEnd.x    /= numMergeHeadSections;
    pDst->rasterBlankEnd.x   /= numMergeHeadSections;
    pDst->rasterBlankStart.x /= numMergeHeadSections;

    pDst->pixelClock /= numMergeHeadSections;

    pDst->viewPort.out.width /= numMergeHeadSections;
    pDst->viewPort.in.width  /= numMergeHeadSections;

    return TRUE;
}

View File

@@ -82,14 +82,6 @@ EvoSetCursorImage(NVDevEvoPtr pDevEvo,
const struct NvKmsCompositionParams *pCursorCompParams);
static void
EvoPushSetLUTContextDmaMethodsForOneSd(NVDevEvoRec *pDevEvo,
const NvU32 sd,
const NvU32 head,
const NvU32 ctxdma,
NvBool enableBaseLut,
const NvBool enableOutputLut,
NVEvoUpdateState *updateState);
static void
EvoPushUpdateComposition(NVDevEvoPtr pDevEvo,
const int head,
const NVFlipChannelEvoHwState *pBaseHwState,
@@ -370,12 +362,61 @@ static void EvoSetRasterParams91(NVDevEvoPtr pDevEvo, int head,
nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl);
}
/*
 * Push the HEAD_SET_PROCAMP method (class 977D) for the given head from
 * pHeadState->procAmp: color space (colorimetry), dynamic range, color
 * range compression, and saturation.
 */
static void EvoSetProcAmp97(NVDispEvoPtr pDispEvo, const NvU32 head,
                            NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoChannelPtr pChannel = pDevEvo->core;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    NvU8 procampColorSpace;
    NvU32 dynamicRange;

    /* These methods should only apply to a single pDpyEvo */
    nvAssert(pDevEvo->subDevMaskStackDepth > 0);

    nvUpdateUpdateState(pDevEvo, updateState, pChannel);

    // These NVT defines match the HEAD_SET_PROCAMP ones.
    ct_assert(NVT_COLORIMETRY_RGB == NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB);
    ct_assert(NVT_COLORIMETRY_YUV_601 == NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601);
    ct_assert(NVT_COLORIMETRY_YUV_709 == NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709);
    ct_assert(NVT_COLOR_RANGE_FULL == NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE);
    ct_assert(NVT_COLOR_RANGE_LIMITED == NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE);

    /* The BT2020 colorimetries need explicit translation. */
    switch (pHeadState->procAmp.colorimetry) {
    case NVT_COLORIMETRY_BT2020RGB:
        procampColorSpace = NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB;
        break;
    case NVT_COLORIMETRY_BT2020YCC:
        procampColorSpace = NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020;
        break;
    default:
        /* The remaining values map 1:1; see the ct_asserts above. */
        procampColorSpace = pHeadState->procAmp.colorimetry;
        break;
    }

    if (pHeadState->procAmp.colorRange != NVT_COLOR_RANGE_FULL) {
        nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED);
        dynamicRange = DRF_DEF(977D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA);
    } else {
        dynamicRange = DRF_DEF(977D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA);
    }

    nvDmaSetStartEvoMethod(pChannel, NV977D_HEAD_SET_PROCAMP(head), 1);
    nvDmaSetEvoMethodData(pChannel,
        DRF_NUM(977D, _HEAD_SET_PROCAMP, _COLOR_SPACE, procampColorSpace) |
        DRF_DEF(977D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _AUTO) |
        DRF_NUM(977D, _HEAD_SET_PROCAMP, _SAT_COS,
                pHeadState->procAmp.satCos) |
        DRF_NUM(977D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) |
        dynamicRange |
        DRF_NUM(977D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION,
                pHeadState->procAmp.colorRange));
}
static void EvoSetProcAmp90(NVDispEvoPtr pDispEvo, const NvU32 head,
NVEvoUpdateState *updateState)
{
NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
NVEvoChannelPtr pChannel = pDevEvo->core;
const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
NvU8 colorSpace;
NvU32 dynRange;
/* These methods should only apply to a single pDpyEvo */
@@ -390,6 +431,13 @@ static void EvoSetProcAmp90(NVDispEvoPtr pDispEvo, const NvU32 head,
ct_assert(NVT_COLOR_RANGE_FULL == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE);
ct_assert(NVT_COLOR_RANGE_LIMITED == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE);
if (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020RGB) {
colorSpace = NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB;
} else {
nvAssert(pHeadState->procAmp.colorimetry != NVT_COLORIMETRY_BT2020YCC);
colorSpace = pHeadState->procAmp.colorimetry;
}
if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) {
dynRange = DRF_DEF(917D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA);
} else {
@@ -399,8 +447,7 @@ static void EvoSetProcAmp90(NVDispEvoPtr pDispEvo, const NvU32 head,
nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PROCAMP(head), 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917D, _HEAD_SET_PROCAMP, _COLOR_SPACE,
pHeadState->procAmp.colorimetry) |
DRF_NUM(917D, _HEAD_SET_PROCAMP, _COLOR_SPACE, colorSpace) |
DRF_DEF(917D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _AUTO) |
DRF_NUM(917D, _HEAD_SET_PROCAMP, _SAT_COS,
pHeadState->procAmp.satCos) |
@@ -870,6 +917,8 @@ static NvBool EvoSetUsageBounds90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
_SUPER_SAMPLE, _X1_AA);
baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
_OUTPUT_LUT, _USAGE_1025);
}
overlayUsage |= pUsage->layer[NVKMS_OVERLAY_LAYER].usable ?
@@ -892,6 +941,9 @@ static NvBool EvoSetUsageBounds90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
_PIXEL_DEPTH, _BPP_16);
}
overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS,
_OVERLAY_LUT, _USAGE_1025);
nvDmaSetStartEvoMethod(pChannel,
NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(head), 2);
nvDmaSetEvoMethodData(pChannel, baseUsage);
@@ -1363,6 +1415,249 @@ static void EvoSetSurface(NVDevEvoPtr pDevEvo,
}
}
/*
 * Build and push the SET_PRESENT_CONTROL value (class 917C) for a base
 * channel flip, along with the flip timestamp methods.
 *
 * Encodes: the minimum present interval, tearing vs non-tearing begin
 * mode, mono vs stereo presentation (and the stereo flip mode), and
 * whether timestamp mode is enabled (any non-zero timeStamp).
 */
static void
SetPresentControlBase(NVDevEvoPtr pDevEvo,
                      NVEvoChannelPtr pChannel,
                      const NVFlipChannelEvoHwState *pHwState,
                      NVEvoUpdateState *updateState)
{
    const NvBool stereo = (pHwState->pSurfaceEvo[NVKMS_RIGHT] != NULL);
    NvU32 ctrl = DRF_NUM(917C, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL,
                         pHwState->minPresentInterval);

    if (pHwState->tearing) {
        ctrl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE,
                           _IMMEDIATE, ctrl);
        /*
         * This avoids an invalid state exception:
         *
         *  if ((SetPresentControl.BeginMode != NON_TEARING) &&
         *      (SetPresentControl.BeginMode != AT_FRAME)
         *      && (wir_InterlockWithCore == ENABLE))
         *      throw NV_DISP_BASE_STATE_ERROR_001;
         */
        nvDisableCoreInterlockUpdateState(pDevEvo, updateState, pChannel);
    } else {
        ctrl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE,
                           _NON_TEARING, ctrl);
    }

    if (stereo) {
        ctrl = pHwState->perEyeStereoFlip ?
            FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
                        _STEREO_FLIP_MODE, _AT_ANY_FRAME, ctrl) :
            FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
                        _STEREO_FLIP_MODE, _PAIR_FLIP, ctrl);
        ctrl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _MODE, _STEREO, ctrl);
    } else {
        ctrl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _MODE, _MONO, ctrl);
    }

    // If we have a non-zero timestamp we need to enable timestamp mode.
    ctrl = (pHwState->timeStamp != 0) ?
        FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE,
                    _ENABLE, ctrl) :
        FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE,
                    _DISABLE, ctrl);

    /* Timestamp origin is always 0; the update timestamp is 64-bit. */
    nvDmaSetStartEvoMethod(pChannel, NV917C_SET_TIMESTAMP_ORIGIN_LO, 2);
    nvDmaSetEvoMethodData(pChannel, 0);
    nvDmaSetEvoMethodData(pChannel, 0);

    nvDmaSetStartEvoMethod(pChannel, NV917C_SET_UPDATE_TIMESTAMP_LO, 2);
    nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp));
    nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp));

    nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1);
    nvDmaSetEvoMethodData(pChannel, ctrl);
}
/*
 * Program the base (input) LUT methods (class 917D) for the given head
 * and subdevice, caching the programmed surface/offset in
 * pDevEvo->pSubDevices[sd] so identical re-programming is skipped.
 *
 * The LUT is disabled (NULL ctxdma, zero origin) when 'enable' is FALSE
 * or when no LUT surface is attached to pInputLut.
 */
static void EvoSetBaseInputLut(NVDevEvoPtr pDevEvo,
                               NvU32 sd, NvU32 head,
                               const NVFlipLutHwState *pInputLut,
                               NvBool enable,
                               NVEvoUpdateState *updateState)
{
    /*
     * Program input LUT on the core channel, but output LUT on the base
     * channel, so LUT surfaces can be split. The input LUT must be on the core
     * channel so that I8 surfaces don't fail the error check.
     */
    NVEvoChannelPtr pChannel = pDevEvo->core;
    NvBool enableLut = enable && pInputLut->pLutSurfaceEvo != NULL;
    NvU64 offset = enableLut ? pInputLut->offset : 0;
    NvU32 ctxdma = enableLut ?
        pInputLut->pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0;
    /* Previously-programmed state, cached per subdevice. */
    const NVSurfaceEvoRec *pOldSurface = pDevEvo->pSubDevices[sd]->pBaseLutSurface[head];
    NvBool oldEnableLut = (pOldSurface != NULL);
    NvU64 oldOffset = oldEnableLut ?
        pDevEvo->pSubDevices[sd]->baseLutOffset[head] : 0;
    NvU32 oldCtxdma = oldEnableLut ?
        pOldSurface->planes[0].surfaceDesc.ctxDmaHandle : 0;

    /* The LUT origin is programmed as offset >> 8, so it must be 256-aligned. */
    nvAssert((offset & 0xff) == 0);
    nvAssert((oldOffset & 0xff) == 0);

    /* Nothing to do if the requested state matches what is programmed. */
    if ((enableLut == oldEnableLut) &&
        (ctxdma == oldCtxdma) &&
        (offset == oldOffset)) {
        return;
    }

    nvUpdateUpdateState(pDevEvo, updateState, pChannel);

    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_LO(head), 1);
    nvDmaSetEvoMethodData(pChannel,
        (enableLut ? DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _ENABLE) :
                     DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _DISABLE)) |
        DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _MODE, _INTERPOLATE_1025_UNITY_RANGE) |
        DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE));
    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_HI(head), 1);
    nvDmaSetEvoMethodData(pChannel,
        DRF_NUM(917D, _HEAD_SET_BASE_LUT_HI, _ORIGIN, offset >> 8));
    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_LUT(head), 1);
    nvDmaSetEvoMethodData(pChannel,
        DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma));

    /*
     * Use this backdoor to disable "wide pipe" underreplication during
     * expansion of color components into the display pipe.
     * Underreplication of a non-zero 8-bit color to more than 8 bits
     * causes lookups to fall between LUT entries in interpolating LUTs .
     * See bug 734919 for details. However, we use
     * INDEX_1025_UNITY_RANGE mode for the ILUT, so no interpolation
     * occurs.
     * The "wide pipe" may also cause scanout of 8-bit data to an 8-bit
     * OR to not be a straight passthrough (bug 895401).
     */
    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1);
    nvDmaSetEvoMethodData(pChannel,
        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) |
        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) |
        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _WIDE_PIPE_CRC, _DISABLE));

    /* Record what was just programmed for the next comparison. */
    pDevEvo->pSubDevices[sd]->pBaseLutSurface[head] = enableLut ?
        pInputLut->pLutSurfaceEvo : NULL;
    pDevEvo->pSubDevices[sd]->baseLutOffset[head] = offset;
}
/*
 * Program the overlay channel's input LUT (EVO2, class 917E).
 *
 * Pushes SET_OVERLAY_LUT_{LO,HI} and SET_CONTEXT_DMA_LUT on the given
 * overlay channel.  When 'enable' is FALSE, or pInputLut carries no LUT
 * surface, the LUT is disabled and a null context DMA handle is
 * programmed.  Unlike EvoSetOutputLut(), this function does no
 * previous-state caching: the methods are pushed unconditionally.
 *
 * pDevEvo   - device owning the channel (currently unused in the body)
 * pChannel  - the overlay (917E) channel to receive the methods
 * pInputLut - flip-time LUT state: surface pointer and byte offset
 * enable    - whether the caller wants the LUT enabled at all
 */
static void EvoSetOverlayInputLut(NVDevEvoPtr pDevEvo,
NVEvoChannelPtr pChannel,
const NVFlipLutHwState *pInputLut,
NvBool enable)
{
NvBool enableLut = enable && pInputLut->pLutSurfaceEvo != NULL;
/* When disabled, program a zero offset and a null ctxdma. */
NvU64 offset = enableLut ? pInputLut->offset : 0;
NvU32 ctxdma = enableLut ?
pInputLut->pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0;
/* The _ORIGIN field takes offset >> 8, so the offset must be 256-byte
 * aligned. */
nvAssert((offset & 0xff) == 0);
nvDmaSetStartEvoMethod(pChannel, NV917E_SET_OVERLAY_LUT_LO, 1);
nvDmaSetEvoMethodData(pChannel,
DRF_DEF(917E, _SET_OVERLAY_LUT_LO, _MODE, _INDEX_1025_UNITY_RANGE) |
(enableLut ? DRF_DEF(917E, _SET_OVERLAY_LUT_LO, _ENABLE, _ENABLE) :
DRF_DEF(917E, _SET_OVERLAY_LUT_LO, _ENABLE, _DISABLE)));
nvDmaSetStartEvoMethod(pChannel, NV917E_SET_OVERLAY_LUT_HI, 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917E, _SET_OVERLAY_LUT_HI, _ORIGIN, offset >> 8));
nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_LUT, 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917E, _SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma));
}
/*
 * Program the output LUT (OLUT) for one head on one subdevice (EVO2).
 *
 * The OLUT methods are pushed on the head's base channel (class 917C).
 * The per-subdevice cached state (pOutputLutSurface / outputLutOffset)
 * is compared first so that the methods — and the base/core interlock
 * they force — are only pushed when the enable, ctxdma, or offset
 * actually changes.
 *
 * pDevEvo     - device to program
 * sd          - subdevice index
 * head        - hardware head index
 * pOutputLut  - flip-time OLUT state: surface pointer and byte offset
 * enable      - whether the caller wants the OLUT enabled at all
 * updateState - accumulates the channels touched by this update
 */
static void EvoSetOutputLut(NVDevEvoPtr pDevEvo,
NvU32 sd, NvU32 head,
const NVFlipLutHwState *pOutputLut,
NvBool enable,
NVEvoUpdateState *updateState)
{
/*
 * Program input LUT on the core channel, but output LUT on the base
 * channel, so LUT surfaces can be split. The input LUT must be on the core
 * channel so that I8 surfaces don't fail the error check.
 */
NVEvoChannelPtr pChannel = pDevEvo->base[head];
NvBool enableLut = enable && pOutputLut->pLutSurfaceEvo != NULL;
/* New state: disabled means zero offset and null ctxdma. */
NvU64 offset = enableLut ? pOutputLut->offset : 0;
NvU32 ctxdma = enableLut ?
pOutputLut->pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0;
/* Previously-programmed state, cached per subdevice and head. */
const NVSurfaceEvoRec *pOldSurface = pDevEvo->pSubDevices[sd]->pOutputLutSurface[head];
NvBool oldEnableLut = (pOldSurface != NULL);
NvU64 oldOffset = oldEnableLut ?
pDevEvo->pSubDevices[sd]->outputLutOffset[head] : 0;
NvU32 oldCtxdma = oldEnableLut ?
pOldSurface->planes[0].surfaceDesc.ctxDmaHandle : 0;
NVFlipChannelEvoHwState *pBaseHwState =
&pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER];
/* The _ORIGIN fields take offset >> 8: offsets must be 256-byte
 * aligned. */
nvAssert((offset & 0xff) == 0);
nvAssert((oldOffset & 0xff) == 0);
/* Nothing changed: skip the method push (and the interlock below). */
if ((enableLut == oldEnableLut) &&
(ctxdma == oldCtxdma) &&
(offset == oldOffset)) {
return;
}
nvUpdateUpdateState(pDevEvo, updateState, pChannel);
/*
 * If we're changing the OLUT, this update will need to interlock the base
 * and core channels. The caller must set the main layer on the head to
 * non-tearing. This function may end up being called without a flip on the
 * base layer if it's not dirty, so ensure that we set the present mode
 * here.
 */
nvAssert(!pBaseHwState->tearing);
SetPresentControlBase(pDevEvo, pChannel, pBaseHwState, updateState);
nvDmaSetStartEvoMethod(pChannel, NV917C_SET_OUTPUT_LUT_LO, 1);
nvDmaSetEvoMethodData(pChannel,
DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _MODE, _INDEX_1025_UNITY_RANGE) |
(enableLut ? DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _ENABLE, _ENABLE) :
DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _ENABLE, _DISABLE)));
nvDmaSetStartEvoMethod(pChannel, NV917C_SET_OUTPUT_LUT_HI, 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917C, _SET_OUTPUT_LUT_HI, _ORIGIN, offset >> 8));
nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_LUT, 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917C, _SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma));
/* Refresh the cached state to match what was just programmed. */
pDevEvo->pSubDevices[sd]->pOutputLutSurface[head] = enableLut ?
pOutputLut->pLutSurfaceEvo : NULL;
pDevEvo->pSubDevices[sd]->outputLutOffset[head] = offset;
}
/*
 * HAL entry point (SetOutputLut) for EVO2.
 *
 * Thin wrapper over EvoSetOutputLut(): the OLUT is enabled only when the
 * main layer currently has a scanout surface set on this subdevice/head.
 * The fpNormScale and bypassComposition parameters are part of the common
 * HAL signature but are not used by this EVO2 implementation.
 */
static void EvoSetOutputLut90(NVDevEvoPtr pDevEvo,
NvU32 sd, NvU32 head,
const NVFlipLutHwState *pOutputLut,
NvU32 fpNormScale,
NVEvoUpdateState *updateState,
NvBool bypassComposition)
{
NVFlipChannelEvoHwState *pBaseHwState =
&pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER];
EvoSetOutputLut(pDevEvo, sd, head, pOutputLut,
pBaseHwState->pSurfaceEvo[NVKMS_LEFT] != NULL,
updateState);
}
static void
EvoPushSetCoreSurfaceMethodsForOneSd(NVDevEvoRec *pDevEvo,
const NvU32 sd,
@@ -1375,22 +1670,6 @@ EvoPushSetCoreSurfaceMethodsForOneSd(NVDevEvoRec *pDevEvo,
&pDevEvo->gpus[sd].headState[head];
const NVFlipCursorEvoHwState *pSdCursorState = &pSdHeadState->cursor;
const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo;
const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
NvBool enableOutputLut = pHeadState->lut.outputLutEnabled;
NvBool enableBaseLut = pHeadState->lut.baseLutEnabled;
NVLutSurfaceEvoPtr pCurLutSurfEvo = pHeadState->lut.pCurrSurface;
NvU32 lutCtxdma = pCurLutSurfEvo != NULL ?
pCurLutSurfEvo->surfaceDesc.ctxDmaHandle : 0x0;
if (pSurfaceEvo == NULL || pCurLutSurfEvo == NULL) {
enableOutputLut = FALSE;
enableBaseLut = FALSE;
lutCtxdma = 0x0;
}
nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
EvoSetSurface(pDevEvo, head, pSurfaceEvo, pCscMatrix, updateState);
@@ -1402,13 +1681,16 @@ EvoPushSetCoreSurfaceMethodsForOneSd(NVDevEvoRec *pDevEvo,
updateState,
&pSdCursorState->cursorCompParams);
/*
* EvoPushSetLUTContextDmaMethodsForOneSd() force enables base
* Lut if core scanout surface depth is 8.
*/
EvoPushSetLUTContextDmaMethodsForOneSd(
pDevEvo, sd, head, lutCtxdma, enableBaseLut, enableOutputLut,
updateState);
/* If we're disabling the core surface, we need to disable the LUTs. */
if (pSurfaceEvo == NULL) {
EvoSetBaseInputLut(pDevEvo, sd, head,
NULL, FALSE,
updateState);
EvoSetOutputLut(pDevEvo, sd, head,
NULL, FALSE,
updateState);
}
nvPopEvoSubDevMask(pDevEvo);
}
@@ -1520,66 +1802,11 @@ FlipBase90(NVDevEvoPtr pDevEvo,
nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CSC_RED2RED, 1);
nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _CORE));
return;
}
NvU32 presentControl =
DRF_NUM(917C, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL,
pHwState->minPresentInterval);
if (pHwState->tearing) {
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE,
_IMMEDIATE, presentControl);
/*
* This avoids an invalid state exception:
*
* if ((SetPresentControl.BeginMode != NON_TEARING) &&
* (SetPresentControl.BeginMode != AT_FRAME)
* && (wir_InterlockWithCore == ENABLE))
* throw NV_DISP_BASE_STATE_ERROR_001;
*/
nvDisableCoreInterlockUpdateState(pDevEvo, updateState, pChannel);
} else {
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE,
_NON_TEARING, presentControl);
}
if (pHwState->pSurfaceEvo[NVKMS_RIGHT]) {
if (pHwState->perEyeStereoFlip) {
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
_STEREO_FLIP_MODE, _AT_ANY_FRAME,
presentControl);
} else {
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
_STEREO_FLIP_MODE, _PAIR_FLIP,
presentControl);
}
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
_MODE, _STEREO, presentControl);
} else {
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
_MODE, _MONO, presentControl);
}
// If we have a non-zero timestamp we need to enable timestamp mode
if (pHwState->timeStamp == 0) {
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
_TIMESTAMP_MODE, _DISABLE, presentControl);
} else {
presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL,
_TIMESTAMP_MODE, _ENABLE, presentControl);
}
nvDmaSetStartEvoMethod(pChannel, NV917C_SET_TIMESTAMP_ORIGIN_LO, 2);
nvDmaSetEvoMethodData(pChannel, 0);
nvDmaSetEvoMethodData(pChannel, 0);
nvDmaSetStartEvoMethod(pChannel, NV917C_SET_UPDATE_TIMESTAMP_LO, 2);
nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp));
nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp));
nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1);
nvDmaSetEvoMethodData(pChannel, presentControl);
SetPresentControlBase(pDevEvo, pChannel, pHwState, updateState);
SetCscMatrix(pChannel, NV917C_SET_CSC_RED2RED, &pHwState->cscMatrix,
DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _BASE));
@@ -1630,8 +1857,6 @@ FlipBase90(NVDevEvoPtr pDevEvo,
nvHwFormatFromKmsFormat90(pHwState->pSurfaceEvo[NVKMS_LEFT]->format)) |
DRF_DEF(917C, _SURFACE_SET_PARAMS, _SUPER_SAMPLE, _X1_AA) |
DRF_DEF(917C, _SURFACE_SET_PARAMS, _GAMMA, _LINEAR));
nvAssert(pHwState->inputLut.pLutSurfaceEvo == NULL);
}
static void
@@ -1792,6 +2017,8 @@ FlipOverlay90(NVDevEvoPtr pDevEvo,
// Disable overlay on this head.
nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1);
nvDmaSetEvoMethodData(pChannel, 0);
EvoSetOverlayInputLut(pDevEvo, pChannel, NULL, FALSE);
return;
}
@@ -1828,10 +2055,10 @@ FlipOverlay90(NVDevEvoPtr pDevEvo,
SetCscMatrix(pChannel, NV917E_SET_CSC_RED2RED, &pHwState->cscMatrix, 0);
EvoSetOverlayInputLut(pDevEvo, pChannel, &pHwState->inputLut, TRUE);
nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1);
nvDmaSetEvoMethodData(pChannel, pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle);
nvAssert(pHwState->inputLut.pLutSurfaceEvo == NULL);
}
static NvBool
@@ -2021,6 +2248,23 @@ static void EvoFlip90(NVDevEvoPtr pDevEvo,
} else {
pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head] = FALSE;
}
/*
* On EVO2, error 52 will be thrown if the any of the
* SET_{BASE,OUTPUT}_LUT_{LO,HI} methods are programmed on a
* tearing flip, regardless of whether they actually update the
* hardware state.
*/
if (!pHwState->tearing) {
EvoSetBaseInputLut(pDevEvo, sd, head,
&pHwState->inputLut,
pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL,
updateState);
EvoSetOutputLut(pDevEvo, sd, head,
&pDevEvo->gpus[sd].headState[head].outputLut,
pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL,
updateState);
}
} FOR_EACH_SUBDEV_IN_MASK_END
FlipBase90(pDevEvo, pChannel, pHwState, updateState);
@@ -2389,119 +2633,6 @@ EvoFillLUTSurface90(NVEvoLutEntryRec *pLUTBuffer,
}
}
/*
 * Push the core-channel LUT methods (base LUT, output LUT, and their
 * shared context DMA) for one head on one subdevice (EVO2, class 917D).
 *
 * Both LUTs live in the same NVEvoLutDataRec surface referenced by
 * 'ctxdma'; the per-LUT _ORIGIN fields select the 'base' and 'output'
 * sub-offsets within it.  The caller must have pushed a subdevice mask
 * containing exactly this subdevice (asserted below).
 *
 * pDevEvo         - device to program
 * sd              - subdevice index (must match the pushed subdev mask)
 * head            - hardware head index
 * ctxdma          - context DMA handle of the LUT surface, or 0 to disable
 * enableBaseLut   - enable the base (input) LUT; may be force-enabled
 *                   below for I8 core surfaces
 * enableOutputLut - enable the output LUT
 * updateState     - accumulates the channels touched by this update
 */
static void
EvoPushSetLUTContextDmaMethodsForOneSd(NVDevEvoRec *pDevEvo,
const NvU32 sd,
const NvU32 head,
const NvU32 ctxdma,
NvBool enableBaseLut,
const NvBool enableOutputLut,
NVEvoUpdateState *updateState)
{
NVEvoChannelPtr pChannel = pDevEvo->core;
NvU64 offset;
const NVSurfaceEvoRec *pCoreSurfaceEvo =
pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head];
/* Is the current core channel scanout surface an I8 (palette) format? */
const NvBool surfaceDepth8 = (pCoreSurfaceEvo != NULL) ?
(pCoreSurfaceEvo->format == NvKmsSurfaceMemoryFormatI8) : FALSE;
nvAssert(nvPeekEvoSubDevMask(pDevEvo) == NVBIT(sd));
// Depth 8 requires the base LUT to be enabled.
if (ctxdma && !enableBaseLut && surfaceDepth8) {
// TODO: Is this still required? Callers should specify the LUT at
// modeset time now.
enableBaseLut = TRUE;
}
/* A LUT cannot be enabled without a context DMA to fetch it from. */
nvAssert(ctxdma || (!enableBaseLut && !enableOutputLut));
nvUpdateUpdateState(pDevEvo, updateState, pChannel);
/* Program the base LUT */
offset = offsetof(NVEvoLutDataRec, base);
nvAssert((offset & 0xff) == 0);
nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_LO(head), 1);
nvDmaSetEvoMethodData(pChannel,
DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _MODE, _INDEX_1025_UNITY_RANGE) |
(enableBaseLut ? DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _ENABLE) :
DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _DISABLE)) |
DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE));
nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_HI(head), 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917D, _HEAD_SET_BASE_LUT_HI, _ORIGIN, offset >> 8));
/* Program the output LUT */
offset = offsetof(NVEvoLutDataRec, output);
nvAssert((offset & 0xff) == 0);
nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OUTPUT_LUT_LO(head), 1);
nvDmaSetEvoMethodData(pChannel,
(enableOutputLut ? DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _ENABLE, _ENABLE) :
DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _ENABLE, _DISABLE)) |
DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _MODE, _INTERPOLATE_1025_UNITY_RANGE) |
DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE));
nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OUTPUT_LUT_HI(head), 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917D, _HEAD_SET_OUTPUT_LUT_HI, _ORIGIN, offset >> 8));
/* Set the ctxdma that's used by both LUTs */
nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_LUT(head), 1);
nvDmaSetEvoMethodData(pChannel,
DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma));
/*
 * Use this backdoor to disable "wide pipe" underreplication during
 * expansion of color components into the display pipe. Underreplication
 * of a non-zero 8-bit color to more than 8 bits causes lookups to fall
 * between LUT entries in a 256-entry LUT, which we don't want. See bug
 * 734919 for details.
 * The "wide pipe" may also cause scanout of 8-bit data to an 8-bit OR to
 * not be a straight passthrough (bug 895401).
 */
nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1);
nvDmaSetEvoMethodData(pChannel,
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) |
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) |
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _WIDE_PIPE_CRC, _DISABLE));
}
/*
 * HAL entry point (SetLUTContextDma) for EVO2.
 *
 * Sets the LUT context DMA for one head on the display-owner subdevice,
 * wrapping EvoPushSetLUTContextDmaMethodsForOneSd() in the required
 * subdevice mask push/pop.  Skips programming entirely when a non-null
 * ctxdma is requested but the core channel has no scanout surface (see
 * comment below).  The bypassComposition parameter is part of the common
 * HAL signature but is not used by this EVO2 implementation.
 */
static void EvoSetLUTContextDma90(const NVDispEvoRec *pDispEvo,
const int head,
NVLutSurfaceEvoPtr pLutSurfEvo,
NvBool enableBaseLut,
NvBool enableOutputLut,
NVEvoUpdateState *updateState,
NvBool bypassComposition)
{
NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
const NvU32 sd = pDispEvo->displayOwner;
const NvBool coreChannelCtxDmaNonNull =
pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] != NULL;
/* NULL LUT surface means "disable": program a null handle. */
const NvU32 ctxdma = (pLutSurfEvo != NULL) ? pLutSurfEvo->surfaceDesc.ctxDmaHandle : 0;
/*
 * If the core channel doesn't have a scanout surface set, then setting the
 * LUT context DMA will cause an exception.
 */
if (!coreChannelCtxDmaNonNull && ctxdma) {
return;
}
nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
EvoPushSetLUTContextDmaMethodsForOneSd(
pDevEvo, sd, head, ctxdma, enableBaseLut, enableOutputLut,
updateState);
nvPopEvoSubDevMask(pDevEvo);
}
#define NV_EVO2_CAP_GET_PIN(cl, n, pEvoCaps, word, name, idx, pCaps) \
(pEvoCaps)->pin[(idx)].flipLock = \
FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \
@@ -2597,6 +2728,19 @@ static void EvoParseCapabilityNotifier3(NVEvoCapabilitiesPtr pEvoCaps,
// Don't need any PIOR caps currently.
}
/*
 * Fill in the LUT capabilities for one EVO2 LUT stage.
 *
 * VSS (variable-segment-size) LUTs are unsupported on EVO2, and all LUT
 * entries use the UNORM14_WAR_813188 format.  When 'supported' is TRUE
 * the stage advertises the fixed 1025-entry EVO2 LUT; otherwise zero
 * entries.
 *
 * pCaps     - capabilities structure to populate
 * supported - whether this LUT stage exists on EVO2 hardware
 */
static void EvoFillLUTCaps(struct NvKmsLUTCaps *pCaps, NvBool supported)
{
pCaps->supported = supported;
pCaps->vssSupport = NVKMS_LUT_VSS_NOT_SUPPORTED;
pCaps->vssType = NVKMS_LUT_VSS_TYPE_NONE;
pCaps->vssSegments = 0;
pCaps->lutEntries = supported ? 1025 : 0;
pCaps->entryFormat = NVKMS_LUT_FORMAT_UNORM14_WAR_813188;
}
static NvBool EvoGetCapabilities90(NVDevEvoPtr pDevEvo)
{
NVEvoChannelPtr pChannel = pDevEvo->core;
@@ -2620,8 +2764,14 @@ static NvBool EvoGetCapabilities90(NVDevEvoPtr pDevEvo)
pDevEvo->caps.legacyNotifierFormatSizeBytes[layer] =
NV_DISP_BASE_NOTIFIER_1_SIZEOF;
}
/* Only the ILUT and OLUT are supported on EVO2. The TMO is EVO3+. */
EvoFillLUTCaps(&pDevEvo->caps.layerCaps[layer].ilut, TRUE /* supported */);
EvoFillLUTCaps(&pDevEvo->caps.layerCaps[layer].tmo, FALSE /* supported */);
}
EvoFillLUTCaps(&pDevEvo->caps.olut, TRUE /* supported */);
pDevEvo->caps.cursorCompositionCaps =
(struct NvKmsCompositionCapabilities) {
.supportedColorKeySelects =
@@ -3473,7 +3623,8 @@ static void EvoStopHeadCRC32Capture90(NVDevEvoPtr pDevEvo,
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) |
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _TIMESTAMP_MODE, _FALSE) |
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _FLIPLOCK_MODE, _FALSE) |
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE));
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE) |
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _WIDE_PIPE_CRC, _DISABLE));
}
/*!
@@ -3832,9 +3983,9 @@ static NvU32 EvoBindSurfaceDescriptor90(
return nvCtxDmaBind(pDevEvo, pChannel, pSurfaceDesc->ctxDmaHandle);
}
NVEvoHAL nvEvo94 = {
NVEvoHAL nvEvo97 = {
EvoSetRasterParams91, /* SetRasterParams */
EvoSetProcAmp90, /* SetProcAmp */
EvoSetProcAmp97, /* SetProcAmp */
EvoSetHeadControl90, /* SetHeadControl */
EvoSetHeadRefClk90, /* SetHeadRefClk */
EvoHeadSetControlOR90, /* HeadSetControlOR */
@@ -3849,7 +4000,97 @@ NVEvoHAL nvEvo94 = {
EvoFlip90, /* Flip */
EvoFlipTransitionWAR90, /* FlipTransitionWAR */
EvoFillLUTSurface90, /* FillLUTSurface */
EvoSetLUTContextDma90, /* SetLUTContextDma */
EvoSetOutputLut90, /* SetOutputLut */
EvoSetOutputScaler90, /* SetOutputScaler */
EvoSetViewportPointIn90, /* SetViewportPointIn */
EvoSetViewportInOut90, /* SetViewportInOut */
EvoSetCursorImage91, /* SetCursorImage */
EvoValidateCursorSurface90, /* ValidateCursorSurface */
EvoValidateWindowFormat90, /* ValidateWindowFormat */
EvoInitCompNotifier3, /* InitCompNotifier */
EvoIsCompNotifierComplete3, /* IsCompNotifierComplete */
EvoWaitForCompNotifier3, /* WaitForCompNotifier */
EvoSetDither91, /* SetDither */
EvoSetStallLock94, /* SetStallLock */
NULL, /* SetDisplayRate */
EvoInitChannel90, /* InitChannel */
NULL, /* InitDefaultLut */
EvoInitWindowMapping90, /* InitWindowMapping */
nvEvo1IsChannelIdle, /* IsChannelIdle */
nvEvo1IsChannelMethodPending, /* IsChannelMethodPending */
EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannel */
EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannelIgnoreLock */
EvoAccelerateChannel91, /* AccelerateChannel */
EvoResetChannelAccelerators91, /* ResetChannelAccelerators */
EvoAllocRmCtrlObject90, /* AllocRmCtrlObject */
EvoFreeRmCtrlObject90, /* FreeRmCtrlObject */
EvoSetImmPointOut91, /* SetImmPointOut */
EvoStartHeadCRC32Capture90, /* StartCRC32Capture */
EvoStopHeadCRC32Capture90, /* StopCRC32Capture */
EvoQueryHeadCRC32_90, /* QueryCRC32 */
EvoGetScanLine90, /* GetScanLine */
NULL, /* ConfigureVblankSyncObject */
nvEvo1SetDscParams, /* SetDscParams */
NULL, /* EnableMidFrameAndDWCFWatermark */
EvoGetActiveViewportOffset94, /* GetActiveViewportOffset */
EvoClearSurfaceUsage91, /* ClearSurfaceUsage */
EvoComputeWindowScalingTaps91, /* ComputeWindowScalingTaps */
NULL, /* GetWindowScalingCaps */
NULL, /* SetMergeMode */
EvoAllocSurfaceDescriptor90, /* AllocSurfaceDescriptor */
EvoFreeSurfaceDescriptor90, /* FreeSurfaceDescriptor */
EvoBindSurfaceDescriptor90, /* BindSurfaceDescriptor */
NULL, /* SetTmoLutSurfaceAddress */
NULL, /* SetILUTSurfaceAddress */
NULL, /* SetISOSurfaceAddress */
NULL, /* SetCoreNotifierSurfaceAddressAndControl */
NULL, /* SetWinNotifierSurfaceAddressAndControl */
NULL, /* SetSemaphoreSurfaceAddressAndControl */
NULL, /* SetAcqSemaphoreSurfaceAddressAndControl */
{ /* caps */
FALSE, /* supportsNonInterlockedUsageBoundsUpdate */
FALSE, /* supportsDisplayRate */
TRUE, /* supportsFlipLockRGStatus */
FALSE, /* needDefaultLutSurface */
FALSE, /* hasUnorm10OLUT */
TRUE, /* supportsImageSharpening */
FALSE, /* supportsHDMIVRR */
TRUE, /* supportsCoreChannelSurface */
FALSE, /* supportsHDMIFRL */
TRUE, /* supportsSetStorageMemoryLayout */
FALSE, /* supportsIndependentAcqRelSemaphore */
TRUE, /* supportsCoreLut */
FALSE, /* supportsSynchronizedOverlayPositionUpdate */
FALSE, /* supportsVblankSyncObjects */
TRUE, /* requiresScalingTapsInBothDimensions */
FALSE, /* supportsMergeMode */
FALSE, /* supportsHDMI10BPC */
FALSE, /* supportsDPAudio192KHz */
NV_EVO2_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */
sizeof(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */
NV_EVO_SCALER_1TAP, /* minScalerTaps */
0, /* xEmulatedSurfaceMemoryFormats */
},
};
NVEvoHAL nvEvo94 = {
EvoSetRasterParams91, /* SetRasterParams */
EvoSetProcAmp90, /* SetProcAmp */
EvoSetHeadControl90, /* SetHeadControl */
EvoSetHeadRefClk90, /* SetHeadRefClk */
EvoHeadSetControlOR90, /* HeadSetControlOR */
EvoORSetControl90, /* ORSetControl */
EvoHeadSetDisplayId90, /* HeadSetDisplayId */
EvoSetUsageBounds90, /* SetUsageBounds */
EvoUpdate91, /* Update */
nvEvo1IsModePossible, /* IsModePossible */
nvEvo1PrePostIMP, /* PrePostIMP */
EvoSetNotifier90, /* SetNotifier */
EvoGetCapabilities90, /* GetCapabilities */
EvoFlip90, /* Flip */
EvoFlipTransitionWAR90, /* FlipTransitionWAR */
EvoFillLUTSurface90, /* FillLUTSurface */
EvoSetOutputLut90, /* SetOutputLut */
EvoSetOutputScaler90, /* SetOutputScaler */
EvoSetViewportPointIn90, /* SetViewportPointIn */
EvoSetViewportInOut90, /* SetViewportInOut */

File diff suppressed because it is too large Load Diff

View File

@@ -26,6 +26,7 @@
#include "nvkms-flip.h"
#include "nvkms-hw-flip.h"
#include "nvkms-utils-flip.h"
#include "nvkms-lut.h"
#include "nvkms-prealloc.h"
#include "nvkms-private.h"
#include "nvkms-utils.h"
@@ -68,9 +69,10 @@ NvBool nvCheckFlipPermissions(
layerMask = allLayersMask;
}
/* Changing viewPortIn or LUT requires permission to alter all layers. */
/* Changing viewPortIn or output LUT requires permission to alter all layers. */
if ((layerMask != allLayersMask) && ((pParams->viewPortIn.specified) ||
(pParams->olut.specified) ||
(pParams->lut.input.specified) ||
(pParams->lut.output.specified))) {
return FALSE;
@@ -196,8 +198,7 @@ static NvBool UpdateProposedFlipStateOneApiHead(
}
}
if (!nvChooseColorRangeEvo(pProposedApiHead->hdr.dpyColor.colorimetry,
pDpyEvo->requestedColorRange,
if (!nvChooseColorRangeEvo(pDpyEvo->requestedColorRange,
pProposedApiHead->hdr.dpyColor.format,
pProposedApiHead->hdr.dpyColor.bpc,
&pProposedApiHead->hdr.dpyColor.range)) {
@@ -354,6 +355,26 @@ static void InitNvKmsFlipWorkArea(const NVDevEvoRec *pDevEvo,
}
}
/*
 * Release per-head resources held by a flip work area.
 *
 * Walks every head of every display on the device and frees any TMO LUT
 * surfaces still referenced by the head's newState.  Called on all exit
 * paths from the flip, so that surfaces allocated during flip validation
 * are not leaked when the flip fails or is never committed.
 */
static void CleanupNvKmsFlipWorkArea(NVDevEvoPtr pDevEvo,
struct NvKmsFlipWorkArea *pWorkArea)
{
const NVDispEvoRec *pDispEvo;
NvU32 sd, head;
FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
for (head = 0; head < ARRAY_LEN(pWorkArea->sd[sd].head); head++) {
/*
 * If the flip failed or wasn't committed, any TMO surfaces
 * allocated by nvSetTmoLutSurfaceEvo will be left in newState with
 * 1 refcnt, so free them now.
 */
nvFreeUnrefedTmoLutSurfacesEvo(pDevEvo,
&pWorkArea->sd[sd].head[head].newState,
head);
}
}
}
static void FlipEvoOneApiHead(NVDispEvoRec *pDispEvo,
const NvU32 apiHead,
const struct NvKmsFlipWorkArea *pWorkArea,
@@ -397,12 +418,6 @@ static void FlipEvoOneApiHead(NVDispEvoRec *pDispEvo,
&pProposedApiHead->hdr.dpyColor,
pUpdateState);
}
if (pProposedApiHead->lut.input.specified ||
pProposedApiHead->lut.output.specified) {
/* Update current LUT to hardware */
nvEvoSetLUTContextDma(pDispEvo, head, pUpdateState);
}
}
if (pProposedApiHead->dirty.hdr) {
@@ -502,9 +517,7 @@ NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
const NvBool allowVrr =
GetAllowVrr(pDevEvo, pFlipHead, numFlipHeads,
requestAllowVrr, &applyAllowVrr);
struct NvKmsFlipWorkArea *pWorkArea =
nvPreallocGet(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA,
sizeof(*pWorkArea));
struct NvKmsFlipWorkArea *pWorkArea;
NvU32 i;
/*
@@ -516,9 +529,14 @@ NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
* NVKMS_IOCTL_FLIP requests.
*/
if (pDevEvo->coreInitMethodsPending) {
goto done;
if (reply) {
reply->flipResult = result;
}
return ret;
}
pWorkArea = nvPreallocGet(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA,
sizeof(*pWorkArea));
InitNvKmsFlipWorkArea(pDevEvo, pWorkArea);
/* Validate the flip parameters and update the work area. */
@@ -677,7 +695,7 @@ NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
/* fall through */
done:
CleanupNvKmsFlipWorkArea(pDevEvo, pWorkArea);
nvPreallocRelease(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA);
if (reply) {
reply->flipResult = result;
@@ -782,7 +800,7 @@ void nvApiHeadSetViewportPointIn(const NVDispEvoRec *pDispEvo,
nvPushEvoSubDevMaskDisp(pDispEvo);
pDevEvo->hal->SetViewportPointIn(pDevEvo, head,
x + (hwViewportInWidth * pHeadState->tilePosition), y,
x + (hwViewportInWidth * pHeadState->mergeHeadSection), y,
&updateState);
nvPopEvoSubDevMask(pDevEvo);

View File

@@ -50,6 +50,7 @@
#include "class/clc77d.h" // NVC67D_CORE_CHANNEL_DMA
extern NVEvoHAL nvEvo94;
extern NVEvoHAL nvEvo97;
extern NVEvoHAL nvEvoC3;
extern NVEvoHAL nvEvoC5;
extern NVEvoHAL nvEvoC6;
@@ -60,6 +61,7 @@ enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo)
_pEvoHal, \
_supportsDP13, \
_supportsHDMI20, \
_supportsYUV2020, \
_inputLutAppliesToBase, \
_dpYCbCr422MaxBpc, \
_hdmiYCbCr422MaxBpc, \
@@ -83,6 +85,7 @@ enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo)
.evoCaps = { \
.supportsDP13 = _supportsDP13, \
.supportsHDMI20 = _supportsHDMI20, \
.supportsYUV2020 = _supportsYUV2020, \
.validNIsoFormatMask = _validNIsoFormatMask, \
.inputLutAppliesToBase = _inputLutAppliesToBase, \
.maxPitchValue = _maxPitch, \
@@ -153,32 +156,33 @@ enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo)
const NVEvoCapsRec evoCaps;
} dispTable[] = {
/*
* hdmiYCbCr422MaxBpc--------------------+
* dpYCbCr422MaxBpc------------------+ |
* inputLutAppliesToBase ---------+ | |
* supportsHDMI20 -------------+ | | |
* supportsDP13 ------------+ | | | |
* pEvoHal --------------+ | | | | |
* windowClassPrefix | | | | | |
* classPrefix | | | | | | |
* | | | | | | | |
* hdmiYCbCr422MaxBpc-----------------------+
* dpYCbCr422MaxBpc---------------------+ |
* inputLutAppliesToBase ------------+ | |
* supportsYUV2020 ---------------+ | | |
* supportsHDMI20 -------------+ | | | |
* supportsDP13 ------------+ | | | | |
* pEvoHal --------------+ | | | | | |
* windowClassPrefix | | | | | | |
* classPrefix | | | | | | | |
* | | | | | | | | |
*/
/* Ada */
ENTRY_NVD(C7, C6, &nvEvoC6, 1, 1, 0, 12, 12),
ENTRY_NVD(C7, C6, &nvEvoC6, 1, 1, 1, 0, 12, 12),
/* Ampere */
ENTRY_NVD(C6, C6, &nvEvoC6, 1, 1, 0, 12, 12),
ENTRY_NVD(C6, C6, &nvEvoC6, 1, 1, 1, 0, 12, 12),
/* Turing */
ENTRY_NVD(C5, C5, &nvEvoC5, 1, 1, 0, 12, 12),
ENTRY_NVD(C5, C5, &nvEvoC5, 1, 1, 1, 0, 12, 12),
/* Volta */
ENTRY_NVD(C3, C3, &nvEvoC3, 1, 1, 0, 12, 12),
ENTRY_NVD(C3, C3, &nvEvoC3, 1, 1, 1, 0, 12, 12),
/* gp10x */
ENTRY_EVO(98, &nvEvo94, 1, 1, 1, 12, 12),
ENTRY_EVO(98, &nvEvo97, 1, 1, 1, 1, 12, 12),
/* gp100 */
ENTRY_EVO(97, &nvEvo94, 1, 1, 1, 12, 12),
ENTRY_EVO(97, &nvEvo97, 1, 1, 1, 1, 12, 12),
/* gm20x */
ENTRY_EVO(95, &nvEvo94, 0, 1, 1, 8, 0),
ENTRY_EVO(95, &nvEvo94, 0, 1, 0, 1, 8, 0),
/* gm10x */
ENTRY_EVO(94, &nvEvo94, 0, 0, 1, 8, 0),
ENTRY_EVO(94, &nvEvo94, 0, 0, 0, 1, 8, 0),
};
int i;

View File

@@ -69,16 +69,10 @@ static inline const NVT_EDID_CEA861_INFO *GetExt861(const NVParsedEdidEvoRec *pP
static void CalculateVideoInfoFrameColorFormat(
const NVDpyAttributeColor *pDpyColor,
const NvU32 hdTimings,
const NVT_EDID_INFO *pEdidInfo,
NVT_VIDEO_INFOFRAME_CTRL *pCtrl)
{
/*
* If NVKMS_OUTPUT_COLORIMETRY_BT2100 is enabled, we expect the colorSpace
* is RGB. This is enforced when the colorSpace is selected.
* XXX HDR TODO: Support YUV
*/
nvAssert((pDpyColor->colorimetry != NVKMS_OUTPUT_COLORIMETRY_BT2100) ||
(pDpyColor->format ==
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB));
NvBool sinkSupportsRGBQuantizationOverride = FALSE;
// sets video infoframe colorspace (RGB/YUV).
switch (pDpyColor->format) {
@@ -102,22 +96,36 @@ static void CalculateVideoInfoFrameColorFormat(
// sets video infoframe colorimetry.
switch (pDpyColor->format) {
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
pCtrl->colorimetry = NVT_COLORIMETRY_BT2020RGB;
} else {
pCtrl->colorimetry = NVT_COLORIMETRY_RGB;
switch (pDpyColor->colorimetry) {
case NVKMS_OUTPUT_COLORIMETRY_BT2100:
pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY;
pCtrl->extended_colorimetry =
NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020RGBYCC;
break;
case NVKMS_OUTPUT_COLORIMETRY_DEFAULT:
pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_NO_DATA;
break;
}
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
if (hdTimings) {
pCtrl->colorimetry = NVT_COLORIMETRY_YUV_709;
} else {
pCtrl->colorimetry = NVT_COLORIMETRY_YUV_601;
switch (pDpyColor->colorimetry) {
case NVKMS_OUTPUT_COLORIMETRY_BT2100:
pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY;
pCtrl->extended_colorimetry =
NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020RGBYCC;
break;
case NVKMS_OUTPUT_COLORIMETRY_DEFAULT:
pCtrl->colorimetry =
(hdTimings ? NVT_VIDEO_INFOFRAME_BYTE2_C1C0_ITU709 :
NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SMPTE170M_ITU601);
break;
}
break;
case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
pCtrl->colorimetry = NVT_COLORIMETRY_YUV_709;
// XXX HDR TODO: Support YUV420 + HDR
nvAssert(pDpyColor->colorimetry != NVKMS_OUTPUT_COLORIMETRY_BT2100);
pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_ITU709;
break;
default:
nvAssert(!"Invalid colorSpace value");
@@ -139,13 +147,22 @@ static void CalculateVideoInfoFrameColorFormat(
break;
}
if (pEdidInfo != NULL) {
sinkSupportsRGBQuantizationOverride = (pEdidInfo->ext861.valid.VCDB &&
((pEdidInfo->ext861.video_capability & NVT_CEA861_VCDB_QS_MASK) >>
NVT_CEA861_VCDB_QS_SHIFT) != 0);
}
if ((pDpyColor->format == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) &&
!sinkSupportsRGBQuantizationOverride) {
pCtrl->rgb_quantization_range = NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_DEFAULT;
}
/*
* Only limited color range is allowed with YUV444, YUV422 color spaces, or
* BT2020 colorimetry.
* Only limited color range is allowed with YUV444 and YUV422 color spaces.
*/
nvAssert(!(((pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422) ||
(pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444) ||
(pCtrl->colorimetry == NVT_COLORIMETRY_BT2020RGB)) &&
(pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444)) &&
(pCtrl->rgb_quantization_range !=
NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE)));
}
@@ -532,8 +549,7 @@ static void SendVideoInfoFrame(const NVDispEvoRec *pDispEvo,
NVT_VIDEO_INFOFRAME VideoInfoFrame;
NVT_STATUS status;
CalculateVideoInfoFrameColorFormat(pDpyColor, hdTimings, &videoCtrl);
CalculateVideoInfoFrameColorFormat(pDpyColor, hdTimings, pEdidInfo, &videoCtrl);
status = NvTiming_ConstructVideoInfoframe(pEdidInfo,
&videoCtrl,

View File

@@ -2281,7 +2281,8 @@ void nvHsAddVBlankCallback(NVHsChannelEvoPtr pHsChannel)
nvApiHeadRegisterVBlankCallback(pDispEvo,
pHsChannel->apiHead,
HsVBlankCallback,
NULL);
NULL,
1 /* listIndex */);
}
/*!

View File

@@ -168,6 +168,8 @@ void nvClearFlipEvoHwState(
nvkms_memset(pFlipState, 0, sizeof(*pFlipState));
pFlipState->olutFpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT;
for (i = 0; i < ARRAY_LEN(pFlipState->layer); i++) {
pFlipState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX;
}
@@ -227,6 +229,9 @@ void nvInitFlipEvoHwState(
pFlipState->hdrInfoFrame.eotf = pHeadState->hdrInfoFrameOverride.eotf;
pFlipState->hdrInfoFrame.staticMetadata =
pHeadState->hdrInfoFrameOverride.staticMetadata;
pFlipState->outputLut = pSdHeadState->outputLut;
pFlipState->olutFpNormScale = pSdHeadState->olutFpNormScale;
}
@@ -240,6 +245,8 @@ NvBool nvIsLayerDirty(const struct NvKmsFlipCommonParams *pParams,
pParams->layer[layer].completionNotifier.specified ||
pParams->layer[layer].syncObjects.specified ||
pParams->layer[layer].compositionParams.specified ||
pParams->layer[layer].ilut.specified ||
pParams->layer[layer].tmo.specified ||
pParams->layer[layer].csc.specified ||
pParams->layer[layer].hdr.specified ||
pParams->layer[layer].colorSpace.specified;
@@ -287,9 +294,12 @@ static NvBool FlipRequiresNonTearingMode(
// Layout (i.e. frame, field1, or field2)
pOldSurf->widthInPixels != pNewSurf->widthInPixels ||
pOldSurf->heightInPixels != pNewSurf->heightInPixels ||
pOldSurf->layout != pNewSurf->layout;
pOldSurf->layout != pNewSurf->layout ||
// UseGainOfs
// NewBaseLut -- USE_CORE_LUT is programmed in InitChannel*
pOld->inputLut.pLutSurfaceEvo != pNew->inputLut.pLutSurfaceEvo ||
pOld->inputLut.offset != pNew->inputLut.offset ||
pOld->tmoLut.pLutSurfaceEvo != pNew->tmoLut.pLutSurfaceEvo ||
pOld->tmoLut.offset != pNew->tmoLut.offset;
// NewOutputLut
}
@@ -560,9 +570,106 @@ static NvBool UpdateLayerFlipEvoHwStateHDRStaticMetadata(
return TRUE;
}
static NvBool UpdateFlipLutHwState(
const NVDevEvoRec *pDevEvo,
const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
NVFlipLutHwState *pFlipLutHwState,
const struct NvKmsLUTSurfaceParams *pLUTSurfaceParams,
const struct NvKmsLUTCaps *pLUTCaps,
const NvBool isUsedByLayerChannel)
{
NvU32 requiredSize = 0;
/*
* If the LUT is not supported and the user has specified it, even with
* surfaceHandle == 0, the request is invalid.
*/
if (!pLUTCaps->supported) {
return FALSE;
}
if (pLUTSurfaceParams->surfaceHandle != 0) {
pFlipLutHwState->pLutSurfaceEvo =
nvEvoGetSurfaceFromHandle(pDevEvo,
pOpenDevSurfaceHandles,
pLUTSurfaceParams->surfaceHandle,
FALSE, /* isUsedByCursorChannel */
isUsedByLayerChannel);
if (pFlipLutHwState->pLutSurfaceEvo == NULL) {
/* Invalid surface handle */
return FALSE;
}
pFlipLutHwState->offset = pLUTSurfaceParams->offset;
pFlipLutHwState->vssSegments = pLUTSurfaceParams->vssSegments;
pFlipLutHwState->lutEntries = pLUTSurfaceParams->lutEntries;
/* Attempt to validate the surface and parameters: */
if (pFlipLutHwState->pLutSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) {
/*
* Only pitch surfaces can be used.
*
* XXX: Also need surface format check?
* (NvKmsSurfaceMemoryFormatR16G16B16A16)
*/
return FALSE;
}
if ((pLUTCaps->vssSupport == NVKMS_LUT_VSS_NOT_SUPPORTED) &&
(pLUTSurfaceParams->vssSegments != 0)) {
/* Can't specify VSS entries if VSS is not supported. */
return FALSE;
}
if ((pLUTCaps->vssSupport == NVKMS_LUT_VSS_REQUIRED) &&
(pLUTSurfaceParams->vssSegments == 0)) {
/* Must specify VSS entries if VSS is required. */
return FALSE;
}
if ((pLUTSurfaceParams->lutEntries > pLUTCaps->lutEntries) ||
(pLUTSurfaceParams->vssSegments > pLUTCaps->vssSegments)) {
/* The number of LUT and VSS entries cannot exceed LUT caps. */
return FALSE;
}
requiredSize = pLUTSurfaceParams->lutEntries * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE +
(pLUTSurfaceParams->vssSegments > 0 ? NVKMS_LUT_VSS_HEADER_SIZE : 0);
if (requiredSize > pFlipLutHwState->pLutSurfaceEvo->planes[0].rmObjectSizeInBytes) {
/* The surface isn't large enough to hold the described LUT. */
return FALSE;
}
/*
* TODO: Check that lutEntries, vssSegments, and vssType correlate
* correctly.
*/
} else {
/* Disable the LUT. */
pFlipLutHwState->pLutSurfaceEvo = NULL;
pFlipLutHwState->offset = 0;
pFlipLutHwState->vssSegments = 0;
pFlipLutHwState->lutEntries = 0;
}
return TRUE;
}
#define WITH_APIHEAD_FOR_HEAD(_pDevEvo, _sd, _head, _apiHead) \
for (_apiHead = 0; \
_apiHead < ARRAY_LEN(_pDevEvo->pDispEvo[_sd]->apiHeadState); \
_apiHead++) { \
if ((_pDevEvo->pDispEvo[_sd]->apiHeadState[_apiHead].hwHeadsMask & \
NVBIT(_head)) != 0) {
#define WITH_APIHEAD_FOR_HEAD_DONE \
break; \
} \
}
static NvBool UpdateLayerFlipEvoHwStateCommon(
const struct NvKmsPerOpenDev *pOpenDev,
const NVDevEvoRec *pDevEvo,
NVDevEvoRec *pDevEvo,
const NvU32 sd,
const NvU32 head,
const NvU32 layer,
@@ -742,6 +849,49 @@ static NvBool UpdateLayerFlipEvoHwStateCommon(
pParams->layer[layer].colorSpace.val;
}
if (pParams->layer[layer].csc00Override.specified) {
// CSC00 is only available on layers that support ICtCp.
if (!pDevEvo->caps.layerCaps[layer].supportsICtCp &&
pHwState->csc00Override.enabled) {
return FALSE;
}
pHwState->csc00Override.enabled =
pParams->layer[layer].csc00Override.enabled;
pHwState->csc00Override.matrix =
pParams->layer[layer].csc00Override.matrix;
}
if (pParams->layer[layer].csc01Override.specified) {
// CSC01 is only available on layers that support ICtCp.
if (!pDevEvo->caps.layerCaps[layer].supportsICtCp &&
pHwState->csc01Override.enabled) {
return FALSE;
}
pHwState->csc01Override.enabled =
pParams->layer[layer].csc01Override.enabled;
pHwState->csc01Override.matrix =
pParams->layer[layer].csc01Override.matrix;
}
if (pParams->layer[layer].csc10Override.specified) {
// CSC10 is only available on layers that support ICtCp.
if (!pDevEvo->caps.layerCaps[layer].supportsICtCp &&
pHwState->csc10Override.enabled) {
return FALSE;
}
pHwState->csc10Override.enabled =
pParams->layer[layer].csc10Override.enabled;
pHwState->csc10Override.matrix =
pParams->layer[layer].csc10Override.matrix;
}
if (pParams->layer[layer].csc11Override.specified) {
pHwState->csc11Override.enabled =
pParams->layer[layer].csc11Override.enabled;
pHwState->csc11Override.matrix =
pParams->layer[layer].csc11Override.matrix;
}
if (pHwState->composition.depth == 0) {
pHwState->composition.depth =
NVKMS_MAX_LAYERS_PER_HEAD - layer;
@@ -773,6 +923,89 @@ static NvBool UpdateLayerFlipEvoHwStateCommon(
}
}
if (pParams->layer[layer].ilut.specified) {
if (pParams->layer[layer].ilut.enabled) {
ret = UpdateFlipLutHwState(
pDevEvo,
pOpenDevSurfaceHandles,
&pFlipState->layer[layer].inputLut,
&pParams->layer[layer].ilut.lut,
&pDevEvo->caps.layerCaps[layer].ilut,
TRUE /*isUsedByLayerChannel*/);
if (!ret) {
return FALSE;
}
/* Cache that the hw state is from the new params. */
pFlipState->layer[layer].inputLut.fromOverride = TRUE;
} else {
/* Cache that the hw state is from the legacy params. */
pFlipState->layer[layer].inputLut.fromOverride = FALSE;
}
}
/*
* If this is not from the new params - either cached this flip or in a
* previous one - set from the legacy params.
*/
if (!pFlipState->layer[layer].inputLut.fromOverride) {
NvU32 apiHead;
WITH_APIHEAD_FOR_HEAD(pDevEvo, sd, head, apiHead) {
NvBool ilutEnabled = pDevEvo->lut.apiHead[apiHead].disp[sd].curBaseLutEnabled;
NvU32 curLUTIndex = pDevEvo->lut.apiHead[apiHead].disp[sd].curLUTIndex;
NvU32 nextLutIndex = (curLUTIndex + 1) % 3;
/* If the legacy params are specified, update from those. */
if (pParams->lut.input.specified) {
if (pParams->lut.input.end != 0) {
pFlipState->layer[layer].inputLut.pLutSurfaceEvo =
pDevEvo->lut.apiHead[apiHead].LUT[nextLutIndex];
pFlipState->layer[layer].inputLut.offset = offsetof(NVEvoLutDataRec, base);
pFlipState->layer[layer].inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
pFlipState->layer[layer].inputLut.vssSegments = 0;
} else {
pFlipState->layer[layer].inputLut.pLutSurfaceEvo = NULL;
pFlipState->layer[layer].inputLut.offset = 0;
pFlipState->layer[layer].inputLut.lutEntries = 0;
pFlipState->layer[layer].inputLut.vssSegments = 0;
}
/* Otherwise, use the current state. */
} else if (ilutEnabled) {
pFlipState->layer[layer].inputLut.pLutSurfaceEvo =
pDevEvo->lut.apiHead[apiHead].LUT[curLUTIndex];
pFlipState->layer[layer].inputLut.offset = offsetof(NVEvoLutDataRec, base);
pFlipState->layer[layer].inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
pFlipState->layer[layer].inputLut.vssSegments = 0;
} else {
pFlipState->layer[layer].inputLut.pLutSurfaceEvo = NULL;
pFlipState->layer[layer].inputLut.offset = 0;
pFlipState->layer[layer].inputLut.lutEntries = 0;
pFlipState->layer[layer].inputLut.vssSegments = 0;
}
} WITH_APIHEAD_FOR_HEAD_DONE;
}
if (pParams->layer[layer].tmo.specified) {
if (pParams->layer[layer].tmo.enabled) {
ret = UpdateFlipLutHwState(
pDevEvo,
pOpenDevSurfaceHandles,
&pFlipState->layer[layer].tmoLut,
&pParams->layer[layer].tmo.lut,
&pDevEvo->caps.layerCaps[layer].tmo,
TRUE /*isUsedByLayerChannel*/);
if (!ret) {
return FALSE;
}
pFlipState->layer[layer].tmoLut.fromOverride = TRUE;
} else {
pFlipState->layer[layer].tmoLut.fromOverride = FALSE;
}
}
if (!pFlipState->layer[layer].tmoLut.fromOverride) {
nvSetTmoLutSurfaceEvo(pDevEvo, &pFlipState->layer[layer]);
}
if (pParams->layer[layer].maxDownscaleFactors.specified) {
pFlipState->layer[layer].maxDownscaleFactors.vertical =
pParams->layer[layer].maxDownscaleFactors.vertical;
@@ -792,7 +1025,7 @@ static NvBool UpdateLayerFlipEvoHwStateCommon(
static NvBool UpdateMainLayerFlipEvoHwState(
const struct NvKmsPerOpenDev *pOpenDev,
const NVDevEvoRec *pDevEvo,
NVDevEvoRec *pDevEvo,
const NvU32 sd,
const NvU32 head,
const struct NvKmsFlipCommonParams *pParams,
@@ -813,6 +1046,21 @@ static NvBool UpdateMainLayerFlipEvoHwState(
return FALSE;
}
if (pHwState->pSurfaceEvo[NVKMS_LEFT] &&
pHwState->pSurfaceEvo[NVKMS_LEFT]->format == NvKmsSurfaceMemoryFormatI8 &&
pHwState->inputLut.pLutSurfaceEvo == NULL) {
/*
* Depth 8 requires the input LUT to be enabled, so fall back to the
* last programmed legacy LUT.
*/
pHwState->inputLut.pLutSurfaceEvo =
pDevEvo->pDispEvo[sd]->headState[head].lut.pCurrSurface;
pHwState->inputLut.offset = offsetof(NVEvoLutDataRec, base);
pHwState->inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
pHwState->inputLut.vssSegments = 0;
}
if (pParams->layer[NVKMS_MAIN_LAYER].csc.specified) {
if (pParams->layer[NVKMS_MAIN_LAYER].csc.useMain) {
return FALSE;
@@ -876,7 +1124,7 @@ static NvBool UpdateCursorLayerFlipEvoHwState(
const NVDevEvoRec *pDevEvo,
const struct NvKmsFlipCommonParams *pParams,
const NVHwModeTimingsEvo *pTimings,
const NvU8 tilePosition,
const NvU8 mergeHeadSection,
NVFlipEvoHwState *pFlipState)
{
if (pParams->cursor.imageSpecified) {
@@ -895,7 +1143,7 @@ static NvBool UpdateCursorLayerFlipEvoHwState(
if (pParams->cursor.positionSpecified) {
pFlipState->cursor.x = (pParams->cursor.position.x -
(pTimings->viewPort.in.width * tilePosition));
(pTimings->viewPort.in.width * mergeHeadSection));
pFlipState->cursor.y = pParams->cursor.position.y;
pFlipState->dirty.cursorPosition = TRUE;
@@ -906,7 +1154,7 @@ static NvBool UpdateCursorLayerFlipEvoHwState(
static NvBool UpdateOverlayLayerFlipEvoHwState(
const struct NvKmsPerOpenDev *pOpenDev,
const NVDevEvoRec *pDevEvo,
NVDevEvoRec *pDevEvo,
const NvU32 sd,
const NvU32 head,
const NvU32 layer,
@@ -978,12 +1226,12 @@ static NvBool UpdateOverlayLayerFlipEvoHwState(
*/
NvBool nvUpdateFlipEvoHwState(
const struct NvKmsPerOpenDev *pOpenDev,
const NVDevEvoRec *pDevEvo,
NVDevEvoRec *pDevEvo,
const NvU32 sd,
const NvU32 head,
const struct NvKmsFlipCommonParams *pParams,
const NVHwModeTimingsEvo *pTimings,
const NvU8 tilePosition,
const NvU8 mergeHeadSection,
NVFlipEvoHwState *pFlipState,
NvBool allowVrr)
{
@@ -992,12 +1240,12 @@ NvBool nvUpdateFlipEvoHwState(
if (pParams->viewPortIn.specified) {
pFlipState->dirty.viewPortPointIn = TRUE;
pFlipState->viewPortPointIn.x = pParams->viewPortIn.point.x +
(pTimings->viewPort.in.width * tilePosition);
(pTimings->viewPort.in.width * mergeHeadSection);
pFlipState->viewPortPointIn.y = pParams->viewPortIn.point.y;
}
if (!UpdateCursorLayerFlipEvoHwState(pOpenDev, pDevEvo, pParams, pTimings,
tilePosition, pFlipState)) {
mergeHeadSection, pFlipState)) {
return FALSE;
}
@@ -1034,6 +1282,77 @@ NvBool nvUpdateFlipEvoHwState(
}
}
/*
* See the comment for the ILUT in UpdateLayerFlipEvoHwStateCommon for an
* overview of this setup.
*/
if (pParams->olut.specified) {
if (pParams->olut.enabled) {
const NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
nvGetSurfaceHandlesFromOpenDevConst(pOpenDev);
if (!UpdateFlipLutHwState(pDevEvo, pOpenDevSurfaceHandles,
&pFlipState->outputLut, &pParams->olut.lut,
&pDevEvo->caps.olut,
FALSE /*isUsedByLayerChannel*/)) {
return FALSE;
}
pFlipState->outputLut.fromOverride = TRUE;
} else {
pFlipState->outputLut.fromOverride = FALSE;
}
}
if (!pFlipState->outputLut.fromOverride) {
NvU32 apiHead;
WITH_APIHEAD_FOR_HEAD(pDevEvo, sd, head, apiHead) {
NvBool olutEnabled = pDevEvo->lut.apiHead[apiHead].disp[sd].curOutputLutEnabled;
NvU32 curLUTIndex = pDevEvo->lut.apiHead[apiHead].disp[sd].curLUTIndex;
NvU32 nextLutIndex = (curLUTIndex + 1) % 3;
if (pParams->lut.output.specified) {
if (pParams->lut.output.enabled) {
pFlipState->outputLut.pLutSurfaceEvo =
pDevEvo->lut.apiHead[apiHead].LUT[nextLutIndex];
pFlipState->outputLut.offset = offsetof(NVEvoLutDataRec, output);
pFlipState->outputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
pFlipState->outputLut.vssSegments = 0;
} else {
pFlipState->outputLut.pLutSurfaceEvo = NULL;
pFlipState->outputLut.offset = 0;
pFlipState->outputLut.lutEntries = 0;
pFlipState->outputLut.vssSegments = 0;
}
} else if (olutEnabled) {
pFlipState->outputLut.pLutSurfaceEvo =
pDevEvo->lut.apiHead[apiHead].LUT[curLUTIndex];
pFlipState->outputLut.offset = offsetof(NVEvoLutDataRec, output);
pFlipState->outputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
pFlipState->outputLut.vssSegments = 0;
} else {
pFlipState->outputLut.pLutSurfaceEvo = NULL;
pFlipState->outputLut.offset = 0;
pFlipState->outputLut.lutEntries = 0;
pFlipState->outputLut.vssSegments = 0;
}
} WITH_APIHEAD_FOR_HEAD_DONE;
}
if (pParams->olutFpNormScale.specified) {
pFlipState->olutFpNormScale = pParams->olutFpNormScale.val;
}
if ((pFlipState->outputLut.pLutSurfaceEvo !=
pDevEvo->gpus[sd].headState[head].outputLut.pLutSurfaceEvo) ||
(pFlipState->outputLut.offset !=
pDevEvo->gpus[sd].headState[head].outputLut.offset) ||
(pFlipState->olutFpNormScale !=
pDevEvo->gpus[sd].headState[head].olutFpNormScale)) {
pFlipState->layer[NVKMS_MAIN_LAYER].tearing = FALSE;
pFlipState->dirty.olut = TRUE;
}
if (!AssignUsageBounds(pDevEvo, head, pFlipState)) {
return FALSE;
}
@@ -1146,6 +1465,71 @@ static NvBool ValidateSurfaceSize(
return TRUE;
}
static NvBool
ValidateLayerLutHwState(const NVDevEvoRec *pDevEvo,
const NVFlipEvoHwState *pFlipState,
NvU32 layer)
{
const NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer];
if (!pDevEvo->caps.layerCaps[layer].ilut.supported &&
(pHwState->inputLut.pLutSurfaceEvo != NULL)) {
return FALSE;
}
if (!pDevEvo->caps.layerCaps[layer].tmo.supported &&
(pHwState->tmoLut.pLutSurfaceEvo != NULL)) {
return FALSE;
}
/* Surface format validation is handled in UpdateFlipLutHwState */
if (pDevEvo->hal->caps.needDefaultLutSurface &&
pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) {
/*
* needDefaultLutSurface corresponds to the Turing+ case where the ILUT
* must convert to FP16. When it is set, the ILUT must be set if the
* surface is not in FP16 and the ILUT must not be set if the surface
* is in FP16. However, we only validate the second case because the
* first is handled internally be using the default ILUT.
*/
if ((pHwState->pSurfaceEvo[NVKMS_LEFT]->format ==
NvKmsSurfaceMemoryFormatRF16GF16BF16XF16) ||
(pHwState->pSurfaceEvo[NVKMS_LEFT]->format ==
NvKmsSurfaceMemoryFormatRF16GF16BF16AF16)) {
/*
* If the layer's surface format is FP16, the ILUT must not be
* enabled.
*/
if (pHwState->inputLut.pLutSurfaceEvo) {
return FALSE;
}
}
}
if (pHwState->pSurfaceEvo[NVKMS_LEFT] &&
pHwState->pSurfaceEvo[NVKMS_LEFT]->format == NvKmsSurfaceMemoryFormatI8) {
/* If the layer's surface format is I8, the ILUT must be enabled. */
if (pHwState->inputLut.pLutSurfaceEvo == NULL) {
return FALSE;
}
}
return TRUE;
}
static NvBool
ValidateHeadLutHwState(const NVDevEvoRec *pDevEvo,
const NVFlipEvoHwState *pFlipState)
{
if (!pDevEvo->caps.olut.supported &&
(pFlipState->outputLut.pLutSurfaceEvo != NULL)) {
return FALSE;
}
return TRUE;
}
static NvBool
ValidateMainFlipChannelEvoHwState(const NVDevEvoRec *pDevEvo,
const NVFlipChannelEvoHwState *pHwState,
@@ -1446,6 +1830,10 @@ NvBool nvValidateFlipEvoHwState(
return FALSE;
}
}
if (!ValidateLayerLutHwState(pDevEvo, pFlipState, layer)) {
return FALSE;
}
}
if (!ValidateHDR(pDevEvo, head, pFlipState)) {
@@ -1456,6 +1844,10 @@ NvBool nvValidateFlipEvoHwState(
return FALSE;
}
if (!ValidateHeadLutHwState(pDevEvo, pFlipState)) {
return FALSE;
}
/* XXX NVKMS TODO: validate cursor x,y against current viewport in? */
return ValidateUsageBounds(pDevEvo,
@@ -1531,12 +1923,12 @@ static inline NvU32 MinCvToVal(NvU32 cv, NvU32 maxCLL)
softfloat_round_near_even, FALSE);
}
static void UpdateHDR(NVDevEvoPtr pDevEvo,
const NVFlipEvoHwState *pFlipState,
const NvU32 sd,
const NvU32 head,
const NVT_HDR_STATIC_METADATA *pHdrInfo,
NVEvoUpdateState *updateState)
static NvBool UpdateHDR(NVDevEvoPtr pDevEvo,
const NVFlipEvoHwState *pFlipState,
const NvU32 sd,
const NvU32 head,
const NVT_HDR_STATIC_METADATA *pHdrInfo,
NVEvoUpdateState *updateState)
{
NVDispEvoPtr pDispEvo = pDevEvo->gpus[sd].pDispEvo;
NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
@@ -1663,10 +2055,7 @@ static void UpdateHDR(NVDevEvoPtr pDevEvo,
dirty = TRUE;
}
if (dirty) {
// Update OCSC / OLUT
nvEvoSetLUTContextDma(pDispEvo, head, updateState);
}
return dirty;
}
/*!
@@ -1695,6 +2084,7 @@ void nvFlipEvoOneHead(
NVEvoSubDevHeadStateRec *pSdHeadState =
&pDevEvo->gpus[sd].headState[head];
NvU32 layer;
NvBool hdrDirty;
/*
* Provide the pre-update hardware state (in pSdHeadState) and the new
@@ -1719,6 +2109,11 @@ void nvFlipEvoOneHead(
pSdHeadState->cursor = pFlipState->cursor;
}
if (pFlipState->dirty.olut) {
pSdHeadState->outputLut = pFlipState->outputLut;
pSdHeadState->olutFpNormScale = pFlipState->olutFpNormScale;
}
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
if (pFlipState->dirty.layer[layer]) {
pSdHeadState->layer[layer] = pFlipState->layer[layer];
@@ -1750,7 +2145,17 @@ void nvFlipEvoOneHead(
pFlipState->cursor.y);
}
UpdateHDR(pDevEvo, pFlipState, sd, head, pHdrInfo, updateState);
hdrDirty = UpdateHDR(pDevEvo, pFlipState, sd, head, pHdrInfo, updateState);
if (pFlipState->dirty.olut || hdrDirty) {
nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
pDevEvo->hal->SetOutputLut(pDevEvo, sd, head,
&pFlipState->outputLut,
pFlipState->olutFpNormScale,
updateState,
bypassComposition);
nvPopEvoSubDevMask(pDevEvo);
}
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
if (!pFlipState->dirty.layer[layer]) {
@@ -1825,6 +2230,10 @@ void nvUpdateSurfacesFlipRefCount(
pDevEvo,
pFlipState->cursor.pSurfaceEvo,
increase);
ChangeSurfaceFlipRefCount(
pDevEvo,
pFlipState->outputLut.pLutSurfaceEvo,
increase);
for (i = 0; i < pDevEvo->head[head].numLayers; i++) {
NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i];
@@ -1841,6 +2250,14 @@ void nvUpdateSurfacesFlipRefCount(
pDevEvo,
pLayerFlipState->completionNotifier.surface.pSurfaceEvo,
increase);
ChangeSurfaceFlipRefCount(
pDevEvo,
pLayerFlipState->inputLut.pLutSurfaceEvo,
increase);
ChangeSurfaceFlipRefCount(
pDevEvo,
pLayerFlipState->tmoLut.pLutSurfaceEvo,
increase);
if (!pLayerFlipState->syncObject.usingSyncpt) {
ChangeSurfaceFlipRefCount(
@@ -2567,11 +2984,6 @@ void nvPreFlip(NVDevEvoRec *pDevEvo,
head,
&pWorkArea->sd[sd].head[head].newState,
NV_TRUE);
nvRefTmoLutSurfacesEvo(
pDevEvo,
&pWorkArea->sd[sd].head[head].newState,
head);
}
}
@@ -2668,11 +3080,6 @@ void nvPostFlip(NVDevEvoRec *pDevEvo,
head,
&pWorkArea->sd[sd].head[head].oldState,
NV_FALSE);
nvUnrefTmoLutSurfacesEvo(
pDevEvo,
&pWorkArea->sd[sd].head[head].oldState,
head);
}
}
@@ -2806,8 +3213,9 @@ NvBool nvAssignNVFlipEvoHwState(NVDevEvoRec *pDevEvo,
&pHeadState->timings.viewPort.possibleUsage;
if (!nvUpdateFlipEvoHwState(pOpenDev, pDevEvo, sd, head, pParams,
&pHeadState->timings, pHeadState->tilePosition,
pFlipHwState, allowVrr)) {
&pHeadState->timings,
pHeadState->mergeHeadSection, pFlipHwState,
allowVrr)) {
return FALSE;
}
@@ -2818,10 +3226,6 @@ NvBool nvAssignNVFlipEvoHwState(NVDevEvoRec *pDevEvo,
return FALSE;
}
if (!nvSetTmoLutSurfacesEvo(pDevEvo, pFlipHwState, head)) {
return FALSE;
}
return TRUE;
}

View File

@@ -25,57 +25,82 @@
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-dma.h"
#include "nvkms-surface.h"
#include "nvkms-private.h"
#include "nvkms-utils.h"
#include "nvkms-headsurface.h"
#include "nvos.h"
#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
static void FreeLutSurfaceEvoInVidmem(NVLutSurfaceEvoPtr pSurfEvo)
static void FreeLutSurfaceEvo(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfEvo)
{
NVDevEvoPtr pDevEvo;
if (pSurfEvo == NULL) {
return;
}
pDevEvo = pSurfEvo->pDevEvo;
nvAssert(pSurfEvo->rmRefCnt == 1);
nvAssert(pSurfEvo->structRefCnt == 1);
if (pSurfEvo->gpuAddress) {
nvRmApiUnmapMemoryDma(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
pDevEvo->nvkmsGpuVASpace,
pSurfEvo->handle,
0,
(NvU64)pSurfEvo->gpuAddress);
}
nvRmEvoUnMapVideoMemory(pDevEvo, pSurfEvo->handle,
pSurfEvo->subDeviceAddress);
/* Free surface descriptor */
pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
nvEvoGlobal.clientHandle,
&pSurfEvo->surfaceDesc);
/* Free the surface */
if (pSurfEvo->handle) {
NvU32 result;
result = nvRmApiFree(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle, pSurfEvo->handle);
if (result != NVOS_STATUS_SUCCESS) {
nvAssert(!"Freeing LUT surface failed");
}
nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
pSurfEvo->handle);
pSurfEvo->handle = 0;
}
nvFree(pSurfEvo);
nvEvoUnregisterSurface(pDevEvo,
pDevEvo->pNvKmsOpenDev,
pSurfEvo->owner.surfaceHandle,
TRUE /* skipUpdate */);
}
static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
static NVSurfaceEvoPtr RegisterLutSurfaceEvo(NVDevEvoPtr pDevEvo, NvU32 memoryHandle)
{
struct NvKmsRegisterSurfaceParams registerSurfaceParams = { };
const NVEvoApiHandlesRec *pSurfaceHandles;
NvU64 size = (sizeof(NVEvoLutDataRec) + 63) & ~63;
NVSurfaceEvoPtr pSurfEvo = NULL;
registerSurfaceParams.request.deviceHandle = pDevEvo->deviceHandle;
registerSurfaceParams.request.useFd = FALSE;
registerSurfaceParams.request.rmClient = nvEvoGlobal.clientHandle;
registerSurfaceParams.request.planes[0].u.rmObject = memoryHandle;
registerSurfaceParams.request.planes[0].offset = 0;
registerSurfaceParams.request.planes[0].pitch = (size + 255) & ~255;
registerSurfaceParams.request.planes[0].rmObjectSizeInBytes = size;
registerSurfaceParams.request.widthInPixels = (size + 7) >> 3; /* TODO: Check on this */
registerSurfaceParams.request.heightInPixels = 1;
registerSurfaceParams.request.layout = NvKmsSurfaceMemoryLayoutPitch;
registerSurfaceParams.request.format = NvKmsSurfaceMemoryFormatR16G16B16A16;
registerSurfaceParams.request.noDisplayHardwareAccess = FALSE;
registerSurfaceParams.request.noDisplayCaching = FALSE;
registerSurfaceParams.request.isoType = NVKMS_MEMORY_ISO;
registerSurfaceParams.request.log2GobsPerBlockY = 0;
/*
* Although the caller may like a GPU mapping, we pass
* NvHsMapPermissionsNone so failing the mapping doesn't fail the surface
* creation.
*/
nvEvoRegisterSurface(pDevEvo,
pDevEvo->pNvKmsOpenDev,
&registerSurfaceParams,
NvHsMapPermissionsNone);
if (registerSurfaceParams.reply.surfaceHandle == 0) {
return NULL;
}
pSurfaceHandles = nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev);
pSurfEvo =
nvEvoGetSurfaceFromHandle(pDevEvo,
pSurfaceHandles,
registerSurfaceParams.reply.surfaceHandle,
FALSE /* isUsedByCursorChannel */,
TRUE /* isUsedByLayerChannel */);
return pSurfEvo;
}
static NVSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
{
NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
@@ -83,26 +108,16 @@ static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
NvU64 size = 0, alignment = 4096;
NvU32 memoryHandle = 0;
NVSurfaceEvoPtr pSurfEvo = NULL;
NVLutSurfaceEvoPtr pSurfEvo;
memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
if (pSurfEvo == NULL) {
return NULL;
}
pSurfEvo->pDevEvo = pDevEvo;
size = (sizeof(NVEvoLutDataRec) + 63) & ~63;
pSurfEvo->size = size;
pSurfEvo->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
if (pSurfEvo->handle == 0) {
if (memoryHandle == 0) {
goto fail;
}
size = (sizeof(NVEvoLutDataRec) + 63) & ~63;
attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr);
attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, attr2);
@@ -121,34 +136,39 @@ static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
pSurfEvo->handle,
memoryHandle,
NV01_MEMORY_LOCAL_USER,
&memAllocParams);
/* If we failed the allocation above, abort */
if (ret != NVOS_STATUS_SUCCESS) {
nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
pSurfEvo->handle = 0;
nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
goto fail;
}
/* Allocate and bind surface descriptor */
ret =
nvRmAllocAndBindSurfaceDescriptor(
pDevEvo,
pSurfEvo->handle,
NvKmsSurfaceMemoryLayoutPitch,
pSurfEvo->size - 1,
&pSurfEvo->surfaceDesc);
if (ret != NVOS_STATUS_SUCCESS) {
pSurfEvo = RegisterLutSurfaceEvo(pDevEvo, memoryHandle);
/*
* nvEvoRegisterSurface dups the memory handle, so we free the one we
* just created.
*/
nvRmApiFree(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
memoryHandle);
nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
memoryHandle = 0;
if (pSurfEvo == NULL) {
goto fail;
}
/* Map the surface for the CPU */
if (!nvRmEvoMapVideoMemory(pSurfEvo->pDevEvo,
pSurfEvo->handle, pSurfEvo->size,
pSurfEvo->subDeviceAddress,
/*
* Map the surface for the CPU. This is only done by nvEvoRegisterSurface
* for NISO surfaces, so it must be done manually here.
*/
if (!nvRmEvoMapVideoMemory(pDevEvo,
pSurfEvo->planes[0].rmHandle,
size, pSurfEvo->cpuAddress,
SUBDEVICE_MASK_ALL)) {
goto fail;
}
@@ -157,98 +177,31 @@ static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
* The GPU mapping is only needed for prefetching LUT surfaces for DIFR.
* It isn't worth failing alone but we want to keep gpuAddress coherent.
*/
ret = nvRmApiMapMemoryDma(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
pDevEvo->nvkmsGpuVASpace,
pSurfEvo->handle,
0,
pSurfEvo->size,
DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE) |
DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_ONLY),
&pSurfEvo->gpuAddress);
pSurfEvo->gpuAddress = nvHsMapSurfaceToDevice(pDevEvo,
pSurfEvo->planes[0].rmHandle,
size,
NvHsMapPermissionsReadOnly);
if (ret != NVOS_STATUS_SUCCESS) {
if (pSurfEvo->gpuAddress == NV_HS_BAD_GPU_ADDRESS) {
pSurfEvo->gpuAddress = 0ULL;
}
return pSurfEvo;
fail:
fail:
/* An error occurred -- free the surface */
FreeLutSurfaceEvoInVidmem(pSurfEvo);
FreeLutSurfaceEvo(pDevEvo, pSurfEvo);
return NULL;
}
static void FreeLutSurfaceEvoInSysmem(NVLutSurfaceEvoPtr pSurfEvo)
{
NVDevEvoPtr pDevEvo;
if (pSurfEvo == NULL) {
return;
}
pDevEvo = pSurfEvo->pDevEvo;
/* Free surface descriptor */
pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
nvEvoGlobal.clientHandle,
&pSurfEvo->surfaceDesc);
/* Free the surface */
if (pSurfEvo->handle) {
NvU32 result;
if (pSurfEvo->subDeviceAddress[0] != NULL) {
/*
* SOC display devices should only have one subdevice
* (and therefore it is safe to unmap only subDeviceAddress[0])
* for reasons described in AllocLutSurfaceEvoInSysmem
*/
nvAssert(pDevEvo->numSubDevices == 1);
result = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
pSurfEvo->handle,
pSurfEvo->subDeviceAddress[0],
0);
if (result != NVOS_STATUS_SUCCESS) {
nvAssert(!"Unmapping LUT surface failed");
}
pSurfEvo->subDeviceAddress[0] = NULL;
}
result = nvRmApiFree(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle, pSurfEvo->handle);
if (result != NVOS_STATUS_SUCCESS) {
nvAssert(!"Freeing LUT surface failed");
}
nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
}
nvFree(pSurfEvo);
}
static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo)
static NVSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo)
{
NvU32 memoryHandle = 0;
void *pBase = NULL;
NvU64 size = 0;
NVLutSurfaceEvoPtr pSurfEvo;
NvU32 ret;
pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
if (pSurfEvo == NULL) {
return NULL;
}
pSurfEvo->pDevEvo = pDevEvo;
size = (sizeof(NVEvoLutDataRec) + 63) & ~63;
pSurfEvo->size = size;
NvU64 size = (sizeof(NVEvoLutDataRec) + 63) & ~63;
NVSurfaceEvoPtr pSurfEvo = NULL;
NvU32 ret = 0;
memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
if (memoryHandle == 0) {
@@ -265,16 +218,35 @@ static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo)
goto fail;
}
pSurfEvo->handle = memoryHandle;
pSurfEvo = RegisterLutSurfaceEvo(pDevEvo, memoryHandle);
/* Allocate and bind surface descriptor */
ret =
nvRmAllocAndBindSurfaceDescriptor(
pDevEvo,
pSurfEvo->handle,
NvKmsSurfaceMemoryLayoutPitch,
pSurfEvo->size - 1,
&pSurfEvo->surfaceDesc);
/*
* nvEvoRegisterSurface dups the memory handle, so we can free the one we
* just created.
*/
nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
memoryHandle,
pBase,
0 /* flags */);
nvRmApiFree(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
memoryHandle);
nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
memoryHandle = 0;
if (pSurfEvo == NULL) {
goto fail;
}
ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
pDevEvo->deviceHandle,
pSurfEvo->planes[0].rmHandle,
0, /* offset */
size,
&pBase,
0 /* flags */);
if (ret != NVOS_STATUS_SUCCESS) {
goto fail;
@@ -287,35 +259,18 @@ static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo)
* subdevice.
*/
nvAssert(pDevEvo->numSubDevices == 1);
pSurfEvo->subDeviceAddress[0] = pBase;
pSurfEvo->cpuAddress[0] = pBase;
return pSurfEvo;
fail:
fail:
/* An error occurred -- free the surface */
FreeLutSurfaceEvoInSysmem(pSurfEvo);
FreeLutSurfaceEvo(pDevEvo, pSurfEvo);
return NULL;
}
static void FreeLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo)
{
NVDevEvoPtr pDevEvo;
if (pSurfEvo == NULL) {
return;
}
pDevEvo = pSurfEvo->pDevEvo;
if (pDevEvo->requiresAllAllocationsInSysmem) {
FreeLutSurfaceEvoInSysmem(pSurfEvo);
} else {
FreeLutSurfaceEvoInVidmem(pSurfEvo);
}
}
static NVLutSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo)
static NVSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo)
{
if (pDevEvo->requiresAllAllocationsInSysmem) {
return AllocLutSurfaceEvoInSysmem(pDevEvo);
@@ -324,77 +279,69 @@ static NVLutSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo)
}
}
NvBool nvSetTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head)
NvBool nvSetTmoLutSurfaceEvo(NVDevEvoPtr pDevEvo,
NVFlipChannelEvoHwState *pHwState)
{
NvU32 layer;
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
if (pFlipState->layer[layer].hdrStaticMetadata.enabled) {
if (!pFlipState->layer[layer].tmoLut.pLutSurfaceEvo) {
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo =
AllocLutSurfaceEvo(pDevEvo);
if (!pFlipState->layer[layer].tmoLut.pLutSurfaceEvo) {
return FALSE;
}
// Will be referenced via nvRefTmoLutSurfacesEvo() on new state
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt = 0;
if (pHwState->hdrStaticMetadata.enabled) {
if (!pHwState->tmoLut.pLutSurfaceEvo) {
pHwState->tmoLut.pLutSurfaceEvo = AllocLutSurfaceEvo(pDevEvo);
if (!pHwState->tmoLut.pLutSurfaceEvo) {
return FALSE;
}
} else {
// Will be freed via nvUnrefTmoLutSurfacesEvo() on old state
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo = NULL;
}
} else {
// Will be freed via nvEvoDecrementSurfaceRefCnts() and
// nvFreeUnrefedTmoLutSurfacesEvo() on old state
pHwState->tmoLut.pLutSurfaceEvo = NULL;
}
return TRUE;
}
void nvRefTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head)
/*
* After the flip dereferences its TMO surfaces, or when it fails after TMO
* surface allocation, any unused TMO surfaces are left with 1 refcount. If
* these TMO surfaces are owned by the pDevEvo's pNvKmsOpenDev, then they have
* been allocated within this file and need to be freed. If not, they have been
* allocated by an NvKms client, which is in charge of freeing them.
*
* Only call FreeLutSurfaceEvo in the first case.
*/
void nvFreeUnrefedTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head)
{
// Reference new state layers that have hdrStaticMetadata enabled.
NvU32 layer;
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
if (pFlipState->layer[layer].hdrStaticMetadata.enabled) {
nvAssert(pFlipState->layer[layer].tmoLut.pLutSurfaceEvo);
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt++;
if (pFlipState->layer[layer].tmoLut.pLutSurfaceEvo != NULL &&
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->structRefCnt <= 1) {
if (pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->owner.pOpenDev ==
pDevEvo->pNvKmsOpenDev) {
FreeLutSurfaceEvo(pDevEvo,
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo);
}
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo = NULL;
}
}
}
void nvUnrefTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
NVFlipEvoHwState *pFlipState,
NvU32 head)
void nvInvalidateDefaultLut(NVDevEvoPtr pDevEvo)
{
// Unref old state layers that had hdrStaticMetadata enabled.
NvU32 layer;
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
if (pFlipState->layer[layer].hdrStaticMetadata.enabled) {
nvAssert(pFlipState->layer[layer].tmoLut.pLutSurfaceEvo);
NvU32 sd;
if (pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt <= 1) {
// Wait for any outstanding LUT updates before freeing.
if (pDevEvo->core) {
nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
}
FreeLutSurfaceEvo(
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo);
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo = NULL;
} else {
pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt--;
}
}
for (sd = 0; sd < NVKMS_MAX_SUBDEVICES; sd++) {
pDevEvo->lut.defaultBaseLUTState[sd] =
pDevEvo->lut.defaultOutputLUTState[sd] =
NvKmsLUTStateUninitialized;
}
}
NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo)
{
NVDispEvoPtr pDispEvo;
NvU32 apiHead, dispIndex, i, sd;
NvU32 apiHead, dispIndex, i;
for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) {
@@ -425,11 +372,7 @@ NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo)
return FALSE;
}
for (sd = 0; sd < NVKMS_MAX_SUBDEVICES; sd++) {
pDevEvo->lut.defaultBaseLUTState[sd] =
pDevEvo->lut.defaultOutputLUTState[sd] =
NvKmsLUTStateUninitialized;
}
nvInvalidateDefaultLut(pDevEvo);
pDevEvo->hal->InitDefaultLut(pDevEvo);
}
@@ -464,21 +407,21 @@ void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo)
}
if (pDevEvo->lut.defaultLut != NULL) {
FreeLutSurfaceEvo(pDevEvo->lut.defaultLut);
FreeLutSurfaceEvo(pDevEvo, pDevEvo->lut.defaultLut);
pDevEvo->lut.defaultLut = NULL;
}
for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) {
if (pDevEvo->lut.apiHead[apiHead].LUT[i] != NULL) {
FreeLutSurfaceEvo(pDevEvo->lut.apiHead[apiHead].LUT[i]);
FreeLutSurfaceEvo(pDevEvo, pDevEvo->lut.apiHead[apiHead].LUT[i]);
pDevEvo->lut.apiHead[apiHead].LUT[i] = NULL;
}
}
}
}
void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo,
void nvUploadDataToLutSurfaceEvo(NVSurfaceEvoPtr pSurfEvo,
const NVEvoLutDataRec *pLUTBuffer,
NVDispEvoPtr pDispEvo)
{
@@ -494,18 +437,18 @@ void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo,
return;
}
nvAssert(pSurfEvo->subDeviceAddress[sd]);
nvAssert(pSurfEvo->cpuAddress[sd]);
/* The size to copy should not be larger than the surface. */
nvAssert(size <= pSurfEvo->size);
nvAssert(size <= pSurfEvo->planes[0].rmObjectSizeInBytes);
/* The source, destination, and size should be 4-byte aligned. */
nvAssert((((NvUPtr)data) & 0x3) == 0);
nvAssert((((NvUPtr)pSurfEvo->subDeviceAddress[sd]) & 0x3) == 0);
nvAssert((((NvUPtr)pSurfEvo->cpuAddress[sd]) & 0x3) == 0);
nvAssert((size % 4) == 0);
src = data;
dst = (NvU32*)pSurfEvo->subDeviceAddress[sd];
dst = (NvU32*)pSurfEvo->cpuAddress[sd];
for (dword = 0; dword < (size/4); dword++) {
*(dst++) = *(src++);

View File

@@ -576,7 +576,8 @@ InitProposedModeSetHwState(const NVDevEvoRec *pDevEvo,
nvInitFlipEvoHwState(pDevEvo, sd, head,
&pProposed->sd[sd].head[head].flip);
pProposedHead->tilePosition = pHeadState->tilePosition;
pProposedHead->mergeHeadSection =
pHeadState->mergeHeadSection;
pProposedHead->timings = pHeadState->timings;
pProposedHead->pConnectorEvo = pHeadState->pConnectorEvo;
pProposedHead->hdmiFrlConfig = pHeadState->hdmiFrlConfig;
@@ -621,6 +622,7 @@ AssignProposedModeSetNVFlipEvoHwState(
pFlip->dirty.tf = TRUE;
pFlip->dirty.hdrStaticMetadata = TRUE;
pFlip->dirty.olut = TRUE;
for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
pFlip->dirty.layer[layer] = TRUE;
@@ -647,7 +649,7 @@ AssignProposedModeSetNVFlipEvoHwState(
head,
&pRequestHead->flip,
&pProposedHead->timings,
pProposedHead->tilePosition,
pProposedHead->mergeHeadSection,
pFlip,
FALSE /* allowVrr */)) {
return FALSE;
@@ -1279,11 +1281,12 @@ AssignProposedModeSetHwState(NVDevEvoRec *pDevEvo,
&pProposedDisp->apiHead[apiHead];
const NvU32 primaryHead =
nvGetPrimaryHwHeadFromMask(pProposedApiHead->hwHeadsMask);
const NvU32 numTiles = nvPopCount32(pProposedApiHead->hwHeadsMask);
const NvU32 numMergeHeadSections =
nvPopCount32(pProposedApiHead->hwHeadsMask);
const NVDpyEvoRec *pDpyEvo =
nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo);
NVProposedModeSetHwStateOneHead *pProposedPrimaryHead;
NvU32 secondaryHeadTilePosition = 1;
NvU32 secondaryMergeHeadSection = 1;
NvU32 head;
nvAssert((pProposedApiHead->hwHeadsMask != 0x0) ||
@@ -1307,12 +1310,13 @@ AssignProposedModeSetHwState(NVDevEvoRec *pDevEvo,
NVProposedModeSetHwStateOneHead *pProposedHead =
&pProposedDisp->head[head];
pProposedHead->tilePosition =
(head == primaryHead) ? 0 : (secondaryHeadTilePosition++);
pProposedHead->mergeHeadSection =
(head == primaryHead) ? 0 : (secondaryMergeHeadSection++);
if (!nvEvoGetSingleTileHwModeTimings(&pProposedApiHead->timings,
numTiles,
&pProposedHead->timings)) {
if (!nvEvoGetSingleMergeHeadSectionHwModeTimings(
&pProposedApiHead->timings,
numMergeHeadSections,
&pProposedHead->timings)) {
status = NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE;
break;
}
@@ -1443,6 +1447,10 @@ ValidateProposedModeSetHwStateOneDispImp(NVDispEvoPtr pDispEvo,
NvU32 head;
if (pProposedApiHead->hwHeadsMask == 0x0) {
continue;
}
/*
* Don't try to downgrade heads which are not marked as changed.
* This could lead to unchanged/not-requested heads hogging all
@@ -1560,7 +1568,7 @@ static NvBool DowngradeColorSpaceAndBpcOneHead(
const NvKmsDpyOutputColorFormatInfo supportedColorFormats =
nvDpyGetOutputColorFormatInfo(pDpyEvo);
if (!nvDowngradeColorSpaceAndBpc(&supportedColorFormats, &dpyColor)) {
if (!nvDowngradeColorSpaceAndBpc(pDpyEvo, &supportedColorFormats, &dpyColor)) {
return FALSE;
}
@@ -1829,8 +1837,18 @@ done:
return dpIsModePossible;
}
static NvBool VblankCallbackListsAreEmpty(
const NVDispApiHeadStateEvoRec *pApiHeadState)
{
ct_assert(ARRAY_LEN(pApiHeadState->vblankCallbackList) == 2);
return (nvListIsEmpty(&pApiHeadState->vblankCallbackList[0]) &&
nvListIsEmpty(&pApiHeadState->vblankCallbackList[1]));
}
static void VBlankCallbackDeferredWork(void *dataPtr, NvU32 data32)
{
NVDispApiHeadStateEvoRec *pApiHeadState = NULL;
NVVBlankCallbackPtr pVBlankCallbackTmp = NULL;
NVVBlankCallbackPtr pVBlankCallback = NULL;
NVDispEvoPtr pDispEvo = dataPtr;
@@ -1840,11 +1858,22 @@ static void VBlankCallbackDeferredWork(void *dataPtr, NvU32 data32)
return;
}
nvListForEachEntry_safe(pVBlankCallback,
pVBlankCallbackTmp,
&pDispEvo->apiHeadState[apiHead].vblankCallbackList,
vblankCallbackListEntry) {
pVBlankCallback->pCallback(pDispEvo, pVBlankCallback);
pApiHeadState = &pDispEvo->apiHeadState[apiHead];
/*
* Increment the vblankCount here, so that any callbacks in the list can
* rely on the same value.
*/
pApiHeadState->vblankCount++;
for (NvU32 i = 0; i < ARRAY_LEN(pApiHeadState->vblankCallbackList); i++) {
nvListForEachEntry_safe(pVBlankCallback,
pVBlankCallbackTmp,
&pApiHeadState->vblankCallbackList[i],
vblankCallbackListEntry) {
pVBlankCallback->pCallback(pDispEvo, pVBlankCallback);
}
}
}
@@ -1859,6 +1888,64 @@ static void VBlankCallback(void *pParam1, void *pParam2)
0); /* timeout: schedule the work immediately */
}
static void DisableVBlankCallbacks(const NVDevEvoRec *pDevEvo)
{
NvU32 dispIndex;
NVDispEvoPtr pDispEvo;
FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
NvU32 apiHead;
for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
NVDispApiHeadStateEvoRec *pApiHeadState =
&pDispEvo->apiHeadState[apiHead];
if (pApiHeadState->rmVBlankCallbackHandle != 0) {
nvRmRemoveVBlankCallback(pDispEvo,
pApiHeadState->rmVBlankCallbackHandle);
pApiHeadState->rmVBlankCallbackHandle = 0;
}
}
}
}
static void EnableVBlankCallbacks(const NVDevEvoRec *pDevEvo)
{
NvU32 dispIndex;
NVDispEvoPtr pDispEvo;
FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
NvU32 apiHead;
for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
NVDispApiHeadStateEvoRec *pApiHeadState =
&pDispEvo->apiHeadState[apiHead];
nvAssert(pApiHeadState->rmVBlankCallbackHandle == 0);
if (VblankCallbackListsAreEmpty(pApiHeadState)) {
continue;
}
const NvU32 hwHead =
nvGetPrimaryHwHeadFromMask(pApiHeadState->hwHeadsMask);
if (hwHead == NV_INVALID_HEAD) {
continue;
}
pApiHeadState->rmVBlankCallbackHandle =
nvRmAddVBlankCallback(pDispEvo, hwHead,
VBlankCallback, (void *)(NvUPtr)apiHead);
}
}
}
/*!
* Validate the proposed configuration on the specified disp.
*
@@ -2399,7 +2486,7 @@ ApplyProposedModeSetHwStateOneHeadShutDown(
pHeadState->pConnectorEvo = NULL;
pHeadState->bypassComposition = FALSE;
pHeadState->tilePosition = 0;
pHeadState->mergeHeadSection = 0;
nvkms_memset(&pHeadState->timings, 0, sizeof(pHeadState->timings));
pHeadState->activeRmId = 0;
@@ -2507,12 +2594,6 @@ ApplyProposedModeSetStateOneApiHeadShutDown(
DisableActiveCoreRGSyncObjects(pDispEvo, apiHead,
&pWorkArea->modesetUpdateState.updateState);
if (pApiHeadState->rmVBlankCallbackHandle != 0) {
nvRmRemoveVBlankCallback(pDispEvo,
pApiHeadState->rmVBlankCallbackHandle);
pApiHeadState->rmVBlankCallbackHandle = 0;
}
nvDisable3DVisionAegis(pDpyEvo);
/* Cancel any pending LUT updates. */
@@ -2632,7 +2713,7 @@ ApplyProposedModeSetHwStateOneHeadPreUpdate(
pHeadState->bypassComposition = bypassComposition;
pHeadState->activeRmId = pProposedApiHead->activeRmId;
pHeadState->pConnectorEvo = pProposedHead->pConnectorEvo;
pHeadState->tilePosition = pProposedHead->tilePosition;
pHeadState->mergeHeadSection = pProposedHead->mergeHeadSection;
pHeadState->timings = pProposedHead->timings;
pHeadState->dscInfo = pProposedApiHead->dscInfo;
pHeadState->hdmiFrlConfig = pProposedHead->hdmiFrlConfig;
@@ -2640,9 +2721,6 @@ ApplyProposedModeSetHwStateOneHeadPreUpdate(
nvEvoDpyColorToPixelDepth(&pProposedApiHead->attributes.color);
pHeadState->audio = pProposedHead->audio;
/* Update current LUT to hardware */
nvEvoSetLUTContextDma(pDispEvo, head, updateState);
nvEvoSetTimings(pDispEvo, head, updateState);
nvSetDitheringEvo(pDispEvo,
@@ -2650,7 +2728,10 @@ ApplyProposedModeSetHwStateOneHeadPreUpdate(
&pProposedApiHead->attributes.dithering,
updateState);
nvEvoHeadSetControlOR(pDispEvo, head, updateState);
nvEvoHeadSetControlOR(pDispEvo,
head,
&pProposedApiHead->attributes.color,
updateState);
/* Update hardware's current colorSpace and colorRange */
nvUpdateCurrentHardwareColorSpaceAndRangeEvo(pDispEvo,
@@ -2816,13 +2897,6 @@ ApplyProposedModeSetStateOneApiHeadPreUpdate(
*/
ReEnableActiveCoreRGSyncObjects(pDispEvo, apiHead, updateState);
nvAssert(pApiHeadState->rmVBlankCallbackHandle == 0);
if (!nvListIsEmpty(&pApiHeadState->vblankCallbackList)) {
pApiHeadState->rmVBlankCallbackHandle =
nvRmAddVBlankCallback(pDispEvo, proposedPrimaryHead,
VBlankCallback, (void *)(NvUPtr)apiHead);
}
pApiHeadState->attributes = pProposedApiHead->attributes;
pApiHeadState->tf = pProposedApiHead->tf;
pApiHeadState->hdrInfoFrameOverride =
@@ -3857,6 +3931,8 @@ NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo,
BeginEndModeset(pDevEvo, pProposed, BEGIN_MODESET);
DisableVBlankCallbacks(pDevEvo);
nvEvoLockStatePreModeset(pDevEvo);
nvEvoRemoveOverlappingFlipLockRequestGroupsForModeset(pDevEvo, pRequest);
@@ -3885,6 +3961,8 @@ NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo,
nvEvoLockStatePostModeset(pDevEvo, doRasterLock);
EnableVBlankCallbacks(pDevEvo);
/*
* The modeset was successful: if headSurface was used as part of this
* modeset, record that in the pDevEvo.
@@ -3953,6 +4031,7 @@ done:
* \param[in] pCallback The function to call when vblank is reached on the
* provided pDispEvo+head combination.
* \param[in] pUserData A pointer to caller-provided custom data.
* \param[in] listIndex Which vblankCallbackList[] array to add this callback into.
*
* \return Returns a pointer to a NVVBlankCallbackRec structure if the
* registration was successful. Otherwise, return NULL.
@@ -3961,7 +4040,8 @@ NVVBlankCallbackPtr
nvApiHeadRegisterVBlankCallback(NVDispEvoPtr pDispEvo,
const NvU32 apiHead,
NVVBlankCallbackProc pCallback,
void *pUserData)
void *pUserData,
NvU8 listIndex)
{
/*
* All the hardware heads mapped on the input api head should be
@@ -3982,8 +4062,9 @@ nvApiHeadRegisterVBlankCallback(NVDispEvoPtr pDispEvo,
pVBlankCallback->pUserData = pUserData;
pVBlankCallback->apiHead = apiHead;
/* append to the tail of the list */
nvListAppend(&pVBlankCallback->vblankCallbackListEntry,
&pApiHeadState->vblankCallbackList);
&pApiHeadState->vblankCallbackList[listIndex]);
nvAssert((head != NV_INVALID_HEAD) ||
(pApiHeadState->rmVBlankCallbackHandle == 0));
@@ -4020,7 +4101,7 @@ void nvApiHeadUnregisterVBlankCallback(NVDispEvoPtr pDispEvo,
(pApiHeadState->rmVBlankCallbackHandle == 0));
// If there are no more callbacks, disable the RM-level callback
if (nvListIsEmpty(&pApiHeadState->vblankCallbackList) &&
if (VblankCallbackListsAreEmpty(pApiHeadState) &&
(head != NV_INVALID_HEAD) &&
(pApiHeadState->rmVBlankCallbackHandle != 0)) {
nvRmRemoveVBlankCallback(pDispEvo,

View File

@@ -25,6 +25,7 @@
#include "nvkms-flip-workarea.h"
#include "nvkms-modeset-types.h"
#include "nvkms-modeset-workarea.h"
#include "nvkms-setlut-workarea.h"
#include "nvkms-prealloc.h"
#include "nvkms-utils.h"
@@ -64,6 +65,8 @@ static size_t GetSizeForType(NVDevEvoPtr pDevEvo, enum NVPreallocType type)
return sizeof(struct NvKmsUsageBounds);
case PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS:
return sizeof(NVDpLibIsModePossibleParamsRec);
case PREALLOC_TYPE_SET_LUT_WORK_AREA:
return sizeof(struct NvKmsSetLutWorkArea);
case PREALLOC_TYPE_MAX:
/* Not a real option, but added for -Wswitch-enum */
break;

View File

@@ -72,6 +72,7 @@
#include <ctrl/ctrl0073/ctrl0073dp.h> /* NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID */
#include <ctrl/ctrl0073/ctrl0073specific.h> /* NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO */
#include <ctrl/ctrl0073/ctrl0073system.h> /* NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED */
#include <ctrl/ctrl0076.h> /* NV0076_CTRL_CMD_NOTIFY_CONSOLE_DISABLED */
#include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER */
#include <ctrl/ctrl0080/ctrl0080gr.h> /* NV0080_CTRL_CMD_GR_GET_CAPS_V2 */
#include <ctrl/ctrl0080/ctrl0080unix.h> /* NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH */
@@ -3284,6 +3285,9 @@ static NVEvoChannel* EvoAllocateCoreChannel(NVDevEvoRec *pDevEvo)
goto failed;
}
nvkms_memset(&pDevEvo->lut.notifierState, 0,
sizeof(pDevEvo->lut.notifierState));
for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
NvU32 ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
pDevEvo->pSubDevices[sd]->handle,
@@ -4722,6 +4726,38 @@ done:
nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hMemory);
}
void nvRmUnmapFbConsoleMemory(NVDevEvoPtr pDevEvo)
{
struct NvKmsPerOpenDev *pOpenDev = pDevEvo->pNvKmsOpenDev;
NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
nvGetSurfaceHandlesFromOpenDev(pOpenDev);
NVSurfaceEvoPtr pSurfaceEvo =
nvEvoGetSurfaceFromHandle(pDevEvo,
pOpenDevSurfaceHandles,
pDevEvo->fbConsoleSurfaceHandle,
FALSE,
TRUE);
NvU32 status;
if (!pSurfaceEvo) {
return;
}
// Tell Resman that the surface mapping is no longer needed.
status = nvRmApiControl(nvEvoGlobal.clientHandle,
pSurfaceEvo->planes[0].rmHandle,
NV0076_CTRL_CMD_NOTIFY_CONSOLE_DISABLED,
NULL, 0);
(void)status;
nvAssert(status == NVOS_STATUS_SUCCESS);
// Free the NVKMS surface.
nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev,
pDevEvo->fbConsoleSurfaceHandle,
TRUE /* skipUpdate */);
pDevEvo->fbConsoleSurfaceHandle = 0;
}
static void LogAuxPacket(const NVDispEvoRec *pDispEvo, const DPAUXPACKET *pkt)
{
const char *req, *rep;

View File

@@ -33,10 +33,71 @@
// NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD
#include "ctrl/ctrl0000/ctrl0000unix.h"
// NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM
#include "ctrl/ctrl0000/ctrl0000client.h"
/* NV0041_CTRL_SURFACE_INFO */
#include "ctrl/ctrl0041.h"
/* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */
#include "class/cl0071.h"
static void CpuUnmapSurface(
NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo)
{
const NvU32 planeIndex = 0;
NvU32 sd;
if (pSurfaceEvo->planes[planeIndex].rmHandle == 0) {
return;
}
for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
if (pSurfaceEvo->cpuAddress[sd] != NULL) {
nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
pDevEvo->pSubDevices[sd]->handle,
pSurfaceEvo->planes[planeIndex].rmHandle,
pSurfaceEvo->cpuAddress[sd],
0);
pSurfaceEvo->cpuAddress[sd] = NULL;
}
}
}
NvBool nvEvoCpuMapSurface(
NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo)
{
const NvU32 planeIndex = 0;
NvU32 sd;
/*
* We should only be called here with surfaces that contain a single plane.
*/
nvAssert(nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format)->numPlanes == 1);
for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
NvU32 result = nvRmApiMapMemory(
nvEvoGlobal.clientHandle,
pDevEvo->pSubDevices[sd]->handle,
pSurfaceEvo->planes[planeIndex].rmHandle,
0,
pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes,
(void **) &pSurfaceEvo->cpuAddress[sd],
0);
if (result != NVOS_STATUS_SUCCESS) {
CpuUnmapSurface(pDevEvo, pSurfaceEvo);
return FALSE;
}
}
return TRUE;
}
static void FreeSurfaceEvoStruct(NVSurfaceEvoPtr pSurfaceEvo)
{
if (pSurfaceEvo == NULL) {
@@ -54,7 +115,6 @@ static void FreeSurfaceEvoStruct(NVSurfaceEvoPtr pSurfaceEvo)
static void FreeSurfaceEvoRm(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo)
{
NvU64 structRefCnt;
NvU32 firstPlaneRmHandle;
NvU8 planeIndex;
if ((pDevEvo == NULL) || (pSurfaceEvo == NULL)) {
@@ -69,26 +129,11 @@ static void FreeSurfaceEvoRm(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo)
&pSurfaceEvo->planes[planeIndex].surfaceDesc);
}
firstPlaneRmHandle = pSurfaceEvo->planes[0].rmHandle;
if (firstPlaneRmHandle != 0) {
NvU32 sd;
for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
if (pSurfaceEvo->cpuAddress[sd] != NULL) {
nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
pDevEvo->pSubDevices[sd]->handle,
firstPlaneRmHandle,
pSurfaceEvo->cpuAddress[sd],
0);
pSurfaceEvo->cpuAddress[sd] = NULL;
}
}
CpuUnmapSurface(pDevEvo, pSurfaceEvo);
if (pSurfaceEvo->planes[0].rmHandle != 0) {
nvHsUnmapSurfaceFromDevice(pDevEvo,
firstPlaneRmHandle,
pSurfaceEvo->planes[0].rmHandle,
pSurfaceEvo->gpuAddress);
}
@@ -329,6 +374,72 @@ static NvBool ValidateRegisterSurfaceRequest(
return TRUE;
}
static NvBool ValidateSurfaceAddressSpace(
NVDevEvoPtr pDevEvo,
const struct NvKmsRegisterSurfaceRequest *pRequest,
NvU32 rmHandle)
{
NV0041_CTRL_GET_SURFACE_INFO_PARAMS surfaceInfoParams = {};
NV0041_CTRL_SURFACE_INFO surfaceInfo = {};
NV_STATUS status;
/*
* Don't do these checks on tegra. Tegra has different capabilities.
* Here we always say display is possible so we never fail framebuffer
* creation.
*/
if (pDevEvo->isSOCDisplay) {
return TRUE;
}
/*
* Don't do these checks for surfaces that do not need access to display
* hardware.
*/
if (pRequest->noDisplayHardwareAccess) {
return TRUE;
}
/*
* If the memory is not isochronous, the memory will not be scanned out to a
* display. The checks are not needed for such memory types.
*/
if (pRequest->isoType != NVKMS_MEMORY_ISO) {
return TRUE;
}
/*
* Check if the memory we are registering this surface with is valid. We
* cannot scan out sysmem or compressed buffers.
*
* If we cannot use this memory for display it may be resident in sysmem
* or may belong to another GPU.
*/
surfaceInfo.index = NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE;
surfaceInfoParams.surfaceInfoListSize = 1;
surfaceInfoParams.surfaceInfoList = (NvP64)&surfaceInfo;
status = nvRmApiControl(nvEvoGlobal.clientHandle,
rmHandle,
NV0041_CTRL_CMD_GET_SURFACE_INFO,
&surfaceInfoParams,
sizeof(surfaceInfoParams));
if (status != NV_OK) {
nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
"Failed to get memory location of RM memory object 0x%x",
rmHandle);
return FALSE;
}
if (surfaceInfo.data != NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM) {
nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
"Memory used for surface not appropriate for scanout");
return FALSE;
}
return TRUE;
}
void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo,
struct NvKmsPerOpenDev *pOpenDev,
@@ -484,6 +595,10 @@ void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo,
goto fail;
}
if (!ValidateSurfaceAddressSpace(pDevEvo, pRequest, planeRmHandle)) {
goto fail;
}
/* XXX Validate sizeInBytes: can we query the surface size from RM? */
if (!pRequest->noDisplayHardwareAccess) {
@@ -536,26 +651,14 @@ void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo,
/*
* Map the first plane of the surface only into the CPU's address space.
* This is the only valid plane since we would have already rejected
* multi-planar semaphore requests earlier.
* multi-planar NISO surface requests earlier in
*
* nvEvoRegisterSurface() => ValidateRegisterSurfaceRequest() =>
* ValidatePlaneProperties().
*/
if (needCpuMapping) {
NvU32 sd;
for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
result = nvRmApiMapMemory(
nvEvoGlobal.clientHandle,
pDevEvo->pSubDevices[sd]->handle,
pSurfaceEvo->planes[0].rmHandle,
0,
pRequest->planes[0].rmObjectSizeInBytes,
(void **) &pSurfaceEvo->cpuAddress[sd],
0);
if (result != NVOS_STATUS_SUCCESS) {
goto fail;
}
if (!nvEvoCpuMapSurface(pDevEvo, pSurfaceEvo)) {
goto fail;
}
}
@@ -1240,133 +1343,3 @@ void nvEvoUnregisterDeferredRequestFifo(
nvFree(pDeferredRequestFifo);
}
static NvBool AssignVblankSemControlHwHeadMask(
NVDispEvoRec *pDispEvo,
NvU32 apiHeadMask,
NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_ENABLE_PARAMS *pParams)
{
NvU32 apiHead;
FOR_ALL_HEADS(apiHead, apiHeadMask) {
NvU32 hwHead = nvGetPrimaryHwHead(pDispEvo, apiHead);
if (hwHead == NV_INVALID_HEAD) {
return FALSE;
}
pParams->headMask |= NVBIT(hwHead);
pParams->headIndexMap[hwHead] = apiHead;
}
pParams->bUseHeadIndexMap = TRUE;
return TRUE;
}
NVVblankSemControl *nvEvoEnableVblankSemControl(
NVDevEvoRec *pDevEvo,
NVDispEvoRec *pDispEvo,
NvU32 apiHeadMask,
NVSurfaceEvoRec *pSurfaceEvo,
NvU64 surfaceOffset)
{
NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_ENABLE_PARAMS params = { };
NVVblankSemControl *pVblankSemControl;
if (!pDevEvo->supportsVblankSemControl) {
return NULL;
}
if (!AssignVblankSemControlHwHeadMask(pDispEvo, apiHeadMask, &params)) {
return NULL;
}
/*
* We cannot enable VblankSemControl if the requested offset within the
* surface is too large.
*/
if (A_plus_B_greater_than_C_U64(
surfaceOffset,
sizeof(NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_DATA),
pSurfaceEvo->planes[0].rmObjectSizeInBytes)) {
return NULL;
}
if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) {
return NULL;
}
pVblankSemControl = nvCalloc(1, sizeof(*pVblankSemControl));
if (pVblankSemControl == NULL) {
return NULL;
}
pVblankSemControl->dispIndex = pDispEvo->displayOwner;
pVblankSemControl->surfaceOffset = surfaceOffset;
pVblankSemControl->pSurfaceEvo = pSurfaceEvo;
params.subDeviceInstance = pVblankSemControl->dispIndex;
params.hMemory = pVblankSemControl->pSurfaceEvo->planes[0].rmHandle;
params.memoryOffset = pVblankSemControl->surfaceOffset;
if (nvRmApiControl(nvEvoGlobal.clientHandle,
pDevEvo->displayCommonHandle,
NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_ENABLE,
&params, sizeof(params)) == NVOS_STATUS_SUCCESS) {
nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
return pVblankSemControl;
} else {
nvFree(pVblankSemControl);
return NULL;
}
}
NvBool nvEvoDisableVblankSemControl(
NVDevEvoRec *pDevEvo,
NVVblankSemControl *pVblankSemControl)
{
NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_DISABLE_PARAMS params = { };
if (!pDevEvo->supportsVblankSemControl) {
return FALSE;
}
params.subDeviceInstance = pVblankSemControl->dispIndex;
params.hMemory = pVblankSemControl->pSurfaceEvo->planes[0].rmHandle;
params.memoryOffset = pVblankSemControl->surfaceOffset;
if (nvRmApiControl(nvEvoGlobal.clientHandle,
pDevEvo->displayCommonHandle,
NV0073_CTRL_CMD_SYSTEM_VBLANK_SEM_CONTROL_DISABLE,
&params, sizeof(params)) == NVOS_STATUS_SUCCESS) {
nvEvoDecrementSurfaceRefCnts(pDevEvo, pVblankSemControl->pSurfaceEvo);
nvFree(pVblankSemControl);
return TRUE;
} else {
return FALSE;
}
}
NvBool nvEvoAccelVblankSemControls(
NVDevEvoPtr pDevEvo,
NvU32 dispIndex,
NvU32 hwHeadMask)
{
NV0073_CTRL_CMD_SYSTEM_ACCEL_VBLANK_SEM_CONTROLS_PARAMS params = { };
if (!pDevEvo->supportsVblankSemControl) {
return FALSE;
}
params.subDeviceInstance = dispIndex;
params.headMask = hwHeadMask;
return nvRmApiControl(nvEvoGlobal.clientHandle,
pDevEvo->displayCommonHandle,
NV0073_CTRL_CMD_SYSTEM_ACCEL_VBLANK_SEM_CONTROLS,
&params, sizeof(params)) == NVOS_STATUS_SUCCESS;
}

View File

@@ -0,0 +1,360 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-types.h"
#include "nvkms-utils.h"
#include "nvkms-surface.h"
#include "nvkms-modeset.h"
#include "nvkms-vblank-sem-control.h"
static void
VblankSemControlWrite(
NVVblankSemControlHeadEntry *pEntry,
NvU64 vblankCount,
NvBool bAccel)
{
volatile struct NvKmsVblankSemControlDataOneHead *pData =
pEntry->pDataOneHead;
const NvU32 requestCounter =
bAccel ? pData->requestCounterAccel : pData->requestCounter;
//
// Write the current vblankCount and GPU time, and release the
// semaphore. Be sure to release the semaphore last, so that consumers
// of these fields can use the semaphore to know when the other fields
// are ready.
//
pData->vblankCount = vblankCount;
//
// Use gcc builtin to ensure the pData->semaphore write is ordered after the
// above.
//
__sync_synchronize();
pData->semaphore = requestCounter;
//
// Record the request count and current vblankCount, for computation
// next time.
//
pEntry->previousRequestCounter = requestCounter;
pEntry->previousVblankCount = vblankCount;
}
static void VblankSemControlCallback(
NVDispEvoRec *pDispEvo,
NVVBlankCallbackPtr pCallbackData)
{
NVDispApiHeadStateEvoRec *pApiHeadState = pCallbackData->pUserData;
const NvU64 vblankCount = pApiHeadState->vblankCount;
NVVblankSemControlHeadEntry *pEntry;
nvListForEachEntry(
pEntry, &pApiHeadState->vblankSemControl.list, listEntry) {
volatile struct NvKmsVblankSemControlDataOneHead *pData =
pEntry->pDataOneHead;
NvU32 flags, swapInterval;
const NvU32 requestCounter = pData->requestCounter;
//
// Use gcc builtin to ensure the pData->requestCounter read is ordered
// before the below.
//
__sync_synchronize();
/*
* If this entry does not have a new request, skip it. But, still
* update the vblankCount so that the client always has access to the
* current vblankCount.
*/
if (requestCounter == pEntry->previousRequestCounter) {
pData->vblankCount = vblankCount;
continue;
}
flags = pData->flags;
swapInterval = DRF_VAL(KMS, _VBLANK_SEM_CONTROL, _SWAP_INTERVAL, flags);
/*
* If the requested swapInterval is not satisfied, skip this entry.
*/
if (swapInterval != 0) {
if (vblankCount < (pEntry->previousVblankCount + swapInterval)) {
continue;
}
}
VblankSemControlWrite(pEntry, vblankCount, FALSE /* bAccel */);
}
}
static NvBool EnableVblankSemControlOneHead(
NVDispEvoRec *pDispEvo,
NvU32 apiHead,
NVVblankSemControl *pVblankSemControl,
struct NvKmsVblankSemControlDataOneHead *pDataOneHead)
{
NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead];
NVVblankSemControlHeadEntry *pEntry;
const NvBool isFirstEntry =
nvListIsEmpty(&pApiHeadState->vblankSemControl.list);
pEntry = &pVblankSemControl->headEntry[apiHead];
pEntry->pDataOneHead = pDataOneHead;
pEntry->previousRequestCounter = 0;
pEntry->previousVblankCount = pApiHeadState->vblankCount;
//
// If this is the first enabled vblank sem control on head, add a vblank
// callback. Note we specify addToFront=true, so that this callback is
// sequenced before any NotifyVblank callbacks (those use addToFront=false).
//
if (isFirstEntry) {
pApiHeadState->vblankSemControl.pCallbackPtr =
nvApiHeadRegisterVBlankCallback(pDispEvo,
apiHead,
VblankSemControlCallback,
pApiHeadState,
0 /* listIndex */);
if (pApiHeadState->vblankSemControl.pCallbackPtr == NULL) {
nvkms_memset(pEntry, 0, sizeof(*pEntry));
return FALSE;
}
}
nvListAdd(&pEntry->listEntry, &pApiHeadState->vblankSemControl.list);
return TRUE;
}
static void DisableVblankSemControlOneHead(
NVDispEvoRec *pDispEvo,
NvU32 apiHead,
NVVblankSemControlHeadEntry *pEntry)
{
NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead];
//
// Accelerate any pending semaphores before disabling the vblank sem control.
//
VblankSemControlWrite(
pEntry, pApiHeadState->vblankCount, TRUE /* bAccel */);
nvListDel(&pEntry->listEntry);
//
// If that was the last enabled vblank sem control on head, delete the
// vblank callback.
//
if (nvListIsEmpty(&pApiHeadState->vblankSemControl.list)) {
nvApiHeadUnregisterVBlankCallback(
pDispEvo, pApiHeadState->vblankSemControl.pCallbackPtr);
pApiHeadState->vblankSemControl.pCallbackPtr = NULL;
}
nvkms_memset(pEntry, 0, sizeof(*pEntry));
}
static NvBool EnableVblankSemControlValidate(
NVDevEvoRec *pDevEvo,
NVDispEvoRec *pDispEvo,
NvU32 apiHeadMask,
NVSurfaceEvoRec *pSurfaceEvo,
NvU64 surfaceOffset)
{
NvU32 apiHead;
if (!pDevEvo->supportsVblankSemControl) {
return FALSE;
}
/*
* We cannot enable VblankSemControl if the requested offset within the
* surface is too large.
*/
if (A_plus_B_greater_than_C_U64(
surfaceOffset,
sizeof(struct NvKmsVblankSemControlData),
pSurfaceEvo->planes[0].rmObjectSizeInBytes)) {
return FALSE;
}
/*
* NvKmsVblankSemControlData must be at least 8-byte aligned, so that GPU
* semaphore releases can write to 8-byte fields within it with natural
* alignment.
*/
if ((surfaceOffset % sizeof(NvU64)) != 0) {
return FALSE;
}
if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) {
return FALSE;
}
FOR_ALL_HEADS(apiHead, apiHeadMask) {
if (nvGetPrimaryHwHead(pDispEvo, apiHead) == NV_INVALID_HEAD) {
return FALSE;
}
}
return TRUE;
}
static void DisableVblankSemControl(
NVDispEvoPtr pDispEvo,
NVVblankSemControl *pVblankSemControl)
{
NvU32 apiHead;
FOR_ALL_HEADS(apiHead, pVblankSemControl->apiHeadMask) {
NVVblankSemControlHeadEntry *pEntry =
&pVblankSemControl->headEntry[apiHead];
DisableVblankSemControlOneHead(pDispEvo, apiHead, pEntry);
}
}
NvBool nvEvoDisableVblankSemControl(
NVDevEvoRec *pDevEvo,
NVVblankSemControl *pVblankSemControl)
{
NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[pVblankSemControl->dispIndex];
if (!pDevEvo->supportsVblankSemControl) {
return FALSE;
}
DisableVblankSemControl(pDispEvo, pVblankSemControl);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pVblankSemControl->pSurfaceEvo);
nvFree(pVblankSemControl);
return TRUE;
}
void nvEvoOrphanVblankSemControl(
NVDispEvoRec *pDispEvo,
NVVblankSemControl *pVblankSemControl)
{
if (!pDispEvo->pDevEvo->supportsVblankSemControl) {
return;
}
DisableVblankSemControl(pDispEvo, pVblankSemControl);
pVblankSemControl->apiHeadMask = 0;
}
/*
 * Enable a vblank semaphore control on the heads in apiHeadMask, backed by
 * NvKmsVblankSemControlData at 'surfaceOffset' bytes into pSurfaceEvo.
 *
 * On success, returns a newly allocated NVVblankSemControl that holds a
 * reference on pSurfaceEvo; the caller owns the returned pointer.  Returns
 * NULL on validation failure, mapping failure, allocation failure, or if
 * enabling any head fails (in which case previously enabled heads are
 * rolled back).
 */
NVVblankSemControl *nvEvoEnableVblankSemControl(
    NVDevEvoRec *pDevEvo,
    NVDispEvoRec *pDispEvo,
    NvU32 apiHeadMask,
    NVSurfaceEvoRec *pSurfaceEvo,
    NvU64 surfaceOffset)
{
    struct NvKmsVblankSemControlData *pData;
    NVVblankSemControl *pVblankSemControl;
    NvU32 apiHead;
    if (!EnableVblankSemControlValidate(pDevEvo, pDispEvo, apiHeadMask,
                                        pSurfaceEvo, surfaceOffset)) {
        return NULL;
    }
    /*
     * Lazily map the surface; note we'll just leave the surface mapped after
     * this point.
     */
    if (pSurfaceEvo->cpuAddress[0] == NULL) {
        if (!nvEvoCpuMapSurface(pDevEvo, pSurfaceEvo)) {
            return NULL;
        }
    }
    /* CPU pointer to the control data within the mapped surface. */
    pData = (struct NvKmsVblankSemControlData *)
        (((NvU8 *) pSurfaceEvo->cpuAddress[0]) + surfaceOffset);
    pVblankSemControl = nvCalloc(1, sizeof(*pVblankSemControl));
    if (pVblankSemControl == NULL) {
        return NULL;
    }
    pVblankSemControl->dispIndex = pDispEvo->displayOwner;
    pVblankSemControl->apiHeadMask = apiHeadMask;
    pVblankSemControl->pSurfaceEvo = pSurfaceEvo;
    FOR_ALL_HEADS(apiHead, apiHeadMask) {
        if (!EnableVblankSemControlOneHead(pDispEvo,
                                           apiHead,
                                           pVblankSemControl,
                                           &pData->head[apiHead])) {
            /*
             * EnableVblankSemControlOneHead() failed for one head, but previous
             * heads may have succeeded. Unroll by disabling vblank_sem_control
             * for all heads where the pVblankSemControl is enabled.
             */
            DisableVblankSemControl(pDispEvo, pVblankSemControl);
            nvFree(pVblankSemControl);
            return NULL;
        }
    }
    /* All heads enabled: take the surface reference only on full success. */
    nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
    return pVblankSemControl;
}
/*
 * "Accelerate" all vblank semaphore controls registered on the heads in
 * apiHeadMask: write each control's semaphore with the head's current vblank
 * count in accelerated mode.  Returns FALSE if the device does not support
 * vblank semaphore controls.
 */
NvBool nvEvoAccelVblankSemControls(
    NVDevEvoPtr pDevEvo,
    NVDispEvoRec *pDispEvo,
    NvU32 apiHeadMask)
{
    NvU32 head;

    if (!pDevEvo->supportsVblankSemControl) {
        return FALSE;
    }

    FOR_ALL_HEADS(head, apiHeadMask) {
        NVVblankSemControlHeadEntry *pEntry;
        NVDispApiHeadStateEvoRec *pHeadState =
            &pDispEvo->apiHeadState[head];

        nvListForEachEntry(
            pEntry, &pHeadState->vblankSemControl.list, listEntry) {
            VblankSemControlWrite(pEntry,
                                  pHeadState->vblankCount,
                                  TRUE /* bAccel */);
        }
    }

    return TRUE;
}

View File

@@ -42,6 +42,7 @@
#include "nvkms-surface.h"
#include "nvkms-3dvision.h"
#include "nvkms-ioctl.h"
#include "nvkms-vblank-sem-control.h"
#include "nvkms-headsurface.h"
#include "nvkms-headsurface-ioctl.h"
#include "nvkms-headsurface-swapgroup.h"
@@ -1117,6 +1118,8 @@ static void RestoreConsole(NVDevEvoPtr pDevEvo)
nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo);
AllocSurfaceCtxDmasForAllOpens(pDevEvo);
} else {
nvRevokeDevice(pDevEvo);
}
}
}
@@ -1439,6 +1442,7 @@ static NvBool AllocDevice(struct NvKmsPerOpen *pOpen,
layer++) {
pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer];
}
pParams->reply.olutCaps = pDevEvo->caps.olut;
pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT;
pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate;
@@ -4234,6 +4238,7 @@ static NvBool SwitchMux(
{
struct NvKmsSwitchMuxParams *pParams = pParamsVoid;
const struct NvKmsSwitchMuxRequest *r = &pParams->request;
struct NvKmsPerOpenDev *pOpenDev;
NVDpyEvoPtr pDpyEvo;
pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
@@ -4241,7 +4246,12 @@ static NvBool SwitchMux(
return FALSE;
}
if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(GetPerOpenDev(pOpen, r->deviceHandle))) {
pOpenDev = GetPerOpenDev(pOpen, r->deviceHandle);
if (pOpenDev == NULL) {
return FALSE;
}
if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
return FALSE;
}
@@ -4652,7 +4662,8 @@ static NvBool NotifyVblank(
pCallbackData = nvApiHeadRegisterVBlankCallback(pOpenDisp->pDispEvo,
apiHead,
NotifyVblankCallback,
pEventOpenFd);
pEventOpenFd,
1 /* listIndex */);
if (pCallbackData == NULL) {
return NV_FALSE;
}
@@ -4867,8 +4878,7 @@ static NvBool AccelVblankSemControls(
struct NvKmsPerOpenDev *pOpenDev;
struct NvKmsPerOpenDisp *pOpenDisp;
NVDevEvoPtr pDevEvo;
const NVDispEvoRec *pDispEvo;
NvU32 apiHead, hwHeadMask = 0;
NVDispEvoRec *pDispEvo;
if (!GetPerOpenDevAndDisp(pOpen,
pParams->request.deviceHandle,
@@ -4885,18 +4895,10 @@ static NvBool AccelVblankSemControls(
pDevEvo = pOpenDev->pDevEvo;
pDispEvo = pOpenDisp->pDispEvo;
FOR_ALL_HEADS(apiHead, pParams->request.headMask) {
NvU32 hwHead = nvGetPrimaryHwHead(pDispEvo, apiHead);
if (hwHead != NV_INVALID_HEAD) {
hwHeadMask |= NVBIT(hwHead);
}
}
return nvEvoAccelVblankSemControls(
pDevEvo,
pDispEvo->displayOwner,
hwHeadMask);
pDispEvo,
pParams->request.headMask);
}
static NvBool VrrSignalSemaphore(
@@ -4917,6 +4919,30 @@ static NvBool VrrSignalSemaphore(
return TRUE;
}
static NvBool FramebufferConsoleDisabled(
struct NvKmsPerOpen *pOpen,
void *pParamsVoid)
{
const struct NvKmsFramebufferConsoleDisabledParams *pParams = pParamsVoid;
struct NvKmsPerOpenDev *pOpenDev =
GetPerOpenDev(pOpen, pParams->request.deviceHandle);
if (pOpenDev == NULL) {
return FALSE;
}
if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
return FALSE;
}
if (pOpen->clientType != NVKMS_CLIENT_KERNEL_SPACE) {
return FALSE;
}
nvRmUnmapFbConsoleMemory(pOpenDev->pDevEvo);
return TRUE;
}
/*!
* Perform the ioctl operation requested by the client.
*
@@ -5043,6 +5069,7 @@ NvBool nvKmsIoctl(
ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL, DisableVblankSemControl),
ENTRY(NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS, AccelVblankSemControls),
ENTRY(NVKMS_IOCTL_VRR_SIGNAL_SEMAPHORE, VrrSignalSemaphore),
ENTRY(NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED, FramebufferConsoleDisabled),
};
struct NvKmsPerOpen *pOpen = pOpenVoid;
@@ -5191,6 +5218,31 @@ void nvKmsClose(void *pOpenVoid)
}
/*
Frees all references to a device
*/
void nvRevokeDevice(NVDevEvoPtr pDevEvo)
{
if (pDevEvo == NULL) {
return;
}
struct NvKmsPerOpen *pOpen;
nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenListEntry) {
struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
if (pOpenDev == NULL) {
continue;
}
if (pOpenDev == pDevEvo->pNvKmsOpenDev) {
// do not free the internal pOpenDev, as that is handled
// by nvFreeDevEvo
continue;
}
FreeDeviceReference(pOpen, pOpenDev);
}
}
/*!
* Open callback.
*
@@ -6605,22 +6657,21 @@ void nvKmsResume(NvU32 gpuId)
suspendCounter--;
if (suspendCounter == 0) {
NVDevEvoPtr pDevEvo;
FOR_ALL_EVO_DEVS(pDevEvo) {
NVDevEvoPtr pDevEvo, pDevEvo_tmp;
FOR_ALL_EVO_DEVS_SAFE(pDevEvo, pDevEvo_tmp) {
nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming");
if (nvResumeDevEvo(pDevEvo)) {
nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo);
AllocSurfaceCtxDmasForAllOpens(pDevEvo);
}
if (pDevEvo->modesetOwner == NULL) {
// Hardware state was lost, so we need to force a console
// restore.
pDevEvo->skipConsoleRestore = FALSE;
RestoreConsole(pDevEvo);
if (pDevEvo->modesetOwner == NULL) {
// Hardware state was lost, so we need to force a console
// restore.
pDevEvo->skipConsoleRestore = FALSE;
RestoreConsole(pDevEvo);
}
}
}
}
@@ -6755,3 +6806,40 @@ NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *p
pOpenDev->pDevEvo->modesetOwner == pOpenDev ||
pOpenDev->pDevEvo->modesetSubOwner == pOpenDev;
}
void nvKmsOrphanVblankSemControlForAllOpens(NVDispEvoRec *pDispEvo)
{
struct NvKmsPerOpen *pOpen;
if (!pDispEvo->pDevEvo->supportsVblankSemControl) {
return;
}
nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDispEvo->pDevEvo);
struct NvKmsPerOpenDisp *pOpenDisp;
NvKmsGenericHandle disp;
if (pOpenDev == NULL) {
continue;
}
FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
pOpenDisp, disp) {
NVVblankSemControl *pVblankSemControl;
NvKmsGenericHandle vblankSemControlHandle;
if (pOpenDisp->pDispEvo != pDispEvo) {
continue;
}
FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankSemControlHandles,
pVblankSemControl,
vblankSemControlHandle) {
nvEvoOrphanVblankSemControl(pDispEvo, pVblankSemControl);
}
}
}
}

View File

@@ -1,4 +1,4 @@
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.545.00.dev/gpu_drv/dev_a-16109'
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
// WARNING: This file is auto-generated! Do not hand-edit!
// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.

View File

@@ -1,4 +1,4 @@
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.545.00.dev/gpu_drv/dev_a-16109'
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
// WARNING: This file is auto-generated! Do not hand-edit!
// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.
@@ -17,8 +17,8 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment
{ .offset = 0x00000300,
.registerCount = 13,
{ .offset = 0x00000280,
.registerCount = 12,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
.stage = NV3D_HW_SHADER_STAGE_PIXEL,
@@ -26,7 +26,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_customSampling
{ .offset = 0x00000580,
{ .offset = 0x00000480,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -35,7 +35,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_overlay
{ .offset = 0x00004680,
{ .offset = 0x00004500,
.registerCount = 31,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -44,7 +44,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_overlay_customSampling
{ .offset = 0x00005480,
{ .offset = 0x00005200,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -53,7 +53,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset
{ .offset = 0x00008280,
{ .offset = 0x00007f00,
.registerCount = 15,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -62,7 +62,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset_customSampling
{ .offset = 0x00008600,
{ .offset = 0x00008200,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -71,8 +71,8 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset_swapped
{ .offset = 0x0000c800,
.registerCount = 16,
{ .offset = 0x0000c380,
.registerCount = 15,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
.stage = NV3D_HW_SHADER_STAGE_PIXEL,
@@ -80,7 +80,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset_swapped_customSampling
{ .offset = 0x0000cb80,
{ .offset = 0x0000c680,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -89,7 +89,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset_overlay
{ .offset = 0x00010d80,
{ .offset = 0x00010800,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -98,7 +98,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset_overlay_customSampling
{ .offset = 0x00011c00,
{ .offset = 0x00011580,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -107,7 +107,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset_overlay_swapped
{ .offset = 0x00014b00,
{ .offset = 0x00014380,
.registerCount = 31,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -116,7 +116,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_offset_overlay_swapped_customSampling
{ .offset = 0x00015980,
{ .offset = 0x00015100,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -125,7 +125,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend
{ .offset = 0x00018880,
{ .offset = 0x00017f00,
.registerCount = 15,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -134,7 +134,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_customSampling
{ .offset = 0x00018b80,
{ .offset = 0x00018180,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -143,8 +143,8 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_swapped
{ .offset = 0x0001cd00,
.registerCount = 17,
{ .offset = 0x0001c280,
.registerCount = 15,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
.stage = NV3D_HW_SHADER_STAGE_PIXEL,
@@ -152,7 +152,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_swapped_customSampling
{ .offset = 0x0001d080,
{ .offset = 0x0001c580,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -161,7 +161,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_overlay
{ .offset = 0x00021280,
{ .offset = 0x00020700,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -170,7 +170,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_overlay_customSampling
{ .offset = 0x00022100,
{ .offset = 0x00021480,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -179,7 +179,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_overlay_swapped
{ .offset = 0x00024f80,
{ .offset = 0x00024200,
.registerCount = 31,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -188,7 +188,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_overlay_swapped_customSampling
{ .offset = 0x00025e00,
{ .offset = 0x00024f80,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -197,7 +197,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset
{ .offset = 0x00028d00,
{ .offset = 0x00027d80,
.registerCount = 20,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -206,7 +206,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset_customSampling
{ .offset = 0x00029100,
{ .offset = 0x00028100,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -215,7 +215,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset_swapped
{ .offset = 0x0002d380,
{ .offset = 0x0002c300,
.registerCount = 20,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -224,7 +224,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset_swapped_customSampling
{ .offset = 0x0002d780,
{ .offset = 0x0002c680,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -233,7 +233,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset_overlay
{ .offset = 0x00031a00,
{ .offset = 0x00030880,
.registerCount = 35,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -242,7 +242,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset_overlay_customSampling
{ .offset = 0x00032880,
{ .offset = 0x00031600,
.registerCount = 37,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -251,7 +251,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset_overlay_swapped
{ .offset = 0x00035780,
{ .offset = 0x00034400,
.registerCount = 36,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -260,7 +260,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling
{ .offset = 0x00036680,
{ .offset = 0x00035200,
.registerCount = 37,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -269,8 +269,8 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_yuv420
{ .offset = 0x00039600,
.registerCount = 56,
{ .offset = 0x00038080,
.registerCount = 40,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
.stage = NV3D_HW_SHADER_STAGE_PIXEL,
@@ -278,7 +278,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_yuv420_overlay
{ .offset = 0x0003b180,
{ .offset = 0x00039880,
.registerCount = 38,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -287,7 +287,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_pixelShift
{ .offset = 0x0003dc80,
{ .offset = 0x0003c180,
.registerCount = 32,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -296,7 +296,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_overlay_pixelShift
{ .offset = 0x0003e880,
{ .offset = 0x0003cd00,
.registerCount = 31,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -305,7 +305,7 @@ static const Nv3dProgramInfo BlackwellProgramInfo[NUM_PROGRAMS] = {
},
// nvidia_headsurface_fragment_reversePrime
{ .offset = 0x0003f780,
{ .offset = 0x0003db00,
.registerCount = 13,
.type = NV3D_SHADER_TYPE_PIXEL,
.constIndex = -1,
@@ -322,7 +322,7 @@ static const Nv3dShaderConstBufInfo BlackwellConstBufInfo[] = {
static const size_t BlackwellConstBufSize = 0;
static const NvU32 BlackwellConstBufSizeAlign = 256;
// Total shader code size: 254.5 KB
static const size_t BlackwellProgramHeapSize = 260608;
// Total shader code size: 247.25 KB
static const size_t BlackwellProgramHeapSize = 253184;
static const size_t BlackwellShaderMaxLocalBytes = 0;
static const size_t BlackwellShaderMaxStackBytes = 0;

View File

@@ -1,4 +1,4 @@
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.545.00.dev/gpu_drv/dev_a-16109'
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
// WARNING: This file is auto-generated! Do not hand-edit!
// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.

View File

@@ -1,4 +1,4 @@
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.545.00.dev/gpu_drv/dev_a-16109'
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
// WARNING: This file is auto-generated! Do not hand-edit!
// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.

View File

@@ -1,4 +1,4 @@
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.545.00.dev/gpu_drv/dev_a-16109'
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
// WARNING: This file is auto-generated! Do not hand-edit!
// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.

View File

@@ -1,4 +1,4 @@
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.545.00.dev/gpu_drv/dev_a-16109'
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
// WARNING: This file is auto-generated! Do not hand-edit!
// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.

View File

@@ -1,4 +1,4 @@
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.545.00.dev/gpu_drv/dev_a-16109'
// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
// WARNING: This file is auto-generated! Do not hand-edit!
// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.

View File

@@ -5,11 +5,13 @@ SRCS += ../common/shared/nvstatus/nvstatus.c
SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c
SRCS += ../common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
SRCS += ../common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
SRCS += ../common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
SRCS += ../common/softfloat/source/f16_to_f32.c
SRCS += ../common/softfloat/source/f32_add.c
SRCS += ../common/softfloat/source/f32_div.c
SRCS += ../common/softfloat/source/f32_eq.c
@@ -74,6 +76,7 @@ SRCS += ../common/softfloat/source/s_mulAddF32.c
SRCS += ../common/softfloat/source/s_mulAddF64.c
SRCS += ../common/softfloat/source/s_normRoundPackToF32.c
SRCS += ../common/softfloat/source/s_normRoundPackToF64.c
SRCS += ../common/softfloat/source/s_normSubnormalF16Sig.c
SRCS += ../common/softfloat/source/s_normSubnormalF32Sig.c
SRCS += ../common/softfloat/source/s_normSubnormalF64Sig.c
SRCS += ../common/softfloat/source/s_roundPackToF16.c
@@ -103,6 +106,7 @@ SRCS_CXX += ../common/displayport/src/dp_edid.cpp
SRCS_CXX += ../common/displayport/src/dp_evoadapter.cpp
SRCS_CXX += ../common/displayport/src/dp_groupimpl.cpp
SRCS_CXX += ../common/displayport/src/dp_guid.cpp
SRCS_CXX += ../common/displayport/src/dp_linkconfig.cpp
SRCS_CXX += ../common/displayport/src/dp_list.cpp
SRCS_CXX += ../common/displayport/src/dp_merger.cpp
SRCS_CXX += ../common/displayport/src/dp_messagecodings.cpp
@@ -207,6 +211,7 @@ SRCS += src/nvkms-stereo.c
SRCS += src/nvkms-surface.c
SRCS += src/nvkms-utils-flip.c
SRCS += src/nvkms-utils.c
SRCS += src/nvkms-vblank-sem-control.c
SRCS += src/nvkms-vrr.c
SRCS += src/nvkms.c
SRCS += ../common/unix/xzminidec/src/xz_crc32.c