515.43.04

This commit is contained in:
Andy Ritger
2022-05-09 13:18:59 -07:00
commit 1739a20efc
2519 changed files with 1060036 additions and 0 deletions

143
src/nvidia-modeset/Makefile Normal file
View File

@@ -0,0 +1,143 @@
###########################################################################
# Makefile for nv-modeset-kernel.o
###########################################################################

NV_MODULE_LOGGING_NAME ?= nvidia-modeset

VERSION_MK_DIR = ../../
include ../../utils.mk

include srcs.mk

# The source files for nv-modeset-kernel.o are all SRCS and SRCS_CXX defined in
# srcs.mk, and the NVIDIA ID string
ALL_SRCS = $(SRCS) $(SRCS_CXX)
ALL_SRCS += $(NVIDSTRING)

SRC_COMMON = ../common

# Force cpuopsys.h into every translation unit, then add the include paths
# shared with the rest of the driver tree.
CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h

CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc
CFLAGS += -I $(SRC_COMMON)/shared/inc
CFLAGS += -I $(SRC_COMMON)/inc
CFLAGS += -I $(SRC_COMMON)/softfloat/nvidia
CFLAGS += -I $(SRC_COMMON)/softfloat/source/include
CFLAGS += -I $(SRC_COMMON)/softfloat/source/8086-SSE
CFLAGS += -I $(SRC_COMMON)/unix/common/utils/interface
CFLAGS += -I $(SRC_COMMON)/unix/common/inc
CFLAGS += -I $(SRC_COMMON)/modeset
CFLAGS += -I os-interface/include
CFLAGS += -I kapi/interface
CFLAGS += -I ../nvidia/arch/nvalloc/unix/include
CFLAGS += -I interface
CFLAGS += -I include
CFLAGS += -I kapi/include
CFLAGS += -I generated
CFLAGS += -I $(SRC_COMMON)/displayport/inc
CFLAGS += -I $(SRC_COMMON)/displayport/inc/dptestutil
CFLAGS += -I $(SRC_COMMON)/inc/displayport

CFLAGS += -DNDEBUG
CFLAGS += -D_LANGUAGE_C
CFLAGS += -D__NO_CTYPE
CFLAGS += -DNV_CPU_INTRINSICS_KERNEL
CFLAGS += -DNVHDMIPKT_RM_CALLS_INTERNAL=0

# XXX it would be nice to only define these for appropriate files...
CFLAGS += -DSOFTFLOAT_ROUND_ODD
CFLAGS += -DSOFTFLOAT_FAST_DIV32TO16
CFLAGS += -DSOFTFLOAT_FAST_DIV64TO32

# Tell nvtiming to use nvkms import functions
CFLAGS += -DNVT_USE_NVKMS

CFLAGS += -Wformat
CFLAGS += -Wreturn-type
CFLAGS += -Wswitch
CFLAGS += -Wunused-local-typedefs
CFLAGS += -Wchar-subscripts
CFLAGS += -Wparentheses
CFLAGS += -Wpointer-arith
CFLAGS += -Wcast-qual
CFLAGS += -Wall
CFLAGS += -Wextra
CFLAGS += -Wno-sign-compare
CFLAGS += -Wno-unused-parameter
CFLAGS += -Wno-missing-field-initializers
CFLAGS += -Wno-format-zero-length
CFLAGS += -Wmissing-declarations
CFLAGS += -Wno-cast-qual

CFLAGS += -O2

# Kernel-style code generation: no FPU/SIMD, no red zone, kernel code model.
ifeq ($(TARGET_ARCH),x86_64)
  CFLAGS += -msoft-float
  CFLAGS += -mno-red-zone
  CFLAGS += -mcmodel=kernel
  CFLAGS += -mno-mmx
  CFLAGS += -mno-sse
  CFLAGS += -mno-sse2
  CFLAGS += -mno-3dnow
endif

ifeq ($(TARGET_ARCH),aarch64)
  CFLAGS += -mgeneral-regs-only
  CFLAGS += -march=armv8-a
endif

CFLAGS += -fno-pic
CFLAGS += -fno-common
CFLAGS += -fomit-frame-pointer
CFLAGS += -fno-strict-aliasing
CFLAGS += -ffunction-sections
CFLAGS += -fdata-sections
CFLAGS += -ffreestanding

# Flags that are only used if the compiler supports them (TEST_CC_ARG is
# defined in utils.mk).
CONDITIONAL_CFLAGS := $(call TEST_CC_ARG, -fcf-protection=none)
CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-overflow=2)
CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-truncation=1)

ifeq ($(TARGET_ARCH),x86_64)
  CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern)
  CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register)
endif

CFLAGS += $(CONDITIONAL_CFLAGS)

CC_ONLY_CFLAGS += -Wimplicit
CC_ONLY_CFLAGS += -Wstrict-prototypes
CC_ONLY_CFLAGS += -Wmissing-prototypes
CC_ONLY_CFLAGS += -std=gnu11

CXX_ONLY_CFLAGS += -std=gnu++11
CXX_ONLY_CFLAGS += -fno-operator-names
CXX_ONLY_CFLAGS += -fno-rtti
CXX_ONLY_CFLAGS += -fno-exceptions
CXX_ONLY_CFLAGS += -fcheck-new

SHADER_OBJS =

CFLAGS += -DNVKMS_INCLUDE_HEADSURFACE=0

OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS))
OBJS += $(SHADER_OBJS)

# Define how to generate the NVIDIA ID string
$(eval $(call GENERATE_NVIDSTRING, \
  NV_KMS_ID, \
  UNIX Open Kernel Mode Setting Driver, $(OBJS)))

# Define how to build each object file from the corresponding source file.
$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src))))

NV_MODESET_KERNEL_O = $(OUTPUTDIR)/nv-modeset-kernel.o

# Fix: this was previously misspelled ".PNONY", which silently left "all" and
# "clean" undeclared as phony targets (a file named "all" or "clean" in this
# directory would have shadowed them).
.PHONY: all clean

all: $(NV_MODESET_KERNEL_O)

# Partial (relocatable) link of every object into a single nv-modeset-kernel.o
$(NV_MODESET_KERNEL_O): $(OBJS)
	$(call quiet_cmd,LD) -r -o $@ $(OBJS)

clean:
	$(RM) -rf $(OUTPUTDIR)

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__
#define __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__

#include "nvkms-types.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * nvkms interface to the DisplayPort library's connector event sink.
 * Only prototypes are visible here; implementations live elsewhere.
 */

/* Presumably toggles Adaptive-Sync signaling for the given head -- confirm
 * against the implementation. */
void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head,
                            NvBool enable);

void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo);

NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo);

#ifdef __cplusplus
/* Fix: dropped the stray ';' after the closing brace of the extern "C"
 * block; a linkage specification is not terminated by a semicolon and the
 * extra one triggers -Wextra-semi/-pedantic diagnostics. */
}
#endif

#endif /* __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ */

View File

@@ -0,0 +1,100 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_DP_NVDP_CONNECTOR_H__
#define __NVKMS_DP_NVDP_CONNECTOR_H__

/*
 * nvkms interface to DisplayPort library connectors: creation/teardown,
 * hotplug notification, modeset validation, and SST/MST link-mode control.
 * Only prototypes are visible here; implementations live elsewhere.
 */

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE(review): unlike sibling headers, the include below sits inside the
 * extern "C" block; left as-is to avoid changing the linkage nvkms-types.h
 * declarations receive when compiled as C++. */
#include "nvkms-types.h"

NVDPLibConnectorPtr nvDPCreateConnector(NVConnectorEvoPtr pConnectorEvo);

/* Long pulse: connect/disconnect notification for the connector. */
void nvDPNotifyLongPulse(NVConnectorEvoPtr pConnectorEvo,
                         NvBool connected);

/* Short pulse: interrupt request notification. */
void nvDPNotifyShortPulse(NVDPLibConnectorPtr pNVDpLibConnector);

void nvDPDestroyConnector(NVDPLibConnectorPtr pNVDpLibConnector);

NvBool nvDPIsLinkAwaitingTransition(NVConnectorEvoPtr pConnectorEvo);

NVDPLibModesetStatePtr nvDPLibCreateModesetState(
    const NVDispEvoRec *pDispEvo,
    const NvU32 head,
    const NvU32 displayId,
    const NVDpyIdList dpyIdList,
    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
    NVHwModeTimingsEvo *pTimings);

void nvDPLibFreeModesetState(NVDPLibModesetStatePtr pDpLibModesetState);

/*
 * Validation flow: nvDPBeginValidation() opens a validation pass,
 * nvDPLibValidateTimings() is called per head, and nvDPEndValidation()
 * closes the pass and reports overall success.
 */
void nvDPBeginValidation(NVDispEvoPtr pDispEvo);

NvBool nvDPLibValidateTimings(
    const NVDispEvoRec *pDispEvo,
    const NvU32 head,
    const NvU32 displayId,
    const NVDpyIdList dpyIdList,
    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
    const struct NvKmsModeValidationParams *pModeValidationParams,
    NVHwModeTimingsEvo *pTimings);

NvBool nvDPEndValidation(NVDispEvoPtr pDispEvo);

NvBool nvDPValidateModeForDpyEvo(
    const NVDpyEvoRec *pDpyEvo,
    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
    const struct NvKmsModeValidationParams *pModeValidationParams,
    NVHwModeTimingsEvo *pTimings);

/* Bracket a modeset on this connector. */
void nvDPPreSetMode(NVDPLibConnectorPtr pDpLibConnector,
                    const NVEvoModesetUpdateState *pModesetUpdateState);
void nvDPPostSetMode(NVDPLibConnectorPtr pDpLibConnector);

/* Suspend/resume the DP library's management of the connector. */
void nvDPPause(NVDPLibConnectorPtr pNVDpLibConnector);
NvBool nvDPResume(NVDPLibConnectorPtr pNVDpLibConnector, NvBool plugged);

void nvDPSetAllowMultiStreamingOneConnector(
    NVDPLibConnectorPtr pDpLibConnector,
    NvBool allowMST);

void nvDPSetAllowMultiStreaming(NVDevEvoPtr pDevEvo, NvBool allowMST);

/* Current link mode of a connector: off, single-stream, or multi-stream. */
enum NVDpLinkMode {
    NV_DP_LINK_MODE_OFF,
    NV_DP_LINK_MODE_SST,
    NV_DP_LINK_MODE_MST,
};

enum NVDpLinkMode nvDPGetActiveLinkMode(NVDPLibConnectorPtr pDpLibConnector);

#ifdef __cplusplus
/* Fix: dropped the stray ';' after the extern "C" closing brace. */
}
#endif

#endif /* __NVKMS_DP_NVDP_CONNECTOR_H__ */

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_DP_NVDP_DEVICE_H__
#define __NVKMS_DP_NVDP_DEVICE_H__

#include "nvkms-types.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * nvkms interface to per-device DisplayPort library operations.
 * Only prototypes are visible here; implementations live elsewhere.
 */

void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on);

/* EDID access: query the size first, then fetch into a caller buffer. */
unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo);
NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size);

void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo);

void nvDPDpyFree(NVDpyEvoPtr pDpyEvo);

#ifdef __cplusplus
/* Fix: dropped the stray ';' after the extern "C" closing brace. */
}
#endif

#endif /* __NVKMS_DP_NVDP_DEVICE_H__ */

View File

@@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_DP_NVDP_TIMER_H__
#define __NVKMS_DP_NVDP_TIMER_H__

#include "nvkms-types.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * nvkms interface to the DisplayPort library's timer support.
 * Only prototypes are visible here; implementations live elsewhere.
 */

NvBool nvDPTimersPending(void);

NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo);
void nvDPFreeTimer(NVDPLibTimerPtr pTimer);

void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo);

#ifdef __cplusplus
/* Fix: dropped the stray ';' after the extern "C" closing brace. */
}
#endif

#endif /* __NVKMS_DP_NVDP_TIMER_H__ */

View File

@@ -0,0 +1,40 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2009-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __EVO_STATE_H__
#define __EVO_STATE_H__

#include "nvkms-types.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Only the prototype is visible here; the implementation lives elsewhere. */
void nvEvoStateStartNoLock(NVEvoSubDevPtr);

#ifdef __cplusplus
/* Fix: dropped the stray ';' after the extern "C" closing brace. */
}
#endif

#endif /* __EVO_STATE_H__ */

View File

@@ -0,0 +1,39 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_3DVISION_H__
#define __NVKMS_3DVISION_H__

#include "nvkms-types.h"

/*
 * nvkms 3D Vision support interface.  Only prototypes are visible here;
 * implementations live elsewhere.
 *
 * NOTE(review): unlike sibling nvkms headers, this one has no extern "C"
 * guards -- confirm it is only ever included from C translation units.
 */

void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 head);
void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo);
/* Returns NvBool; presumably adjusts timings for 3D Vision and logs through
 * pInfoString -- confirm against the implementation. */
NvBool
nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo,
                              NVEvoInfoStringPtr pInfoString);
void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo);
void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo,
                                   const NvU32 head);
#endif /* __NVKMS_3DVISION_H__ */

View File

@@ -0,0 +1,51 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_ATTRIBUTES_H__
#define __NVKMS_ATTRIBUTES_H__

#include "nvkms-types.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * nvkms display attribute get/set interface.  Only prototypes are visible
 * here; implementations live elsewhere.
 */

NvS64 nvRMLaneCountToNvKms(NvU32 rmLaneCount);

/* Each returns an NvBool success/failure status. */
NvBool nvSetDpyAttributeEvo(NVDpyEvoPtr pDpyEvo,
                            struct NvKmsSetDpyAttributeParams *pParams);

NvBool nvGetDpyAttributeEvo(const NVDpyEvoRec *pDpyEvo,
                            struct NvKmsGetDpyAttributeParams *pParams);

NvBool nvGetDpyAttributeValidValuesEvo(
    const NVDpyEvoRec *pDpyEvo,
    struct NvKmsGetDpyAttributeValidValuesParams *pParams);

NvBool nvDpyValidateColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 value);

#ifdef __cplusplus
/* Fix: dropped the stray ';' after the extern "C" closing brace. */
}
#endif

#endif /* __NVKMS_ATTRIBUTES_H__ */

View File

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_CONSOLE_RESTORE_H__
#define __NVKMS_CONSOLE_RESTORE_H__

#include "nvkms-types.h"

/*
 * Restore the console display configuration for the device.  Returns an
 * NvBool success/failure status; allowMST presumably controls whether
 * DisplayPort multi-streaming may be used during the restore -- confirm
 * against the implementation.
 */
NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST);

#endif // __NVKMS_CONSOLE_RESTORE_H__

View File

@@ -0,0 +1,53 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_CURSOR_H__
#define __NVKMS_CURSOR_H__

#include "nvkms-types.h"

/*
 * nvkms hardware cursor interface.  Only prototypes are visible here;
 * implementations live elsewhere.
 */

/* Resolves the cursor image surfaces (one per eye, up to NVKMS_MAX_EYES)
 * from client surface handles; returns an NvBool success status. */
NvBool nvGetCursorImageSurfaces(
    const NVDevEvoRec *pDevEvo,
    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
    const struct NvKmsSetCursorImageCommonParams *pParams,
    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]);

NvBool nvSetCursorImage(
    NVDispEvoPtr pDispEvo,
    const struct NvKmsPerOpenDev *pOpenDevice,
    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
    NvU32 head,
    const struct NvKmsSetCursorImageCommonParams *pParams);

/* Cursor positioning: the "Internal" variant takes raw coordinates, the
 * other takes the client-visible params structure. */
void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo,
                             NvU32 head, NvS16 x, NvS16 y);
void nvEvoMoveCursor(NVDispEvoPtr pDispEvo, NvU32 head,
                     const struct NvKmsMoveCursorCommonParams *pParams);

NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo);
void nvFreeCursorEvo(NVDevEvoPtr pDevEvo);

enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo);

#endif /* __NVKMS_CURSOR_H__ */

View File

@@ -0,0 +1,286 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/* This file contains DMA push buffer inlined routines. */

#ifndef __NVKMS_DMA_H__
#define __NVKMS_DMA_H__

#include <nvctassert.h>

#include "nvkms-types.h"
#include "nvkms-utils.h"

#include "class/cl917d.h"

/* Prototypes, implemented elsewhere (not visible in this file): */
void nvDmaKickoffEvo(NVEvoChannelPtr);
void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count);
void nvWriteEvoCoreNotifier(const NVDispEvoRec *, NvU32 offset, NvU32 value);
NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo,
                                   NvU32 offset, NvU32 done_base_bit,
                                   NvU32 done_extent_bit,
                                   NvU32 done_false_value);
void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset,
                              NvU32 done_base_bit,
                              NvU32 done_extent_bit, NvU32 done_false_value);
void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask);
NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier,
                             NvU32 entry_stride,
                             NvU32 entry_count,
                             NvU32 status_offset,
                             NvU32 field_count,
                             NvU32 flag_count,
                             const CRC32NotifierEntryRec *field_info,
                             const CRC32NotifierEntryFlags *flag_info);
void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier,
                             NvU32 offset,
                             NvU32 reset_base_bit,
                             NvU32 reset_value);
NvBool nvEvoWaitForCRC32Notifier(volatile NvU32 *pCRC32Notifier,
                                 NvU32 offset,
                                 NvU32 done_base_bit,
                                 NvU32 done_extent_bit,
                                 NvU32 done_value);

/* Subdevice mask with all bits of the cl917d SET_SUBDEVICE_MASK field set. */
#define SUBDEVICE_MASK_ALL DRF_MASK(NV917D_DMA_SET_SUBDEVICE_MASK_VALUE)
/*
 * Store one 32-bit PIO method value.  'offset' is in bytes (note the
 * division by sizeof(NvU32) to form the dword index into pBase).
 */
static inline void nvDmaStorePioMethod(
    void *pBase, NvU32 offset, NvU32 value)
{
    NvU32 *ptr = ((NvU32 *)pBase) + (offset/sizeof(NvU32));

    /*
     * Use gcc built-in atomic store to ensure the write happens exactly once
     * and to ensure ordering. We can use the weaker "relaxed" model because we
     * separately use appropriate fencing on anything that needs to precede this
     * write.
     */
    __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
/*
 * Load one 32-bit PIO method value.  'offset' is in bytes, as in
 * nvDmaStorePioMethod().
 */
static inline NvU32 nvDmaLoadPioMethod(
    const void *pBase, NvU32 offset)
{
    const NvU32 *ptr = ((const NvU32 *)pBase) + (offset/sizeof(NvU32));

    /*
     * Use gcc built-in atomic load to ensure the read happens exactly once and
     * to ensure ordering. We use the "acquire" model to ensure anything after
     * this read doesn't get reordered earlier than this read. (E.g., we don't
     * want any writes to the pushbuffer that are waiting on GET to advance to
     * get reordered before this read, potentially clobbering the pushbuffer
     * before it's been read.)
     */
    return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
/*
 * Return TRUE if subDevMask selects the same subdevices as the channel's
 * currently-programmed mask.  Both masks are first masked down to
 * 'allSubDevices' so that bits for nonexistent subdevices (beyond
 * pb.num_channels) cannot cause a spurious mismatch.
 */
static inline NvBool nvDmaSubDevMaskMatchesCurrent(
    const NVEvoChannel *pChannel,
    const NvU32 subDevMask)
{
    const NvU32 allSubDevices = (1 << pChannel->pb.num_channels) - 1;

    return (subDevMask & allSubDevices) ==
           (pChannel->pb.currentSubDevMask & allSubDevices);
}
/*
 * Append one dword of method data to the channel's push buffer and advance
 * the write pointer.  No bounds checking is done here; callers are expected
 * to have reserved space first (see the fifo accounting in
 * nvDmaSetStartEvoMethod()).
 */
static inline void nvDmaSetEvoMethodData(
    NVEvoChannelPtr pChannel,
    const NvU32 data)
{
    *(pChannel->pb.buffer) = data;
    pChannel->pb.buffer++;
}

/* Append a 64-bit value as two dwords: high 32 bits first, then low. */
static inline void nvDmaSetEvoMethodDataU64(
    NVEvoChannelPtr pChannel,
    const NvU64 data)
{
    nvDmaSetEvoMethodData(pChannel, NvU64_HI32(data));
    nvDmaSetEvoMethodData(pChannel, NvU64_LO32(data));
}
/* Get the SDM (subdevice mask) for a given pDisp: a single bit selecting
 * the disp's display owner subdevice. */
static inline NvU32 nvDispSubDevMaskEvo(const NVDispEvoRec *pDispEvo)
{
    return NVBIT(pDispEvo->displayOwner);
}

/* Initialize the EVO SDM stack: depth 0, broadcasting to all subdevices. */
static inline void nvInitEvoSubDevMask(NVDevEvoPtr pDevEvo) {
    pDevEvo->subDevMaskStackDepth = 0;
    pDevEvo->subDevMaskStack[0] = SUBDEVICE_MASK_ALL;
}

/* Return the SDM at the top of the stack (i.e. the currently active one) */
static inline NvU32 nvPeekEvoSubDevMask(NVDevEvoPtr pDevEvo) {
    return pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth];
}

/* Push the given mask onto the stack and set it.  Overflow beyond
 * NV_EVO_SUBDEV_STACK_SIZE is caught only by the nvAssert. */
static inline void nvPushEvoSubDevMask(NVDevEvoPtr pDevEvo, NvU32 mask) {
    pDevEvo->subDevMaskStackDepth++;
    nvAssert(pDevEvo->subDevMaskStackDepth < NV_EVO_SUBDEV_STACK_SIZE);
    pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth] = mask;
}

/* Automagically push the SDM for broadcast to disp. */
static inline void nvPushEvoSubDevMaskDisp(const NVDispEvoRec *pDispEvo) {
    NvU32 mask = nvDispSubDevMaskEvo(pDispEvo);

    nvPushEvoSubDevMask(pDispEvo->pDevEvo, mask);
}

/* Pop the last entry on the stack.
 * NOTE(review): no underflow check here (cf. the assert in push) --
 * presumably pops are always paired with pushes; confirm with callers. */
static inline void nvPopEvoSubDevMask(NVDevEvoPtr pDevEvo) {
    pDevEvo->subDevMaskStackDepth--;
}
/*
 * Update the state tracked in updateState to indicate that pChannel has
 * pending methods and requires an update/kickoff.
 *
 * The channel's mask is recorded only for the subdevices selected by the
 * currently-active subdevice mask (nvPeekEvoSubDevMask()).
 */
static inline void nvUpdateUpdateState(NVDevEvoPtr pDevEvo,
                                       NVEvoUpdateState *updateState,
                                       const NVEvoChannel *pChannel)
{
    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
    NvU32 sd;

    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        if (subDevMask & (1 << sd)) {
            updateState->subdev[sd].channelMask |= pChannel->channelMask;
        }
    }
}

/*
 * Update the state tracked in updateState to indicate that pChannel has
 * pending WindowImmediate methods.  Same per-subdevice filtering as
 * nvUpdateUpdateState(), but accumulates into winImmChannelMask.
 */
static inline void nvWinImmChannelUpdateState(NVDevEvoPtr pDevEvo,
                                              NVEvoUpdateState *updateState,
                                              const NVEvoChannel *pChannel)
{
    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
    NvU32 sd;

    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        if (subDevMask & (1 << sd)) {
            updateState->subdev[sd].winImmChannelMask |= pChannel->channelMask;
        }
    }
}
/*
 * Update the state tracked in updateState to prevent pChannel from
 * interlocking with the core channel on the next UPDATE.
 *
 * As with the other update-state helpers above, only subdevices selected
 * by the currently-active subdevice mask are affected.
 */
static inline
void nvDisableCoreInterlockUpdateState(NVDevEvoPtr pDevEvo,
                                       NVEvoUpdateState *updateState,
                                       const NVEvoChannel *pChannel)
{
    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
    NvU32 sd;

    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        if (subDevMask & (1 << sd)) {
            updateState->subdev[sd].noCoreInterlockMask |=
                pChannel->channelMask;
        }
    }
}
// These macros verify that the values used in the methods fit
// into the defined ranges: ASSERT_DRF_NUM fires if 'n' has any bits
// outside the DRF field (d,r,f).
#define ASSERT_DRF_NUM(d, r, f, n) \
    nvAssert(!(~DRF_MASK(NV ## d ## r ## f) & (n)))

// From resman nv50/dev_disp.h: bitfield layout of a display DMA method
// header (opcode, dword count, method offset).
#define NV_UDISP_DMA_OPCODE                                  31:29 /* RWXUF */
#define NV_UDISP_DMA_OPCODE_METHOD                      0x00000000 /* RW--V */
#define NV_UDISP_DMA_METHOD_COUNT                            27:18 /* RWXUF */
// Technically, the METHOD_OFFSET field is 13:2 for nvdisplay (classes c3*),
// and only 11:2 for older display classes.  But, the higher bits were
// unused in the older classes, and we should never push any methods of that
// size on them anyway, so we always use the wider definition here.
#define NV_UDISP_DMA_METHOD_OFFSET                            13:2 /* RWXUF */
// Start an EVO method: emits the method header dword and reserves space for
// 'count' data dwords (which the caller then writes with
// nvDmaSetEvoMethodData()).  'method' is a byte offset and must be
// dword-aligned.
static inline void nvDmaSetStartEvoMethod(
    NVEvoChannelPtr pChannel,
    NvU32 method,
    NvU32 count)
{
    NVDmaBufferEvoPtr p = &pChannel->pb;
    const NvU32 sdMask = nvPeekEvoSubDevMask(p->pDevEvo);

    // We add 1 to the count for the method header.
    const NvU32 countPlusHeader = count + 1;
    const NvU32 methodDwords = method >> 2;

    nvAssert((method & 0x3) == 0);

    // Verify header fields fit in their bitfield ranges.
    ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count);
    ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords);

    // If the active subdevice mask differs from what the channel last
    // programmed, re-program it first (only meaningful with >1 channel).
    if (!nvDmaSubDevMaskMatchesCurrent(pChannel, sdMask)) {
        if (p->num_channels > 1) {
            nvEvoSetSubdeviceMask(pChannel, sdMask);
        }
    }

    // Make room for the header plus data dwords before writing anything.
    if (p->fifo_free_count <= countPlusHeader) {
        nvEvoMakeRoom(pChannel, countPlusHeader);
    }

    nvDmaSetEvoMethodData(pChannel,
        DRF_DEF(_UDISP, _DMA, _OPCODE, _METHOD) |
        DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count) |
        DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords));

    p->fifo_free_count -= countPlusHeader;
}
/*
 * Return TRUE if updateState records no pending channel methods on any
 * subdevice.
 *
 * NOTE(review): only channelMask is consulted; winImmChannelMask and
 * noCoreInterlockMask (set by the helpers above) are ignored here --
 * confirm that is intentional.
 */
static inline NvBool nvIsUpdateStateEmpty(const NVDevEvoRec *pDevEvo,
                                          const NVEvoUpdateState *updateState)
{
    NvU32 sd;

    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        if (updateState->subdev[sd].channelMask != 0x0) {
            return FALSE;
        }
    }
    return TRUE;
}

#endif /* __NVKMS_DMA_H__ */

View File

@@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_DPY_H__
#define __NVKMS_DPY_H__

#include "nvkms-types.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * nvkms display device (dpy) interface: allocation, EDID handling, and
 * attribute queries.  Only prototypes are visible here; implementations
 * live elsewhere.
 */

void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo);

void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo,
                           struct NvKmsModeValidationValidSyncs *pValidSyncs);

/* Lifetime: allocate/free a dpy on a disp/connector. */
NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo,
                          NVConnectorEvoPtr pConnectorEvo,
                          NVDpyId dpyId, const char *dpAddress);
void nvFreeDpyEvo(NVDispEvoPtr pDispEvo, NVDpyEvoPtr pDpyEvo);

NVConnectorEvoPtr nvGetConnectorFromDisp(NVDispEvoPtr pDispEvo, NVDpyId dpyId);

void nvUpdateInfoFrames(const NVDispEvoRec *pDispEvo, const NvU32 head);

NvBool nvDpyRequiresDualLinkEvo(const NVDpyEvoRec *pDpyEvo,
                                const NVHwModeTimingsEvo *pTimings);

NVHwModeTimingsEvoPtr
nvGetCurrentModeTimingsForDpyEvo(const NVDpyEvoRec *pDpyEvo);

NVDpyEvoPtr nvGetDpyEvoFromDispEvo(const NVDispEvoRec *pDispEvo, NVDpyId dpyId);

/* Look up (or dynamically create) the dpy for a DP MST address;
 * *pDynamicDpyCreated reports whether a new dpy was created. */
NVDpyEvoPtr nvGetDPMSTDpyEvo(NVConnectorEvoPtr pConnectorEvo,
                             const char *address, NvBool *pDynamicDpyCreated);

/* Which mechanism to use when reading the EDID. */
typedef enum {
    NVKMS_EDID_READ_MODE_DEFAULT,
    NVKMS_EDID_READ_MODE_ACPI,
} NvKmsEdidReadMode;

NvBool nvDpyReadAndParseEdidEvo(
    const NVDpyEvoRec *pDpyEvo,
    const struct NvKmsQueryDpyDynamicDataRequest *pRequest,
    NvKmsEdidReadMode readMode,
    NVEdidRec *pEdid,
    NVParsedEdidEvoPtr *ppParsedEdid,
    NVEvoInfoStringPtr pInfoString);

/* Returns a heap-allocated string; presumably the caller frees it --
 * confirm against the implementation. */
char *nvGetDpyIdListStringEvo(NVDispEvoPtr pDispEvo,
                              const NVDpyIdList dpyIdList);

NvBool nvDpyGetDynamicData(
    NVDpyEvoPtr pDpyEvo,
    struct NvKmsQueryDpyDynamicDataParams *pParams);

void nvDpyUpdateCurrentAttributes(NVDpyEvoRec *pDpyEvo);

NvBool nvDpyIsAdaptiveSync(const NVDpyEvoRec *pDpyEvo);

NvBool nvDpyIsAdaptiveSyncDefaultlisted(const NVParsedEdidEvoRec *pParsedEdid);

enum NvKmsDpyAttributeDigitalSignalValue
nvGetDefaultDpyAttributeDigitalSignalValue(const NVConnectorEvoRec *pConnectorEvo);

#ifdef __cplusplus
/* Fix: dropped the stray ';' after the extern "C" closing brace. */
}
#endif

#endif /* __NVKMS_DPY_H__ */

View File

@@ -0,0 +1,32 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Deferred-work entry points for NVKMS display events.  Both match the
 * (void *, NvU32) deferred-work callback signature used by nvkms.h.
 */
#ifndef __NVKMS_EVENT_H__
#define __NVKMS_EVENT_H__

#include "nvkms.h"

/* Deferred handler for a hotplug event. */
void nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32);

/* Deferred handler for a DisplayPort IRQ event. */
void nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32);

#endif /* __NVKMS_EVENT_H__ */

View File

@@ -0,0 +1,107 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2009-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Declarations for the EVO display locking state machine: the lock
 * signals/actions that drive it, and the per-configuration HW state
 * handlers.  The generated state tables come from g_nvkms-evo-states.h.
 */
#ifndef __NVKMS_STATES_H__
#define __NVKMS_STATES_H__

#include "nvkms-types.h"
#include "g_nvkms-evo-states.h"

#ifdef __cplusplus
extern "C" {
#endif

/* The kinds of lock signal a head/pin can carry. */
typedef enum NVEvoLockSignal {
    NV_EVO_LOCK_SIGNAL_FLIP_LOCK,
    NV_EVO_LOCK_SIGNAL_FRAME_LOCK,
    NV_EVO_LOCK_SIGNAL_RASTER_LOCK,
    NV_EVO_LOCK_SIGNAL_STEREO,
} NVEvoLockAction_dummy_comment_guard_removed_see_below;

typedef enum NVEvoLockAction {
    NV_EVO_LOCK_HEADS,
    NV_EVO_UNLOCK_HEADS,
    NV_EVO_ADD_FRAME_LOCK_SERVER,
    NV_EVO_REM_FRAME_LOCK_SERVER,
    NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC,
    NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC,
    NV_EVO_ADD_FRAME_LOCK_CLIENT,
    NV_EVO_REM_FRAME_LOCK_CLIENT,
    NV_EVO_ENABLE_VRR,
    NV_EVO_DISABLE_VRR,
    NV_EVO_ADD_FRAME_LOCK_REF,
    NV_EVO_REM_FRAME_LOCK_REF,
    NV_EVO_ADD_SLI_SECONDARY,
    NV_EVO_ADD_SLI_LAST_SECONDARY,
    NV_EVO_ADD_SLI_PRIMARY,
    NV_EVO_REM_SLI,
} NVEvoLockAction;

/* nv_evo.c */

/* Map a lock signal to the lock pin that carries it on this subdevice. */
NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *,
                                  NVEvoSubDevPtr,
                                  NVEvoLockSignal);

/* Reference counting of framelock in SLI configurations. */
NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo,
                            NVEvoSubDevPtr pEvoSubDev,
                            const NvU32 *pHeads);
NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo,
                              NVEvoSubDevPtr pEvoSubDev,
                              const NvU32 *pHeads);

/* nvkms-hw-states.c */

/*
 * State handlers: each programs the HW lock configuration named by its
 * suffix (no lock, heads locked, framelock client/server, house sync,
 * SLI primary/secondary, and combinations) for the given heads.
 */
NvBool nvEvoLockHWStateNoLock(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliLastSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliLastSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliLastSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimaryFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);
NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads);

#ifdef __cplusplus
};
#endif

#endif /* __NVKMS_STATES_H__ */

View File

@@ -0,0 +1,297 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Central NVKMS/EVO interface: core-channel allocation, modeset updates
 * and kickoff, mode-timings construction/validation, IMP (is-mode-
 * possible/bandwidth) checks, usage bounds, head locking, LUT, and
 * color-space management.  Prototypes only.
 */
#ifndef __NVKMS_H__
#define __NVKMS_H__

#include "nvkms-types.h"
#include "nvkms-modeset-types.h"
#include "nvkms-api.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Shared placeholder info-string object. */
extern NVEvoInfoStringRec dummyInfoString;

NVDevEvoPtr nvFindDevEvoByDeviceId(NvU32 deviceId);
NvU8 nvGetGpuLogIndex(void);

/* Detach/attach a connector on a head as part of a modeset update. */
void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head,
                          NVEvoModesetUpdateState *pModesetUpdateState);
void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo,
                          const NvU32 head,
                          NVDPLibModesetStatePtr pDpLibModesetState,
                          NVEvoModesetUpdateState *pModesetUpdateState);

/* Kick off the accumulated update state; optionally wait (sync). */
void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync,
                           NVEvoUpdateState *updateState, NvBool releaseElv);

void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo,
                      NVEvoUpdateState *updateState);

void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo,
                                   const NvU32 head,
                                   NvBool isVrr,
                                   NvBool enable);

/* ViewPort programming for a head. */
void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo,
                       const NvU32 head, NVEvoUpdateState *updateState);
void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo,
                             const NvU32 head,
                             const NvU16 x,
                             NvU16 y,
                             NVEvoUpdateState *updateState);

void
nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings,
                                          NvModeTimingsPtr pModeTimings);

void nvEvoSetTimings(NVDispEvoPtr pDispEvo, const NvU32 head,
                     NVEvoUpdateState *updateState);

NvBool nvGetDfpProtocol(const NVDpyEvoRec *pDpyEvo,
                        NVHwModeTimingsEvoPtr pTimings);

/* Scaling usage bounds computation and validation. */
void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo,
                              struct NvKmsScalingUsageBounds *pScaling);
NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps,
                                   const NvU32 inWidth, const NvU32 inHeight,
                                   const NvU32 outWidth, const NvU32 outHeight,
                                   NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps,
                                   struct NvKmsScalingUsageBounds *out);
NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo,
                          const NVEvoScalerCaps *pScalerCaps,
                          const NvU32 inWidth, const NvU32 inHeight,
                          const NvU32 outWidth, const NvU32 outHeight,
                          NvBool doubleScan,
                          NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut);
NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo,
                                       const NVEvoScalerCaps *pScalerCaps,
                                       NVHwModeTimingsEvoPtr pTimings,
                                       NVEvoInfoStringPtr pInfoString);

void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo,
                                NVHwModeViewPortEvo *pViewPort);

struct NvKmsUsageBounds nvUnionUsageBounds(const struct NvKmsUsageBounds *a,
                                           const struct NvKmsUsageBounds *b);

/* NOTE(review): lacks the nv* prefix used by the rest of this header;
 * renaming would touch existing callers, so it is left as is. */
NvBool UsageBoundsEqual(const struct NvKmsUsageBounds *a,
                        const struct NvKmsUsageBounds *b);

NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound(
    const enum NvKmsSurfaceMemoryFormat format,
    const NvU64 supportedFormatsCapMask);

void nvCancelLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo);
void nvScheduleLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo);

void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo);

/* Lock-state bookkeeping around a modeset. */
void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo, NvU32 *dispNeedsEarlyUpdate,
                              NVEvoUpdateState *updateState);
void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock);

NvBool nvSetUsageBoundsEvo(
    NVDevEvoPtr pDevEvo,
    NvU32 sd,
    NvU32 head,
    const struct NvKmsUsageBounds *pUsage,
    NVEvoUpdateState *updateState);

void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo,
                                      NvU32 sd,
                                      NvU32 head,
                                      NvBool enable,
                                      NVEvoUpdateState *pUpdateState);

void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo,
                           const NvU32 head, NVEvoUpdateState *pUpdateState);

void nvSetDitheringEvo(
    NVDispEvoPtr pDispEvo, const NvU32 head,
    enum NvKmsDpyAttributeRequestedDitheringValue configState,
    const enum NvKmsDpyAttributeRequestedDitheringDepthValue configDepth,
    const enum NvKmsDpyAttributeRequestedDitheringModeValue configMode,
    NVEvoUpdateState *pUpdateState);

/* Framelock / rasterlock / fliplock / stereo controls. */
NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo);
NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo);
NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val);
NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value);
NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue);
NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value);
NvBool nvSetStereoEvo(const NVDispEvoRec *pDispEvo,
                      const NvU32 head, NvBool enable);
NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head);

/* Core display channel lifetime. */
NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo);
void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo);

void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo);

void nvSetDVCEvo(NVDispEvoPtr pDispEvo,
                 const NvU32 head,
                 NvS32 dvc,
                 NVEvoUpdateState *updateState);

void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head,
                             const NvU32 value, NVEvoUpdateState *updateState);

NvBool nvLayerSetPositionEvo(
    NVDevEvoPtr pDevEvo,
    const struct NvKmsSetLayerPositionRequest *pRequest);

/* Build HW mode timings for a requested mode + viewport configuration. */
NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo,
                                   const struct NvKmsMode *pKmsMode,
                                   const struct NvKmsSize *pViewPortSizeIn,
                                   const struct NvKmsRect *pViewPortOut,
                                   NVHwModeTimingsEvoPtr pTimings,
                                   const struct NvKmsModeValidationParams
                                   *pParams,
                                   NVEvoInfoStringPtr pInfoString);

NvBool nvConstructHwModeTimingsImpCheckEvo(
    const NVConnectorEvoRec *pConnectorEvo,
    NVHwModeTimingsEvoPtr pTimings,
    const struct NvKmsModeValidationParams *pParams,
    NVEvoInfoStringPtr pInfoString,
    const int head);

NvBool nvDowngradeHwModeTimingsDpPixelDepthEvo(
    NVHwModeTimingsEvoPtr pTimings,
    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace);

NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
                           NVHwModeTimingsEvoPtr pTimings,
                           const struct NvKmsModeValidationParams *pParams);

NvBool nvEvoUpdateHwModeTimingsViewPort(
    const NVDpyEvoRec *pDpyEvo,
    const struct NvKmsModeValidationParams *pModeValidationParams,
    const struct NvKmsSize *pViewPortSizeIn,
    const struct NvKmsRect *pViewPortOut,
    NVHwModeTimingsEvo *pTimings);

/* Per-head inputs for an IMP validation pass over one disp. */
typedef struct _NVValidateImpOneDispHeadParamsRec
{
    const NVConnectorEvoRec *pConnectorEvo;  /* connector driving the head */
    const struct NvKmsUsageBounds *pUsage;   /* usage bounds to validate */
    NvU32 activeRmId;
    NVHwModeTimingsEvoPtr pTimings;          /* timings to validate */
} NVValidateImpOneDispHeadParamsRec;

NvBool nvValidateImpOneDisp(
    NVDispEvoPtr pDispEvo,
    const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
    NvBool requireBootClocks,
    NVEvoReallocateBandwidthMode reallocBandwidth,
    NvU32 *pMinIsoBandwidthKBPS,
    NvU32 *pMinDramFloorKBPS);

NvBool nvAllocateDisplayBandwidth(
    NVDispEvoPtr pDispEvo,
    NvU32 newIsoBandwidthKBPS,
    NvU32 newDramFloorKBPS);

NvBool nvValidateImpOneDispDowngrade(
    NVDispEvoPtr pDispEvo,
    const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
    NvBool requireBootClocks,
    NVEvoReallocateBandwidthMode reallocBandwidth,
    NvU32 downgradePossibleHeadsBitMask);

NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo);
NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo);

/* LUT programming and validation. */
void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 head, NvBool kickoff,
                 const struct NvKmsSetLutCommonParams *pParams);
NvBool nvValidateSetLutCommonParams(
    const NVDevEvoRec *pDevEvo,
    const struct NvKmsSetLutCommonParams *pParams);

/* Color space / range selection and HW programming. */
void nvChooseCurrentColorSpaceAndRangeEvo(
    const NVHwModeTimingsEvo *pTimings,
    const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace,
    const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
    enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace,
    enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange);

void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
    NVDispEvoPtr pDispEvo,
    const NvU32 head,
    NVEvoUpdateState *pUpdateState);

void nvSetColorSpaceAndRangeEvo(
    NVDispEvoPtr pDispEvo, const NvU32 head,
    const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace,
    const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
    NVEvoUpdateState *pUpdateState);

/* SOR (serial output resource) assignment. */
NvBool nvAssignSOREvo(NVConnectorEvoPtr pConnectorEvo, NvU32 sorExcludeMask);
void nvRestoreSORAssigmentsEvo(NVDevEvoRec *pDevEvo);

void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo,
                               NvBool enable, NvBool isPre);

void nvUnbloatHwModeTimingsEvo(NVHwModeTimingsEvoPtr pTimings, NvU32 factor);

NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head,
                      CRC32NotifierCrcOut *crcOut /* out */);

/* Device lifetime. */
NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo);
NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest,
                          enum NvKmsAllocDeviceStatus *pStatus);

NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo);

NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head,
                                  NvU32 *val, NvBool set,
                                  NvBool *needsEarlyUpdate,
                                  NVEvoUpdateState *updateState);

void nvEvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo,
                               NvU32 head, NvBool kickOff);

/* Poll until the channel has no method pending, or 'timeout' elapses. */
NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo,
                                   const NvU32 sd,
                                   NVEvoChannelPtr pChannel,
                                   NvU64 *pStartTime,
                                   const NvU32 timeout);
/*!
 * Assert that pHwState's sync object is a single-semaphore-surface
 * configuration: no syncpoint in use, and the acquire and release
 * semaphore surfaces identical.
 *
 * pHwState->syncObject tracks the acquire and release semaphore surfaces
 * separately, but display HW prior to NvDisplay 4.0 supports only one
 * semaphore surface for both.  Flip validation enforces that constraint,
 * so both asserts below are expected to hold whenever this is called.
 */
static inline void nvAssertSameSemaphoreSurface(
    const NVFlipChannelEvoHwState *pHwState)
{
    nvAssert(!pHwState->syncObject.usingSyncpt);

    nvAssert(pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo ==
             pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo);
}
/* DP serializer connector handling around IRQs and modesets. */
void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo,
                               NVConnectorEvoPtr pConnectorEvo);
void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo,
                              NVConnectorEvoPtr pConnectorEvo);
void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo,
                               NVConnectorEvoPtr pConnectorEvo);

NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask,
                                      NvBool server);

#ifdef __cplusplus
};
#endif

#endif /* __NVKMS_H__ */

View File

@@ -0,0 +1,59 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * EVO1 (pre-nvdisplay display class) backend entry points.
 * NOTE(review): unlike sibling nvkms headers, this one has no
 * extern "C" guard -- presumably only included from C code; confirm
 * before including it from C++.
 */
#ifndef __NVKMS_EVO_1_H__
#define __NVKMS_EVO_1_H__

#include "nvkms-types.h"

/* Channel idle / pending-method queries; result returned via *result. */
NvBool nvEvo1IsChannelIdle(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd,
                           NvBool *result);
NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd,
                                    NvBool *result);

/* IMP (is-mode-possible) query for one disp. */
void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo,
                          const NVEvoIsModePossibleDispInput *pInput,
                          NVEvoIsModePossibleDispOutput *pOutput);

/* Called before (isPre) and after an IMP transition. */
void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre);

void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo,
                        const NvU32 head,
                        const NVHwModeTimingsEvo *pTimings);

/* Core display channel allocation/teardown for EVO1. */
NVEvoChannel* nvEvo1AllocateCoreChannel(NVDevEvoRec *pDevEvo);
void nvEvo1FreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel);
/*!
 * Return the color-space override flag value: bit 11 set when
 * colorSpaceOverride is requested, 0 otherwise.  The override is only
 * expected to be requested on devices supporting DP 1.3 (asserted).
 */
static inline NvU16 nvEvo1GetColorSpaceFlag(NVDevEvoPtr pDevEvo,
                                            const NvBool colorSpaceOverride)
{
    if (!colorSpaceOverride) {
        return 0;
    }

    nvAssert(pDevEvo->caps.supportsDP13);

    return 1 << 11;
}
#endif /* __NVKMS_EVO_1_H__ */

View File

@@ -0,0 +1,51 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Scratch state used while processing a flip request, tracked per
 * subdevice and per head.
 */
#ifndef __NVKMS_FLIP_WORKAREA_H__
#define __NVKMS_FLIP_WORKAREA_H__

#include "nvkms-types.h"

struct NvKmsFlipWorkArea {
    struct {
        /* Whether anything on this subdevice changed in this flip. */
        NvBool changed;
        struct {
            /*
             * Pre flip usage bounds are the union of current and new
             * usable usage bounds: the unioned usage bounds have to
             * allow both the current state and the state being flipped to.
             * This field is set and used by PreFlipIMP() and its
             * helper functions.
             */
            struct NvKmsUsageBounds preFlipUsage;

            /* HW flip state being flipped to / flipped away from. */
            NVFlipEvoHwState newState;
            NVFlipEvoHwState oldState;

            /* Accelerators saved across the flip, and whether they
             * were applied. */
            NvU32 oldAccelerators;
            NvBool accelerated;
        } head[NVKMS_MAX_HEADS_PER_DISP];
    } sd[NVKMS_MAX_SUBDEVICES];
};

#endif /* __NVKMS_FLIP_WORKAREA_H__ */

View File

@@ -0,0 +1,92 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Flip handling: building, validating, and programming NVFlipEvoHwState
 * for a head, plus syncpt/semaphore bookkeeping around flips.
 */
#ifndef __NVKMS_FLIP_H__
#define __NVKMS_FLIP_H__

#include "nvkms-types.h"

void nvClearFlipEvoHwState(
    NVFlipEvoHwState *pFlipState);

/* Initialize pFlipState from the current state of (sd, head). */
void nvInitFlipEvoHwState(
    const NVDevEvoRec *pDevEvo,
    const NvU32 sd,
    const NvU32 head,
    NVFlipEvoHwState *pFlipState);

/* Apply the client's flip request parameters to pFlipState. */
NvBool nvUpdateFlipEvoHwState(
    const struct NvKmsPerOpenDev *pOpenDev,
    const NVDevEvoRec *pDevEvo,
    const NvU32 sd,
    const NvU32 head,
    const struct NvKmsFlipCommonParams *pParams,
    NVFlipEvoHwState *pFlipState,
    NvBool allowVrr,
    const struct NvKmsUsageBounds *pPossibleUsage);

NvBool nvValidateFlipEvoHwState(
    const NVDevEvoRec *pDevEvo,
    const NvU32 head,
    const NVHwModeTimingsEvo *pTimings,
    const NVFlipEvoHwState *pFlipState);

/* Adjust surface refcounts for the surfaces referenced by pFlipState. */
void
nvUpdateSurfacesFlipRefCount(
    NVDevEvoPtr pDevEvo,
    const NvU32 head,
    NVFlipEvoHwState *pFlipState,
    NvBool increase);

/* Program the flip described by pFlipState into (sd, head). */
void nvFlipEvoOneHead(
    NVDevEvoPtr pDevEvo,
    const NvU32 sd,
    const NvU32 head,
    const NVFlipEvoHwState *pFlipState,
    NvBool allowFlipLock,
    NVEvoUpdateState *updateState);

void nvEvoCancelPostFlipIMPTimer(
    NVDevEvoPtr pDevEvo);

NvBool nvHandleSyncptRegistration(
    NVDevEvoRec *pDevEvo,
    NvU32 head,
    const struct NvKmsFlipCommonParams *pParams,
    NVFlipEvoHwState *pFlipState);

void nvFillPostSyncptReplyOneChannel(
    NVEvoChannel *pChannel,
    enum NvKmsSyncptType postType,
    struct NvKmsSyncpt *postSyncpt,
    const NVFlipSyncObjectEvoHwState *pHwSyncObject);

/* Top-level flip entry point: validate and program a full flip request. */
NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
                 const struct NvKmsPerOpenDev *pOpenDev,
                 const struct NvKmsFlipRequest *request,
                 struct NvKmsFlipReply *reply,
                 NvBool skipUpdate,
                 NvBool allowFlipLock);

#endif /* __NVKMS_FLIP_H__ */

View File

@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * Framelock device handling: allocation, attribute get/set plumbing for
 * disp and framelock objects, and server/client head queries.
 */
#ifndef __NVKMS_FRAMELOCK_H__
#define __NVKMS_FRAMELOCK_H__

#include "nvkms-types.h"

void nvAllocFrameLocksEvo(NVDevEvoPtr pDevEvo);
void nvFreeFrameLocksEvo(NVDevEvoPtr pDevEvo);

NvBool nvFrameLockSetUseHouseSyncEvo(NVFrameLockEvoPtr, NvU32);

NvBool nvFrameLockGetStatusEvo(const NVFrameLockEvoRec *,
                               enum NvKmsFrameLockAttribute attribute,
                               NvS64*);

/* Framelock display-config attribute accessors for a dpy. */
NvBool nvSetFrameLockDisplayConfigEvo(NVDpyEvoRec *pDpyEvo, NvS64 val);
NvBool nvGetFrameLockDisplayConfigEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val);
NvBool nvGetFrameLockDisplayConfigValidValuesEvo(
    const NVDpyEvoRec *pDpyEvo,
    struct NvKmsAttributeValidValuesCommonReply *pValidValues);

/* Disp-level attribute get/set/valid-values plumbing. */
NvBool nvSetDispAttributeEvo(NVDispEvoPtr pDispEvo,
                             struct NvKmsSetDispAttributeParams *pParams);
NvBool nvGetDispAttributeEvo(NVDispEvoPtr pDispEvo,
                             struct NvKmsGetDispAttributeParams *pParams);
NvBool nvGetDispAttributeValidValuesEvo(
    const NVDispEvoRec *pDispEvo,
    struct NvKmsGetDispAttributeValidValuesParams *pParams);

/* Framelock-device attribute get/set/valid-values plumbing. */
NvBool nvSetFrameLockAttributeEvo(
    NVFrameLockEvoRec *pFrameLockEvo,
    const struct NvKmsSetFrameLockAttributeParams *pParams);
NvBool nvGetFrameLockAttributeEvo(
    const NVFrameLockEvoRec *pFrameLockEvo,
    struct NvKmsGetFrameLockAttributeParams *pParams);
NvBool nvGetFrameLockAttributeValidValuesEvo(
    const NVFrameLockEvoRec *pFrameLockEvo,
    struct NvKmsGetFrameLockAttributeValidValuesParams *pParams);

/* Framelock server head (NV_INVALID_HEAD if none) and client head mask. */
NvU32 nvGetFramelockServerHead(const NVDispEvoRec *pDispEvo);
NvU32 nvGetFramelockClientHeadsMask(const NVDispEvoRec *pDispEvo);
static inline NvBool
nvIsFramelockableHead(const NVDispEvoRec *pDispEvo, const NvU32 head)
{
return (head != NV_INVALID_HEAD) &&
((head == nvGetFramelockServerHead(pDispEvo)) ||
((NVBIT(head) & nvGetFramelockClientHeadsMask(pDispEvo)) != 0x0));
}
/* Propagate a framelock enable/server change for this head to GLS
 * (presumably the OpenGL stack -- confirm against the implementation). */
void nvUpdateGLSFramelock(const NVDispEvoRec *pDispEvo, const NvU32 head,
                          const NvBool enable, const NvBool server);

#endif /* __NVKMS_FRAMELOCK_H__ */

View File

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * HAL selection: bind the EVO capabilities/HAL for the device.
 * NOTE(review): no extern "C" guard here, unlike most sibling headers.
 */
#ifndef __NVKMS_HAL_H__
#define __NVKMS_HAL_H__

#include "nvkms-types.h"

/* Assign EVO caps for pDevEvo; returns an NvKmsAllocDeviceStatus code. */
enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo);

#endif /* __NVKMS_HAL_H__ */

View File

@@ -0,0 +1,78 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * HDMI support: infoframes, audio, VRR, HDMI 2.x capability checks, and
 * FRL (Fixed Rate Link) configuration via the HDMI library.
 * NOTE(review): nvkms-types.h is included inside the extern "C" block
 * here, unlike sibling headers -- presumably harmless (C declarations),
 * but worth normalizing.
 */
#ifndef __NVKMS_HDMI_H__
#define __NVKMS_HDMI_H__

#ifdef __cplusplus
extern "C" {
#endif

#include "nvkms-types.h"

void nvUpdateHdmiInfoFrames(const NVDispEvoRec *pDispEvo,
                            const NvU32 head,
                            const NVAttributesSetEvoRec *pAttributesSet,
                            const NvBool hdTimings,
                            const NVT_VIDEO_INFOFRAME_CTRL *pCtrl,
                            NVDpyEvoRec *pDpyEvo);

/* Per-dpy HDMI bookkeeping around modesets and capability updates. */
void nvDpyUpdateHdmiPreModesetEvo(NVDpyEvoPtr pDpyEvo);
void nvDpyUpdateHdmiVRRCaps(NVDpyEvoPtr pDpyEvo);
void nvUpdateHdmiCaps(NVDpyEvoPtr pDpyEvo);

/* Log the dpy's EDID CEA-861 extension info to pInfoString. */
void nvLogEdidCea861InfoEvo(NVDpyEvoPtr pDpyEvo,
                            NVEvoInfoStringPtr pInfoString);

NvBool nvDpyIsHdmiEvo(const NVDpyEvoRec *pDpyEvo);

/* Whether HDMI 2.0 4K@60Hz RGB 4:4:4 is allowed for this timing. */
NvBool nvHdmi204k60HzRGB444Allowed(const NVDpyEvoRec *pDpyEvo,
                                   const struct NvKmsModeValidationParams *pParams,
                                   const NVT_TIMING *pTiming);

/* HDMI/DP audio device management. */
void nvHdmiDpEnableDisableAudio(const NVDispEvoRec *pDispEvo,
                                const NvU32 head, const NvBool enable);
void nvRemoveUnusedHdmiDpAudioDevice(const NVDispEvoRec *pDispEvo);

void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable);

/* HDMI library lifetime for the device. */
NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo);
void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo);

/* HDMI FRL link assessment and per-mode FRL configuration. */
NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo);
NvBool nvHdmiFrlQueryConfig(const NVDpyEvoRec *pDpyEvo,
                            const NvModeTimings *pModeTimings,
                            NVHwModeTimingsEvo *pTimings,
                            const struct NvKmsModeValidationParams *pParams);
void nvHdmiFrlClearConfig(NVDispEvoRec *pDispEvo, NvU32 activeRmId);
void nvHdmiFrlSetConfig(NVDispEvoRec *pDispEvo, NvU32 head);

void nvHdmiDpConstructHeadAudioState(const NvU32 displayId,
                                     const NVDpyEvoRec *pDpyEvo,
                                     NVDispHeadAudioStateEvoRec *pAudioState);

#ifdef __cplusplus
};
#endif

#endif /* __NVKMS_HDMI_H__ */

View File

@@ -0,0 +1,54 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_LUT_H__
#define __NVKMS_LUT_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nvkms-types.h"
/* Allocate / free the LUT surfaces for pDevEvo. */
NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo);
void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo);

/* Copy the LUT data in pLUTBuffer into pSurfEvo for pDispEvo. */
void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo,
                                 const NVEvoLutDataRec *pLUTBuffer,
                                 NVDispEvoPtr pDispEvo);
/*
 * Cancel any pending deferred LUT update for 'head' on pDispEvo's disp:
 * free the stored update timer and clear the pointer so the slot is
 * recognizably empty.
 */
static inline void nvCancelLutUpdateEvo(
    const NVDispEvoRec *pDispEvo,
    const NvU32 head)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    nvkms_free_timer(pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer);
    pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer = NULL;
}
#ifdef __cplusplus
};
#endif
#endif /* __NVKMS_LUT_H__ */

View File

@@ -0,0 +1,63 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_MODEPOOL_H__
#define __NVKMS_MODEPOOL_H__
#include "nvkms-types.h"
#include "nvkms-utils.h" /* NVEvoLogType */
#ifdef __cplusplus
extern "C" {
#endif
/* Validate the mode identified by pRequest and fill pReply --
 * NOTE(review): "index" semantics inferred from the request type; confirm. */
void
nvValidateModeIndex(NVDpyEvoPtr pDpyEvo,
                    const struct NvKmsValidateModeIndexRequest *pRequest,
                    struct NvKmsValidateModeIndexReply *pReply);

/* Validate the mode described in pRequest and fill pReply. */
void
nvValidateModeEvo(NVDpyEvoPtr pDpyEvo,
                  const struct NvKmsValidateModeRequest *pRequest,
                  struct NvKmsValidateModeReply *pReply);

/* Log pModeTimings to pInfoString for mode-validation reporting. */
void nvEvoLogModeValidationModeTimings(NVEvoInfoStringPtr
                                       pInfoString,
                                       const NvModeTimings *pModeTimings);

/* Validate pKmsMode for an actual modeset and produce pTimingsEvo. */
NvBool nvValidateModeForModeset(NVDpyEvoRec *pDpyEvo,
                                const struct NvKmsModeValidationParams *pParams,
                                const struct NvKmsMode *pKmsMode,
                                const struct NvKmsSize *pViewPortSizeIn,
                                const struct NvKmsRect *pViewPortOut,
                                NVHwModeTimingsEvo *pTimingsEvo);

/* Find the EDID NVT_TIMING matching pModeTimings -- NOTE(review): the
 * not-found return value (presumably NULL) is not visible here; confirm. */
const NVT_TIMING *nvFindEdidNVT_TIMING(
    const NVDpyEvoRec *pDpyEvo,
    const NvModeTimings *pModeTimings,
    const struct NvKmsModeValidationParams *pParams);
#ifdef __cplusplus
};
#endif
#endif /* __NVKMS_MODEPOOL_H__ */

View File

@@ -0,0 +1,74 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_MODESET_TYPES_H__
#define __NVKMS_MODESET_TYPES_H__
/* This header file defines types used internally by the modeset path. */
#include "nvkms-types.h"
/* Proposed post-modeset configuration for a single head. */
typedef struct {
    NVHwModeTimingsEvo timings;
    NVDpyIdList dpyIdList;
    NVConnectorEvoRec *pConnectorEvo;
    NvU32 activeRmId;
    struct NvKmsSetLutCommonParams lut;
    NvU8 allowFlipLockGroup;
    enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace;
    enum NvKmsDpyAttributeColorRangeValue colorRange;
    struct NvKmsModeValidationParams modeValidationParams;
    NvBool changed : 1;     /* head differs from current state -- presumably; confirm */
    NvBool allowGsync : 1;
    NvBool hs10bpcHint : 1;
    enum NvKmsAllowAdaptiveSync allowAdaptiveSync;
    NvU32 vrrOverrideMinRefreshRate;
    NVDPLibModesetStatePtr pDpLibModesetState;
    NVDispHeadAudioStateEvoRec audio;
} NVProposedModeSetHwStateOneHead;

/* Proposed per-head state for one disp. */
typedef struct {
    NVProposedModeSetHwStateOneHead head[NVKMS_MAX_HEADS_PER_DISP];
} NVProposedModeSetHwStateOneDisp;

/* Proposed per-head flip state for one subdevice. */
typedef struct {
    struct {
        NVFlipEvoHwState flip;
    } head[NVKMS_MAX_HEADS_PER_DISP];
} NVProposedModeSetHwStateOneSubDev;

/* Complete proposed hardware state for a modeset across all disps/subdevices. */
typedef struct {
    NVProposedModeSetHwStateOneDisp disp[NVKMS_MAX_SUBDEVICES];
    NVProposedModeSetHwStateOneSubDev sd[NVKMS_MAX_SUBDEVICES];
    NvBool allowHeadSurfaceInNvKms : 1;
} NVProposedModeSetHwState;

/* Update state accumulated while applying a modeset. */
struct _NVEvoModesetUpdateState {
    NVEvoUpdateState updateState;
    NVDpyIdList connectorIds;
    const NVDPLibModesetStateRec
        *pDpLibModesetState[NVKMS_MAX_HEADS_PER_DISP];
    NvBool windowMappingChanged;
};
#endif /* __NVKMS_MODESET_TYPES_H__ */

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_MODESET_WORKAREA_H__
#define __NVKMS_MODESET_WORKAREA_H__
/* Scratch state used while performing a modeset. */
typedef struct {
    struct {
        struct {
            NVFlipEvoHwState newState;   /* flip state being applied */
            NVFlipEvoHwState oldState;   /* flip state before the modeset */
            NvU32 oldActiveRmId;
        } head[NVKMS_MAX_HEADS_PER_DISP];
        NVDpyIdList changedDpyIdList;
        NVDpyIdList sorAssignedConnectorsList;
        NvU32 assignedSorMask;
    } sd[NVKMS_MAX_SUBDEVICES];
    NVEvoUpdateState earlyUpdateState;
    NVEvoModesetUpdateState modesetUpdateState;
    /*
     * The display bandwidth values that NVKMS needs to allocate after the
     * modeset is complete.
     */
    NvU32 postModesetIsoBandwidthKBPS;
    NvU32 postModesetDramFloorKBPS;
} NVModeSetWorkArea;

/* Per-disp, per-head VRR timings. */
struct NvKmsVrrTimings {
    struct {
        struct {
            NVHwModeTimingsEvo timings;
            NvBool adjusted;   /* whether 'timings' were adjusted for VRR -- presumably; confirm */
        } head[NVKMS_MAX_HEADS_PER_DISP];
    } disp[NVKMS_MAX_SUBDEVICES];
};
#endif /* __NVKMS_MODESET_WORKAREA_H__ */

View File

@@ -0,0 +1,50 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_MODESET_H__
#define __NVKMS_MODESET_H__
#include "nvkms-types.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Perform the modeset described by pRequest for pOpenDev; fills pReply. */
NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo,
                        const struct NvKmsPerOpenDev *pOpenDev,
                        const struct NvKmsSetModeRequest *pRequest,
                        struct NvKmsSetModeReply *pReply,
                        NvBool bypassComposition,
                        NvBool doRasterLock);

/* Per-head predicate consulted by nvShutDownHeads(). */
typedef NvBool (*NVShutDownHeadsTestFunc)(
    const NVDispEvoRec *pDispEvo,
    const NvU32 head);

/* Shut down heads selected by pTestFunc -- presumably those for which it
 * returns TRUE; confirm against the implementation. */
void nvShutDownHeads(NVDevEvoPtr pDevEvo, NVShutDownHeadsTestFunc pTestFunc);
#ifdef __cplusplus
};
#endif
#endif /* __NVKMS_MODESET_H__ */

View File

@@ -0,0 +1,46 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_PREALLOC_TYPES_H__
#define __NVKMS_PREALLOC_TYPES_H__
#include "nvtypes.h"
/* Identifies one preallocated scratch buffer; also its index in 'ptr' below. */
enum NVPreallocType {
    PREALLOC_TYPE_IMP_PARAMS,
    PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE,
    PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE,
    PREALLOC_TYPE_MODE_SET_WORK_AREA,
    PREALLOC_TYPE_FLIP_WORK_AREA,
    PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE,
    PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE,
    PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS,
    PREALLOC_TYPE_MAX /* must be last: sizes the arrays below */
};

/* Per-device preallocated buffers, plus a bitmask of which are in use. */
struct NVDevPreallocRec {
    void *ptr[PREALLOC_TYPE_MAX];
    NvU8 used[(PREALLOC_TYPE_MAX + 7) / 8]; /* one bit per prealloc type */
};
#endif /* __NVKMS_PREALLOC_TYPES_H__ */

View File

@@ -0,0 +1,36 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_PREALLOC_H__
#define __NVKMS_PREALLOC_H__
#include "nvkms-types.h"
#include "nvkms-prealloc-types.h"
/* Get the preallocated buffer of 'type' -- sizeCheck presumably guards
 * against a size mismatch with the caller's expectation; confirm. */
void *nvPreallocGet(NVDevEvoPtr pDevEvo, enum NVPreallocType type, size_t sizeCheck);
/* Return the buffer of 'type' to the prealloc pool. */
void nvPreallocRelease(NVDevEvoPtr pDevEvo, enum NVPreallocType type);
/* Allocate / free all of pDevEvo's preallocated buffers. */
NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo);
void nvPreallocFree(NVDevEvoPtr pDevEvo);
#endif /* __NVKMS_PREALLOC_H__ */

View File

@@ -0,0 +1,81 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_KMS_PRIVATE_H__
#define __NV_KMS_PRIVATE_H__
#include "nvkms-types.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Allocate / free the per-open, per-device state. */
struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen,
                                          NVDevEvoPtr pDevEvo, NvBool isPrivileged);
void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen,
                      struct NvKmsPerOpenDev *pOpenDev);

/* Event delivery to clients. */
void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType);
void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo,
                                       const enum NvKmsDpyAttribute attribute,
                                       const NvS64 value);
void nvSendFrameLockAttributeChangedEventEvo(
    const NVFrameLockEvoRec *pFrameLockEvo,
    const enum NvKmsFrameLockAttribute attribute,
    const NvS64 value);
void nvSendFlipOccurredEventEvo(
    const NVDevEvoRec *pDevEvo,
    NVEvoChannelMask channelMask);
void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen);
void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen);

/* Debug-only: whether pSurfaceEvo is referenced by any open. */
#if defined(DEBUG)
NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo);
#endif

/* Accessors for per-open-device state. */
const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev(
    const struct NvKmsPerOpenDev *pOpenDev);
const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev(
    const struct NvKmsPerOpenDev *pOpenDev);
NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev(
    struct NvKmsPerOpenDev *pOpenDev);
const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst(
    const struct NvKmsPerOpenDev *pOpenDev);
NVDevEvoPtr nvGetDevEvoFromOpenDev(
    const struct NvKmsPerOpenDev *pOpenDev);

/* Service the non-stall interrupt (deferred-work callback signature). */
void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32);
#ifdef __cplusplus
};
#endif
#endif /* __NV_KMS_PRIVATE_H__ */

View File

@@ -0,0 +1,152 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_RM_H__
#define __NVKMS_RM_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nvkms-types.h"
#include <class/cl0092.h> /* NV0092_REGISTER_RG_LINE_CALLBACK_FN */
#include <class/cl9010.h> /* OSVBLANKCALLBACKPROC */
/* Write a single DPCD register over the DisplayPort AUX channel. */
NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo,
                      NvU32 dpcdAddr,
                      NvU8 dpcdData);

/* Register 'func' as an RM event callback for 'event' on eventHandle. */
NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo,
                            NVOS10_EVENT_KERNEL_CALLBACK_EX *cb,
                            struct nvkms_ref_ptr *ref_ptr,
                            NvU32 parentHandle,
                            NvU32 eventHandle,
                            Callback5ArgVoidReturn func,
                            NvU32 event);

/* Allocate / destroy the RM display state for pDevEvo. */
enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo);
void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo);

/* Values for nvRmBeginEndModeset()'s second argument. */
enum NvKmsBeginEndModeset {
    BEGIN_MODESET,
    END_MODESET
};
/* Notify RM that a modeset is beginning/ending for the displays in 'mask'. */
void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo, enum NvKmsBeginEndModeset, NvU32 mask);

/* Allocate / free an RM display ID. */
NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList);
void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 dpyId);

/* Query RM for the connector's output-resource (OR) information. */
void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly);

/* Return the subset of dpyIdList that RM reports as connected. */
NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo,
                                 NVDpyIdList dpyIdList);

/* Pause/resume DisplayPort operation -- NOTE(review): inferred from names;
 * confirm against the implementation. */
NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo);
void nvRmPauseDP(NVDevEvoPtr pDevEvo);

NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value);

/* Allocate system memory through RM and map it at *ppBase. */
NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle,
                       NvU32 *ctxDmaFlags, void **ppBase, NvU64 size,
                       NvKmsMemoryIsoType isoType);

/* Allocation and teardown of the EVO display channels. */
NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo);
NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo);
NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo);
NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo);
void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo);
void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo);
void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo);
void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo);

/* Synchronize with pChannel -- errorToken's exact role is not visible here. */
NvBool nvRMSyncEvoChannel(
    NVDevEvoPtr pDevEvo,
    NVEvoChannelPtr pChannel,
    NvU32 errorToken);
NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd,
                           NvBool *stoppedBase);

/* Return whether classID is in RM's supported class list for the device. */
NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID);

/* Context DMA management for display channels. */
NvU32 nvRmEvoBindDispContextDMA(
    NVDevEvoPtr pDevEvo,
    NVEvoChannelPtr pChannel,
    NvU32 hCtxDma);
NvU32 nvRmEvoAllocateAndBindDispContextDMA(
    NVDevEvoPtr pDevEvo,
    NvU32 hMemory,
    const enum NvKmsSurfaceMemoryLayout layout,
    NvU64 limit);

/* Syncpt (synchronization point) management. */
NvBool nvRmEvoAllocAndBindSyncpt(
    NVDevEvoRec *pDevEvo,
    NVEvoChannel *pChannel,
    NvU32 id,
    NvU32 *pSyncptHandle,
    NvU32 *pSyncptCtxDmaHandle);
void nvRmEvoFreePreSyncpt(NVDevEvoRec *pDevEvo,
                          NVEvoChannel *pChannel);
NvBool nvRmGarbageCollectSyncpts(
    NVDevEvoRec *pDevEvo);
void nvRmEvoFreeSyncpt(NVDevEvoRec *pDevEvo,
                       NVEvoSyncpt *pEvoSyncpt);
void nvRmEvoFreeDispContextDMA(NVDevEvoPtr pDevEvo,
                               NvU32 *hDispCtxDma);

/* Map/unmap video memory on each subdevice in subDeviceMask. */
void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo,
                             NvU32 memoryHandle,
                             void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]);
NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo,
                             NvU32 memoryHandle, NvU64 size,
                             void *subDeviceAddress[NVKMS_MAX_SUBDEVICES],
                             NvU32 subDeviceMask);

/* Allocate / free the RM device state for pDevEvo. */
NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo,
                          const struct NvKmsAllocDeviceRequest *pRequest);
void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo);

NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo,
                                         const NVDpyIdList dpyIdList);

/* Console (VT / framebuffer console) handling. */
NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd);
NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo);
void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo);

/* Allocate / free an EVO DMA object. */
NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo,
                       NVEvoDmaPtr pDma,
                       NvU64 limit,
                       NvU32 ctxDmaFlags,
                       NvU32 subDeviceMask);
void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma);

NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue);

/* Return the current GPU time -- units not visible here; see implementation. */
NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo);

NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed);

/* RG-line-1 and vblank callback registration; the Add functions return the
 * callback object handle consumed by the matching Remove functions. */
NvU32 nvRmAddRgLine1Callback(
    const NVDispEvoRec *pDispEvo,
    NvU32 head,
    NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallback);
void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo,
                               NvU32 callbackObjectHandle);
NvU32 nvRmAddVBlankCallback(
    const NVDispEvoRec *pDispEvo,
    NvU32 head,
    OSVBLANKCALLBACKPROC pCallback);
void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo,
                              NvU32 callbackObjectHandle);

/* Dynamic display MUX control. */
void nvRmMuxInit(NVDevEvoPtr pDevEvo);
NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state);
NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state);
NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state);
NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo);

/* Register / unregister backlight control for pDispEvo. */
void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo);
void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo);
#ifdef __cplusplus
};
#endif
#endif /* __NVKMS_RM_H__ */

View File

@@ -0,0 +1,111 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_RMAPI_H__
#define __NVKMS_RMAPI_H__
#include "nvtypes.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * NVKMS entry points into the resource manager (RM) API.  Each returns an
 * RM status code (NvU32) -- NOTE(review): success value presumed to be 0 /
 * NVOS_STATUS_SUCCESS; confirm against the implementation.
 */

/* Allocate an RM object of class hClass, with handle hObject, under hParent. */
NvU32 nvRmApiAlloc(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hObject,
    NvU32 hClass,
    void *pAllocParams);

/* Allocate memory (64-bit limit variant); *ppAddress receives a CPU mapping. */
NvU32 nvRmApiAllocMemory64(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hMemory,
    NvU32 hClass,
    NvU32 flags,
    void **ppAddress,
    NvU64 *pLimit);

/* Invoke RM control call 'cmd' on hObject with pParams/paramsSize. */
NvU32 nvRmApiControl(
    NvU32 hClient,
    NvU32 hObject,
    NvU32 cmd,
    void *pParams,
    NvU32 paramsSize);

/* Duplicate hObjectSrc (owned by hClientSrc) as hObjectDest under hParent. */
NvU32 nvRmApiDupObject(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hObjectDest,
    NvU32 hClientSrc,
    NvU32 hObjectSrc,
    NvU32 flags);

/* Free the RM object hObject. */
NvU32 nvRmApiFree(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hObject);

/* Video heap (video memory) allocation control. */
NvU32 nvRmApiVidHeapControl(
    void *pVidHeapControlParams);

/* Map / unmap hMemory into the CPU address space. */
NvU32 nvRmApiMapMemory(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hMemory,
    NvU64 offset,
    NvU64 length,
    void **ppLinearAddress,
    NvU32 flags);
NvU32 nvRmApiUnmapMemory(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hMemory,
    const void *pLinearAddress,
    NvU32 flags);

/* Map / unmap hMemory through the DMA object hDma; *pDmaOffset receives the
 * offset of the mapping. */
NvU32 nvRmApiMapMemoryDma(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hDma,
    NvU32 hMemory,
    NvU64 offset,
    NvU64 length,
    NvU32 flags,
    NvU64 *pDmaOffset);
NvU32 nvRmApiUnmapMemoryDma(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hDma,
    NvU32 hMemory,
    NvU32 flags,
    NvU64 dmaOffset);
#ifdef __cplusplus
};
#endif
#endif /* __NVKMS_RMAPI_H__ */

View File

@@ -0,0 +1,90 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_SOFTFLOAT_H__
#define __NVKMS_SOFTFLOAT_H__
/*
* This header file provides utility code built on top of the softfloat floating
* point emulation library.
*/
#include "nv-softfloat.h"
#include "nvkms-api-types.h"
/*
* A 3x3 row-major matrix of float32_t's.
*/
struct NvKmsMatrixF32 {
float32_t m[3][3];
};
/*
* A 3x4 row-major matrix of float32_t's.
*/
struct NvKms3x4MatrixF32 {
float32_t m[3][4];
};
/*
* Convert from an NvKmsMatrix (stores floating point values in NvU32s) to an
* NvKmsMatrixF32 (stores floating point values in float32_t).
*/
static inline struct NvKmsMatrixF32 NvKmsMatrixToNvKmsMatrixF32(
    const struct NvKmsMatrix in)
{
    struct NvKmsMatrixF32 out = { };
    int row, col;

    /* Reinterpret each NvU32 cell of 'in' as a float32_t, element by
     * element; the element order does not matter. */
    for (row = 0; row < 3; row++) {
        for (col = 0; col < 3; col++) {
            out.m[row][col] = NvU32viewAsF32(in.m[row][col]);
        }
    }

    return out;
}
/*
* Compute the matrix product A * B, where A is a 3x3 matrix and B is a 3x4 matrix,
* and return the resulting 3x4 matrix.
*/
static inline struct NvKms3x4MatrixF32 nvMultiply3x4Matrix(const struct NvKmsMatrixF32 *A,
const struct NvKms3x4MatrixF32 *B)
{
struct NvKms3x4MatrixF32 C = { };
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 4; ++j) {
for (int k = 0; k < 3; ++k) {
C.m[i][j] = f32_mulAdd(A->m[i][k], B->m[k][j], C.m[i][j]);
}
}
}
return C;
}
/* return x**y */
float64_t nvKmsPow(float64_t x, float64_t y);
#endif /* __NVKMS_SOFTFLOAT_H__ */

View File

@@ -0,0 +1,96 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_SURFACE_H__
#define __NVKMS_SURFACE_H__
#include "nvkms-types.h"
/* Register / unregister / release client surfaces for pOpenDev. */
void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo,
                          struct NvKmsPerOpenDev *pOpenDev,
                          struct NvKmsRegisterSurfaceParams *pParams,
                          enum NvHsMapPermissions hsMapPermissions);
void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo,
                            struct NvKmsPerOpenDev *pOpenDev,
                            NvKmsSurfaceHandle surfaceHandle,
                            NvBool skipUpdate);
void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo,
                         struct NvKmsPerOpenDev *pOpenDev,
                         NvKmsSurfaceHandle surfaceHandle);
void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo,
                             struct NvKmsPerOpenDev *pOpenDev,
                             NVEvoApiHandlesRec *pOpenDevSurfaceHandles);

/* Surface reference counting. */
void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo);
NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo);

/* Surface lookup by client handle. */
NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle(
    const NVDevEvoRec *pDevEvo,
    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
    const NvKmsSurfaceHandle surfaceHandle,
    const NVEvoChannelMask channelMask);
NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoCtxDmaOk(
    const NVDevEvoRec *pDevEvo,
    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
    NvKmsSurfaceHandle surfaceHandle);

/* Deferred request FIFO registration, backed by pSurfaceEvo. */
NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo(
    NVDevEvoPtr pDevEvo,
    NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoUnregisterDeferredRequestFifo(
    NVDevEvoPtr pDevEvo,
    NVDeferredRequestFifoRec *pDeferredRequestFifo);
/*
 * Return whether (pOpenDev, surfaceHandle) identifies the owner of
 * pSurfaceEvo: both the owning open-device and the owner's handle must match.
 */
static inline NvBool nvEvoIsSurfaceOwner(const NVSurfaceEvoRec *pSurfaceEvo,
                                         const struct NvKmsPerOpenDev *pOpenDev,
                                         NvKmsSurfaceHandle surfaceHandle)
{
    const NvBool handleMatches =
        (pSurfaceEvo->owner.surfaceHandle == surfaceHandle);
    const NvBool openDevMatches =
        (pSurfaceEvo->owner.pOpenDev == pOpenDev);

    return openDevMatches && handleMatches;
}
/*
 * Assert that, when a right-eye entry is present in _arr, the left- and
 * right-eye entries agree on _field.
 */
#define ASSERT_EYES_MATCH(_arr, _field) \
    nvAssert((_arr)[NVKMS_RIGHT] == NULL || \
             (_arr)[NVKMS_LEFT]->_field == (_arr)[NVKMS_RIGHT]->_field);

/* FOR_ALL_EYES relies on the eye indices being consecutive. */
ct_assert((NVKMS_RIGHT - NVKMS_LEFT) == 1);

/* Iterate _eye over both stereo eyes (NVKMS_LEFT, NVKMS_RIGHT). */
#define FOR_ALL_EYES(_eye) \
    for ((_eye) = NVKMS_LEFT; (_eye) <= NVKMS_RIGHT; (_eye)++)

/* Iterate _planeIndex over the planes of _pSurface's memory format. */
#define FOR_ALL_VALID_PLANES(_planeIndex, _pSurface) \
    for ((_planeIndex) = 0; \
         (_planeIndex) < \
         (nvKmsGetSurfaceMemoryFormatInfo((_pSurface)->format))->numPlanes; \
         (_planeIndex)++)
#endif /* __NVKMS_SURFACE_H__ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,273 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_UTILS_H__
#define __NVKMS_UTILS_H__
#include "nvkms-types.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "nvidia-modeset-os-interface.h"
/*!
* Subtract B from A, and handle wrap around.
*
* This is useful for cases where A is a number that is incremented and wrapped;
* e.g.,
*
* a = (a + 1) % max;
*
* and we want to subtract some amount from A to get one of its previous values.
*/
static inline NvU8 A_minus_b_with_wrap_U8(NvU8 a, NvU8 b, NvU8 max)
{
    /*
     * Bias by max before subtracting so the intermediate can never go
     * negative; the arithmetic happens in int due to integer promotion.
     */
    const int unwrapped = (int)a + (int)max - (int)b;

    return (NvU8)(unwrapped % max);
}
/*!
* Return whether (A + B) > C, avoiding integer overflow in the addition.
*/
static inline NvBool A_plus_B_greater_than_C_U16(NvU16 a, NvU16 b, NvU16 c)
{
    /* If a + b would exceed NV_U16_MAX, it certainly exceeds c. */
    if (NV_U16_MAX - a < b) {
        return TRUE;
    }

    return (a + b) > c;
}
/*!
 * Clamp val to the range [lo, hi]; lo wins when val is below both bounds.
 */
static inline NvS32 clamp_S32(NvS32 val, NvS32 lo, NvS32 hi)
{
    return (val < lo) ? lo
         : (val > hi) ? hi
         : val;
}
/*!
* Return whether the bitmask contains bits greater than or equal to
* the maximum.
*/
/*!
 * Return whether the bitmask contains bits greater than or equal to
 * the maximum.
 *
 * \param bitmask  The bitmask to inspect.
 * \param max      The number of valid low-order bit positions; must be <= 32.
 *
 * \return TRUE iff any bit at position >= max is set in bitmask.
 */
static inline NvBool nvHasBitAboveMax(NvU32 bitmask, NvU8 max)
{
    nvAssert(max <= 32);
    if (max == 32) {
        /* All 32 bit positions are valid; nothing can be out of range. */
        return FALSE;
    }
    /*
     * Shift an unsigned one: the previous (1 << max) was undefined
     * behavior for max == 31 (left shift into the sign bit of int).
     */
    return (bitmask & ~((1U << max) - 1)) != 0;
}
/*!
* Check if a timeout is exceeded.
*
* This is intended to be used when busy waiting in a loop, like this:
*
* NvU64 startTime = 0;
*
* do {
* if (SOME-CONDITION) {
* break;
* }
*
* if (nvExceedsTimeoutUSec(&startTime, TIMEOUT-IN-USEC)) {
* break;
* }
*
* nvkms_yield();
*
* } while (TRUE);
*
* The caller should zero-initialize startTime, and nvExceedsTimeoutUSec() will
* set startTime to the starting time on the first call. This is structured
* this way to avoid the nvkms_get_usec() call in the common case where
* SOME-CONDITION is true on the first iteration (nvkms_get_usec() is not
* expected to be a large penalty, but it still seems nice to avoid it when not
* needed).
*/
static inline NvBool nvExceedsTimeoutUSec(
    NvU64 *pStartTime,
    NvU64 timeoutPeriod)
{
    const NvU64 now = nvkms_get_usec();

    /* First call: record the start of the wait and report "not expired". */
    if (*pStartTime == 0) {
        *pStartTime = now;
        return FALSE;
    }

    /* Treat a clock that moved backwards (wraparound?!) as expired. */
    if (now < *pStartTime) {
        return TRUE;
    }

    return (now - *pStartTime) > timeoutPeriod;
}
/*!
* Return a non-NULL string.
*
* The first argument, stringMightBeNull, could be NULL. In which
* case, return the second argument, safeString, which the caller
* should ensure is not NULL (e.g., by providing a literal).
*
* This is intended as a convenience for situations like this:
*
* char *s = FunctionThatMightReturnNull();
* printf("%s\n", nvSafeString(s, "stringLiteral"));
*/
static inline const char *nvSafeString(char *stringMightBeNull,
                                       const char *safeString)
{
    /* Fall back to the caller-provided safe string only on NULL. */
    if (stringMightBeNull == NULL) {
        return safeString;
    }

    return stringMightBeNull;
}
/*!
 * Convert a surface offset in bytes into the units used for display
 * ctx dma offsets (the >> 8 means 256-byte units).
 *
 * NOTE(review): the assert checks alignment against
 * NV_SURFACE_OFFSET_ALIGNMENT_SHIFT while the conversion shifts by a
 * hard-coded 8; presumably the alignment shift is >= 8 -- confirm.
 */
static inline NvU64 nvCtxDmaOffsetFromBytes(NvU64 ctxDmaOffset)
{
    /* The byte offset must honor the surface offset alignment requirement. */
    nvAssert((ctxDmaOffset & ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1))
             == 0);

    return (ctxDmaOffset >> 8);
}
NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth);
/* Severity level for the nvEvoLog*() family of logging functions below. */
typedef enum {
    EVO_LOG_WARN,
    EVO_LOG_ERROR,
    EVO_LOG_INFO,
} NVEvoLogType;
void *nvInternalAlloc(size_t size, NvBool zero);
void *nvInternalRealloc(void *ptr, size_t size);
void nvInternalFree(void *ptr);
char *nvInternalStrDup(const char *str);
NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo,
const char *key, NvU32 *val);
/*
 * Memory allocation wrappers.
 *
 * In DEBUG builds, nvAlloc()/nvCalloc()/nvRealloc()/nvFree()/nvStrDup()
 * route through the nvDebug*() helpers, which record the allocation
 * site (__LINE__/__FILE__) so nvReportUnfreedAllocations() can report
 * leaks.  In non-DEBUG builds they map directly onto the nvInternal*()
 * functions declared above.
 */
#if defined(DEBUG)
void nvReportUnfreedAllocations(void);
void *nvDebugAlloc(size_t size, int line, const char *file);
void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file);
void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file);
void nvDebugFree(void *ptr);
char *nvDebugStrDup(const char *str, int line, const char *file);
#define nvAlloc(s) nvDebugAlloc((s), __LINE__, __FILE__)
#define nvCalloc(n,s) nvDebugCalloc((n), (s), __LINE__, __FILE__)
#define nvFree(p) nvDebugFree(p)
#define nvRealloc(p,s) nvDebugRealloc((p), (s), __LINE__, __FILE__)
#define nvStrDup(s) nvDebugStrDup((s), __LINE__, __FILE__)
#else
#define nvAlloc(s) nvInternalAlloc((s), FALSE)
/* NOTE(review): unlike calloc(3), the (n)*(s) product here is not
 * checked for arithmetic overflow. */
#define nvCalloc(n,s) nvInternalAlloc((n)*(s), TRUE)
#define nvRealloc(p,s) nvInternalRealloc((p),(s))
#define nvFree(s) nvInternalFree(s)
#define nvStrDup(s) nvInternalStrDup(s)
#endif
void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex,
const char *fmt, va_list ap);
void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
void nvEvoLog(NVEvoLogType logType, const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
#if defined(DEBUG)
void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
#else
# define nvEvoLogDebug(...)
# define nvEvoLogDevDebug(pDevEvo, ...)
# define nvEvoLogDispDebug(pDispEvo, ...)
#endif /* DEBUG */
void nvInitInfoString(NVEvoInfoStringPtr pInfoString,
char *s, NvU16 totalLength);
void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString,
const char *format, ...)
__attribute__((format (printf, 2, 3)));
void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString,
const char *format, ...)
__attribute__((format (printf, 2, 3)));
typedef NvU32 NvKmsGenericHandle;
NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles,
void *pointer);
NvKmsGenericHandle nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles,
void *pointer);
void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles,
NvKmsGenericHandle handle);
void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles,
NvKmsGenericHandle *pHandle);
void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles,
NvKmsGenericHandle handle);
NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles,
NvU32 defaultSize);
void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles);
/*
 * Iterate over every pointer stored in _pEvoApiHandles.
 *
 * _handle is reset to 0 and advanced by
 * nvEvoGetPointerFromApiHandleNext(); the loop terminates when that
 * function returns NULL.
 */
#define FOR_ALL_POINTERS_IN_EVO_API_HANDLES(_pEvoApiHandles,             \
                                            _pointer, _handle)           \
    for ((_handle) = 0,                                                  \
         (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles,  \
                                                       &(_handle));      \
         (_pointer) != NULL;                                             \
         (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles,  \
                                                       &(_handle)))
#ifdef __cplusplus
};
#endif
#endif /* __NVKMS_UTILS_H__ */

View File

@@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_VRR_H__
#define __NVKMS_VRR_H__
#include "nvkms-types.h"
#include "nvkms-modeset-types.h"
#ifdef __cplusplus
extern "C" {
#endif
void nvAllocVrrEvo(NVDevEvoPtr pDevEvo);
void nvFreeVrrEvo(NVDevEvoPtr pDevEvo);
void nvDisableVrr(NVDevEvoPtr pDevEvo);
void nvEnableVrr(NVDevEvoPtr pDevEvo,
const struct NvKmsSetModeRequest *pRequest);
void nvCancelVrrFrameReleaseTimers(NVDevEvoPtr pDevEvo);
void nvSetVrrActive(NVDevEvoPtr pDevEvo, NvBool active);
void nvApplyVrrBaseFlipOverrides(const NVDispEvoRec *pDispEvo, NvU32 head,
const NVFlipChannelEvoHwState *pOld,
NVFlipChannelEvoHwState *pNew);
void nvSetNextVrrFlipTypeAndIndex(NVDevEvoPtr pDevEvo,
struct NvKmsFlipReply *reply);
void nvTriggerVrrUnstallMoveCursor(NVDispEvoPtr pDispEvo);
void nvTriggerVrrUnstallSetCursorImage(NVDispEvoPtr pDispEvo,
NvBool ctxDmaChanged);
void nvGetDpyMinRefreshRateValidValues(
const NVHwModeTimingsEvo *pTimings,
const enum NvKmsDpyVRRType vrrType,
const NvU32 edidTimeoutMicroseconds,
NvU32 *minMinRefreshRate,
NvU32 *maxMinRefreshRate);
NvBool nvDispSupportsVrr(const NVDispEvoRec *pDispEvo);
NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd);
#ifdef __cplusplus
};
#endif
#endif /* __NVKMS_VRR_H__ */

View File

@@ -0,0 +1,533 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NVKMS_API_TYPES_H)
#define NVKMS_API_TYPES_H
#include <nvtypes.h>
#include <nvmisc.h>
#include <nvlimits.h>
/* Compile-time limits and well-known indices used throughout the API. */
#define NVKMS_MAX_SUBDEVICES NV_MAX_SUBDEVICES

/* Stereo eye indices. */
#define NVKMS_LEFT 0
#define NVKMS_RIGHT 1
#define NVKMS_MAX_EYES 2

/* Layer indices within a head. */
#define NVKMS_MAIN_LAYER 0
#define NVKMS_OVERLAY_LAYER 1
#define NVKMS_MAX_LAYERS_PER_HEAD 8

#define NVKMS_MAX_PLANES_PER_SURFACE 3

#define NVKMS_DP_ADDRESS_STRING_LENGTH 64

#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff

/*
 * Per-client handles used to name NVKMS objects across the API
 * boundary; presumably allocated by the corresponding alloc/register
 * requests.
 */
typedef NvU32 NvKmsDeviceHandle;
typedef NvU32 NvKmsDispHandle;
typedef NvU32 NvKmsConnectorHandle;
typedef NvU32 NvKmsSurfaceHandle;
typedef NvU32 NvKmsFrameLockHandle;
typedef NvU32 NvKmsDeferredRequestFifoHandle;
typedef NvU32 NvKmsSwapGroupHandle;
typedef NvU32 NvKmsVblankSyncObjectHandle;
/* A width x height extent. */
struct NvKmsSize {
    NvU16 width;
    NvU16 height;
};

/* An unsigned 2D position. */
struct NvKmsPoint {
    NvU16 x;
    NvU16 y;
};

/* A signed 2D position. */
struct NvKmsSignedPoint {
    NvS16 x;
    NvS16 y;
};

/* A rectangle: origin plus extent. */
struct NvKmsRect {
    NvU16 x;
    NvU16 y;
    NvU16 width;
    NvU16 height;
};
/*
* A 3x3 row-major matrix.
*
* The elements are 32-bit single-precision IEEE floating point values. The
* floating point bit pattern should be stored in NvU32s to be passed into the
* kernel.
*/
struct NvKmsMatrix {
NvU32 m[3][3];
};
/*
 * Physical connector types.  NvKmsConnectorTypeString() below must be
 * kept in sync with this list.
 */
typedef enum {
    NVKMS_CONNECTOR_TYPE_DP = 0,
    NVKMS_CONNECTOR_TYPE_VGA = 1,
    NVKMS_CONNECTOR_TYPE_DVI_I = 2,
    NVKMS_CONNECTOR_TYPE_DVI_D = 3,
    NVKMS_CONNECTOR_TYPE_ADC = 4,
    NVKMS_CONNECTOR_TYPE_LVDS = 5,
    NVKMS_CONNECTOR_TYPE_HDMI = 6,
    NVKMS_CONNECTOR_TYPE_USBC = 7,
    NVKMS_CONNECTOR_TYPE_DSI = 8,
    NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9,
    NVKMS_CONNECTOR_TYPE_UNKNOWN = 10,
    NVKMS_CONNECTOR_TYPE_MAX = NVKMS_CONNECTOR_TYPE_UNKNOWN,
} NvKmsConnectorType;
static inline
const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType)
{
switch (connectorType) {
case NVKMS_CONNECTOR_TYPE_DP: return "DP";
case NVKMS_CONNECTOR_TYPE_VGA: return "VGA";
case NVKMS_CONNECTOR_TYPE_DVI_I: return "DVI-I";
case NVKMS_CONNECTOR_TYPE_DVI_D: return "DVI-D";
case NVKMS_CONNECTOR_TYPE_ADC: return "ADC";
case NVKMS_CONNECTOR_TYPE_LVDS: return "LVDS";
case NVKMS_CONNECTOR_TYPE_HDMI: return "HDMI";
case NVKMS_CONNECTOR_TYPE_USBC: return "USB-C";
case NVKMS_CONNECTOR_TYPE_DSI: return "DSI";
case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER";
default: break;
}
return "Unknown";
}
typedef enum {
NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA = 0,
NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS = 1,
NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS = 2,
NVKMS_CONNECTOR_SIGNAL_FORMAT_DP = 3,
NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI = 4,
NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5,
NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX =
NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN,
} NvKmsConnectorSignalFormat;
/*!
* Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces).
*
* When flipping, the client can optionally specify a notifier and/or
* a semaphore to use with the flip. The surfaces used for these
* should be registered with NVKMS to get an NvKmsSurfaceHandle.
*
* NvKmsNIsoSurface::offsetInWords indicates the starting location, in
* 32-bit words, within the surface where EVO should write the
* notifier or semaphore. Note that only the first 4096 bytes of a
* surface can be used by semaphores or notifiers; offsetInWords must
* allow for the semaphore or notifier to be written within the first
* 4096 bytes of the surface. I.e., this must be satisfied:
*
* ((offsetInWords * 4) + elementSizeInBytes) <= 4096
*
* Where elementSizeInBytes is:
*
* if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16
* if NISO_FORMAT_LEGACY,
* if overlay && notifier, elementSizeInBytes = 16
* else, elementSizeInBytes = 4
*
* Note that different GPUs support different semaphore and notifier formats.
* Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are
* valid for the given device.
*
* Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but
* FOUR_WORD uses a format compatible with display class 907[ce], and
* FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by
* the NV_DISP_NOTIFIER definition in clc37d.h).
*/
enum NvKmsNIsoFormat {
NVKMS_NISO_FORMAT_LEGACY,
NVKMS_NISO_FORMAT_FOUR_WORD,
NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY,
};
enum NvKmsEventType {
NVKMS_EVENT_TYPE_DPY_CHANGED,
NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED,
NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED,
NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED,
NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED,
NVKMS_EVENT_TYPE_FLIP_OCCURRED,
};
typedef enum {
NV_EVO_SCALER_1TAP = 0,
NV_EVO_SCALER_2TAPS = 1,
NV_EVO_SCALER_3TAPS = 2,
NV_EVO_SCALER_5TAPS = 3,
NV_EVO_SCALER_8TAPS = 4,
NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP,
NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS,
} NVEvoScalerTaps;
/* This structure describes the scaling bounds for a given layer. */
struct NvKmsScalingUsageBounds {
/*
* Maximum vertical downscale factor (scaled by 1024)
*
* For example, if the downscale factor is 1.5, then maxVDownscaleFactor
* would be 1.5 x 1024 = 1536.
*/
NvU16 maxVDownscaleFactor;
/*
* Maximum horizontal downscale factor (scaled by 1024)
*
* See the example above for maxVDownscaleFactor.
*/
NvU16 maxHDownscaleFactor;
/* Maximum vertical taps allowed */
NVEvoScalerTaps vTaps;
/* Whether vertical upscaling is allowed */
NvBool vUpscalingAllowed;
};
struct NvKmsUsageBounds {
struct {
NvBool usable;
struct NvKmsScalingUsageBounds scaling;
NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
} layer[NVKMS_MAX_LAYERS_PER_HEAD];
};
/*
* A 3x4 row-major colorspace conversion matrix.
*
* The output color C' is the CSC matrix M times the column vector
* [ R, G, B, 1 ].
*
* Each entry in the matrix is a signed 2's-complement fixed-point number with
* 3 integer bits and 16 fractional bits.
*/
struct NvKmsCscMatrix {
NvS32 m[3][4];
};
#define NVKMS_IDENTITY_CSC_MATRIX \
(struct NvKmsCscMatrix){{ \
{ 0x10000, 0, 0, 0 }, \
{ 0, 0x10000, 0, 0 }, \
{ 0, 0, 0x10000, 0 } \
}}
/*!
* A color key match bit used in the blend equations and one can select the src
* or dst Color Key when blending. Assert key bit means match, de-assert key
* bit means nomatch.
*
* The src Color Key means using the key bit from the current layer, the dst
* Color Key means using key bit from the previous layer composition stage. The
* src or dst key bit will be inherited by blended pixel for the preparation of
* next blending, as dst Color Key.
*
* src: Forward the color key match bit from the current layer pixel to next layer
* composition stage.
*
* dst: Forward the color key match bit from the previous composition stage
* pixel to next layer composition stage.
*
* disable: Forward “1” to the next layer composition stage as the color key.
*/
enum NvKmsCompositionColorKeySelect {
NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0,
NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC,
NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST,
};
#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3
/*!
* Composition modes used for surfaces in general.
* The various types of composition are:
*
* Opaque: source pixels are opaque regardless of alpha,
* and will occlude the destination pixel.
*
* Alpha blending: aka opacity, which could be specified
* for a surface in its entirety, or on a per-pixel basis.
*
* Non-premultiplied: alpha value applies to source pixel,
* and also counter-weighs the destination pixel.
* Premultiplied: alpha already applied to source pixel,
* so it only counter-weighs the destination pixel.
*
* Color keying: use a color key structure to decide
* the criteria for matching and compositing.
* (See NVColorKey below.)
*/
enum NvKmsCompositionBlendingMode {
/*!
* Modes that use no other parameters.
*/
NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE,
/*!
* Mode that ignores both per-pixel alpha provided
* by client and the surfaceAlpha, makes source pixel
* totally transparent.
*/
NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT,
/*!
* Modes that use per-pixel alpha provided by client,
* and the surfaceAlpha must be set to 0.
*/
NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA,
NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA,
/*!
* These use both the surface-wide and per-pixel alpha values.
* surfaceAlpha is treated as numerator ranging from 0 to 255
* of a fraction whose denominator is 255.
*/
NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA,
NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA,
};
static inline NvBool
NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode)
{
return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA ||
mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA ||
mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA ||
mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA;
}
/*!
* Abstract description of a color key.
*
* a, r, g, and b are component values in the same width as the framebuffer
* values being scanned out.
*
* match[ARGB] defines whether that component is considered when matching the
* color key -- TRUE means that the value of the corresponding component must
* match the given value for the given pixel to be considered a 'key match';
* FALSE means that the value of that component is not a key match criterion.
*/
typedef struct {
NvU16 a, r, g, b;
NvBool matchA, matchR, matchG, matchB;
} NVColorKey;
/*!
* Describes the composition parameters for the single layer.
*/
struct NvKmsCompositionParams {
enum NvKmsCompositionColorKeySelect colorKeySelect;
NVColorKey colorKey;
/*
* It is possible to assign different blending mode for match pixels and
* nomatch pixels. blendingMode[0] is used to blend a pixel with the color key
* match bit "0", and blendingMode[1] is used to blend a pixel with the color
* key match bit "1".
*
* But because of the hardware restrictions match and nomatch pixels can
* not use blending mode PREMULT_ALPHA, NON_PREMULT_ALPHA,
* PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at once.
*/
enum NvKmsCompositionBlendingMode blendingMode[2];
NvU8 surfaceAlpha; /* Applies to all pixels of entire surface */
/*
* Defines the composition order. A smaller value moves the layer closer to
* the top (away from the background). No need to pick consecutive values,
* requirements are that the value should be different for each of the
* layers owned by the head and the value for the main layer should be
* the greatest one.
*
* Cursor always remains at the top of all other layers, this parameter
* has no effect on cursor. NVKMS assigns default depth to each of the
* supported layers, by default depth of the layer is calculated as
* (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to
* '0' then default depth value will get used.
*/
NvU8 depth;
};
/*!
* Describes the composition capabilities supported by the hardware for
* cursor or layer. It describes supported the color key selects and for each
* of the supported color key selects it describes supported blending modes
* for match and nomatch pixles.
*/
struct NvKmsCompositionCapabilities {
struct {
/*
* A bitmask of the supported blending modes for match and nomatch
* pixels. It should be the bitwise 'or' of one or more
* NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values.
*/
NvU32 supportedBlendModes[2];
} colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS];
/*
* A bitmask of the supported color key selects.
*
* It should be the bitwise 'or' of one or more
* NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*)
* values.
*/
NvU32 supportedColorKeySelects;
};
struct NvKmsLayerCapabilities {
/*!
* Whether Layer supports the window mode. If window mode is supported,
* then clients can set the layer's dimensions so that they're smaller than
* the viewport, and can also change the output position of the layer to a
* non-(0, 0) position.
*
* NOTE: Dimension changes are currently unsupported for the main layer,
* and output position changes for the main layer are currently only
* supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for
* these is coming soon, via changes to flip code.
*/
NvBool supportsWindowMode :1;
/*!
* Whether layer supports HDR pipe.
*/
NvBool supportsHDR :1;
/*!
* Describes the supported Color Key selects and blending modes for
* match and nomatch layer pixels.
*/
struct NvKmsCompositionCapabilities composition;
/*!
* Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS
* device on the given scanout surface layer.
*
* Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported,
* then (1 << value) will be set in the appropriate bitmask.
*
* Note that these bitmasks just report the static SW/HW capabilities,
* and are a superset of the formats that IMP may allow. Clients are
* still expected to honor the NvKmsUsageBounds for each head.
*/
NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
};
/*!
* Surface layouts.
*
* BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into
* blocks or tiles for better locality during common GPU operations.
*
* Pitch is the naive "linear" surface layout with pixels laid out sequentially
* in memory line-by-line, optionally with some padding at the end of each line
* for alignment purposes.
*/
enum NvKmsSurfaceMemoryLayout {
NvKmsSurfaceMemoryLayoutBlockLinear = 0,
NvKmsSurfaceMemoryLayoutPitch = 1,
};
/*!
 * Return a human-readable name for the given surface memory layout;
 * unrecognized values map to "Unknown".
 */
static inline const char *NvKmsSurfaceMemoryLayoutToString(
    enum NvKmsSurfaceMemoryLayout layout)
{
    if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
        return "BlockLinear";
    }

    if (layout == NvKmsSurfaceMemoryLayoutPitch) {
        return "Pitch";
    }

    return "Unknown";
}
typedef enum {
MUX_STATE_GET = 0,
MUX_STATE_INTEGRATED = 1,
MUX_STATE_DISCRETE = 2,
MUX_STATE_UNKNOWN = 3,
} NvMuxState;
enum NvKmsRotation {
NVKMS_ROTATION_0 = 0,
NVKMS_ROTATION_90 = 1,
NVKMS_ROTATION_180 = 2,
NVKMS_ROTATION_270 = 3,
NVKMS_ROTATION_MIN = NVKMS_ROTATION_0,
NVKMS_ROTATION_MAX = NVKMS_ROTATION_270,
};
struct NvKmsRRParams {
enum NvKmsRotation rotation;
NvBool reflectionX;
NvBool reflectionY;
};
/*!
* Convert each possible NvKmsRRParams to a unique integer [0..15],
* so that we can describe possible NvKmsRRParams with an NvU16 bitmask.
*
* E.g.
* rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0
* ...
* rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15
*/
static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams)
{
NvU8 bitPosition = (NvU8)rrParams->rotation;
if (rrParams->reflectionX) {
bitPosition |= NVBIT(2);
}
if (rrParams->reflectionY) {
bitPosition |= NVBIT(3);
}
return bitPosition;
}
/*
* NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via
* display's isochronous interface. Examples of this type of memory are pixel
* data and LUT entries.
*
* NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via
* display's non-isochronous interface. Examples of this type of memory are
* semaphores and notifiers.
*/
typedef enum {
NVKMS_MEMORY_ISO = 0,
NVKMS_MEMORY_NISO = 1,
} NvKmsMemoryIsoType;
typedef struct {
NvBool coherent;
NvBool noncoherent;
} NvKmsDispIOCoherencyModes;
#endif /* NVKMS_API_TYPES_H */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,125 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NVKMS_FORMAT_H)
#define NVKMS_FORMAT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
/*
* In order to interpret these pixel format namings, please take note of these
* conventions:
* - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats
* that have an interleaved chroma component across every two pixels. The
* double-underscore is a separator between these two pixel groups.
* - The triple-underscore is a separator between planes.
* - The 'N' suffix is a delimiter for the chroma decimation factor.
*
* As examples of the above rules:
* - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one
* 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8)
* and one 8-bit chroma component (V8) in pixel (N + 1). This format is
* 422-decimated since the U and V chroma samples are shared between each
* pair of adjacent pixels per line.
* - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components,
* and another plane of 10-bit chroma components (U10V10). This format has no
* chroma decimation since the luma and chroma components are sampled at the
* same rate.
*/
enum NvKmsSurfaceMemoryFormat {
NvKmsSurfaceMemoryFormatI8 = 0,
NvKmsSurfaceMemoryFormatA1R5G5B5 = 1,
NvKmsSurfaceMemoryFormatX1R5G5B5 = 2,
NvKmsSurfaceMemoryFormatR5G6B5 = 3,
NvKmsSurfaceMemoryFormatA8R8G8B8 = 4,
NvKmsSurfaceMemoryFormatX8R8G8B8 = 5,
NvKmsSurfaceMemoryFormatA2B10G10R10 = 6,
NvKmsSurfaceMemoryFormatX2B10G10R10 = 7,
NvKmsSurfaceMemoryFormatA8B8G8R8 = 8,
NvKmsSurfaceMemoryFormatX8B8G8R8 = 9,
NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10,
NvKmsSurfaceMemoryFormatR16G16B16A16 = 11,
NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12,
NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13,
NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14,
NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15,
NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16,
NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17,
NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18,
NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19,
NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20,
NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21,
NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22,
NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23,
NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24,
NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25,
NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26,
NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27,
NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28,
NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29,
NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30,
NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31,
NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32,
NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33,
NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34,
NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8,
NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatY8___U8___V8_N420,
};
/*
 * Static per-format description, as returned by
 * nvKmsGetSurfaceMemoryFormatInfo().  Only one arm of the union is
 * meaningful, presumably selected by isYUV -- verify against the
 * format table.
 */
typedef struct NvKmsSurfaceMemoryFormatInfo {
    enum NvKmsSurfaceMemoryFormat format; /* The format described. */
    const char *name;                     /* Human-readable format name. */
    NvU8 depth;    /* Depth in bits; assumed to exclude padding bits
                    * (e.g. the X in X8R8G8B8) -- TODO confirm. */
    NvBool isYUV;  /* TRUE: .yuv applies; FALSE: .rgb applies. */
    NvU8 numPlanes; /* Number of memory planes for this format. */
    union {
        struct {
            NvU8 bytesPerPixel;
            NvU8 bitsPerPixel;
        } rgb;
        struct {
            NvU8 depthPerComponent;
            NvU8 storageBitsPerComponent;
            NvU8 horizChromaDecimationFactor;
            NvU8 vertChromaDecimationFactor;
        } yuv;
    };
} NvKmsSurfaceMemoryFormatInfo;
const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo(
const enum NvKmsSurfaceMemoryFormat format);
const char *nvKmsSurfaceMemoryFormatToString(
const enum NvKmsSurfaceMemoryFormat format);
#ifdef __cplusplus
};
#endif
#endif /* NVKMS_FORMAT_H */

View File

@@ -0,0 +1,73 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NVKMS_IOCTL_H)
#define NVKMS_IOCTL_H
#include "nvtypes.h"
/*!
* Some of the NVKMS ioctl parameter data structures are quite large
* and would exceed the parameter size constraints on at least SunOS.
*
* Redirect ioctls through a level of indirection: user-space assigns
* NvKmsIoctlParams with the real command, size, and pointer, and
* passes the NvKmsIoctlParams through the ioctl.
*/
/*
 * Indirection record passed through the single NVKMS ioctl: user space
 * fills in the real command, the size of its parameter struct, and a
 * pointer to it.
 */
struct NvKmsIoctlParams {
    NvU32 cmd;   /* the real NVKMS request being made */
    NvU32 size;  /* size, in bytes, of the buffer at 'address' */
    NvU64 address NV_ALIGN_BYTES(8); /* user-space pointer to the params */
};

#define NVKMS_IOCTL_MAGIC 'm'
#define NVKMS_IOCTL_CMD 0

/* The one ioctl number NVKMS exposes; everything is routed through it. */
#define NVKMS_IOCTL_IOWR \
    _IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams)
/*!
* User-space pointers are always passed to NVKMS in an NvU64.
* This user-space address is eventually passed into the platform's
* copyin/copyout functions, in a void* argument.
*
* This utility function converts from an NvU64 to a pointer.
*/
/*
 * Convert a user-supplied NvU64 address into a kernel void* by first
 * narrowing it through the platform pointer-sized integer type.
 */
static inline void *nvKmsNvU64ToPointer(NvU64 value)
{
    const NvUPtr ptrValue = (NvUPtr)value;

    return (void *)ptrValue;
}
/*!
* Before casting the NvU64 to a void*, check that casting to a pointer
* size within the kernel does not lose any precision in the current
* environment.
*/
/*
 * Report whether 'address' survives a round trip through the kernel's
 * pointer-sized integer type without losing precision, i.e. whether it
 * is safe to hand to nvKmsNvU64ToPointer().
 */
static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address)
{
    const NvU64 roundTripped = (NvU64)(NvUPtr)address;

    return roundTripped == address;
}
#endif /* NVKMS_IOCTL_H */

View File

@@ -0,0 +1,97 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NVKMS_SYNC_H)
#define NVKMS_SYNC_H
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
#include "nvkms-api-types.h"
/* These functions are implemented in nvkms-lib. */
/* Lifecycle state reported by a display notifier. */
enum nvKmsNotifierStatus {
    NVKMS_NOTIFIER_STATUS_NOT_BEGUN,
    NVKMS_NOTIFIER_STATUS_BEGUN,
    NVKMS_NOTIFIER_STATUS_FINISHED,
};

/* Format-independent result of nvKmsParseNotifier(). */
struct nvKmsParsedNotifier {
    NvU64 timeStamp;        /* meaningful only when timeStampValid */
    NvBool timeStampValid;
    enum nvKmsNotifierStatus status;
    NvU8 presentCount;      /* HW present counter from the notifier */
};
/*
 * Size, in bytes, of one notifier of the given NISO format. Both
 * four-word formats are always 16 bytes; the legacy format (and any
 * unrecognized value, which falls back to legacy) is 16 bytes for
 * overlay notifiers and 4 bytes otherwise.
 */
static inline NvU32 nvKmsSizeOfNotifier(enum NvKmsNIsoFormat format,
                                        NvBool overlay) {
    if ((format == NVKMS_NISO_FORMAT_FOUR_WORD) ||
        (format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
        return 16;
    }

    /* NVKMS_NISO_FORMAT_LEGACY and anything unknown. */
    return overlay ? 16 : 4;
}
void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, void *base);
void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, const void *base,
struct nvKmsParsedNotifier *out);
struct nvKmsParsedSemaphore {
NvU32 payload;
};
/*
 * Size, in bytes, of one semaphore of the given NISO format: 16 for the
 * four-word formats, 4 for legacy (and any unrecognized value, which
 * falls back to legacy).
 */
static inline NvU32 nvKmsSizeOfSemaphore(enum NvKmsNIsoFormat format) {
    if ((format == NVKMS_NISO_FORMAT_FOUR_WORD) ||
        (format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
        return 16;
    }

    /* NVKMS_NISO_FORMAT_LEGACY and anything unknown. */
    return 4;
}
NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format);
void nvKmsResetSemaphore(enum NvKmsNIsoFormat format,
NvU32 index, void *base,
NvU32 payload);
void nvKmsParseSemaphore(enum NvKmsNIsoFormat format,
NvU32 index, const void *base,
struct nvKmsParsedSemaphore *out);
#ifdef __cplusplus
};
#endif
#endif /* NVKMS_SYNC_H */

View File

@@ -0,0 +1,181 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_KAPI_INTERNAL_H__
#define __NVKMS_KAPI_INTERNAL_H__
#include "unix_rm_handle.h"
#include "nvkms-utils.h"
#include "nvkms-kapi-private.h"
//XXX Decouple functions like nvEvoLog used for logging from NVKMS
#define nvKmsKapiLogDebug(__format...) \
nvEvoLogDebug(EVO_LOG_INFO, "[kapi] "__format)
#define nvKmsKapiLogDeviceDebug(__device, __format, ...) \
nvEvoLogDebug(EVO_LOG_INFO, "[kapi][GPU Id 0x%08x] "__format, \
device->gpuId, ##__VA_ARGS__)
/*
 * Per-GPU state for the NVKMS kernel API layer: RM and NVKMS handles,
 * cached device capabilities, and the shared notifier surface used to
 * track flips.
 */
struct NvKmsKapiDevice {
    NvU32 gpuId;

    /* Semaphore guarding the shared RM handle allocator below. */
    nvkms_sema_handle_t *pSema;

    /* RM handles */
    NvU32 hRmClient;
    NvU32 hRmDevice, hRmSubDevice;
    NvU32 deviceInstance;

    NVUnixRmHandleAllocatorRec handleAllocator;

    /* NVKMS handles */
    struct nvkms_per_open *pKmsOpen;
    NvKmsDeviceHandle hKmsDevice;
    NvKmsDispHandle hKmsDisp;
    NvU32 dispIdx;

    NvU32 subDeviceMask;

    NvBool isSOC;
    NvKmsDispIOCoherencyModes isoIOCoherencyModes;
    NvKmsDispIOCoherencyModes nisoIOCoherencyModes;
    NvBool supportsSyncpts;

    /* Device capabilities, queried once at device init. */
    struct {
        struct NvKmsCompositionCapabilities cursorCompositionCaps;
        struct NvKmsCompositionCapabilities overlayCompositionCaps;

        NvU16 validLayerRRTransforms;

        NvU32 maxWidthInPixels;
        NvU32 maxHeightInPixels;
        NvU32 maxCursorSizeInPixels;

        NvU8 genericPageKind;
    } caps;

    /* Bitmask of NvKmsSurfaceMemoryFormat values, indexed by layer type. */
    NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX];

    NvU32 numHeads;
    NvU32 numLayers[NVKMS_KAPI_MAX_HEADS];

    /*
     * Single shared notifier surface covering all heads/layers; see
     * nvKmsKapiAllocateNotifiers().
     */
    struct {
        NvU32 hRmHandle;
        NvKmsSurfaceHandle hKmsHandle;

        NvBool mapped;           /* pLinearAddress valid iff mapped */
        void *pLinearAddress;    /* CPU mapping of the notifier memory */

        enum NvKmsNIsoFormat format;
    } notifier;

    /* Per-(head, layer) index of the most recently used flip notifier. */
    struct {
        NvU32 currFlipNotifierIndex;
    } layerState[NVKMS_KAPI_MAX_HEADS][NVKMS_MAX_LAYERS_PER_HEAD];

    void *privateData;

    /* Client callback invoked for NVKMS events delivered to this device. */
    void (*eventCallback)(const struct NvKmsKapiEvent *event);
};
struct NvKmsKapiMemory {
NvU32 hRmHandle;
NvU64 size;
struct NvKmsKapiPrivSurfaceParams surfaceParams;
};
struct NvKmsKapiSurface {
NvKmsSurfaceHandle hKmsHandle;
};
enum NvKmsKapiAllocationType {
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT = 0,
NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER = 1,
};
/*
 * Allocate and zero 'nmem' elements of 'size' bytes each, calloc-style.
 *
 * Returns NULL on allocation failure, or if nmem * size would overflow
 * size_t (the original code passed the wrapped product straight to
 * nvInternalAlloc(), which could under-allocate on overflow).
 */
static inline void *nvKmsKapiCalloc(size_t nmem, size_t size)
{
    const size_t totalBytes = nmem * size;

    /* Unsigned overflow wraps; detect it by dividing back out. */
    if ((size != 0) && ((totalBytes / size) != nmem)) {
        return NULL;
    }

    return nvInternalAlloc(totalBytes, NV_TRUE);
}
/*
 * Release memory obtained from nvKmsKapiCalloc().
 *
 * Fix: the original wrote "return nvInternalFree(ptr);" — returning a
 * void expression from a void function is a constraint violation in
 * strict ISO C (C11 6.8.6.4); simply call the function instead.
 */
static inline void nvKmsKapiFree(void *ptr)
{
    nvInternalFree(ptr);
}
/*
 * Allocate a fresh RM object handle from the device's shared allocator.
 *
 * The allocator is shared across callers, so access is serialized with
 * the device semaphore. Returns 0 on failure (callers in this module
 * compare against 0x0 — see nvKmsKapiAllocateChannelEvent()).
 */
static inline NvU32 nvKmsKapiGenerateRmHandle(struct NvKmsKapiDevice *device)
{
    NvU32 handle;

    nvkms_sema_down(device->pSema);
    handle = nvGenerateUnixRmHandle(&device->handleAllocator);
    nvkms_sema_up(device->pSema);

    return handle;
}
/*
 * Return an RM handle obtained from nvKmsKapiGenerateRmHandle() to the
 * device's shared allocator, under the same semaphore.
 */
static inline void nvKmsKapiFreeRmHandle(struct NvKmsKapiDevice *device,
                                         NvU32 handle)
{
    nvkms_sema_down(device->pSema);
    nvFreeUnixRmHandle(&device->handleAllocator, handle);
    nvkms_sema_up(device->pSema);
}
NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device,
NvU32 hRmHandle,
enum NvKmsSurfaceMemoryLayout layout,
NvU64 size,
enum NvKmsKapiAllocationType type,
NvU8 *compressible);
NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device,
NvU32 hRmHandle,
enum NvKmsSurfaceMemoryLayout layout,
NvU64 size,
enum NvKmsKapiAllocationType type,
NvU8 *compressible);
struct NvKmsKapiChannelEvent*
nvKmsKapiAllocateChannelEvent(struct NvKmsKapiDevice *device,
NvKmsChannelEventProc *proc,
void *data,
NvU64 nvKmsParamsUser,
NvU64 nvKmsParamsSize);
void
nvKmsKapiFreeChannelEvent(struct NvKmsKapiDevice *device,
struct NvKmsKapiChannelEvent *cb);
#endif /* __NVKMS_KAPI_INTERNAL_H__ */

View File

@@ -0,0 +1,85 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKMS_KAPI_NOTIFIERS_H__
#define __NVKMS_KAPI_NOTIFIERS_H__
#include "nvkms-kapi-internal.h"
#define NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER 0x2
#define NVKMS_KAPI_NOTIFIER_SIZE 0x10
/*
 * Advance a per-layer notifier index by one, wrapping back to 0 after
 * the last slot.
 */
static inline NvU32 NVKMS_KAPI_INC_NOTIFIER_INDEX(const NvU32 index)
{
    const NvU32 next = index + 1;

    return next % NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER;
}
/*
 * Step a per-layer notifier index backwards by one, wrapping from 0 to
 * the largest allowed index.
 */
static inline NvU32 NVKMS_KAPI_DEC_NOTIFIER_INDEX(const NvU32 index)
{
    return (index == 0) ? (NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER - 1)
                        : (index - 1);
}
/*
 * Flatten (head, layer, index) into a linear slot number within the
 * shared notifier surface: head-major, then layer, then per-layer slot.
 */
static inline NvU32 NVKMS_KAPI_NOTIFIER_INDEX(NvU32 head, NvU32 layer,
                                              NvU32 index)
{
    /*
     * NOTE(review): accumulated in 64 bits but implicitly truncated to
     * NvU32 on return; harmless for the small head/layer/slot counts
     * used here, but worth confirming if limits ever grow.
     */
    NvU64 notifierIndex = 0;

    notifierIndex = head *
                    NVKMS_MAX_LAYERS_PER_HEAD *
                    NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER;

    notifierIndex += layer *
                     NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER;

    notifierIndex += index;

    return notifierIndex;
}
/*
 * Byte offset of the (head, layer, index) notifier within the shared
 * notifier surface.
 */
static inline NvU32 NVKMS_KAPI_NOTIFIER_OFFSET(NvU32 head,
                                               NvU32 layer, NvU32 index)
{
    const NvU32 slot = NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index);

    return slot * NVKMS_KAPI_NOTIFIER_SIZE;
}
NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device, NvBool inVideoMemory);
void nvKmsKapiFreeNotifiers(struct NvKmsKapiDevice *device);
NvBool nvKmsKapiIsNotifierFinish(const struct NvKmsKapiDevice *device,
const NvU32 head, const NvU32 layer,
const NvU32 index);
void nvKmsKapiNotifierSetNotBegun(struct NvKmsKapiDevice *device,
NvU32 head, NvU32 layer, NvU32 index);
#endif /* __NVKMS_KAPI_NOTIFIERS_H__ */

View File

@@ -0,0 +1,59 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(__NVKMS_KAPI_PRIVATE_H__)
#define __NVKMS_KAPI_PRIVATE_H__
#include "nvtypes.h"
#include "nvkms-api.h"
/*
 * Driver-private parameters copied in from user space when allocating a
 * channel-event callback; identifies the RM client/channel to bind to.
 */
struct NvKmsKapiPrivAllocateChannelEventParams {
    NvU32 hClient;
    NvU32 hChannel;
};

/*
 * Layout description carried alongside imported/exported memory so both
 * sides agree on how the surface is organized.
 */
struct NvKmsKapiPrivSurfaceParams {
    enum NvKmsSurfaceMemoryLayout layout;
    /* Valid for block-linear layouts. */
    struct {
        struct {
            NvU32 x;
            NvU32 y;
            NvU32 z;
        } log2GobsPerBlock;
        NvU32 pitchInBlocks;
        NvBool genericMemory;
    } blockLinear;
};

/* Driver-private parameters for importing memory from a dma-buf fd. */
struct NvKmsKapiPrivImportMemoryParams {
    int memFd;
    struct NvKmsKapiPrivSurfaceParams surfaceParams;
};

/* Driver-private parameters for exporting memory as a dma-buf fd. */
struct NvKmsKapiPrivExportMemoryParams {
    int memFd;
};
#endif /* !defined(__NVKMS_KAPI_PRIVATE_H__) */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,150 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-modeset-os-interface.h"
#include "nvkms-rmapi.h"
#include "nvkms-kapi.h"
#include "nvkms-kapi-private.h"
#include "nvkms-kapi-internal.h"
#include "class/cl0005.h"
struct NvKmsKapiChannelEvent {
struct NvKmsKapiDevice *device;
NvKmsChannelEventProc *proc;
void *data;
struct NvKmsKapiPrivAllocateChannelEventParams nvKmsParams;
NvHandle hCallback;
NVOS10_EVENT_KERNEL_CALLBACK_EX rmCallback;
};
/*
 * Trampoline installed as the NV01_EVENT_KERNEL_CALLBACK_EX callback:
 * arg1 is the NvKmsKapiChannelEvent set up in
 * nvKmsKapiAllocateChannelEvent(); forward to the client's proc.
 * The remaining RM-supplied arguments are intentionally unused.
 */
static void ChannelEventHandler(void *arg1, void *arg2, NvHandle hEvent,
                                NvU32 data, NvU32 status)
{
    struct NvKmsKapiChannelEvent *cb = arg1;
    cb->proc(cb->data, 0);
}
/*
 * Bind an RM event callback to the client/channel named in the
 * user-space params at 'nvKmsParamsUser' (a
 * NvKmsKapiPrivAllocateChannelEventParams of 'nvKmsParamsSize' bytes).
 * 'proc'/'data' are invoked via ChannelEventHandler when the channel
 * signals. Returns the allocated event object, or NULL on any failure
 * (all partially-acquired resources are released).
 */
struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent
(
    struct NvKmsKapiDevice *device,
    NvKmsChannelEventProc *proc,
    void *data,
    NvU64 nvKmsParamsUser,
    NvU64 nvKmsParamsSize
)
{
    int status;
    NvU32 ret;
    struct NvKmsKapiChannelEvent *cb = NULL;
    NV0005_ALLOC_PARAMETERS eventParams = { };

    if (device == NULL || proc == NULL) {
        goto fail;
    }

    cb = nvKmsKapiCalloc(1, sizeof(*cb));
    if (cb == NULL) {
        goto fail;
    }

    /* Verify the driver-private params size and copy it in from userspace */
    if (nvKmsParamsSize != sizeof(cb->nvKmsParams)) {
        nvKmsKapiLogDebug(
            "NVKMS private memory import parameter size mismatch - "
            "expected: 0x%llx, caller specified: 0x%llx",
            (NvU64)sizeof(cb->nvKmsParams), nvKmsParamsSize);
        goto fail;
    }

    status = nvkms_copyin(&cb->nvKmsParams,
                          nvKmsParamsUser, sizeof(cb->nvKmsParams));
    if (status != 0) {
        nvKmsKapiLogDebug(
            "NVKMS private memory import parameters could not be read from "
            "userspace");
        goto fail;
    }

    cb->device = device;
    cb->proc = proc;
    cb->data = data;

    /* RM will call ChannelEventHandler with 'cb' as its first argument. */
    cb->rmCallback.func = ChannelEventHandler;
    cb->rmCallback.arg = cb;

    cb->hCallback = nvGenerateUnixRmHandle(&device->handleAllocator);
    if (cb->hCallback == 0x0) {
        nvKmsKapiLogDeviceDebug(device,
            "Failed to allocate event callback handle");
        goto fail;
    }

    /* Allocate the RM event object as a child of the user's channel. */
    eventParams.hParentClient = cb->nvKmsParams.hClient;
    eventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
    eventParams.notifyIndex = 0;
    eventParams.data = NV_PTR_TO_NvP64(&cb->rmCallback);

    ret = nvRmApiAlloc(device->hRmClient,
                       cb->nvKmsParams.hChannel,
                       cb->hCallback,
                       NV01_EVENT_KERNEL_CALLBACK_EX,
                       &eventParams);
    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(device, "Failed to allocate event callback");
        /* Handle was never bound to an RM object; just return it. */
        nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback);
        goto fail;
    }

    return cb;

fail:
    /* nvKmsKapiFree() is reached with cb == NULL on early failures. */
    nvKmsKapiFree(cb);
    return NULL;
}
/*
 * Tear down an event object created by nvKmsKapiAllocateChannelEvent():
 * free the RM event object, return its handle to the allocator, and
 * release the bookkeeping struct. Safe to call with NULL arguments.
 */
void nvKmsKapiFreeChannelEvent
(
    struct NvKmsKapiDevice *device,
    struct NvKmsKapiChannelEvent *cb
)
{
    if (device == NULL || cb == NULL) {
        return;
    }

    /* Destroy the RM object before recycling its handle. */
    nvRmApiFree(device->hRmClient,
                device->hRmClient,
                cb->hCallback);
    nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback);

    nvKmsKapiFree(cb);
}

View File

@@ -0,0 +1,227 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-modeset-os-interface.h"
#include "nvkms-api.h"
#include "nvkms-sync.h"
#include "nvkms-rmapi.h"
#include "nvkms-kapi-notifiers.h"
#define NVKMS_KAPI_MAX_NOTIFIERS \
(NVKMS_KAPI_MAX_HEADS * \
NVKMS_MAX_LAYERS_PER_HEAD * \
NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER)
/*
 * Release the device's shared notifier surface in reverse order of
 * acquisition: NVKMS surface registration, then the CPU mapping, then
 * the RM memory object and its handle. Each step is guarded by its own
 * "was it acquired?" check, so this is safe to call on a partially
 * constructed notifier (it is the failure path of
 * nvKmsKapiAllocateNotifiers()).
 */
void nvKmsKapiFreeNotifiers(struct NvKmsKapiDevice *device)
{
    if (device->notifier.hKmsHandle != 0) {
        struct NvKmsUnregisterSurfaceParams paramsUnreg = { };
        NvBool status;

        paramsUnreg.request.deviceHandle = device->hKmsDevice;
        paramsUnreg.request.surfaceHandle = device->notifier.hKmsHandle;

        status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                       NVKMS_IOCTL_UNREGISTER_SURFACE,
                                       &paramsUnreg, sizeof(paramsUnreg));
        if (!status) {
            /* Log and continue: remaining resources must still be freed. */
            nvKmsKapiLogDeviceDebug(
                device,
                "NVKMS_IOCTL_UNREGISTER_SURFACE failed");
        }
        device->notifier.hKmsHandle = 0;
    }

    if (device->notifier.mapped) {
        NV_STATUS status;
        status = nvRmApiUnmapMemory(device->hRmClient,
                                    device->hRmSubDevice,
                                    device->notifier.hRmHandle,
                                    device->notifier.pLinearAddress,
                                    0);
        if (status != NV_OK) {
            nvKmsKapiLogDeviceDebug(
                device,
                "UnmapMemory failed with error code 0x%08x",
                status);
        }
        device->notifier.mapped = NV_FALSE;
    }

    if (device->notifier.hRmHandle != 0) {
        NvU32 status;
        status = nvRmApiFree(device->hRmClient,
                             device->hRmDevice,
                             device->notifier.hRmHandle);
        if (status != NVOS_STATUS_SUCCESS) {
            nvKmsKapiLogDeviceDebug(
                device,
                "RmFree failed with error code 0x%08x",
                status);
        }
        nvFreeUnixRmHandle(&device->handleAllocator, device->notifier.hRmHandle);
        device->notifier.hRmHandle = 0;
    }
}
/*
 * Reset one (head, layer, index) notifier slot in the CPU-mapped
 * notifier surface to its NOT_BEGUN state. The overlay flag selects the
 * overlay notifier layout for NVKMS_OVERLAY_LAYER.
 */
static void InitNotifier(struct NvKmsKapiDevice *device,
                         NvU32 head, NvU32 layer, NvU32 index)
{
    nvKmsResetNotifier(device->notifier.format,
                       (layer == NVKMS_OVERLAY_LAYER),
                       NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index),
                       device->notifier.pLinearAddress);
}
#define NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE 0x1000
/*
 * Build the device's shared notifier surface: allocate one page of
 * video or system memory (per 'inVideoMemory'), map it for CPU access,
 * register it with NVKMS as an I8 NISO surface, and reset every
 * (head, layer, index) notifier slot. Returns NV_TRUE on success; on
 * any failure, nvKmsKapiFreeNotifiers() unwinds whatever was acquired
 * and NV_FALSE is returned.
 */
NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device,
                                  NvBool inVideoMemory)
{
    struct NvKmsRegisterSurfaceParams surfParams = {};
    NV_STATUS status = 0;
    NvU8 compressible = 0;
    NvBool ret;

    /* The fixed-size surface must hold every notifier slot. */
    ct_assert((NVKMS_KAPI_MAX_NOTIFIERS * NVKMS_KAPI_NOTIFIER_SIZE) <=
              (NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE));
    ct_assert(NVKMS_KAPI_NOTIFIER_SIZE >= sizeof(NvNotification));

    /* Slot size must cover the notifier layout actually in use. */
    nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >=
             nvKmsSizeOfNotifier(device->notifier.format, TRUE /* overlay */));

    nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >=
             nvKmsSizeOfNotifier(device->notifier.format, FALSE /* overlay */));

    device->notifier.hRmHandle =
        nvGenerateUnixRmHandle(&device->handleAllocator);

    if (device->notifier.hRmHandle == 0x0) {
        nvKmsKapiLogDeviceDebug(
            device,
            "nvGenerateUnixRmHandle() failed");
        return NV_FALSE;
    }

    if (inVideoMemory) {
        ret = nvKmsKapiAllocateVideoMemory(device,
                                           device->notifier.hRmHandle,
                                           NvKmsSurfaceMemoryLayoutPitch,
                                           NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE,
                                           NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER,
                                           &compressible);
    } else {
        ret = nvKmsKapiAllocateSystemMemory(device,
                                            device->notifier.hRmHandle,
                                            NvKmsSurfaceMemoryLayoutPitch,
                                            NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE,
                                            NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER,
                                            &compressible);
    }

    if (!ret) {
        /*
         * The handle was never bound to an allocation, so free it here
         * rather than relying on the 'failed' unwind path.
         */
        nvFreeUnixRmHandle(&device->handleAllocator, device->notifier.hRmHandle);
        device->notifier.hRmHandle = 0x0;
        goto failed;
    }

    status = nvRmApiMapMemory(device->hRmClient,
                              device->hRmSubDevice,
                              device->notifier.hRmHandle,
                              0,
                              NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE,
                              &device->notifier.pLinearAddress,
                              0);

    if (status != NV_OK) {
        nvKmsKapiLogDeviceDebug(
            device,
            "MapMemory failed with error code 0x%08x",
            status);
        goto failed;
    }

    device->notifier.mapped = NV_TRUE;

    /* Register the memory with NVKMS as a 1-pixel-high I8 NISO surface. */
    surfParams.request.deviceHandle = device->hKmsDevice;
    surfParams.request.useFd = FALSE;
    surfParams.request.rmClient = device->hRmClient;

    surfParams.request.widthInPixels = NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE;
    surfParams.request.heightInPixels = 1;

    surfParams.request.layout = NvKmsSurfaceMemoryLayoutPitch;
    surfParams.request.format = NvKmsSurfaceMemoryFormatI8;

    surfParams.request.log2GobsPerBlockY = 0;

    surfParams.request.isoType = NVKMS_MEMORY_NISO;

    surfParams.request.planes[0].u.rmObject = device->notifier.hRmHandle;
    surfParams.request.planes[0].pitch = NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE;
    surfParams.request.planes[0].rmObjectSizeInBytes =
        NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE;

    if (!nvkms_ioctl_from_kapi(device->pKmsOpen,
                               NVKMS_IOCTL_REGISTER_SURFACE,
                               &surfParams, sizeof(surfParams))) {
        nvKmsKapiLogDeviceDebug(
            device,
            "NVKMS_IOCTL_REGISTER_SURFACE failed");
        goto failed;
    }

    device->notifier.hKmsHandle = surfParams.reply.surfaceHandle;

    /* Init Notifiers */
    {
        NvU32 head;
        for (head = 0; head < device->numHeads; head++) {
            NvU32 layer;
            for (layer = 0; layer < NVKMS_MAX_LAYERS_PER_HEAD; layer++) {
                NvU32 index;
                for (index = 0;
                     index < NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; index++) {
                    InitNotifier(device, head, layer, index);
                }
            }
        }
    }

    return NV_TRUE;

failed:
    nvKmsKapiFreeNotifiers(device);
    return NV_FALSE;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,132 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-format.h"
#include "nv_common_utils.h"
#include "nvctassert.h"
#include <stddef.h>
/*
 * Designated-initializer helper for packed RGB formats: depth is the
 * number of meaningful bits per pixel; bitsPerPixel is derived from
 * bytesPerPixel. The entry is indexed by its format enum value.
 */
#define RGB_ENTRY(_format, _depth, _bytesPerPixel)                  \
    [NvKmsSurfaceMemoryFormat##_format] = {                         \
        .format = NvKmsSurfaceMemoryFormat##_format,                \
        .name = #_format,                                           \
        .depth = _depth,                                            \
        .isYUV = NV_FALSE,                                          \
        .numPlanes = 1,                                             \
        {                                                           \
            .rgb = {                                                \
                .bytesPerPixel = _bytesPerPixel,                    \
                .bitsPerPixel = _bytesPerPixel * 8,                 \
            },                                                      \
        },                                                          \
    }

/*
 * Designated-initializer helper for YUV formats; decimation factors
 * describe chroma subsampling (e.g. 2,2 for 4:2:0), and storage bits
 * give the in-memory container size per component.
 */
#define YUV_ENTRY(_format,                                          \
                  _depth,                                           \
                  _numPlanes,                                       \
                  _depthPerComponent,                               \
                  _storageBitsPerComponent,                         \
                  _horizChromaDecimationFactor,                     \
                  _vertChromaDecimationFactor)                      \
    [NvKmsSurfaceMemoryFormat##_format] = {                         \
        .format = NvKmsSurfaceMemoryFormat##_format,                \
        .name = #_format,                                           \
        .depth = _depth,                                            \
        .isYUV = NV_TRUE,                                           \
        .numPlanes = _numPlanes,                                    \
        {                                                           \
            .yuv = {                                                \
                .depthPerComponent = _depthPerComponent,            \
                .storageBitsPerComponent = _storageBitsPerComponent, \
                .horizChromaDecimationFactor = _horizChromaDecimationFactor, \
                .vertChromaDecimationFactor = _vertChromaDecimationFactor, \
            },                                                      \
        },                                                          \
    }
/* Zero-initialized sentinel returned for out-of-range format values. */
static const NvKmsSurfaceMemoryFormatInfo nvKmsEmptyFormatInfo;

/*
 * For 10/12-bit YUV formats, each component is packed in a 16-bit container in
 * memory, and fetched by display HW as such.
 */
static const NvKmsSurfaceMemoryFormatInfo nvKmsSurfaceMemoryFormatInfo[] = {
    /* Packed RGB formats: (format, depth, bytesPerPixel). */
    RGB_ENTRY(I8, 8, 1),
    RGB_ENTRY(A1R5G5B5, 16, 2),
    RGB_ENTRY(X1R5G5B5, 15, 2),
    RGB_ENTRY(R5G6B5, 16, 2),
    RGB_ENTRY(A8R8G8B8, 32, 4),
    RGB_ENTRY(X8R8G8B8, 24, 4),
    RGB_ENTRY(A2B10G10R10, 32, 4),
    RGB_ENTRY(X2B10G10R10, 30, 4),
    RGB_ENTRY(A8B8G8R8, 32, 4),
    RGB_ENTRY(X8B8G8R8, 24, 4),
    RGB_ENTRY(RF16GF16BF16AF16, 64, 8),
    RGB_ENTRY(R16G16B16A16, 64, 8),
    RGB_ENTRY(RF32GF32BF32AF32, 128, 16),
    /*
     * YUV formats: (format, depth, numPlanes, depthPerComponent,
     * storageBitsPerComponent, horizDecimation, vertDecimation).
     */
    YUV_ENTRY(Y8_U8__Y8_V8_N422, 16, 1, 8, 8, 2, 1),
    YUV_ENTRY(U8_Y8__V8_Y8_N422, 16, 1, 8, 8, 2, 1),
    YUV_ENTRY(Y8___U8V8_N444, 24, 2, 8, 8, 1, 1),
    YUV_ENTRY(Y8___V8U8_N444, 24, 2, 8, 8, 1, 1),
    YUV_ENTRY(Y8___U8V8_N422, 16, 2, 8, 8, 2, 1),
    YUV_ENTRY(Y8___V8U8_N422, 16, 2, 8, 8, 2, 1),
    YUV_ENTRY(Y8___U8V8_N420, 12, 2, 8, 8, 2, 2),
    YUV_ENTRY(Y8___V8U8_N420, 12, 2, 8, 8, 2, 2),
    YUV_ENTRY(Y10___U10V10_N444, 30, 2, 10, 16, 1, 1),
    YUV_ENTRY(Y10___V10U10_N444, 30, 2, 10, 16, 1, 1),
    YUV_ENTRY(Y10___U10V10_N422, 20, 2, 10, 16, 2, 1),
    YUV_ENTRY(Y10___V10U10_N422, 20, 2, 10, 16, 2, 1),
    YUV_ENTRY(Y10___U10V10_N420, 15, 2, 10, 16, 2, 2),
    YUV_ENTRY(Y10___V10U10_N420, 15, 2, 10, 16, 2, 2),
    YUV_ENTRY(Y12___U12V12_N444, 36, 2, 12, 16, 1, 1),
    YUV_ENTRY(Y12___V12U12_N444, 36, 2, 12, 16, 1, 1),
    YUV_ENTRY(Y12___U12V12_N422, 24, 2, 12, 16, 2, 1),
    YUV_ENTRY(Y12___V12U12_N422, 24, 2, 12, 16, 2, 1),
    YUV_ENTRY(Y12___U12V12_N420, 18, 2, 12, 16, 2, 2),
    YUV_ENTRY(Y12___V12U12_N420, 18, 2, 12, 16, 2, 2),
    YUV_ENTRY(Y8___U8___V8_N444, 24, 3, 8, 8, 1, 1),
    YUV_ENTRY(Y8___U8___V8_N420, 12, 3, 8, 8, 2, 2),
};
ct_assert(ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo) ==
(NvKmsSurfaceMemoryFormatMax + 1));
const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo(
const enum NvKmsSurfaceMemoryFormat format)
{
if (format >= ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo)) {
return &nvKmsEmptyFormatInfo;
}
return &nvKmsSurfaceMemoryFormatInfo[format];
}
/*
 * Return the human-readable name of 'format', or NULL when the format
 * is unknown (the lookup then yields the sentinel entry, whose name
 * field is NULL).
 */
const char *nvKmsSurfaceMemoryFormatToString(
    const enum NvKmsSurfaceMemoryFormat format)
{
    const NvKmsSurfaceMemoryFormatInfo *info =
        nvKmsGetSurfaceMemoryFormatInfo(format);

    if (info == NULL) {
        return NULL;
    }

    return info->name;
}

View File

@@ -0,0 +1,377 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvkms-sync.h>
#include <nvmisc.h>
#include <class/cl917c.h> /* NV_DISP_BASE_NOTIFIER_1, NV_DISP_NOTIFICATION_2 */
#include <class/clc37d.h> /* NV_DISP_NOTIFIER */
/*
* HW will never write 1 to lower 32bits of timestamp
*/
#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID 1
/*
* Higher 32bits of timestamp will be 0 only during first ~4sec of
* boot. So for practical purposes, we can consider 0 as invalid.
*/
#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID 0
/*
 * Read the 64-bit timestamp out of a notifier that HW has begun or
 * finished writing. HW performs 4-byte writes, so the lo and hi words
 * may not yet both be valid; poll (bounded to 100 iterations) until
 * both differ from their reset/invalid sentinels, then publish the
 * combined value via out->timeStamp/timeStampValid. If the poll budget
 * is exhausted, timeStampValid is left unchanged.
 */
static void GetNotifierTimeStamp(volatile const NvU32 *notif,
                                 NvU32 timeStampLoIdx,
                                 NvU32 timeStampHiIdx,
                                 struct nvKmsParsedNotifier *out)
{
    NvU32 lo, hi;
    NvU32 pollCount = 0;

    /*
     * Caller of ParseNotifier() is expected to poll for notifier
     * status to become BEGUN/FINISHED for valid timestamp.
     */
    if (out->status == NVKMS_NOTIFIER_STATUS_NOT_BEGUN) {
        return;
    }

    /*
     * HW does 4B writes to notifier, so poll till both timestampLo
     * and timestampHi bytes become valid.
     */
    do {
        lo = notif[timeStampLoIdx];
        hi = notif[timeStampHiIdx];

        if ((lo != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID) &&
            (hi != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID)) {
            out->timeStamp = (NvU64)lo | ((NvU64)hi << 32);
            out->timeStampValid = NV_TRUE;
            break;
        }

        /* Give up after a bounded number of reads rather than spin forever. */
        if (++pollCount >= 100) {
            break;
        }
    } while (1);
}
/*
 * Reset a legacy-format notifier to NOT_BEGUN. Overlay notifiers also
 * carry a timestamp, whose low word is seeded with the invalid sentinel
 * so GetNotifierTimeStamp() can tell when HW has written it.
 */
static void ResetNotifierLegacy(NvBool overlay, volatile void *in)
{
    volatile NvU32 *notif = in;

    if (overlay) {
        notif[NV_DISP_NOTIFICATION_2_INFO16_3] =
            DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN);
        notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] =
            NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID;
    } else {
        /* Legacy base notifiers have no usable timestamp to reset. */
        notif[NV_DISP_BASE_NOTIFIER_1__0] =
            DRF_DEF(_DISP, _BASE_NOTIFIER_1__0, _STATUS, _NOT_BEGUN);
    }
}
/*
 * Reset a four-word (NV_DISP_NOTIFICATION_2) notifier: NOT_BEGUN status
 * plus the invalid-timestamp sentinel in the low timestamp word.
 */
static void ResetNotifierFourWord(volatile void *in)
{
    volatile NvU32 *notif = in;

    notif[NV_DISP_NOTIFICATION_2_INFO16_3] =
        DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN);
    notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] =
        NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID;
}
/*
 * Reset a four-word nvdisplay (NV_DISP_NOTIFIER) notifier: NOT_BEGUN
 * status plus the invalid-timestamp sentinel in word 2.
 */
static void ResetNotifierFourWordNVDisplay(volatile void *in)
{
    volatile NvU32 *notif = in;

    notif[NV_DISP_NOTIFIER__0] =
        DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN);
    notif[NV_DISP_NOTIFIER__2] =
        NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID;
}
/*
 * Reset the 'index'-th notifier of the given format within the surface
 * at 'base' to its NOT_BEGUN state, dispatching to the format-specific
 * reset helper. The per-slot stride is the format's notifier size.
 */
void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
                        NvU32 index, void *base)
{
    const NvU32 stride = nvKmsSizeOfNotifier(format, overlay);
    void *pNotifier = (char *)base + (stride * index);

    switch (format) {
    case NVKMS_NISO_FORMAT_LEGACY:
        ResetNotifierLegacy(overlay, pNotifier);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD:
        ResetNotifierFourWord(pNotifier);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY:
        ResetNotifierFourWordNVDisplay(pNotifier);
        break;
    }
}
/*
 * Decode a legacy-format notifier into 'out'. Overlay notifiers use the
 * NV_DISP_NOTIFICATION_2 layout (status, present count, and a 64-bit
 * timestamp fetched via GetNotifierTimeStamp()); base notifiers use
 * NV_DISP_BASE_NOTIFIER_1 (status and present count only).
 */
static void ParseNotifierLegacy(NvBool overlay, volatile const void *in,
                                struct nvKmsParsedNotifier *out)
{
    volatile const NvU32 *notif = in;

    if (overlay) {
        NvU32 notif3;

        /* Read this once since it may be in video memory and we need multiple
         * fields */
        notif3 = notif[NV_DISP_NOTIFICATION_2_INFO16_3];

        switch(DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, notif3)) {
        case NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN:
            out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN;
            break;
        case NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN:
            out->status = NVKMS_NOTIFIER_STATUS_BEGUN;
            break;
        case NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED:
            out->status = NVKMS_NOTIFIER_STATUS_FINISHED;
            break;
        }

        out->presentCount =
            DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, notif3);

        /* Must run after out->status is set; it early-outs on NOT_BEGUN. */
        GetNotifierTimeStamp(notif,
                             NV_DISP_NOTIFICATION_2_TIME_STAMP_0,
                             NV_DISP_NOTIFICATION_2_TIME_STAMP_1,
                             out);
    } else {
        NvU32 notif0;

        /* There's a timestamp available in this notifier, but it's a weird
         * 14-bit "audit timestamp" that's not useful for us. */
        out->timeStampValid = NV_FALSE;

        /* Read this once since it may be in video memory and we need multiple
         * fields */
        notif0 = notif[NV_DISP_BASE_NOTIFIER_1__0];

        switch(DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _STATUS, notif0)) {
        case NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN:
            out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN;
            break;
        case NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN:
            out->status = NVKMS_NOTIFIER_STATUS_BEGUN;
            break;
        case NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED:
            out->status = NVKMS_NOTIFIER_STATUS_FINISHED;
            break;
        }

        out->presentCount =
            DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _PRESENTATION_COUNT, notif0);
    }
}
/*
 * Parse a FOUR_WORD-format notifier into *out.
 *
 * The status/present-count word is read exactly once (it may reside in video
 * memory), then decoded.  out->status is left unmodified if the hardware
 * reports a status value outside the three known states.
 */
static void ParseNotifierFourWord(const void *in,
                                  struct nvKmsParsedNotifier *out)
{
    volatile const NvU32 *words = in;

    /* Single volatile read: multiple fields are decoded from it below. */
    const NvU32 statusWord = words[NV_DISP_NOTIFICATION_2_INFO16_3];
    const NvU32 hwStatus =
        DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, statusWord);

    if (hwStatus == NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN) {
        out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN;
    } else if (hwStatus == NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN) {
        out->status = NVKMS_NOTIFIER_STATUS_BEGUN;
    } else if (hwStatus == NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED) {
        out->status = NVKMS_NOTIFIER_STATUS_FINISHED;
    }

    out->presentCount =
        DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, statusWord);

    GetNotifierTimeStamp(words,
                         NV_DISP_NOTIFICATION_2_TIME_STAMP_0,
                         NV_DISP_NOTIFICATION_2_TIME_STAMP_1,
                         out);
}
/*
 * Parse a FOUR_WORD_NVDISPLAY-format notifier into *out.
 *
 * The status/present-count word is read exactly once (it may reside in video
 * memory), then decoded.  out->status is left unmodified if the hardware
 * reports a status value outside the three known states.
 */
static void ParseNotifierFourWordNVDisplay(const void *in,
                                           struct nvKmsParsedNotifier *out)
{
    volatile const NvU32 *words = in;

    /* Single volatile read: multiple fields are decoded from it below. */
    const NvU32 statusWord = words[NV_DISP_NOTIFIER__0];
    const NvU32 hwStatus = DRF_VAL(_DISP, _NOTIFIER__0, _STATUS, statusWord);

    if (hwStatus == NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN) {
        out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN;
    } else if (hwStatus == NV_DISP_NOTIFIER__0_STATUS_BEGUN) {
        out->status = NVKMS_NOTIFIER_STATUS_BEGUN;
    } else if (hwStatus == NV_DISP_NOTIFIER__0_STATUS_FINISHED) {
        out->status = NVKMS_NOTIFIER_STATUS_FINISHED;
    }

    out->presentCount =
        DRF_VAL(_DISP, _NOTIFIER__0, _PRESENT_COUNT, statusWord);

    GetNotifierTimeStamp(words,
                         NV_DISP_NOTIFIER__2,
                         NV_DISP_NOTIFIER__3,
                         out);
}
/*
 * Parse the index'th notifier of the given format within 'base' into *out.
 *
 * The byte offset of the requested notifier is derived from the per-format
 * notifier size, then the format-specific parse helper is dispatched.
 */
void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
                        NvU32 index, const void *base,
                        struct nvKmsParsedNotifier *out)
{
    const NvU32 byteOffset = nvKmsSizeOfNotifier(format, overlay) * index;
    const void *notif = (const char *)base + byteOffset;

    switch (format) {
    case NVKMS_NISO_FORMAT_LEGACY:
        ParseNotifierLegacy(overlay, notif, out);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD:
        ParseNotifierFourWord(notif, out);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY:
        ParseNotifierFourWordNVDisplay(notif, out);
        break;
    }
}
/*
 * Return the index (in 32-bit words, as used by the Reset/ParseSemaphore
 * helpers below) of the semaphore payload for the given NISO format.
 * LEGACY — and any unrecognized format — uses the first word.
 */
NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format)
{
    if (format == NVKMS_NISO_FORMAT_FOUR_WORD) {
        return NV_DISP_NOTIFICATION_2_INFO32_2;
    }
    if (format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY) {
        return NV_DISP_NOTIFIER__0;
    }
    return 0;
}
/* Write 'payload' into a LEGACY-format semaphore (the payload is its only
 * word).  'in' may point into video memory, hence volatile. */
static void ResetSemaphoreLegacy(volatile void *in, NvU32 payload)
{
    volatile NvU32 *words = in;
    words[0] = payload;
}
/* Write 'payload' into a FOUR_WORD-format semaphore.  'in' may point into
 * video memory, hence volatile. */
static void ResetSemaphoreFourWord(volatile void *in, NvU32 payload)
{
    volatile NvU32 *words = in;
    words[NV_DISP_NOTIFICATION_2_INFO32_2] = payload;
}
/* Write 'payload' into a FOUR_WORD_NVDISPLAY-format semaphore.  'in' may
 * point into video memory, hence volatile. */
static void ResetSemaphoreFourWordNVDisplay(volatile void *in, NvU32 payload)
{
    volatile NvU32 *words = in;
    words[NV_DISP_NOTIFIER__0] = payload;
}
/*
 * Write 'payload' into the index'th semaphore of the given format within
 * 'base', dispatching to the format-specific helper.
 */
void nvKmsResetSemaphore(enum NvKmsNIsoFormat format,
                         NvU32 index, void *base,
                         NvU32 payload)
{
    const NvU32 byteOffset = nvKmsSizeOfSemaphore(format) * index;
    void *sema = (char *)base + byteOffset;

    switch (format) {
    case NVKMS_NISO_FORMAT_LEGACY:
        ResetSemaphoreLegacy(sema, payload);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD:
        ResetSemaphoreFourWord(sema, payload);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY:
        ResetSemaphoreFourWordNVDisplay(sema, payload);
        break;
    }
}
/* Read the payload word of a LEGACY-format semaphore. */
static NvU32 ParseSemaphoreLegacy(const volatile void *in)
{
    const volatile NvU32 *words = in;
    return words[0];
}
/* Read the payload word of a FOUR_WORD-format semaphore. */
static NvU32 ParseSemaphoreFourWord(const volatile void *in)
{
    const volatile NvU32 *words = in;
    return words[NV_DISP_NOTIFICATION_2_INFO32_2];
}
/* Read the payload word of a FOUR_WORD_NVDISPLAY-format semaphore. */
static NvU32 ParseSemaphoreFourWordNVDisplay(const volatile void *in)
{
    const volatile NvU32 *words = in;
    return words[NV_DISP_NOTIFIER__0];
}
/*
 * Read the payload of the index'th semaphore of the given format within
 * 'base' and store it in out->payload.  An unrecognized format yields a
 * payload of 0.
 */
void nvKmsParseSemaphore(enum NvKmsNIsoFormat format,
                         NvU32 index, const void *base,
                         struct nvKmsParsedSemaphore *out)
{
    const NvU32 byteOffset = nvKmsSizeOfSemaphore(format) * index;
    const void *sema = (const char *)base + byteOffset;
    NvU32 payload = 0;

    switch (format) {
    case NVKMS_NISO_FORMAT_LEGACY:
        payload = ParseSemaphoreLegacy(sema);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD:
        payload = ParseSemaphoreFourWord(sema);
        break;
    case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY:
        payload = ParseSemaphoreFourWordNVDisplay(sema);
        break;
    }

    out->payload = payload;
}

View File

@@ -0,0 +1,363 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*!
* Define the entry points which the NVKMS kernel interface layer
* provides to core NVKMS.
*/
#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_)
#define _NVIDIA_MODESET_OS_INTERFACE_H_
#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
#include <linux/stddef.h> /* size_t */
#else
#include <stddef.h> /* size_t */
#endif
#include "nvtypes.h" /* NvU8 */
#include "nvkms.h"
#include "nv_stdarg.h"
/*
 * Syncpoint operations dispatched through nvkms_syncpt_op().  Each operation
 * reads/writes the correspondingly named member of NvKmsSyncPtOpParams
 * (e.g. NVKMS_SYNCPT_OP_ALLOC uses the 'alloc' member).
 */
enum NvKmsSyncPtOp {
    NVKMS_SYNCPT_OP_ALLOC,
    NVKMS_SYNCPT_OP_GET,
    NVKMS_SYNCPT_OP_PUT,
    NVKMS_SYNCPT_OP_INCR_MAX,
    NVKMS_SYNCPT_OP_CPU_INCR,
    NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
    NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD,
    NVKMS_SYNCPT_OP_READ_MINVAL,
    NVKMS_SYNCPT_OP_READ_MAXVAL,
    NVKMS_SYNCPT_OP_SET_MIN_EQ_MAX,
    NVKMS_SYNCPT_OP_SET_MAXVAL,
};
/*
 * Parameter block for nvkms_syncpt_op().  One member per NvKmsSyncPtOp
 * value; the caller fills in the member named after the requested op.
 * Fields are tagged in/out relative to the caller.
 */
typedef struct {

    struct {
        const char *syncpt_name;  /* in */
        NvU32 id;                 /* out */
    } alloc;

    struct {
        NvU32 id;                 /* in */
    } get;

    struct {
        NvU32 id;                 /* in */
    } put;

    struct {
        NvU32 id;                 /* in */
        NvU32 incr;               /* in */
        NvU32 value;              /* out */
    } incr_max;

    struct {
        NvU32 id;                 /* in */
    } cpu_incr;

    struct {
        NvS32 fd;                 /* in */
        NvU32 id;                 /* out */
        NvU32 thresh;             /* out */
    } fd_to_id_and_thresh;

    struct {
        NvU32 id;                 /* in */
        NvU32 thresh;             /* in */
        NvS32 fd;                 /* out */
    } id_and_thresh_to_fd;

    struct {
        NvU32 id;                 /* in */
        NvU32 minval;             /* out */
    } read_minval;

    struct {
        NvU32 id;                 /* in */
        NvU32 maxval;             /* out */
    } read_maxval;

    struct {
        NvU32 id;                 /* in */
    } set_min_eq_max;

    struct {
        NvU32 id;                 /* in */
        NvU32 val;                /* in */
    } set_maxval;

} NvKmsSyncPtOpParams;
/* Hand an RM "ops" block to the resman layer for execution. */
void nvkms_call_rm   (void *ops);

/*
 * Memory allocation and libc-style helpers.  Semantics presumably mirror
 * their libc namesakes (memset/memcpy/memmove/memcmp/strlen/strcmp/strncpy)
 * -- confirm against the kernel interface layer implementation.
 */
void* nvkms_alloc    (size_t size,
                      NvBool zero);
void  nvkms_free     (void *ptr,
                      size_t size);
void* nvkms_memset   (void *ptr,
                      NvU8 c,
                      size_t size);
void* nvkms_memcpy   (void *dest,
                      const void *src,
                      size_t n);
void* nvkms_memmove  (void *dest,
                      const void *src,
                      size_t n);
int   nvkms_memcmp   (const void *s1,
                      const void *s2,
                      size_t n);
size_t nvkms_strlen  (const char *s);
int   nvkms_strcmp   (const char *s1,
                      const char *s2);
char* nvkms_strncpy  (char *dest,
                      const char *src,
                      size_t n);

/* Scheduling and time services. */
void  nvkms_usleep   (NvU64 usec);
NvU64 nvkms_get_usec (void);

/*
 * Copy between kernel memory and a user-space address 'uaddr'.
 * NOTE(review): return convention (0 on success?) is not visible here --
 * confirm against the implementation before relying on it.
 */
int   nvkms_copyin   (void *kptr,
                      NvU64 uaddr,
                      size_t n);
int   nvkms_copyout  (NvU64 uaddr,
                      const void *kptr,
                      size_t n);

void  nvkms_yield    (void);
void  nvkms_dump_stack(void);

/* Perform a syncpt operation; params member selection is governed by 'op'. */
NvBool nvkms_syncpt_op(enum NvKmsSyncPtOp op,
                       NvKmsSyncPtOpParams *params);

/* printf-style formatting into a caller-provided buffer. */
int   nvkms_snprintf (char *str,
                      size_t size,
                      const char *format, ...)
    __attribute__((format (printf, 3, 4)));

int   nvkms_vsnprintf(char *str,
                      size_t size,
                      const char *format,
                      va_list ap);

/* Severity levels accepted by nvkms_log(). */
#define NVKMS_LOG_LEVEL_INFO  0
#define NVKMS_LOG_LEVEL_WARN  1
#define NVKMS_LOG_LEVEL_ERROR 2

void  nvkms_log      (const int level,
                      const char *gpuPrefix,
                      const char *msg);
/*!
* Refcounted pointer to an object that may be freed while references still
* exist.
*
* This structure is intended to be used for nvkms timers to refer to objects
* that may be freed while timers with references to the object are still
* pending.
*
* When the owner of an nvkms_ref_ptr is freed, the teardown code should call
* nvkms_free_ref_ptr(). That marks the pointer as invalid so that later calls
* to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than
* the pointer originally passed to nvkms_alloc_ref_ptr().
*/
struct nvkms_ref_ptr;
/*!
* Allocate and initialize a ref_ptr.
*
* The pointer stored in the ref_ptr is initialized to ptr, and its refcount is
* initialized to 1.
*/
struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr);
/*!
* Clear a ref_ptr.
*
* This function sets the pointer stored in the ref_ptr to NULL and drops the
* reference created by nvkms_alloc_ref_ptr(). This function should be called
* when the object pointed to by the ref_ptr is freed.
*
* A caller should make sure that no code that can call nvkms_inc_ref() can
* execute after nvkms_free_ref_ptr() is called.
*/
void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr);
/*!
* Increment the refcount of a ref_ptr.
*
* This function should be used when a pointer to the ref_ptr is stored
* somewhere. For example, when the ref_ptr is used as the argument to
* nvkms_alloc_timer.
*
* This may be called outside of the nvkms_lock, for example by an RM callback.
*/
void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr);
/*!
* Decrement the refcount of a ref_ptr and extract the embedded pointer.
*
* This should be used by code that needs to atomically determine whether the
* object pointed to by the ref_ptr still exists. To prevent the object from
* being destroyed while the current thread is executing, this should be called
* from inside the nvkms_lock.
*/
void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr);
typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32);
typedef struct nvkms_timer_t nvkms_timer_handle_t;
/*!
* Schedule a callback function to be called in the future.
*
* The callback function 'proc' will be called with the arguments
* 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now.
* If usec==0, the callback will be scheduled to be called as soon as
* possible.
*
* The callback function is guaranteed to be called back with the
* nvkms_lock held, and in process context.
*
* Returns an opaque handle, nvkms_timer_handle_t*, or NULL on
* failure. If non-NULL, the caller is responsible for caching the
* handle and eventually calling nvkms_free_timer() to free the
* memory.
*
* The nvkms_lock may be held when nvkms_alloc_timer() is called, but
* the nvkms_lock is not required.
*/
nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc,
void *dataPtr, NvU32 dataU32,
NvU64 usec);
/*!
* Schedule a callback function to be called in the future.
*
* This function is like nvkms_alloc_timer() except that instead of returning a
* pointer to a structure that the caller should free later, the timer will free
* itself after executing the callback function. This is only intended for
* cases where the caller cannot cache the nvkms_alloc_timer() return value.
*/
NvBool
nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc,
struct nvkms_ref_ptr *ref_ptr,
NvU32 dataU32, NvU64 usec);
/*!
* Free the nvkms_timer_t object. If the callback function has not
* yet been called, freeing the nvkms_timer_handle_t will guarantee
* that it is not called.
*
* The nvkms_lock must be held when calling nvkms_free_timer().
*/
void nvkms_free_timer (nvkms_timer_handle_t *handle);
/*!
* Notify the NVKMS kernel interface that the event queue has changed.
*
* \param[in] pOpenKernel This indicates the file descriptor
* ("per-open") of the client whose event queue
* has been updated. This is the pointer
* passed by the kernel interface to nvKmsOpen().
* \param[in] eventsAvailable If TRUE, a new event has been added to the
* event queue. If FALSE, the last event has
* been removed from the event queue.
*/
void
nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel,
NvBool eventsAvailable);
/*!
* Get the "per-open" data (the pointer returned by nvKmsOpen())
* associated with this fd.
*/
void* nvkms_get_per_open_data(int fd);
/*!
* Raise and lower the reference count of the specified GPU.
*/
NvBool nvkms_open_gpu(NvU32 gpuId);
void nvkms_close_gpu(NvU32 gpuId);
/*!
* Enumerate nvidia gpus.
*/
NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info);
/*!
* Availability of write combining support for video memory.
*/
NvBool nvkms_allow_write_combining(void);
/*!
* Checks whether the fd is associated with an nvidia character device.
*/
NvBool nvkms_fd_is_nvidia_chardev(int fd);
/*!
* NVKMS interface for kernel space NVKMS clients like KAPI
*/
struct nvkms_per_open;
struct nvkms_per_open* nvkms_open_from_kapi
(
struct NvKmsKapiDevice *device
);
void nvkms_close_from_kapi(struct nvkms_per_open *popen);
NvBool nvkms_ioctl_from_kapi
(
struct nvkms_per_open *popen,
NvU32 cmd, void *params_address, const size_t params_size
);
/*!
* APIs for locking.
*/
typedef struct nvkms_sema_t nvkms_sema_handle_t;
nvkms_sema_handle_t*
nvkms_sema_alloc (void);
void nvkms_sema_free (nvkms_sema_handle_t *sema);
void nvkms_sema_down (nvkms_sema_handle_t *sema);
void nvkms_sema_up (nvkms_sema_handle_t *sema);
/*!
* APIs to register/unregister backlight device.
*/
struct nvkms_backlight_device;
struct nvkms_backlight_device*
nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
NvU32 current_brightness);
void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd);
#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */

View File

@@ -0,0 +1,90 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_KMS_H__
#define __NV_KMS_H__
#include "nvtypes.h"
#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
#include <linux/stddef.h> /* size_t */
#else
#include <stddef.h> /* size_t */
#endif
#include "nvkms-kapi.h"
/* Opaque per-open ("per-fd") state owned by the kernel interface layer. */
typedef struct nvkms_per_open nvkms_per_open_handle_t;

/* Callback used by procfs read handlers to emit a string. */
typedef void nvkms_procfs_out_string_func_t(void *data,
                                            const char *str);

/* Handler for one procfs file; writes its output via 'outString'. */
typedef void nvkms_procfs_proc_t(void *data,
                                 char *buffer, size_t size,
                                 nvkms_procfs_out_string_func_t *outString);

/* One procfs file: its name and the handler that produces its contents. */
typedef struct {
    const char *name;
    nvkms_procfs_proc_t *func;
} nvkms_procfs_file_t;

/* Whether an NVKMS client lives in user space or in the kernel (e.g. KAPI). */
enum NvKmsClientType {
    NVKMS_CLIENT_USER_SPACE,
    NVKMS_CLIENT_KERNEL_SPACE,
};

/* Core NVKMS entry points called by the kernel interface layer. */
NvBool nvKmsIoctl(
    void *pOpenVoid,
    NvU32 cmd,
    NvU64 paramsAddress,
    const size_t paramSize);

void nvKmsClose(void *pOpenVoid);

void* nvKmsOpen(
    NvU32 pid,
    enum NvKmsClientType clientType,
    nvkms_per_open_handle_t *pOpenKernel);

NvBool nvKmsModuleLoad(void);
void nvKmsModuleUnload(void);

void nvKmsSuspend(NvU32 gpuId);
void nvKmsResume(NvU32 gpuId);

/* Retrieve the table of procfs files NVKMS wants exposed. */
void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles);

void nvKmsKapiHandleEventQueueChange
(
    struct NvKmsKapiDevice *device
);

NvBool nvKmsKapiGetFunctionsTableInternal
(
    struct NvKmsKapiFunctionsTable *funcsTable
);

/* Backlight query/control for the given display. */
NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
#endif /* __NV_KMS_H__ */

View File

@@ -0,0 +1,546 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// This file implements the event sink class, which the DisplayPort library
// uses to notify the driver of display devices being connected or
// disconnected.
#include "dp/nvdp-connector-event-sink.h"
#include "nvdp-connector-event-sink.hpp"
#include "nvkms-types.h"
#include "nvkms-dpy.h"
#include "nvkms-utils.h"
#include "nvkms-vrr.h"
#include "nvkms-attributes.h"
#include "nvkms-private.h"
namespace nvkmsDisplayPort {
// Bind this event sink to the connector whose device events it will report.
ConnectorEventSink::ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo)
    : pConnectorEvo(pConnectorEvo)
{
}
// Return the pDpy on this connector's disp that currently tracks the given
// DP lib device, or NULL if none does (or the connector does not use the DP
// library at all).
static NVDpyEvoPtr FindDpyByDevice(NVConnectorEvoPtr pConnectorEvo,
                                   DisplayPort::Device *device)
{
    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
    NVDpyEvoPtr pDpyEvo;

    if (nvConnectorUsesDPLib(pConnectorEvo)) {
        // Scan all valid dpys for one whose DP lib tracking record points at
        // 'device'.
        FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) {
            if (pDpyEvo->dp.pDpLibDevice &&
                pDpyEvo->dp.pDpLibDevice->device == device) {
                return pDpyEvo;
            }
        }
    }
    return NULL;
}
// Looks for a display that matches the given DP device from
// the list of disconnected dpys.
//
// Returns the first dpy on this connector that is either not tracking a DP
// lib device or whose tracked device is unplugged; NULL if all are in use.
// (pDpLibDevice is currently unused here: a match is by connector only.)
static NVDpyEvoPtr FindMatchingDisconnectedDpy(NVDispEvoPtr pDispEvo,
                                               NVConnectorEvoPtr pConnectorEvo,
                                               NVDPLibDevicePtr pDpLibDevice)
{
    NVDpyEvoPtr pDpyEvo;

    // A match is simply that the display appears on the same connector.
    // DP MST devices are matched by topology address in nvGetDPMSTDpy.
    const NVDpyIdList dpyIdList =
        nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId);

    FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) {
        if (!pDpyEvo->dp.pDpLibDevice || !pDpyEvo->dp.pDpLibDevice->isPlugged) {
            return pDpyEvo;
        }
    }
    return NULL;
}
// Return a printable string form of the device's GUID, or NULL if 'device'
// is NULL or its GUID is all zeroes.
//
// NOTE: the returned pointer refers to a function-local static buffer, so
// the result is only valid until the next call (and is not thread-safe).
const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device)
{
    DisplayPort::GUID guid;

    if (!device) {
        return NULL;
    }

    guid = device->getGUID();

    if (!guid.isGuidZero()) {
        static DisplayPort::GUID::StringBuffer sb;
        guid.toString(sb);
        return sb;
    }
    return NULL;
}
// Copy the device's raw GUID bytes into guidData.
//
// Returns false (leaving guidData untouched) when 'device' is NULL or its
// GUID is all zeroes; returns true on success.
bool nvDPGetDeviceGUID(DisplayPort::Device *device,
                       NvU8 guidData[DPCD_GUID_SIZE])
{
    DisplayPort::GUID guid;

    if (device == NULL) {
        return false;
    }

    guid = device->getGUID();

    if (guid.isGuidZero()) {
        return false;
    }

    nvkms_memcpy((void*)guidData, (void*)guid.data, sizeof(guid.data));
    return true;
}
// Return a printable string form of the device's DP topology address, or
// NULL if 'device' is NULL or its address is empty.
//
// NOTE: the returned pointer refers to a function-local static buffer, so
// the result is only valid until the next call (and is not thread-safe).
static const char *DPGetDevicePortStr(DisplayPort::Device *device,
                                      bool skipLeadingZero)
{
    DisplayPort::Address addr;

    if (!device) {
        return NULL;
    }

    addr = device->getTopologyAddress();

    if (addr.size() > 0) {
        static DisplayPort::Address::StringBuffer sb;
        addr.toString(sb, skipLeadingZero);
        return sb;
    }
    return NULL;
}
// Log a summary of a newly detected DP device: DPCD revision, topology
// address, GUID, connector type, and video/audio sink capabilities.
// Compiles to a no-op unless DEBUG is defined.
static void nvDPPrintDeviceInfo(NVConnectorEvoPtr pConnectorEvo,
                                DisplayPort::Device *device)
{
#if defined(DEBUG)
    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
    const char *connectorType;
    unsigned major, minor;
    const char *tmp;

    device->getDpcdRevision(&major, &minor);

    // Map the DP lib connector type to a printable name.
    switch (device->getConnectorType()) {
    case DisplayPort::connectorDisplayPort:
        connectorType = "DisplayPort";
        break;
    case DisplayPort::connectorHDMI:
        connectorType = "HDMI";
        break;
    case DisplayPort::connectorDVI:
        connectorType = "DVI";
        break;
    case DisplayPort::connectorVGA:
        connectorType = "VGA";
        break;
    default:
        connectorType = "unknown";
        break;
    }

    nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
                 "%s-%d: new DisplayPort %d.%d device detected",
                 NvKmsConnectorTypeString(pConnectorEvo->type),
                 pConnectorEvo->typeIndex, major, minor);

    // Address and GUID are only logged when available.
    tmp = DPGetDevicePortStr(device, false /* skipLeadingZero */);
    if (tmp) {
        nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
                     "  Address: %s", tmp);
    }

    tmp = nvDPGetDeviceGUIDStr(device);
    if (tmp) {
        nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
                     "  GUID: {%s}", tmp);
    }

    nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
                 "  Connector: %s", connectorType);
    nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
                 "  Video: %s", device->isVideoSink() ? "yes" : "no");
    nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
                 "  Audio: %s", device->isAudioSink() ? "yes" : "no");
#endif
}
// If this pDpy is supposed to be a member of an active per-head DP group,
// insert its DP lib device into that group now.  No-op when the connector's
// head is driven by firmware.
static void nvDPAddDeviceToActiveGroup(NVDpyEvoPtr pDpyEvo)
{
    const NVDPLibConnectorRec *pDpLibConnector =
        pDpyEvo->pConnectorEvo->pDpLibConnector;
    const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo;
    NvU32 head;

    // If the device is being driven by the firmware group, then we're just
    // tracking it so that it can be shut down by the modeset path, and we
    // don't have any timing information for it.
    if (pDpLibConnector->headInFirmware) {
        return;
    }

    // Find the (at most one) head whose dpyIdList contains this dpy and add
    // the device to that head's group.
    for (head = 0; head < pDevEvo->numHeads; head++) {
        if (nvDpyIdIsInDpyIdList(pDpyEvo->id,
                                 pDpLibConnector->dpyIdList[head])) {
            pDpLibConnector->pGroup[head]->insert(
                pDpyEvo->dp.pDpLibDevice->device);
            break;
        }
    }
}
// DP lib callback: a new device was detected on this connector.  By the time
// we get this event, the DP lib has done link training and the EDID has been
// read (by the DP lib).
//
// Allocates a tracking record for the device, finds or creates the pDpy that
// should represent it (a dynamic MST dpy, or a matching disconnected SST
// dpy), attaches the two, and sends dpy events.  Non-video-sink devices and
// redundant notifications are ignored.
void ConnectorEventSink::newDevice(DisplayPort::Device *device)
{
    NVDPLibDevicePtr pDpLibDevice = NULL;
    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
    NVDpyEvoPtr pDpyEvo = NULL;
    NvBool dynamicDpyCreated = FALSE;

    // XXX [VM DP MST] Current POR requires we also check/handle:
    // - More than 64 DP dpys on a connector = print error.
    // - More than 127 dpys on a system = print error.

    nvDPPrintDeviceInfo(pConnectorEvo, device);

    // Only add video sink devices.
    if (!device->isVideoSink()) {
        return;
    }

    // Protect against redundant newDevices()
    pDpyEvo = FindDpyByDevice(pConnectorEvo, device);
    if (pDpyEvo) {
        nvAssert(!"Got (redundant) DP Lib newDevice() on known display, "
                  "ignoring.");
        return;
    }

    pDpLibDevice = (NVDPLibDevicePtr)nvCalloc(1, sizeof(*pDpLibDevice));
    if (!pDpLibDevice) {
        goto fail;
    }

    nvAssert(!device->getOwningGroup());

    // XXX For DP MST, we'll want to handle dynamic display IDs.  For now,
    // use the connector's display ID.
    pDpLibDevice->device = device;

    if (device->isMultistream()) {
        // Get a dynamic pDpy for this device based on its bus topology path.
        // This will create one if it doesn't exist.
        pDpyEvo = nvGetDPMSTDpyEvo(
            pConnectorEvo,
            DPGetDevicePortStr(device, true /* skipLeadingZero */),
            &dynamicDpyCreated);
    } else {
        // Look for a (previously) disconnected pDpy that matches this device.
        pDpyEvo = FindMatchingDisconnectedDpy(pDispEvo, pConnectorEvo,
                                              pDpLibDevice);
    }

    if (!pDpyEvo) {
        goto fail;
    }

    nvAssert(pDpyEvo->pConnectorEvo == pConnectorEvo);

    // At this point, the pDpy should no longer be tracking a DP lib device.
    if (pDpyEvo->dp.pDpLibDevice) {
        nvAssert(!"DP Lib should have already called lostDevice() for this DP "
                  "device");
        // Call lost device ourselves, if the DP lib calls this again later,
        // we'll ignore it then.
        lostDevice(pDpyEvo->dp.pDpLibDevice->device);
    }

    nvAssert(device->isPlugged());
    pDpLibDevice->isPlugged = TRUE;

    pDpyEvo->dp.pDpLibDevice = pDpLibDevice;

    // If there's an active group that this pDpy is supposed to be a member of,
    // insert it now.
    nvDPAddDeviceToActiveGroup(pDpyEvo);

    if (dynamicDpyCreated) {
        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED);
    }

    nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);

    return;

fail:
    // On failure nothing was attached to a pDpy, so only the tracking record
    // (if allocated) needs to be released.
    nvAssert(pDpyEvo == NULL);
    nvFree(pDpLibDevice);
}
// DP lib callback: a previously reported device has gone away.  Resets VRR
// enablement if it was active, removes the device from its owning group,
// sends disconnect/change events, and frees the tracking record.
// Unknown devices and non-video sinks are ignored.
void ConnectorEventSink::lostDevice(DisplayPort::Device *device)
{
    NVDpyEvoPtr pDpyEvo;

    // Ignore non-video sink devices.
    if (!device->isVideoSink()) {
        return;
    }

    pDpyEvo = FindDpyByDevice(pConnectorEvo, device);
    if (!pDpyEvo) {
        nvAssert(!"Got DP Lib lostDevice() on unknown display.");
        return;
    }

    NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;
    nvAssert(pDpLibDevice != NULL);

    // Undo VRR enablement on the sink before it disappears.
    if (pDpyEvo->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) {
        device->resetVrrEnablement();
        pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_NONE;
    }

    if (device->getOwningGroup()) {
        device->getOwningGroup()->remove(device);
    }

    // Only send DPY_CHANGED if the plugged state actually transitions.
    if (pDpLibDevice->isPlugged) {
        pDpLibDevice->isPlugged = FALSE;
        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);
    }

    if (device->isMultistream()) {
        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED);
    }

    pDpyEvo->dp.pDpLibDevice = NULL;
    nvFree(pDpLibDevice);
}
// DP lib callback: the given group must be disconnected.  NVKMS currently
// takes no action here.
void ConnectorEventSink::notifyMustDisconnect(DisplayPort::Group *grp)
{
}
// notifyDetectComplete() is called when DP Library has done a full detect on
// the topology.  There is no one-to-one relationship between a long pulse to
// a detectCompleted.
//
// Records that detection has finished so other code can observe it via
// pConnectorEvo->detectComplete.
void ConnectorEventSink::notifyDetectComplete()
{
    pConnectorEvo->detectComplete = TRUE;

    // XXX[DP MST] potentially use this call to notify NV-CONTROL of topology
    // change;
    // issue: not as current as new/lostDevice and may pose sync issues, but
    // less chatty.
}
// DP lib callback: the link bandwidth available to 'dev' changed.  Refresh
// the dpy's cached link configuration (no-op if the device is unknown,
// since FindDpyByDevice() then returns NULL).
void ConnectorEventSink::bandwidthChangeNotification(DisplayPort::Device *dev,
                                                     bool isComplianceMode)
{
    nvDPLibUpdateDpyLinkConfiguration(FindDpyByDevice(pConnectorEvo, dev));
}
// DP lib callback: 'dev' entered (zombied==true) or left (zombied==false)
// the zombie state.  Update group membership and the tracked isPlugged
// state, sending a DPY_CHANGED event only when the plugged state actually
// transitions.
void ConnectorEventSink::notifyZombieStateChange(DisplayPort::Device *dev,
                                                 bool zombied)
{
    NVDpyEvoPtr pDpyEvo = FindDpyByDevice(pConnectorEvo, dev);
    NvBool sendEvent = FALSE;

    if (pDpyEvo == NULL) {
        return;
    }

    NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;

    if (zombied) {
        // Zombied: drop the device from its group and mark it unplugged if
        // the DP lib agrees it is no longer plugged.
        dev->getOwningGroup()->remove(dev);
        if (pDpLibDevice->isPlugged && !dev->isPlugged()) {
            pDpLibDevice->isPlugged = FALSE;
            sendEvent = TRUE;
        }
    } else {
        // Revived: mark it plugged again and restore group membership.
        if (!pDpLibDevice->isPlugged && dev->isPlugged()) {
            pDpLibDevice->isPlugged = TRUE;
            sendEvent = TRUE;
        }
        nvDPAddDeviceToActiveGroup(pDpyEvo);
    }

    if (sendEvent) {
        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);
    }
}
// DP lib callback: the cable-OK state of 'dev' changed.  NVKMS currently
// takes no action here.
void ConnectorEventSink::notifyCableOkStateChange(DisplayPort::Device *dev,
                                                  bool cableOk)
{
}

// DP lib callback: HDCP capability detection for 'dev' completed.  NVKMS
// currently takes no action here.
void ConnectorEventSink::notifyHDCPCapDone(DisplayPort::Device *dev,
                                           bool hdcpCap)
{
}

// DP lib callback: an MCCS event arrived from 'dev'.  NVKMS currently takes
// no action here.
void ConnectorEventSink::notifyMCCSEvent(DisplayPort::Device *dev)
{
}
}; // namespace nvkmsDisplayPort
// The functions below are exported to the rest of nvkms. Declare them outside
// of the 'nvkmsDisplayPort' namespace. Their prototypes in
// nvdp-connector-event-sink.h are declared as extern "C".
// Report whether the DP library currently considers this dpy connected: the
// dpy must be tracking a DP lib device and that device must be plugged.
// The connector must use the DP library.
NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo)
{
    nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));

    NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;

    if (pDpLibDevice == NULL) {
        return FALSE;
    }

    return pDpLibDevice->isPlugged;
}
// Adaptive-Sync is enabled/disabled by setting the MSA_TIMING_PAR_IGNORE_EN
// bit in the DOWNSPREAD_CTRL register (DP spec 1.4a appendix K)
void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head,
NvBool enable)
{
const NVConnectorEvoRec *pConnectorEvo =
pDispEvo->headState[head].pConnectorEvo;
NVDPLibConnectorPtr pDpLibConnector = pConnectorEvo->pDpLibConnector;
DisplayPort::Group *pGroup = pDpLibConnector->pGroup[head];
DisplayPort::Device *dev;
for (dev = pGroup->enumDevices(0); dev != NULL;
dev = pGroup->enumDevices(dev)) {
dev->setIgnoreMSAEnable(enable);
}
}
// Read the link configuration from the connector and stores it in the pDpy so
// it can be sent to clients via NV-CONTROL.  Also generate events if the
// values change.
//
// Safe to call with NULL (no-op).  An unplugged or absent device reports a
// disabled link ("1 lane @ disabled") and UNKNOWN connector type.
void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo)
{
    if (!pDpyEvo) {
        return;
    }

    NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;
    DisplayPort::Device *dev = pDpLibDevice ? pDpLibDevice->device : NULL;
    DisplayPort::Connector *connector =
        pDpyEvo->pConnectorEvo->pDpLibConnector->connector;
    unsigned laneCount;
    NvU64 linkRate;
    enum NvKmsDpyAttributeDisplayportConnectorTypeValue connectorType;
    NvBool sinkIsAudioCapable;

    if (!dev || !pDpLibDevice->isPlugged) {
        // No usable device: report a disabled link.
        linkRate = 0;
        laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1;
        connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN;
        sinkIsAudioCapable = FALSE;
    } else {
        // XXX[AGP]: Can the path down to a single device have a different link
        // configuration from the connector itself?
        connector->getCurrentLinkConfig(laneCount, linkRate);

        // The DisplayPort library multiplies the link rate enum value by
        // 27000000. Convert back to NV-CONTROL's defines.
        linkRate /= 27000000;

        // Translate the DP lib connector type to the NVKMS attribute value.
        switch (pDpLibDevice->device->getConnectorType()) {
        case DisplayPort::connectorDisplayPort:
            connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DISPLAYPORT;
            break;
        case DisplayPort::connectorHDMI:
            connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_HDMI;
            break;
        case DisplayPort::connectorDVI:
            connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DVI;
            break;
        case DisplayPort::connectorVGA:
            connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_VGA;
            break;
        default:
            connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN;
            break;
        }

        sinkIsAudioCapable = pDpLibDevice->device->isAudioSink();
    }

    // The DisplayPort library reports a disabled link as 0 lanes. NV-CONTROL,
    // for historical reasons, uses a setting of "1 lane @ disabled" for a
    // disabled link, so translate to that.
    if (laneCount == 0) {
        linkRate = 0;
        laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1;
    }

    // Update pDpy and send events if anything changed.
    if (laneCount != pDpyEvo->dp.laneCount) {
        pDpyEvo->dp.laneCount = laneCount;
        nvSendDpyAttributeChangedEventEvo(pDpyEvo,
            NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE,
            nvRMLaneCountToNvKms(laneCount));
    }

    if (linkRate != pDpyEvo->dp.linkRate) {
        pDpyEvo->dp.linkRate = linkRate;
        nvSendDpyAttributeChangedEventEvo(pDpyEvo,
            NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE,
            linkRate);
    }

    if (connectorType != pDpyEvo->dp.connectorType) {
        pDpyEvo->dp.connectorType = connectorType;
        nvSendDpyAttributeChangedEventEvo(pDpyEvo,
            NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE,
            connectorType);
    }

    if (sinkIsAudioCapable != pDpyEvo->dp.sinkIsAudioCapable) {
        pDpyEvo->dp.sinkIsAudioCapable = sinkIsAudioCapable;
        nvSendDpyAttributeChangedEventEvo(pDpyEvo,
            NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE,
            sinkIsAudioCapable);
    }
}

View File

@@ -0,0 +1,98 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVDP_CONNECTOR_EVENT_SINK_HPP__
#define __NVDP_CONNECTOR_EVENT_SINK_HPP__
#include <nvtypes.h>
#include <dp_connector.h>
#include "nvdp-evo-interface.hpp"
namespace nvkmsDisplayPort
{
// Receives per-connector event notifications from the DisplayPort library
// (device arrival/loss, detect completion, bandwidth/zombie/cable state
// changes, etc.) on behalf of one NVKMS connector.
class ConnectorEventSink : public DisplayPort::Object,
                           public DisplayPort::Connector::EventSink
{
private:
    // The NVKMS connector this sink reports events for.
    const NVConnectorEvoPtr pConnectorEvo;
public:
    ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo);
    // From DisplayPort::Connector::EventSink
    virtual void newDevice(DisplayPort::Device *dev);
    virtual void lostDevice(DisplayPort::Device *dev);
    virtual void notifyMustDisconnect(DisplayPort::Group *grp);
    virtual void notifyDetectComplete();
    virtual void bandwidthChangeNotification(DisplayPort::Device *dev, bool isComplianceMode);
    virtual void notifyZombieStateChange(DisplayPort::Device *dev, bool zombied);
    virtual void notifyCableOkStateChange(DisplayPort::Device *dev, bool cableOk);
    virtual void notifyHDCPCapDone(DisplayPort::Device *dev, bool hdcpCap);
    virtual void notifyMCCSEvent(DisplayPort::Device *dev);
};
// Return the device's GUID in string form (used to populate
// pDpyEvo->dp.guid.str); NULL if unavailable.
const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device);
// Copy the device's raw GUID bytes into 'guid'; returns whether a GUID was
// available.
bool nvDPGetDeviceGUID(DisplayPort::Device *device, NvU8 guid[DPCD_GUID_SIZE]);
}; // namespace nvkmsDisplayPort
// Per-connector DisplayPort library state.
struct _nv_dplibconnector {
    DisplayPort::Connector *connector;
    nvkmsDisplayPort::EvoInterface *evoInterface;
    nvkmsDisplayPort::ConnectorEventSink *evtSink;
    DisplayPort::MainLink *mainLink;
    DisplayPort::AuxBus *auxBus;
    NvBool isActive;
    // The VBIOS head is actively driving this connector.
    bool headInFirmware;
    NVConnectorEvoRec *pConnectorEvo;
    // Per-head DpLib group, allocated at the time of connector creation:
    // In case of multi-streaming, multiple heads can be attached to single
    // DP connector driving distinct DP streams.
    DisplayPort::Group *pGroup[NVKMS_MAX_HEADS_PER_DISP];
    NVDpyIdList dpyIdList[NVKMS_MAX_HEADS_PER_DISP];
    // Attached heads bitmask
    NvU32 headMask;
    // Connection status plugged/unplugged; gets initialized by
    // Connector::resume() and gets updated by
    // Connector::notifyLongPulse().
    NvBool plugged;
};
// Per-device (per-dpy) DisplayPort library state.
struct _nv_dplibdevice {
    DisplayPort::Device *device;
    NvBool isPlugged;
};
// State describing one DPLib modeset request: the dpys involved and the
// DisplayPort library's modeset parameters.
struct __nv_dplibmodesetstate {
    NVDpyIdList dpyIdList;
    DisplayPort::DpModesetParams modesetParams;
};
#endif // __NVDP_CONNECTOR_EVENT_SINK_HPP__

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,148 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "dp/nvdp-device.h"
#include "nvdp-connector-event-sink.hpp"
#include "dp/nvdp-connector-event-sink.h"
#include "nvkms-types.h"
#include "nvkms-rm.h"
#include "nvkms-dpy.h"
#include "nvctassert.h"
// Set the panel power state for the given DisplayPort dpy.  After powering
// the panel back on, Adaptive-Sync is re-enabled on the driving head when
// needed (see the WAR comment below).
void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on)
{
    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    if (pDpyEvo->dp.pDpLibDevice == NULL) {
        return;
    }

    nvAssert(nvDpyUsesDPLib(pDpyEvo));

    DisplayPort::Device *pDevice = pDpyEvo->dp.pDpLibDevice->device;

    nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
    pDevice->setPanelPowerParams(on, on);

    /*
     * WAR: Some monitors clear the MSA_TIMING_PAR_IGNORE_EN bit in the
     * DOWNSPREAD_CTRL DPCD register after changing power state, which will
     * cause the monitor to fail to restore the image after powering back on
     * while VRR flipping.  To work around this, re-enable Adaptive-Sync
     * immediately after powering on.  (Bug 200488547)
     */
    if (on && nvDpyIsAdaptiveSync(pDpyEvo)) {
        NVDPLibConnectorPtr pDpLibConnector =
            pDpyEvo->pConnectorEvo->pDpLibConnector;
        NvU32 head;

        for (head = 0; head < pDevEvo->numHeads; head++) {
            if ((pDispEvo->headState[head].timings.vrr.type ==
                 NVKMS_DPY_VRR_TYPE_NONE) ||
                !nvDpyIdIsInDpyIdList(pDpyEvo->id,
                                      pDpLibConnector->dpyIdList[head])) {
                continue;
            }
            nvDPLibSetAdaptiveSync(pDispEvo, head, TRUE);
            break;
        }
    }
}
unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo)
{
NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;
nvAssert(nvDpyUsesDPLib(pDpyEvo));
if (!pDpLibDevice) {
return 0;
}
return pDpLibDevice->device->getEDIDSize();
}
// Copy up to 'size' bytes of this dpy's EDID into 'buffer'.  Returns FALSE
// if there is no DPLib device or the DisplayPort library reports failure.
NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size)
{
    NVDPLibDevicePtr pLibDevice = pDpyEvo->dp.pDpLibDevice;

    nvAssert(nvDpyUsesDPLib(pDpyEvo));

    if (pLibDevice == NULL) {
        return FALSE;
    }

    return pLibDevice->device->getEDID((char *)buffer, size);
}
// Query the DPCD GUID of this dpy from the DisplayPort library and cache
// both its raw bytes and its string form in pDpyEvo->dp.guid.  On any
// failure the cached GUID is left zeroed with 'valid' set to FALSE.
void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo)
{
    NVDPLibDevicePtr pLibDevice;
    const char *guidStr;

    nvkms_memset(&pDpyEvo->dp.guid, 0, sizeof(pDpyEvo->dp.guid));

    ct_assert(sizeof(pDpyEvo->dp.guid.buffer) == DPCD_GUID_SIZE);

    if (!nvDpyUsesDPLib(pDpyEvo)) {
        return;
    }

    pLibDevice = pDpyEvo->dp.pDpLibDevice;
    if (pLibDevice == NULL) {
        return;
    }

    pDpyEvo->dp.guid.valid =
        nvkmsDisplayPort::nvDPGetDeviceGUID(pLibDevice->device,
                                            pDpyEvo->dp.guid.buffer) == true;
    if (!pDpyEvo->dp.guid.valid) {
        return;
    }

    guidStr = nvkmsDisplayPort::nvDPGetDeviceGUIDStr(pLibDevice->device);
    if (guidStr == NULL) {
        // A raw GUID with no string form is treated as invalid.
        pDpyEvo->dp.guid.valid = FALSE;
        return;
    }

    nvkms_strncpy(pDpyEvo->dp.guid.str, guidStr, sizeof(pDpyEvo->dp.guid.str));
}
// Simulate a lostDevice event during dpy teardown.  Called by DpyFree just
// before it deletes a pDpy, so the connector's event sink can drop its
// bookkeeping for the device.
void nvDPDpyFree(NVDpyEvoPtr pDpyEvo)
{
    if (!nvDpyUsesDPLib(pDpyEvo) || (pDpyEvo->dp.pDpLibDevice == NULL)) {
        return;
    }

    DisplayPort::Device *pDevice = pDpyEvo->dp.pDpLibDevice->device;
    pDpyEvo->pConnectorEvo->pDpLibConnector->evtSink->lostDevice(pDevice);
}

View File

@@ -0,0 +1,149 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// This file implements the EVO RM interface used by the DisplayPort library.
#include "nvkms-utils.h"
#include "nvdp-evo-interface.hpp"
#include "nvkms-rmapi.h"
namespace nvkmsDisplayPort {

EvoInterface::EvoInterface(NVConnectorEvoPtr pConnectorEvo)
    : pConnectorEvo(pConnectorEvo)
{
}

// Forward an RM control call to the device's display common object (0073).
NvU32 EvoInterface::rmControl0073(NvU32 command, void * params,
                                  NvU32 paramSize)
{
    NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;

    return nvRmApiControl(nvEvoGlobal.clientHandle,
                          pDevEvo->displayCommonHandle,
                          command, params, paramSize);
}

// Forward an RM control call to the device's display object (5070).
NvU32 EvoInterface::rmControl5070(NvU32 command, void * params,
                                  NvU32 paramSize)
{
    NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;

    return nvRmApiControl(nvEvoGlobal.clientHandle,
                          pDevEvo->displayHandle,
                          command, params, paramSize);
}

/*!
 * Look up the value of a particular key in the DisplayPort-specific registry
 * corresponding to this connector.  These values are provided at device
 * allocation time, copied from the client request during nvAllocDevEvo().
 *
 * \param[in]  key  The name of the key to look up.
 *
 * \return  The unsigned 32-bit value set for the key, or 0 if the key is
 *          not set.
 */
NvU32 EvoInterface::getRegkeyValue(const char *key)
{
    NvU32 value;

    if (nvGetRegkeyValue(pConnectorEvo->pDispEvo->pDevEvo, key, &value)) {
        return value;
    }

    return 0;
}

bool EvoInterface::isInbandStereoSignalingSupported()
{
    return false;
}

NvU32 EvoInterface::getSubdeviceIndex()
{
    return pConnectorEvo->pDispEvo->displayOwner;
}

NvU32 EvoInterface::getDisplayId()
{
    return nvDpyIdToNvU32(pConnectorEvo->displayId);
}

NvU32 EvoInterface::getSorIndex()
{
    return nvEvoConnectorGetPrimaryOr(pConnectorEvo);
}

// Map the connector's SOR DP protocol to a link index: sublink A => 0,
// sublink B => 1.
NvU32 EvoInterface::getLinkIndex()
{
    const NvU32 protocol = pConnectorEvo->or.protocol;

    if (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) {
        return 0;
    }
    if (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B) {
        return 1;
    }

    nvAssert(!"Unrecognized DP protocol");
    return -1;
}

NvU32 EvoInterface::monitorDenylistInfo(
    NvU32 manufId, NvU32 productId,
    DisplayPort::DpMonitorDenylistData *pDenylistData)
{
    //
    // WAR for the Sharp internal (eDP) panel used by Toshiba/Dell: override
    // the optimal link configuration to HBR2.
    //
    // HBR2 is required to drive 4K resolution, which is supported on DP1.2
    // onward specifications.  The panel advertises itself as DP1.2 capable
    // but has no ESI address space, which violates the specification, so
    // inside the DP library the DPCD revision is downgraded to 1.1.  That
    // downgrade would otherwise also drop the link rate to HBR.
    //
    if ((manufId != 0x104d) ||
        (productId != 0x1414 && productId != 0x1430)) {
        return 0;
    }

    pDenylistData->dpOverrideOptimalLinkConfig.linkRate = 0x14;         // HBR2
    pDenylistData->dpOverrideOptimalLinkConfig.laneCount = laneCount_4; // 4 lanes

    return DisplayPort::DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG;
}

}; // namespace nvkmsDisplayPort

View File

@@ -0,0 +1,68 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVDP_EVO_INTERFACE_HPP__
#define __NVDP_EVO_INTERFACE_HPP__
#include <nvtypes.h>
#include <nvkms-types.h>
#include <dp_evoadapter.h>
namespace nvkmsDisplayPort
{
// Per-connector implementation of the DisplayPort library's EvoInterface:
// routes the library's RM control calls and connector queries to NVKMS.
class EvoInterface : public DisplayPort::Object,
                     public DisplayPort::EvoInterface
{
public:
    // The NVKMS connector this interface operates on.
    const NVConnectorEvoPtr pConnectorEvo;
    EvoInterface(NVConnectorEvoPtr pConnectorEvo);
    // Functions inherited from DisplayPort::EvoInterface
    virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize);
    virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize);
    // These entry points are not expected to be exercised by NVKMS; assert
    // if the DisplayPort library ever calls them.
    virtual void disconnectHead(unsigned head) {
        nvAssert(!"disconnectHead should never be called");
    }
    virtual void reattachHead(unsigned head) {
        nvAssert(!"reattachHead should never be called");
    }
    virtual NvU32 getSubdeviceIndex();
    virtual NvU32 getDisplayId();
    virtual NvU32 getSorIndex();
    virtual NvU32 getLinkIndex();
    virtual NvU32 getRegkeyValue(const char *key);
    virtual bool isInbandStereoSignalingSupported();
    virtual NvU32 monitorDenylistInfo(
        NvU32 manufId,
        NvU32 productId,
        DisplayPort::DpMonitorDenylistData *pDenylistData);
};
}; // namespace nvkmsDisplayPort
#endif // __NVDP_EVO_INTERFACE_HPP__

View File

@@ -0,0 +1,68 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/* DisplayPort management routines */
#include <stdarg.h>
#include "nvkms-utils.h"
#include "dp_hostimp.h"
// Allocate 'sz' bytes on behalf of the DisplayPort library, using the NVKMS
// allocator.
void *dpMalloc(NvLength sz)
{
    return nvAlloc(sz);
}
// Free memory previously allocated with dpMalloc().
void dpFree(void *p)
{
    nvFree(p);
}
// printf-style logging entry point for the DisplayPort library; messages are
// routed to the EVO informational log.
void dpPrint(const char *format, ...)
{
    va_list ap;
    va_start(ap, format);
    nvVEvoLog(EVO_LOG_INFO, NV_INVALID_GPU_LOG_INDEX, format, ap);
    va_end(ap);
}
// Breakpoint hook for the DisplayPort library; raises an NVKMS assertion.
void dpDebugBreakpoint(void)
{
    nvAssert(!"DisplayPort library debug breakpoint");
}
#if NV_DP_ASSERT_ENABLED
// Assertion-failure hook for the DisplayPort library; forwards the failing
// expression and location to NVKMS's assert reporting.
void dpAssert(const char *expression, const char *file,
              const char *function, int line)
{
    nvDebugAssert(expression, file, function, line);
}
#endif
// Tracing hook for the DisplayPort library; intentionally a no-op here.
void dpTraceEvent(NV_DP_TRACING_EVENT event,
                  NV_DP_TRACING_PRIORITY priority, NvU32 numArgs, ...)
{
    // To support DPlib tracing, implement this function.
}

View File

@@ -0,0 +1,146 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// This file implements the timer callback mechanism for the DisplayPort
// library.
#include "nvkms-types.h"
#include "dp/nvdp-timer.h"
#include "nvdp-timer.hpp"
namespace nvkmsDisplayPort {
// Construct a pending timer callback: allocate an NVKMS timer firing in
// 'ms' milliseconds and record the absolute expiration time.  On successful
// allocation, link this callback into pList and take a reference on the
// device's ref_ptr so the callback can later detect device teardown.
Timer::Callback::Callback(DisplayPort::List *pList,
                          NVDevEvoPtr pDevEvo,
                          DisplayPort::RawTimer::Callback *dpCallback,
                          int ms)
    : dpCallback(dpCallback),
      ref_ptr(pDevEvo->ref_ptr),
      handle(nvkms_alloc_timer(onTimerFired, this, 0, ms * 1000)),
      expireTimeUs(nvkms_get_usec() + ms * 1000)
{
    if (!allocFailed()) {
        pList->insertFront(this);
        nvkms_inc_ref(ref_ptr);
    }
}
Timer::Callback::~Callback()
{
    // NOTE(review): when allocation failed, handle is NULL here; assumes
    // nvkms_free_timer() tolerates a NULL handle -- confirm.
    nvkms_free_timer(handle);
}
// Returns true if the constructor failed to allocate the NVKMS timer.
bool Timer::Callback::allocFailed() const
{
    return handle == NULL;
}
// Returns true once the recorded expiration time has been reached.
bool Timer::Callback::isExpired(NvU64 timeNowUs) const
{
    return timeNowUs >= expireTimeUs;
}
// Static trampoline invoked by the NVKMS timer infrastructure; 'data' is
// the Callback passed to nvkms_alloc_timer() in the constructor.
void Timer::Callback::onTimerFired(void *data, NvU32 dataU32)
{
    Timer::Callback *cb = static_cast<Timer::Callback*>(data);
    cb->onTimerFired();
}
// Fire this callback and destroy it.  The DisplayPort library callback is
// invoked only when nvkms_dec_ref() returns nonzero (presumably meaning the
// device is still alive -- see the refcount taken in the constructor).
void Timer::Callback::onTimerFired()
{
    if (nvkms_dec_ref(ref_ptr)) {
        dpCallback->expired();
    }
    // Self-deleting: this object must not be touched after this point.
    delete this;
}
// Fire the callback (which deletes it) if its expiration time has passed.
void Timer::Callback::fireIfExpired(NvU64 timeNowUs)
{
    if (isExpired(timeNowUs)) {
        onTimerFired();
    }
}
Timer::Timer(NVDevEvoPtr pDevEvo)
    : pDevEvo(pDevEvo)
{
}
// Schedule 'dpCallback' to be invoked in 'ms' milliseconds.  If the NVKMS
// timer cannot be allocated, the callback is dropped (after an assert).
void Timer::queueCallback(DisplayPort::RawTimer::Callback *dpCallback, int ms)
{
    Callback *cb = new Callback(&timerList, pDevEvo, dpCallback, ms);
    nvAssert(cb && !cb->allocFailed());
    if (!cb || cb->allocFailed()) {
        delete cb;
        return;
    }
}
// Return the current time, in microseconds.
NvU64 Timer::getTimeUs()
{
    return nvkms_get_usec();
}
// Busy-wait/sleep for 'ms' milliseconds via NVKMS.
void Timer::sleep(int ms)
{
    nvkms_usleep(ms * 1000);
}
// Walk the pending-timer list and fire every callback whose expiration time
// has passed.  The next pointer is captured before firing because a fired
// callback deletes itself (and is unlinked from the list).
void Timer::fireExpiredTimers()
{
    const NvU64 timeNowUs = getTimeUs();
    DisplayPort::ListElement *pElem = timerList.begin();
    DisplayPort::ListElement *pNext;
    while (pElem != timerList.end()) {
        Callback *cb = static_cast<Callback*>(pElem);
        pNext = pElem->next;
        cb->fireIfExpired(timeNowUs);
        pElem = pNext;
    }
}
}; // namespace nvkmsDisplayPort
// Report whether DisplayPort timers need external servicing; always FALSE
// in this implementation.
NvBool nvDPTimersPending(void)
{
    return FALSE;
}
// Allocate the per-device DisplayPort library timer state (the NVKMS-backed
// RawTimer plus the library's Timer wrapper around it).
NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo)
{
    NVDPLibTimerPtr pTimer = new _nv_dplibtimer(pDevEvo);
    return pTimer;
}
// Free timer state allocated by nvDPAllocTimer().
void nvDPFreeTimer(NVDPLibTimerPtr pTimer)
{
    delete pTimer;
}
// Fire all expired DisplayPort library timer callbacks for this device.
void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo)
{
    pDevEvo->dpTimer->rawTimer.fireExpiredTimers();
}

View File

@@ -0,0 +1,93 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVDP_TIMER_HPP__
#define __NVDP_TIMER_HPP__
#include <nvtypes.h>
#include <dp_timer.h>
#include <dp_list.h>
namespace nvkmsDisplayPort
{
// RawTimer implementation for the DisplayPort library, backed by NVKMS
// timers.  Owns the list of pending callbacks for one device.
class Timer : public DisplayPort::RawTimer
{
    // Device whose ref_ptr pending callbacks hold a reference on.
    NVDevEvoPtr pDevEvo;
    // List of pending Callback objects.
    DisplayPort::List timerList;
    // One scheduled timer callback; self-deletes when fired.
    class Callback : public DisplayPort::ListElement {
        // DisplayPort library callback to invoke on expiration.
        DisplayPort::RawTimer::Callback *dpCallback;
        // ref_ptr to the pDevEvo
        nvkms_ref_ptr *ref_ptr;
        // Handle of the underlying NVKMS timer; NULL if allocation failed.
        nvkms_timer_handle_t *handle;
        // Absolute expiration time, in microseconds.
        NvU64 expireTimeUs;
        static void onTimerFired(void *data, NvU32 dataU32);
        void onTimerFired();
    public:
        // Construct an NVKMS timer callback. Since exceptions cannot be used
        // in NVKMS code, callers must call Callback::allocFailed() to query
        // whether the constructor succeeded.
        //
        // Scheduling a callback bumps the refcount on the corresponding
        // pDevEvo, so that a device isn't freed until all pending callbacks
        // have fired.
        Callback(DisplayPort::List *pList,
                 NVDevEvoPtr pDevEvo,
                 DisplayPort::RawTimer::Callback *dpCallback,
                 int ms);
        ~Callback();
        // Returns TRUE if the constructor failed.
        bool allocFailed() const;
        // Returns TRUE if the timer is ready to fire.
        bool isExpired(NvU64 timeNowUs) const;
        // Fire the timer if it's ready.
        // NOTE: If the timer fires, this deletes it.
        void fireIfExpired(NvU64 timeNowUs);
    };
public:
    Timer(NVDevEvoPtr pDevEvo);
    virtual void queueCallback(DisplayPort::RawTimer::Callback *cb, int ms);
    virtual NvU64 getTimeUs();
    virtual void sleep(int ms);
    // Fire every queued callback whose expiration time has passed.
    void fireExpiredTimers();
};
}; // namespace nvkmsDisplayPort
// Per-device DisplayPort timer state: the NVKMS-backed RawTimer plus the
// DisplayPort library's Timer wrapper built on top of it.
struct _nv_dplibtimer : public DisplayPort::Object {
    nvkmsDisplayPort::Timer rawTimer;
    DisplayPort::Timer timer;
    _nv_dplibtimer(NVDevEvoPtr pDevEvo)
    : rawTimer(pDevEvo), timer(&rawTimer)
    {
    }
};
#endif // __NVDP_TIMER_HPP__

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,54 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-types.h"
#include "nvkms-3dvision.h"
/*
 * 3D Vision support is stubbed out in this build: each entry point below is
 * a no-op that preserves the interface expected by the rest of nvkms.
 */

void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 head)
{
    /* Nothing to do. */
}

void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo)
{
    /* Nothing to do. */
}

NvBool
nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo,
                              NVEvoInfoStringPtr pInfoString)
{
    /* Timings are never patched; report that no adjustment was made. */
    return FALSE;
}

void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo)
{
    /* Nothing to do. */
}

void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo,
                                   const NvU32 head)
{
    /* Nothing to do. */
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,876 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-console-restore.h"
#include "nvkms-dpy.h"
#include "nvkms-flip.h"
#include "nvkms-modepool.h"
#include "nvkms-modeset.h"
#include "nvkms-prealloc.h"
#include "nvkms-private.h"
#include "nvkms-rm.h"
#include "nvkms-utils.h"
#include "dp/nvdp-connector.h"
/*!
 * Search the dpy's mode list for the first mode that validates at boot
 * clocks, supports the given surface format on the main layer, and (when
 * width/height are non-zero) matches the requested dimensions.
 *
 * Returns TRUE and fills *pModeOut on success; FALSE if no mode qualifies.
 */
static NvBool FindMode(NVDpyEvoPtr pDpyEvo,
                       const enum NvKmsSurfaceMemoryFormat format,
                       const NvU32 width,
                       const NvU32 height,
                       struct NvKmsMode *pModeOut)
{
    NvU32 modeIndex;

    for (modeIndex = 0; ; modeIndex++) {
        struct NvKmsValidateModeIndexParams params = { };

        params.request.dpyId = pDpyEvo->id;
        params.request.modeIndex = modeIndex;
        params.request.modeValidation.overrides =
            NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS;

        nvValidateModeIndex(pDpyEvo, &params.request, &params.reply);

        if (params.reply.end) {
            /* Exhausted the mode list without finding a match. */
            return FALSE;
        }

        if (!params.reply.valid) {
            continue;
        }

        /* The main layer must support the requested surface format. */
        if ((NVBIT64(format) &
             params.reply.modeUsage.layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats) == 0) {
            continue;
        }

        if ((width != 0) && (width != params.reply.mode.timings.hVisible)) {
            continue;
        }
        if ((height != 0) && (height != params.reply.mode.timings.vVisible)) {
            continue;
        }

        *pModeOut = params.reply.mode;
        return TRUE;
    }
}
/*!
 * Bring pDispEvo->connectedDpys up to date.
 *
 * Query the dpy dynamic data for every valid dpy: the query results are
 * discarded, but the query has the side effect of refreshing
 * pDispEvo->connectedDisplays, which is then returned.
 */
static NVDpyIdList UpdateConnectedDpys(NVDispEvoPtr pDispEvo)
{
    struct NvKmsQueryDpyDynamicDataParams *pParams =
        nvCalloc(1, sizeof(*pParams));
    NVDpyEvoPtr pDpyEvo;

    if (pParams == NULL) {
        nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN,
                          "Failed to allocate NvKmsQueryDpyDynamicDataParams");
        return pDispEvo->connectedDisplays;
    }

    FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) {
        nvkms_memset(pParams, 0, sizeof(*pParams));
        nvDpyGetDynamicData(pDpyEvo, pParams);
    }

    nvFree(pParams);

    return pDispEvo->connectedDisplays;
}
/*
 * Flip every layer on every active head of the device to a NULL surface,
 * then wait for each base channel to go idle, so that no client surfaces
 * remain scanned out before the console-restore modeset.
 */
static void FlipBaseToNull(NVDevEvoPtr pDevEvo)
{
    struct NvKmsFlipParams *pParams = nvCalloc(1, sizeof(*pParams));
    struct NvKmsFlipRequest *pRequest;
    NvU32 sd;
    NVDispEvoPtr pDispEvo;
    NvBool ret = TRUE;
    if (!pParams) {
        nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
            "Failed to allocate flip parameters for console restore base flip "
            "to NULL");
        return;
    }
    pRequest = &pParams->request;
    /* Build one flip request covering all active heads on all subdevices. */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
        struct NvKmsFlipRequestOneSubDevice *pRequestSd =
            &pRequest->sd[sd];
        NvU32 head;
        for (head = 0; head < pDevEvo->numHeads; head++) {
            struct NvKmsFlipCommonParams *pRequestHead =
                &pRequestSd->head[head];
            NvU32 layer;
            if (!nvHeadIsActive(pDispEvo, head)) {
                continue;
            }
            pRequestSd->requestedHeadsBitMask |= NVBIT(head);
            /* 'specified' with no surface handle set flips the layer to NULL. */
            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
                pRequestHead->layer[layer].surface.specified = TRUE;
                // No need to specify sizeIn/sizeOut as we are flipping NULL surface.
                pRequestHead->layer[layer].compositionParams.specified = TRUE;
                pRequestHead->layer[layer].completionNotifier.specified = TRUE;
                pRequestHead->layer[layer].syncObjects.specified = TRUE;
            }
            pRequest->commit = TRUE;
        }
    }
    // If no heads require changes, there's nothing to do.
    if (pRequest->commit) {
        ret = nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, pRequest,
                        &pParams->reply, FALSE /* skipUpdate */,
                        FALSE /* allowFlipLock */);
    }
    nvFree(pParams);
    if (!ret) {
        nvAssert(!"Console restore failed to flip base to NULL");
    }
    /* Wait for every base channel to idle so the NULL flips have landed. */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
        NvU32 head;
        for (head = 0; head < pDevEvo->numHeads; head++) {
            NvBool stoppedBase;
            ret = nvRMIdleBaseChannel(pDevEvo, head, sd, &stoppedBase);
            if (!ret) {
                nvAssert(!"Console restore failed to idle base");
            }
        }
    }
}
/*
 * Fill one NvKmsSetModeOneHeadRequest so that 'head' drives pDpyEvo with
 * the console surface.  Mode, viewPortSizeIn, and viewPortPointIn may each
 * be overridden by the caller; a NULL override selects a default (the first
 * mode validating at boot clocks, or the console surface dimensions).  All
 * layers other than the main layer are disabled, as are cursor, LUTs,
 * G-SYNC, and Adaptive-Sync.
 *
 * Returns FALSE if no suitable mode could be found.
 */
static NvBool InitModeOneHeadRequest(
    NVDpyEvoRec *pDpyEvo,
    NVSurfaceEvoPtr pSurfaceEvo,
    const struct NvKmsMode *pOverrideMode,
    const struct NvKmsSize *pOverrideViewPortSizeIn,
    const struct NvKmsPoint *pOverrideViewPortPointIn,
    const NvU32 head,
    struct NvKmsSetModeOneHeadRequest *pRequestHead)
{
    struct NvKmsFlipCommonParams *pFlip = &pRequestHead->flip;
    NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NvU32 layer;
    if (pOverrideMode != NULL) {
        pRequestHead->mode = *pOverrideMode;
    } else {
        if (!FindMode(pDpyEvo,
                      pSurfaceEvo->format,
                      0 /* Ignore mode width */,
                      0 /* Ignore mode height */,
                      &pRequestHead->mode)) {
            return FALSE;
        }
    }
    pRequestHead->dpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id);
    pRequestHead->modeValidationParams.overrides =
        NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS;
    if (pOverrideViewPortSizeIn != NULL) {
        pRequestHead->viewPortSizeIn = *pOverrideViewPortSizeIn;
    } else {
        /* Default the input viewport to the console surface dimensions. */
        pRequestHead->viewPortSizeIn.width = pSurfaceEvo->widthInPixels;
        pRequestHead->viewPortSizeIn.height = pSurfaceEvo->heightInPixels;
    }
    pFlip->viewPortIn.specified = TRUE;
    if (pOverrideViewPortPointIn != NULL) {
        pFlip->viewPortIn.point = *pOverrideViewPortPointIn;
    }
    /* Scan out the console surface on the main layer, unscaled. */
    pFlip->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_LEFT] =
        pDevEvo->fbConsoleSurfaceHandle;
    pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE;
    pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = pSurfaceEvo->widthInPixels;
    pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = pSurfaceEvo->heightInPixels;
    pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE;
    pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.val =
        pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val;
    /* Disable other layers except Main */
    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        if (layer == NVKMS_MAIN_LAYER) {
            pFlip->layer[layer].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX;
            pFlip->layer[layer].csc.specified = TRUE;
        }
        pFlip->layer[layer].surface.specified = TRUE;
        pFlip->layer[layer].completionNotifier.specified = TRUE;
        pFlip->layer[layer].syncObjects.specified = TRUE;
        pFlip->layer[layer].compositionParams.specified = TRUE;
    }
    // Disable other features.
    pFlip->cursor.imageSpecified = TRUE;
    pRequestHead->lut.input.specified = TRUE;
    pRequestHead->lut.output.specified = TRUE;
    pRequestHead->lut.synchronous = TRUE;
    pRequestHead->allowGsync = FALSE;
    pRequestHead->allowAdaptiveSync =
        NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED;
    return TRUE;
}
/*
 * Populate and validate a SetMode request entry that drives the console
 * surface on a single dpy, claiming one head from *pAvailableHeadsMask.
 *
 * The lowest available head that the dpy's connector can drive is chosen,
 * the per-head request is filled in via InitModeOneHeadRequest(), and a
 * non-committing nvSetDispModeEvo() validation is run.  If validation
 * fails, scaling is disabled (viewPortOut forced to match viewPortSizeIn)
 * and validation is retried once; if it still fails the head request is
 * zeroed and the dpy is left inactive.
 *
 * On success the claimed head is removed from *pAvailableHeadsMask.
 *
 * Returns TRUE iff this dpy was successfully added to the request.
 */
static NvBool
ConstructModeOneHeadRequestForOneDpy(NVDpyEvoRec *pDpyEvo,
                                     NVSurfaceEvoPtr pSurfaceEvo,
                                     struct NvKmsSetModeParams *pParams,
                                     const NvU32 dispIndex,
                                     NvU32 *pAvailableHeadsMask)
{
    NvBool ret = FALSE;
    /* Heads that are both still available and drivable by this connector. */
    const NvU32 possibleHeads = *pAvailableHeadsMask &
                                pDpyEvo->pConnectorEvo->validHeadMask;
    /* VR HMDs are intentionally not lit up for console restore. */
    if (possibleHeads == 0 || pDpyEvo->isVrHmd) {
        goto done;
    }
    const NvU32 head = BIT_IDX_32(LOWESTBIT(possibleHeads));
    NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
    struct NvKmsSetModeRequest *pRequest = &pParams->request;
    struct NvKmsSetModeOneDispRequest *pRequestDisp =
        &pRequest->disp[dispIndex];
    struct NvKmsSetModeOneHeadRequest *pRequestHead =
        &pRequestDisp->head[head];
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    if (!InitModeOneHeadRequest(pDpyEvo,
                                pSurfaceEvo,
                                NULL /* Use default Mode */,
                                NULL /* Use default ViewPortSizeIn */,
                                NULL /* Use default ViewPortPointIn */,
                                head,
                                pRequestHead)) {
        goto done;
    }
    nvAssert(!pRequestHead->viewPortOutSpecified);
    /* Validation only: the caller commits the accumulated request later. */
    nvAssert(!pRequest->commit);
    while (!nvSetDispModeEvo(pDevEvo,
                             pDevEvo->pNvKmsOpenDev,
                             pRequest,
                             &pParams->reply,
                             TRUE /* bypassComposition */,
                             FALSE /* doRasterLock */)) {
        /*
         * If validation is failing even after disabling scaling then leave
         * this dpy inactive.
         */
        if (pRequestHead->viewPortOutSpecified) {
            nvkms_memset(pRequestHead, 0, sizeof(*pRequestHead));
            goto done;
        }
        /* Disable scaling and try again */
        pRequestHead->viewPortOut = (struct NvKmsRect) {
            .height = pRequestHead->viewPortSizeIn.height,
            .width = pRequestHead->viewPortSizeIn.width,
            .x = 0,
            .y = 0,
        };
        pRequestHead->viewPortOutSpecified = TRUE;
    }
    /* Validation passed: this head is now claimed for this dpy. */
    *pAvailableHeadsMask &= ~NVBIT(head);
    ret = TRUE;
done:
    return ret;
}
/*
 * Results of DetectTiledDisplay() for a single Tiled-Display topology-id.
 */
typedef struct _TiledDisplayInfo {
    /* Connected dpys whose DisplayID topology-id matched. */
    NVDpyIdList detectedDpysList;
    /* TRUE when every tile of the topology was found exactly once. */
    NvBool isDetectComplete;
    /* TRUE when a single tile can scale its input across the whole display. */
    NvBool isCapToScaleSingleTile;
} TiledDisplayInfo;
/*
 * Detect Tiled-display of topology-id described in given pDisplayIdInfo.
 *
 * Loop over given all dpys from candidateConnectedDpys list, look for matching
 * topology-id. Add dpys of matching topology-id into
 * detectedTiledDisplayDpysList list. Mark Tiled-Display detect complete if all
 * exact number of tiles are found.
 *
 * \param[in]  pDispEvo                The disp whose dpys are examined.
 * \param[in]  pDisplayIdInfo          DisplayID info carrying the topology-id.
 * \param[in]  candidateConnectedDpys  Dpys considered for tile membership.
 * \param[out] pTiledDisplayInfo       Detection results.
 *
 * \return TRUE iff pDisplayIdInfo describes a usable Tiled-Display;
 *         detection results are then valid in pTiledDisplayInfo.
 */
static NvBool DetectTiledDisplay(const NVDispEvoRec *pDispEvo,
                                 const NVT_DISPLAYID_INFO *pDisplayIdInfo,
                                 const NVDpyIdList candidateConnectedDpys,
                                 TiledDisplayInfo *pTiledDisplayInfo)
{
    const NVT_TILEDDISPLAY_TOPOLOGY_ID nullTileDisplayTopoId = { 0 };
    const NVDpyEvoRec *pDpyEvo;
    const NvU32 numTiles = pDisplayIdInfo->tile_topology.row *
                           pDisplayIdInfo->tile_topology.col;
    /* One bit per tile position; valid because numTiles <= 16 (4x4 max). */
    const NvU32 numTilesMask = NVBIT(numTiles) - 1;
    NvU32 detectedTilesCount = 0;
    NvU32 detectedTilesMask = 0;
    NVDpyIdList detectedTiledDisplayDpysList = nvEmptyDpyIdList();
    /*
     * If parsed edid is valid and tile_topology_id is non-zero then the dpy
     * is considered a valid tile of a tiled display.
     *
     * The 'tile_topology_id' is a triplet of ids consisting of vendor_id,
     * product_id, and serial_number. The DisplayId specification does not
     * clearly define an invalid 'tile_topology_id', but here the
     * tile_topology_id is considered invalid only if all three ids are zero
     * which is consistent with other protocols like RandR1.2 'The tile group
     * identifier'.
     */
    if (!nvkms_memcmp(&pDisplayIdInfo->tile_topology_id,
                      &nullTileDisplayTopoId, sizeof(nullTileDisplayTopoId))) {
        return FALSE;
    }
    /*
     * Reject Tiled-Display consists of multiple physical display enclosures or
     * requires to configure bezel.
     */
    if (!pDisplayIdInfo->tile_capability.bSingleEnclosure ||
        pDisplayIdInfo->tile_capability.bHasBezelInfo) {
        return FALSE;
    }
    /*
     * Reject Tiled-Display which has number of horizontal or vertical tiles
     * greater than 4.
     */
    if (pDisplayIdInfo->tile_topology.row <= 0 ||
        pDisplayIdInfo->tile_topology.col <= 0 ||
        pDisplayIdInfo->tile_topology.row > 4 ||
        pDisplayIdInfo->tile_topology.col > 4) {
        return FALSE;
    }
    FOR_ALL_EVO_DPYS(pDpyEvo, candidateConnectedDpys, pDispEvo) {
        const NVT_EDID_INFO *pEdidInfo = &pDpyEvo->parsedEdid.info;
        const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo =
            &pEdidInfo->ext_displayid;
        if (!pDpyEvo->parsedEdid.valid) {
            continue;
        }
        /*
         * Compare the full topology-id struct.  Note this previously passed
         * sizeof(&...->tile_topology_id) -- the size of a *pointer* -- so
         * only the first few bytes of the id were compared.
         */
        if (nvkms_memcmp(&pDisplayIdInfo->tile_topology_id,
                         &pDpyDisplayIdInfo->tile_topology_id,
                         sizeof(pDpyDisplayIdInfo->tile_topology_id))) {
            continue;
        }
        /*
         * Tiled-Display Topology:
         *
         *           |-----------col
         *
         *      ___  +------------+------------+...
         *       |   | (x=0,y=0)  | (x=1,y=0)  |
         *       |   |            |            |
         *       |   |            |            |
         *       |   +------------+------------+
         *      row  | (x=0,y=1)  | (x=1,y=1)  |
         *           |            |            |
         *           |            |            |
         *           +------------+------------+
         *           .
         *           .
         *           .
         */
        /* The dpy's topology dimensions must agree with the reference. */
        if (pDpyDisplayIdInfo->tile_topology.row !=
            pDisplayIdInfo->tile_topology.row) {
            continue;
        }
        if (pDpyDisplayIdInfo->tile_topology.col !=
            pDisplayIdInfo->tile_topology.col) {
            continue;
        }
        /* Reject tiles whose location lies outside the topology. */
        if (pDpyDisplayIdInfo->tile_location.x >=
            pDpyDisplayIdInfo->tile_topology.col) {
            continue;
        }
        if (pDpyDisplayIdInfo->tile_location.y >=
            pDpyDisplayIdInfo->tile_topology.row) {
            continue;
        }
        nvAssert(pDpyDisplayIdInfo->tile_capability.single_tile_behavior ==
                 pDisplayIdInfo->tile_capability.single_tile_behavior);
        detectedTiledDisplayDpysList =
            nvAddDpyIdToDpyIdList(pDpyEvo->id, detectedTiledDisplayDpysList);
        /* Record this tile's position in row-major order. */
        detectedTilesMask |= NVBIT((pDpyDisplayIdInfo->tile_location.y *
                                    pDpyDisplayIdInfo->tile_topology.col) +
                                   (pDpyDisplayIdInfo->tile_location.x));
        detectedTilesCount++;
    }
    pTiledDisplayInfo->detectedDpysList = detectedTiledDisplayDpysList;
    /*
     * Detection is complete only if every tile position was seen exactly
     * once (the mask check catches duplicate locations).
     */
    if (detectedTilesCount != numTiles || detectedTilesMask != numTilesMask) {
        pTiledDisplayInfo->isDetectComplete = FALSE;
    } else {
        pTiledDisplayInfo->isDetectComplete = TRUE;
    }
    pTiledDisplayInfo->isCapToScaleSingleTile =
        pDisplayIdInfo->tile_capability.single_tile_behavior ==
        NVT_SINGLE_TILE_BEHAVIOR_SCALE;
    return TRUE;
}
/*
 * Construct modeset request for given Tiled-display.
 *
 * The console surface's input viewport is split evenly across all tiles,
 * one head per tile; the mode used is the Tiled-Display's native
 * resolution, looked up once and shared by all tiles.  The accumulated
 * request is validated (without committing); on any failure all head
 * requests claimed here are zeroed again.
 *
 * On success the claimed heads are removed from *pAvailableHeadsMask.
 *
 * \return TRUE iff every tile was assigned a head and validation passed.
 */
static NvBool
ConstructModeRequestForTiledDisplay(const NVDispEvoRec *pDispEvo,
                                    NVSurfaceEvoPtr pSurfaceEvo,
                                    struct NvKmsSetModeParams *pParams,
                                    const NvU32 dispIndex,
                                    NVDpyIdList tiledDisplayDpysList,
                                    NvU32 *pAvailableHeadsMask)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    /*
     * Get arbitrary dpy from tiledDisplayDpysList,
     * to extract Tiled-Display information which should be same across all
     * tiles.
     */
    NVDpyEvoRec *pArbitraryDpyEvo =
        nvGetOneArbitraryDpyEvo(tiledDisplayDpysList, pDispEvo);
    const NVT_DISPLAYID_INFO *pPrimaryDisplayIdInfo =
        &pArbitraryDpyEvo->parsedEdid.info.ext_displayid;
    const NvU32 numRows = pPrimaryDisplayIdInfo->tile_topology.row;
    const NvU32 numColumns = pPrimaryDisplayIdInfo->tile_topology.col;
    /*
     * Split entire input viewport across all tiles of Tiled-Display.
     */
    const struct NvKmsSize viewPortSizeIn = {
        .width = (pSurfaceEvo->widthInPixels / numColumns),
        .height = (pSurfaceEvo->heightInPixels / numRows),
    };
    struct NvKmsSetModeRequest *pRequest = &pParams->request;
    struct NvKmsSetModeOneDispRequest *pRequestDisp =
        &pRequest->disp[dispIndex];
    NvU32 firstClaimedHead = NV_INVALID_HEAD;
    NvU32 claimedHeadMask = 0x0;
    NVDpyEvoRec *pDpyEvo;
    NvU32 head;
    /*
     * Return failure if not enough number of heads available to construct
     * modeset request for Tiled-Display.
     */
    if (nvPopCount32(*pAvailableHeadsMask) <
        nvCountDpyIdsInDpyIdList(tiledDisplayDpysList)) {
        return FALSE;
    }
    /*
     * Return failure if input viewport has not been split across
     * tiles evenly.
     *
     * Width is divided among columns and height among rows, so check
     * divisibility accordingly (these were previously swapped).
     */
    if ((pSurfaceEvo->widthInPixels % numColumns != 0) ||
        (pSurfaceEvo->heightInPixels % numRows != 0)) {
        return FALSE;
    }
    FOR_ALL_EVO_DPYS(pDpyEvo, tiledDisplayDpysList, pDispEvo) {
        const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo =
            &pDpyEvo->parsedEdid.info.ext_displayid;
        /* This tile's offset into the input viewport. */
        const struct NvKmsPoint viewPortPointIn = {
            .x = pDpyDisplayIdInfo->tile_location.x * viewPortSizeIn.width,
            .y = pDpyDisplayIdInfo->tile_location.y * viewPortSizeIn.height
        };
        const NvU32 possibleHeads = *pAvailableHeadsMask &
                                    pDpyEvo->pConnectorEvo->validHeadMask &
                                    ~claimedHeadMask;
        if (possibleHeads == 0 || pDpyEvo->isVrHmd) {
            goto failed;
        }
        const NvU32 head = BIT_IDX_32(LOWESTBIT(possibleHeads));
        struct NvKmsSetModeOneHeadRequest *pRequestHead =
            &pRequestDisp->head[head];
        struct NvKmsMode mode;
        if (firstClaimedHead == NV_INVALID_HEAD) {
            /*
             * Find mode of native dimensions reported in Tiled-Display
             * information.
             */
            if (!FindMode(pDpyEvo,
                          pSurfaceEvo->format,
                          pPrimaryDisplayIdInfo->native_resolution.width,
                          pPrimaryDisplayIdInfo->native_resolution.height,
                          &mode)) {
                goto failed;
            }
            firstClaimedHead = head;
        } else {
            /* All tiles should support same set of modes */
            mode = pRequestDisp->head[firstClaimedHead].mode;
        }
        claimedHeadMask |= NVBIT(head);
        if (!InitModeOneHeadRequest(pDpyEvo,
                                    pSurfaceEvo,
                                    &mode,
                                    &viewPortSizeIn,
                                    &viewPortPointIn,
                                    head,
                                    pRequestHead)) {
            goto failed;
        }
    }
    /* Validation only: the caller commits the accumulated request later. */
    nvAssert(!pRequest->commit);
    if (!nvSetDispModeEvo(pDevEvo,
                          pDevEvo->pNvKmsOpenDev,
                          pRequest,
                          &pParams->reply,
                          TRUE /* bypassComposition */,
                          FALSE /* doRasterLock */)) {
        goto failed;
    }
    *pAvailableHeadsMask &= ~claimedHeadMask;
    return TRUE;
failed:
    /* Undo: clear every head request claimed above. */
    for (head = 0; head < ARRAY_LEN(pRequestDisp->head); head++) {
        if ((NVBIT(head) & claimedHeadMask) == 0x0) {
            continue;
        }
        nvkms_memset(&pRequestDisp->head[head],
                     0,
                     sizeof(pRequestDisp->head[head]));
    }
    return FALSE;
}
/*
 * Return TRUE iff any active head on any disp of this device is driving a
 * DisplayPort connector whose link is in multi-stream (MST) mode.
 */
static NvBool isDpMSTModeActiveOnAnyConnector(NVDevEvoPtr pDevEvo)
{
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex;

    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        NvU32 headIndex;

        for (headIndex = 0; headIndex < pDevEvo->numHeads; headIndex++) {
            const NVConnectorEvoRec *pConnectorEvo =
                pDispEvo->headState[headIndex].pConnectorEvo;

            if ((pConnectorEvo == NULL) ||
                !nvConnectorUsesDPLib(pConnectorEvo)) {
                continue;
            }

            {
                const enum NVDpLinkMode linkMode =
                    nvDPGetActiveLinkMode(pConnectorEvo->pDpLibConnector);

                /* An active DP head should never have its link off. */
                nvAssert(linkMode != NV_DP_LINK_MODE_OFF);

                if (linkMode == NV_DP_LINK_MODE_MST) {
                    return TRUE;
                }
            }
        }
    }

    return FALSE;
}
/*!
 * Attempt to restore the console.
 *
 * If a framebuffer console surface was successfully imported from RM, then use
 * the core channel to set a mode that displays it.
 *
 * Enables as many heads as possible in a clone configuration. In first pass
 * for connected boot dpys and in second pass for other remaining dpys:
 *
 * 1. Populate modeset request to enable given dpy.
 *
 * 2. Do modeset request validation, if fails then disable scaling. If
 * modeset request validation fails even after disabling scaling then do not
 * enable that dpy.
 *
 * If console restore succeeds, set pDevEvo->skipConsoleRestore to skip
 * deallocating the core channel and triggering RM's console restore code.
 *
 * \param[in] pDevEvo   The device whose console should be restored.
 * \param[in] allowMST  Whether DP multi-stream may remain enabled.
 *
 * \return TRUE iff the console was restored (or restore was skipped
 *         because it is already in the desired state).
 */
NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST)
{
    NvBool ret = FALSE;
    NvU32 dispIndex;
    NVDispEvoPtr pDispEvo;
    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
        nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev);
    /* The framebuffer console surface previously imported from RM. */
    NVSurfaceEvoPtr pSurfaceEvo =
        nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles,
                                     pDevEvo->fbConsoleSurfaceHandle);
    struct NvKmsSetModeParams *params;
    /*
     * If this function fails to restore a console then NVKMS frees
     * and reallocates the core channel, to attempt the console
     * restore using Resman. The core channel reallocation also may
     * fail and nvEvoRestoreConsole() again may get called from
     * nvFreeDevEvo() when client frees the NVKMS device.
     *
     * If nvEvoRestoreConsole() gets called after the core channel
     * allocation/reallocation failure then do nothing and return
     * early.
     */
    if (pDevEvo->displayHandle == 0x0) {
        goto done;
    }
    /*
     * If any DP-MST mode is active on any connector of this device but
     * DP-MST is disallowed then force console-restore.
     */
    if (pDevEvo->skipConsoleRestore &&
        !allowMST && isDpMSTModeActiveOnAnyConnector(pDevEvo)) {
        pDevEvo->skipConsoleRestore = FALSE;
    }
    if (pDevEvo->skipConsoleRestore) {
        ret = TRUE;
        goto done;
    }
    if (!pSurfaceEvo) {
        // No console surface to restore.
        goto done;
    }
    FlipBaseToNull(pDevEvo);
    /* NvKmsSetModeParams is large; use the preallocated scratch buffer. */
    params = nvPreallocGet(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE,
                           sizeof(*params));
    nvkms_memset(params, 0, sizeof(*params));
    nvDPSetAllowMultiStreaming(pDevEvo, allowMST);
    // Construct the request.
    //
    // To start with, try to enable as many connected dpys as possible,
    // preferring boot displays first.
    struct NvKmsSetModeRequest *pRequest = &params->request;
    NvBool foundDpysConfigForConsoleRestore = FALSE;
    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        NvU32 availableHeadsMask = NVBIT(pDevEvo->numHeads) - 1;
        NVDpyIdList connectedDpys = UpdateConnectedDpys(pDispEvo);
        const NVDpyIdList connectedBootDpys =
            nvIntersectDpyIdListAndDpyIdList(connectedDpys,
                                             pDispEvo->bootDisplays);
        struct NvKmsSetModeOneDispRequest *pRequestDisp =
            &pRequest->disp[dispIndex];
        int pass;
        pRequest->requestedDispsBitMask |= NVBIT(dispIndex);
        pRequestDisp->requestedHeadsBitMask = availableHeadsMask;
        // Only enable heads on the subdevice that actually contains the
        // console.
        if (dispIndex != pDevEvo->vtFbInfo.subDeviceInstance) {
            continue;
        }
        NVDpyIdList handledDpysList = nvEmptyDpyIdList();
        /* Pass 0: connected boot dpys; pass 1: remaining connected dpys. */
        for (pass = 0; pass < 2; pass++) {
            NVDpyIdList candidateDpys;
            NVDpyEvoPtr pDpyEvo;
            if (availableHeadsMask == 0) {
                break;
            }
            if (pass == 0) {
                candidateDpys = connectedBootDpys;
            } else {
                candidateDpys = nvDpyIdListMinusDpyIdList(connectedDpys,
                                                          connectedBootDpys);
            }
            FOR_ALL_EVO_DPYS(pDpyEvo, candidateDpys, pDispEvo) {
                NvBool isTiledDisplayFound = FALSE;
                TiledDisplayInfo tiledDisplayInfo = { 0 };
                NvBool isTiledDisplayEnable = FALSE;
                const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo =
                    pDpyEvo->parsedEdid.valid ?
                        &pDpyEvo->parsedEdid.info.ext_displayid : NULL;
                NvBool done = FALSE;
                if (availableHeadsMask == 0) {
                    break;
                }
                /* Skip dpys already enabled as part of a Tiled-Display. */
                if (nvDpyIdIsInDpyIdList(pDpyEvo->id,
                                         handledDpysList)) {
                    continue;
                }
                isTiledDisplayFound =
                    pDpyDisplayIdInfo != NULL &&
                    DetectTiledDisplay(pDispEvo,
                                       pDpyDisplayIdInfo,
                                       nvDpyIdListMinusDpyIdList(
                                           connectedDpys, handledDpysList),
                                       &tiledDisplayInfo);
                /*
                 * Construct modeset request for Tiled-Display which don't have
                 * a capability to scale single tile input across entire
                 * display. If fails then fallback to construct modeset request
                 * for this single dpy.
                 */
                if (isTiledDisplayFound &&
                    tiledDisplayInfo.isDetectComplete &&
                    !tiledDisplayInfo.isCapToScaleSingleTile) {
                    done = ConstructModeRequestForTiledDisplay(
                        pDispEvo,
                        pSurfaceEvo,
                        params,
                        dispIndex,
                        tiledDisplayInfo.detectedDpysList,
                        &availableHeadsMask);
                    isTiledDisplayEnable = done;
                }
                /*
                 * If Tiled-Display has capability to scale single tile input
                 * across entire display then for console restore it is
                 * sufficient to light up any single tile and ignore rest of
                 * remaining tiles.
                 */
                if (!done ||
                    !isTiledDisplayFound ||
                    !tiledDisplayInfo.isDetectComplete ||
                    tiledDisplayInfo.isCapToScaleSingleTile) {
                    done = ConstructModeOneHeadRequestForOneDpy(
                        pDpyEvo,
                        pSurfaceEvo,
                        params,
                        dispIndex,
                        &availableHeadsMask);
                    isTiledDisplayEnable =
                        done && tiledDisplayInfo.isCapToScaleSingleTile;
                }
                handledDpysList =
                    nvAddDpyIdToDpyIdList(pDpyEvo->id, handledDpysList);
                /* All tiles of an enabled Tiled-Display count as handled. */
                if (isTiledDisplayEnable) {
                    handledDpysList = nvAddDpyIdListToDpyIdList(
                        tiledDisplayInfo.detectedDpysList,
                        handledDpysList);
                }
                foundDpysConfigForConsoleRestore =
                    foundDpysConfigForConsoleRestore || done;
            }
        }
    }
    /*
     * Disable all (flip/raster) locks, dirty locking state in hardware
     * left behind by NVKMS console restore causes XID errors and engine hang
     * on next modeset because the NVKMS doesn't get back existing display
     * hardware state at the time of initialization.
     */
    if (foundDpysConfigForConsoleRestore) {
        /* Commit the accumulated, already-validated request. */
        pRequest->commit = TRUE;
        ret = nvSetDispModeEvo(pDevEvo,
                               pDevEvo->pNvKmsOpenDev,
                               pRequest,
                               &params->reply,
                               TRUE /* bypassComposition */,
                               FALSE /* doRasterLock */);
    }
    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE);
done:
    nvkms_free_timer(pDevEvo->consoleRestoreTimer);
    pDevEvo->consoleRestoreTimer = NULL;
    /* If console restore failed then simply shut down all heads */
    if (!ret) {
        nvShutDownHeads(pDevEvo, NULL /* pTestFunc, shut down all heads */);
    }
    // If restoring the console from here succeeded, then skip triggering RM's
    // console restore.
    pDevEvo->skipConsoleRestore = ret;
    return ret;
}

View File

@@ -0,0 +1,401 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/* this source file contains routines for setting and moving the cursor.
* NV50 specific */
#include "nvkms-cursor.h"
#include "nvkms-types.h"
#include "nvkms-dma.h"
#include "nvkms-utils.h"
#include "nvkms-rm.h"
#include "nvkms-evo.h"
#include "nvkms-vrr.h"
#include "nvkms-surface.h"
#include "nvkms-flip.h"
#include "nvkms-rmapi.h"
#include <class/cl917a.h> /* sizeof(GK104DispCursorControlPio) */
#include <nvos.h> /* NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS */
/*!
* Get the NVSurfaceEvoPtrs described by NvKmsSetCursorImageCommonParams.
*
* Look up the surfaces described by NvKmsSetCursorImageCommonParams,
* and check that the surfaces are valid for use by cursor on the
* given pDevEvo.
*
* \param[in] pDevEvo The device on which the cursor image will be set.
* \param[in] pParams The parameter structure indicating the surfaces.
* \param[out] pSurfaceEvo The array of surfaces to be assigned.
*
* \return If the parameters are valid, return TRUE and assign
* pSurfaceEvo. Otherwise, return FALSE.
*/
/*
 * Resolve the cursor surface handles in pParams to NVSurfaceEvoPtrs.
 *
 * Every non-zero handle must name a surface usable by the cursor channel
 * and backed by ISO memory; otherwise FALSE is returned.  Entries whose
 * handle is zero are left NULL in pSurfaceEvos.
 */
NvBool nvGetCursorImageSurfaces(
    const NVDevEvoRec *pDevEvo,
    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
    const struct NvKmsSetCursorImageCommonParams *pParams,
    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES])
{
    NvU32 i;

    nvkms_memset(pSurfaceEvos, 0, sizeof(NVSurfaceEvoRec *) * NVKMS_MAX_EYES);

    /* XXX NVKMS TODO: add support for stereo cursor */
    nvAssert(pParams->surfaceHandle[NVKMS_RIGHT] == 0);

    for (i = 0; i < ARRAY_LEN(pParams->surfaceHandle); i++) {
        NVSurfaceEvoPtr pSurfaceEvo;

        if (pParams->surfaceHandle[i] == 0) {
            continue;
        }

        pSurfaceEvo =
            nvEvoGetSurfaceFromHandle(pDevEvo,
                                      pOpenDevSurfaceHandles,
                                      pParams->surfaceHandle[i],
                                      NV_EVO_CHANNEL_MASK_CURSOR_ALL);

        /* The surface must exist and be displayable (ISO) memory. */
        if ((pSurfaceEvo == NULL) ||
            (pSurfaceEvo->isoType != NVKMS_MEMORY_ISO)) {
            return FALSE;
        }

        pSurfaceEvos[i] = pSurfaceEvo;
    }

    return TRUE;
}
/*
 * Program the cursor image and composition parameters for one head.
 *
 * Surface reference counts are updated and an EVO update is kicked off
 * only when the surface or composition parameters actually changed.  A
 * VRR unstall is triggered whenever a cursor surface is being set, so
 * the change becomes visible without waiting for a flip.
 */
static void
SetCursorImage(NVDispEvoPtr pDispEvo,
               const NvU32 head,
               NVSurfaceEvoRec *pSurfaceEvoNew,
               const struct NvKmsCompositionParams *pCursorCompParams)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoUpdateState updateState = { };
    const NvU32 sd = pDispEvo->displayOwner;
    NvBool changed = FALSE;
    NVSurfaceEvoPtr pSurfaceEvoOld =
        pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo;
    /* Latch new composition params only when a surface is being set. */
    if (pSurfaceEvoNew != NULL &&
        nvkms_memcmp(pCursorCompParams,
                     &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams,
                     sizeof(*pCursorCompParams)) != 0) {
        pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams =
            *pCursorCompParams;
        changed = TRUE;
    }
    if (pSurfaceEvoNew != pSurfaceEvoOld) {
        /* Take the new reference before dropping the old one. */
        if (pSurfaceEvoNew != NULL) {
            nvEvoIncrementSurfaceRefCnts(pSurfaceEvoNew);
        }
        if (pSurfaceEvoOld) {
            nvEvoDecrementSurfaceRefCnts(pSurfaceEvoOld);
        }
        pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo = pSurfaceEvoNew;
        changed = TRUE;
    }
    if (changed) {
        /* Restrict the methods pushed below to this disp's subdevice. */
        nvPushEvoSubDevMaskDisp(pDispEvo);
        pDevEvo->hal->SetCursorImage(
            pDevEvo,
            head,
            pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo,
            &updateState,
            &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams);
        nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState,
                              TRUE /* releaseElv */);
        nvPopEvoSubDevMask(pDevEvo);
    }
    if (pSurfaceEvoNew) {
        nvTriggerVrrUnstallSetCursorImage(pDispEvo, changed);
    }
}
/*
 * Apply a cursor image change through the full flip path instead of the
 * immediate cursor channel (used for the bug 2052012 workaround).
 *
 * Returns FALSE on allocation failure or if the flip is rejected.
 */
static NvBool
FlipCursorImage(NVDispEvoPtr pDispEvo,
                const struct NvKmsPerOpenDev *pOpenDevice,
                NvU32 head,
                const struct NvKmsSetCursorImageCommonParams *pImageParams)
{
    const NvU32 sd = pDispEvo->displayOwner;
    struct NvKmsFlipParams *pParams;
    struct NvKmsFlipRequest *pRequest;
    NvBool ret;

    /* NvKmsFlipParams is large; allocate it rather than using the stack. */
    pParams = nvCalloc(1, sizeof(*pParams));
    if (pParams == NULL) {
        return FALSE;
    }

    pRequest = &pParams->request;

    /* Request a cursor-image-only change on the target head. */
    pRequest->sd[sd].head[head] = (struct NvKmsFlipCommonParams) {
        .cursor = {
            .image = *pImageParams,
            .imageSpecified = TRUE,
        },
    };
    pRequest->sd[sd].requestedHeadsBitMask = NVBIT(head);
    pRequest->commit = TRUE;

    ret = nvFlipEvo(pDispEvo->pDevEvo,
                    pOpenDevice,
                    pRequest,
                    &pParams->reply,
                    FALSE /* skipUpdate */,
                    FALSE /* allowFlipLock */);

    nvFree(pParams);

    return ret;
}
/*
 * Set the cursor image for one head.
 *
 * Resolves the surface handles, then either programs the cursor directly
 * or — on GPUs affected by hardware bug 2052012, when transitioning
 * between a NULL and non-NULL cursor surface — routes the change through
 * the flip path.
 */
NvBool nvSetCursorImage(
    NVDispEvoPtr pDispEvo,
    const struct NvKmsPerOpenDev *pOpenDevice,
    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
    NvU32 head,
    const struct NvKmsSetCursorImageCommonParams *pParams)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES];
    NVSurfaceEvoPtr pNewSurface;
    NvBool useFlip = FALSE;

    if (!nvGetCursorImageSurfaces(pDevEvo, pOpenDevSurfaceHandles,
                                  pParams, pSurfaceEvos)) {
        return FALSE;
    }

    pNewSurface = pSurfaceEvos[NVKMS_LEFT];

    /*
     * Use flip to apply or remove workaround for hardware bug 2052012
     */
    if (NV5070_CTRL_SYSTEM_GET_CAP(
            pDevEvo->capsBits,
            NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH)) {
        const NvU32 sd = pDispEvo->displayOwner;
        NVSurfaceEvoPtr pOldSurface =
            pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo;

        useFlip = (pOldSurface != pNewSurface) &&
                  ((pOldSurface == NULL) || (pNewSurface == NULL));
    }

    if (useFlip) {
        return FlipCursorImage(pDispEvo,
                               pOpenDevice, head, pParams);
    }

    SetCursorImage(pDispEvo,
                   head,
                   pNewSurface,
                   &pParams->cursorCompParams);

    return TRUE;
}
/*
 * Program the cursor position for one head via the cursor HAL, and, if a
 * cursor image is currently set, trigger a VRR unstall so the cursor is
 * displayed at its new position.
 */
void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo,
                             NvU32 head, NvS16 x, NvS16 y)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NvU32 sd = pDispEvo->displayOwner;

    pDevEvo->cursorHal->MoveCursor(pDevEvo, sd, head, x, y);

    /* Only unstall when the cursor is actually visible. */
    if (pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo != NULL) {
        nvTriggerVrrUnstallMoveCursor(pDispEvo);
    }
}
/*
 * Handle a client cursor-move request: record the requested position in
 * the per-head software state, then program the hardware.
 */
void nvEvoMoveCursor(NVDispEvoPtr pDispEvo, NvU32 head,
                     const struct NvKmsMoveCursorCommonParams *pParams)
{
    const NvU32 sd = pDispEvo->displayOwner;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NvS16 x = pParams->x;
    const NvS16 y = pParams->y;

    /* XXX NVKMS TODO: validate x,y against current viewport in? */

    /* Cache the position so later code can reprogram it. */
    pDevEvo->gpus[sd].headState[head].cursor.x = x;
    pDevEvo->gpus[sd].headState[head].cursor.y = y;

    nvEvoMoveCursorInternal(pDispEvo, head, x, y);
}
// Allocate and map cursor position PIO channels
/*
 * For every head, allocate one cursor PIO channel object from RM and map
 * it into the CPU address space on each subdevice.  On any failure the
 * partial allocations are torn down via nvFreeCursorEvo() and FALSE is
 * returned.
 */
NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo)
{
    NvU32 head;
    for (head = 0; head < pDevEvo->numHeads; head++) {
        NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS PioChannelAllocParams = { 0 };
        NVDispEvoPtr pDispEvo;
        NvU32 sd;
        PioChannelAllocParams.channelInstance = head;
        // No notifiers in cursor channel
        PioChannelAllocParams.hObjectNotify = 0;
        pDevEvo->cursorHandle[head] =
            nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
        /* Allocate the per-head cursor channel object from RM. */
        if (nvRmApiAlloc(
                nvEvoGlobal.clientHandle,
                pDevEvo->displayHandle,
                pDevEvo->cursorHandle[head],
                pDevEvo->cursorHal->klass,
                &PioChannelAllocParams) != NVOS_STATUS_SUCCESS) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
                             "Failed to allocate CURSOR PIO for head %d",
                             head);
            nvFreeCursorEvo(pDevEvo);
            return FALSE;
        }
        /* Map the channel's PIO registers on every subdevice. */
        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
            NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
            void *pPioDisplayChannel;
            NvU32 status;
            status = nvRmApiMapMemory(
                nvEvoGlobal.clientHandle,
                pDevEvo->pSubDevices[sd]->handle,
                pDevEvo->cursorHandle[head],
                0,
                sizeof(GK104DispCursorControlPio),
                &pPioDisplayChannel,
                0);
            if (status != NVOS_STATUS_SUCCESS) {
                nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
                                  "Failed to map CURSOR PIO for head %d",
                                  head);
                nvFreeCursorEvo(pDevEvo);
                return FALSE;
            }
            pEvoSubDev->cursorPio[head] = pPioDisplayChannel;
        }
    }
    return TRUE;
}
// Free and unmap Cursor PIO Channels
/*
 * For every head with an allocated cursor channel, unmap the per-subdevice
 * PIO mapping and free the RM channel object and its handle.  Safe to call
 * on a partially-initialized device: heads without a channel are skipped.
 */
void nvFreeCursorEvo(NVDevEvoPtr pDevEvo)
{
    NvU32 head;
    for (head = 0; head < pDevEvo->numHeads; head++) {
        NVDispEvoPtr pDispEvo;
        NvU32 sd;
        NvU32 status;
        if (pDevEvo->cursorHandle[head] == 0) {
            continue;
        }
        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
            NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
            if (pEvoSubDev->cursorPio[head] == NULL) {
                continue;
            }
            /*
             * Reuse the outer 'status': the previous inner declaration
             * shadowed it (-Wshadow) for no benefit.
             */
            status = nvRmApiUnmapMemory(
                nvEvoGlobal.clientHandle,
                pDevEvo->pSubDevices[sd]->handle,
                pDevEvo->cursorHandle[head],
                pEvoSubDev->cursorPio[head],
                0);
            if (status != NVOS_STATUS_SUCCESS) {
                nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
                                  "Failed to unmap cursor channel memory");
            }
            pEvoSubDev->cursorPio[head] = NULL;
        }
        status = nvRmApiFree(
            nvEvoGlobal.clientHandle,
            pDevEvo->displayHandle,
            pDevEvo->cursorHandle[head]);
        if (status != NVOS_STATUS_SUCCESS) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
                             "Failed to tear down Cursor channel");
        }
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
                           pDevEvo->cursorHandle[head]);
        pDevEvo->cursorHandle[head] = 0;
    }
}
extern NVEvoCursorHAL nvEvoCursor91;
extern NVEvoCursorHAL nvEvoCursorC3;
extern NVEvoCursorHAL nvEvoCursorC5;
extern NVEvoCursorHAL nvEvoCursorC6;
/*
 * Select the cursor HAL for this device by probing, in order, which
 * cursor channel class RM supports; the first match wins.
 */
enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo)
{
    static const NVEvoCursorHALPtr cursorTable[] = {
        &nvEvoCursor91,
        &nvEvoCursorC3,
        &nvEvoCursorC5,
        &nvEvoCursorC6,
    };
    NvU32 idx;

    for (idx = 0; idx < ARRAY_LEN(cursorTable); idx++) {
        if (!nvRmEvoClassListCheck(pDevEvo, cursorTable[idx]->klass)) {
            continue;
        }
        pDevEvo->cursorHal = cursorTable[idx];
        return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;
    }

    return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
}

View File

@@ -0,0 +1,50 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvkms-types.h>
#include <class/cl917a.h>
/*
 * Program the cursor hotspot position for one head through the GK104
 * cursor control PIO channel, then kick an update that is not
 * interlocked with the core channel.
 */
static void MoveCursor90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
                         NvS16 x, NvS16 y)
{
    GK104DispCursorControlPio *pCursorPio =
        pDevEvo->gpus[sd].cursorPio[head];

    pCursorPio->SetCursorHotSpotPointsOut[NVKMS_LEFT] =
        DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _X, x) |
        DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _Y, y);

    pCursorPio->Update =
        DRF_DEF(917A, _UPDATE, _INTERLOCK_WITH_CORE, _DISABLE);
}
/* Cursor HAL for the GK104 (class 917A) cursor PIO channel. */
NVEvoCursorHAL nvEvoCursor91 = {
    NV917A_CURSOR_CHANNEL_PIO,              /* klass */
    MoveCursor90,                           /* MoveCursor */
    NULL,                                   /* ReleaseElv */
    {                                       /* caps */
        256,                                /* maxSize */
    },
};

View File

@@ -0,0 +1,114 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvkms-types.h>
#include <nvkms-utils.h>
#include <class/clc37a.h>
#include <class/clc57a.h>
#include <class/clc67a.h>
/*
 * Poll until the cursor PIO channel reports free space for one method,
 * yielding between polls; give up with a debug log after ~0.1 seconds.
 *
 * Free is only expected to be zero while the display frontend (FE) is
 * still processing a previous method, so 0.1s is ample.
 */
static void WaitForFreeSpace(NVDevEvoPtr pDevEvo,
                             NVC37ADispCursorImmControlPio *pEvoCursorControl)
{
    NvU64 startTime = 0;
    const NvU64 timeoutUSec = 100000; /* 0.1 seconds */

    while (pEvoCursorControl->Free == 0) {
        if (nvExceedsTimeoutUSec(&startTime, timeoutUSec)) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
                             "Timed out waiting for cursor PIO space");
            return;
        }
        nvkms_yield();
    }
}
/*
 * Program the cursor hotspot position for one head through the C37A-style
 * cursor immediate PIO channel, then kick an update that is not tied to
 * any fliplock pin.  Waits for PIO space before each method write.
 */
static void MoveCursorC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
                         NvS16 x, NvS16 y)
{
    NVC37ADispCursorImmControlPio *pCursorPio =
        pDevEvo->gpus[sd].cursorPio[head];

    WaitForFreeSpace(pDevEvo, pCursorPio);
    pCursorPio->SetCursorHotSpotPointOut[0] =
        DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _X, x) |
        DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _Y, y);

    WaitForFreeSpace(pDevEvo, pCursorPio);
    pCursorPio->Update =
        DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE);
}
/*
 * Push an update with RELEASE_ELV set on the cursor immediate channel,
 * releasing the exception-to-loadv condition for this head.
 */
static void ReleaseElvC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head)
{
    NVC37ADispCursorImmControlPio *pCursorPio =
        pDevEvo->gpus[sd].cursorPio[head];

    WaitForFreeSpace(pDevEvo, pCursorPio);
    pCursorPio->Update =
        DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE) |
        DRF_DEF(C37A, _UPDATE, _RELEASE_ELV, _TRUE);
}
/* Cursor HAL for the C37A cursor immediate PIO channel (Volta display). */
NVEvoCursorHAL nvEvoCursorC3 = {
    NVC37A_CURSOR_IMM_CHANNEL_PIO,          /* klass */
    MoveCursorC3,                           /* MoveCursor */
    ReleaseElvC3,                           /* ReleaseElv */
    {                                       /* caps */
        256,                                /* maxSize */
    },
};
/* C57A variant; shares the C37A programming model. */
NVEvoCursorHAL nvEvoCursorC5 = {
    NVC57A_CURSOR_IMM_CHANNEL_PIO,          /* klass */
    MoveCursorC3,                           /* MoveCursor */
    ReleaseElvC3,                           /* ReleaseElv */
    {                                       /* caps */
        256,                                /* maxSize */
    },
};
/* C67A variant; shares the C37A programming model. */
NVEvoCursorHAL nvEvoCursorC6 = {
    NVC67A_CURSOR_IMM_CHANNEL_PIO,          /* klass */
    MoveCursorC3,                           /* MoveCursor */
    ReleaseElvC3,                           /* ReleaseElv */
    {                                       /* caps */
        256,                                /* maxSize */
    },
};

View File

@@ -0,0 +1,484 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stddef.h>
#include "nvkms-dma.h"
#include "nvkms-utils.h"
#include "nvkms-rmapi.h"
#include "class/cl917d.h" // NV917DDispControlDma, NV917D_DMA_*
#include <ctrl/ctrl0080/ctrl0080dma.h> // NV0080_CTRL_CMD_DMA_FLUSH
#include "nvos.h"
#define NV_DMA_PUSHER_CHASE_PAD 5
#define NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC 3000000 // 3 seconds
static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset);
/*
 * Submit to the GPU any methods written to the channel's pushbuffer since
 * the last kickoff.  If the write pointer already equals the recorded PUT
 * offset there is nothing new and this is a no-op.
 */
void nvDmaKickoffEvo(NVEvoChannelPtr pChannel)
{
    NVDmaBufferEvoPtr pPushBuf = &pChannel->pb;
    const NvU32 newPut =
        (NvU32)((char *)pPushBuf->buffer - (char *)pPushBuf->base);

    if (newPut != pPushBuf->put_offset) {
        EvoCoreKickoff(pPushBuf, newPut);
    }
}
/*
 * Kick off pushbuffer methods up to putOffset.
 *
 * For BAR1-mapped (video memory) pushbuffers, first copy the newly written
 * chunk [put_offset, putOffset) into each subdevice's copy and ask RM to
 * flush to FB, so the data is visible before the GPU fetches it.  Then
 * update PUT on every channel so the display engine starts processing.
 *
 * \param[in] push_buffer  The pushbuffer to kick off.
 * \param[in] putOffset    New PUT byte offset; must be dword-aligned and
 *                         within the buffer.
 */
static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset)
{
    NVEvoDmaPtr pDma = &push_buffer->dma;
    int i;

    nvAssert(putOffset % 4 == 0);
    nvAssert(putOffset <= push_buffer->offset_max);

    /* If needed, copy the chunk to be kicked off into each GPU's FB */
    if (pDma->isBar1Mapping) {
        NVDevEvoPtr pDevEvo = push_buffer->pDevEvo;
        int sd;
        NV0080_CTRL_DMA_FLUSH_PARAMS flushParams = { 0 };
        NvU32 ret;
        NvU32 *endAddress;

        if (putOffset < push_buffer->put_offset) {
            /* If we've wrapped, copy to the end of the pushbuffer */
            nvAssert(putOffset == 0);
            endAddress = push_buffer->base + push_buffer->offset_max /
                sizeof(NvU32);
        } else {
            endAddress = push_buffer->buffer;
        }

        /*
         * Copy dwords [put_offset, endAddress) from the CPU copy into each
         * subdevice's BAR1 mapping.
         */
        for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
            NvU32 startOffset = push_buffer->put_offset / sizeof(NvU32);
            NvU32 *src = push_buffer->base;
            NvU32 *dst = pDma->subDeviceAddress[sd];

            nvAssert(dst != NULL);

            src += startOffset;
            dst += startOffset;

            while (src < endAddress) {
                *dst++ = *src++;
            }
        }

        /*
         * Finally, tell RM to flush so that the data actually lands in FB
         * before telling the GPU to fetch it.
         */
        flushParams.targetUnit = DRF_DEF(0080_CTRL_DMA, _FLUSH_TARGET,
                                         _UNIT_FB, _ENABLE);

        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                             pDevEvo->deviceHandle,
                             NV0080_CTRL_CMD_DMA_FLUSH,
                             &flushParams, sizeof(flushParams));

        if (ret != NVOS_STATUS_SUCCESS) {
            nvAssert(!"NV0080_CTRL_CMD_DMA_FLUSH failed");
        }
    }

    /*
     * CPU store fence: make sure all pushbuffer writes are visible before
     * the PUT update below is written.
     */
#if NVCPU_IS_X86_64
    __asm__ __volatile__ ("sfence\n\t" : : : "memory");
#elif NVCPU_IS_FAMILY_ARM
    __asm__ __volatile__ ("dsb sy\n\t" : : : "memory");
#endif

    /* Kick off all push buffers */
    push_buffer->put_offset = putOffset;

    for (i = 0; i < push_buffer->num_channels; i++) {
        void *pControl = push_buffer->control[i];
        nvDmaStorePioMethod(pControl, NV917D_PUT, putOffset);
    }
}
/* Return the current GET offset of the EVO core channel on subdevice sd. */
static NvU32 EvoCoreReadGet(NVDmaBufferEvoPtr push_buffer, int sd)
{
    return nvDmaLoadPioMethod(push_buffer->control[sd], NV917D_GET);
}
/*
 * Sample GET on every channel and return the GET of the channel with the
 * least progress (minimum == TRUE) or the most progress (minimum == FALSE),
 * where progress is measured by the wrap-adjusted distance from GET to PUT
 * (a larger distance means less progress).
 */
static NvU32 EvoReadGetOffset(NVDmaBufferEvoPtr push_buffer, NvBool minimum)
{
    const NvS32 bufferSize = push_buffer->dma.limit + 1;
    NvS32 bestDistance = minimum ? 0 : bufferSize;
    NvU32 bestGet = 0;
    int i;

    /* With a single channel there is nothing to compare. */
    if (push_buffer->num_channels <= 1) {
        return EvoCoreReadGet(push_buffer, 0);
    }

    for (i = 0; i < push_buffer->num_channels; i++) {
        const NvU32 get = EvoCoreReadGet(push_buffer, i);
        /* Distance from this GET to PUT, adjusted for wrap-around. */
        NvS32 distance = push_buffer->put_offset - get;

        if (distance < 0) {
            distance += bufferSize;
        }

        /* Track the requested extreme and its corresponding GET. */
        if ((minimum && distance >= bestDistance) ||
            (!minimum && distance <= bestDistance)) {
            bestDistance = distance;
            bestGet = get;
        }
    }

    return bestGet;
}
/*
 * Wait until the channel's pushbuffer has room for more than 'count' dwords
 * of methods, updating push_buffer->fifo_free_count as a side effect.
 *
 * The free space is computed against the slowest subdevice's GET pointer;
 * if the end of the buffer is reached, a jump method is written and the
 * write pointer wraps to the base.  If the GPU makes no progress for the
 * timeout period, an error is logged and waiting continues.
 */
void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count)
{
    NVDmaBufferEvoPtr push_buffer = &pChannel->pb;
    NvU32 getOffset;
    NvU32 putOffset;
    NvU64 startTime = 0;
    const NvU64 timeout = 5000000; /* 5 seconds */

    putOffset = (NvU32) ((char *)push_buffer->buffer -
                         (char *)push_buffer->base);

    /* At the end of the buffer: emit a jump back to the start and wrap. */
    if (putOffset >= push_buffer->offset_max) {
        /*
         * NOTE(review): 0x20000000 is presumably the EVO "jump to offset 0"
         * method -- confirm against the class headers.
         */
        *(push_buffer->buffer) = 0x20000000;
        push_buffer->buffer = push_buffer->base;
        nvDmaKickoffEvo(pChannel);
        putOffset = 0;
    }

    while (1) {
        /* Use the GET of the slowest subdevice (minimum progress). */
        getOffset = EvoReadGetOffset(push_buffer, TRUE);

        if (putOffset >= getOffset) {
            /* GET is behind PUT: free space runs from PUT to buffer end. */
            push_buffer->fifo_free_count =
                (push_buffer->offset_max - putOffset) >> 2;

            if (push_buffer->fifo_free_count <= count) {
                if (getOffset) {
                    /*
                     * There is space at the start of the buffer: jump back
                     * to the base and wrap.
                     */
                    *(push_buffer->buffer) = 0x20000000;
                    push_buffer->buffer = push_buffer->base;
                    nvDmaKickoffEvo(pChannel);
                    putOffset = 0;
                }
                else if (putOffset != push_buffer->put_offset) {
                    nvDmaKickoffEvo(pChannel);
                    // Put offset will have changed if a tail was inserted.
                    putOffset = push_buffer->put_offset;
                }
            }
        }
        else {
            /* GET is ahead of PUT: free space runs from PUT up to GET. */
            getOffset = (getOffset > push_buffer->offset_max) ?
                        push_buffer->offset_max : getOffset;

            /* Keep a pad of NV_DMA_PUSHER_CHASE_PAD methods before GET. */
            if ((putOffset + (NV_DMA_PUSHER_CHASE_PAD * 4)) >= getOffset)
                push_buffer->fifo_free_count = 0;
            else
                push_buffer->fifo_free_count =
                    ((getOffset - putOffset) >> 2) - 1;
        }

        if (push_buffer->fifo_free_count > count) {
            break;
        }

        /*
         * If we have been waiting too long, print an error message.  There
         * isn't much we can do as currently structured, so just reset
         * startTime.
         */
        if (nvExceedsTimeoutUSec(&startTime, timeout)) {
            nvEvoLogDev(push_buffer->pDevEvo, EVO_LOG_ERROR,
                        "Error while waiting for GPU progress: "
                        "0x%08x:%d %d:%d:%d:%d",
                        pChannel->hwclass, pChannel->instance,
                        count, push_buffer->fifo_free_count, getOffset, putOffset);
            startTime = 0;
        }

        nvkms_yield();
    }
}
/*
 * Store a value into a notifier dword.
 *
 * No explicit flush to vidmem is needed here: any subsequent GPU write is
 * always triggered by kicking off pushbuffer methods, which performs a
 * general FB flush.  This does assume the pushbuffer and its associated
 * notifier surfaces are either both in sysmem or both in vidmem.
 */
static inline void EvoWriteNotifier(volatile NvU32 *pNotifier, NvU32 value)
{
    *pNotifier = value;
}
/* Write 'value' into the core channel notifier dword at 'offset'. */
void nvWriteEvoCoreNotifier(
    const NVDispEvoRec *pDispEvo,
    NvU32 offset,
    NvU32 value)
{
    const NvU32 sd = pDispEvo->displayOwner;
    NVEvoDmaPtr pNotifierDma = &pDispEvo->pDevEvo->core->notifiersDma[sd];
    volatile NvU32 *pBase = pNotifierDma->subDeviceAddress[sd];

    EvoWriteNotifier(pBase + offset, value);
}
/*
 * Check, and optionally wait for, completion of a core channel notifier.
 *
 * Returns TRUE when bits [done_extent_bit:done_base_bit] of the notifier
 * dword at 'offset' equal done_value.  With wait == FALSE this is a single
 * non-blocking check.  With wait == TRUE it polls (yielding between reads);
 * if the channel has gone idle (PUT == GET) and the notifier still has not
 * completed within the timeout, the notification is assumed lost: a warning
 * is logged, the notifier is forced to the done value, and TRUE is
 * returned.  The timeout path is skipped on emulation.
 */
static NvBool EvoCheckNotifier(const NVDispEvoRec *pDispEvo,
                               NvU32 offset, NvU32 done_base_bit,
                               NvU32 done_extent_bit, NvU32 done_value,
                               NvBool wait)
{
    const NvU32 sd = pDispEvo->displayOwner;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoDmaPtr pSubChannel = &pDevEvo->core->notifiersDma[sd];
    NVDmaBufferEvoPtr p = &pDevEvo->core->pb;
    volatile NvU32 *pNotifier;
    NvU64 startTime = 0;

    pNotifier = pSubChannel->subDeviceAddress[sd];
    nvAssert(pNotifier != NULL);
    pNotifier += offset;

    // While the completion notifier is not set to done_true
    do {
        const NvU32 val = *pNotifier;
        const NvU32 done_mask = DRF_SHIFTMASK(done_extent_bit:done_base_bit);
        const NvU32 done_val = done_value << done_base_bit;

        if ((val & done_mask) == done_val) {
            return TRUE;
        }

        if (!wait) {
            return FALSE;
        }

        /*
         * Lost-notification recovery: only give up when the channel is
         * idle (PUT == GET) and the timeout has expired.
         */
        if (!nvIsEmulationEvo(pDevEvo) &&
            nvExceedsTimeoutUSec(
                &startTime,
                NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC) &&
            (p->put_offset == EvoCoreReadGet(p, sd)))
        {
            nvEvoLogDisp(pDispEvo, EVO_LOG_WARN,
                         "Lost display notification (%d:0x%08x); "
                         "continuing.", sd, val);
            EvoWriteNotifier(pNotifier, done_value << done_base_bit);
            return TRUE;
        }

        nvkms_yield();
    } while (TRUE);
}
/*
 * Block until the core notifier dword at 'offset' reports 'done_value' in
 * bits [done_extent_bit:done_base_bit].
 *
 * Used by NV_EVO_WAIT_FOR_NOTIFIER() and NV_EVO_WAIT_FOR_CAPS_NOTIFIER().
 */
void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset,
                              NvU32 done_base_bit, NvU32 done_extent_bit,
                              NvU32 done_value)
{
    (void) EvoCheckNotifier(pDispEvo, offset, done_base_bit,
                            done_extent_bit, done_value, TRUE /* wait */);
}
/*
 * Non-blocking check used by the EVO HAL IsNotifierComplete functions:
 * returns TRUE if bits [done_extent_bit:done_base_bit] of the core
 * notifier at 'offset' already equal 'done_value'.
 */
NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo, NvU32 offset,
                                   NvU32 done_base_bit, NvU32 done_extent_bit,
                                   NvU32 done_value)
{
    return EvoCheckNotifier(pDispEvo, offset, done_base_bit,
                            done_extent_bit, done_value,
                            FALSE /* wait */);
}
/*
 * Push a SET_SUBDEVICE_MASK method into the channel so that subsequent
 * methods apply only to the subdevices selected by 'mask'.
 */
void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask)
{
    NVDmaBufferEvoPtr pPushBuf = &pChannel->pb;

    /* Callers are expected to skip this when the mask is already current. */
    nvAssert(!nvDmaSubDevMaskMatchesCurrent(pChannel, mask));

    pPushBuf->currentSubDevMask = mask;

    ASSERT_DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask);

    /* Make sure there is room for the one-dword method. */
    if (pPushBuf->fifo_free_count <= 1) {
        nvEvoMakeRoom(pChannel, 1);
    }

    nvDmaSetEvoMethodData(pChannel,
        DRF_DEF(917D, _DMA, _OPCODE, _SET_SUBDEVICE_MASK) |
        DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask));

    pPushBuf->fifo_free_count -= 1;
}
/*!
 * Reads CRC values from the notifier.
 *
 * This function will attempt to read in the first 'entry_count' CRC notifier
 * entries that HW generated.  The actual number of entries that are read may
 * be less.
 *
 * \param[in]     pCRC32Notifier  Pointer to the CRC notifier memory.
 * \param[in]     entry_stride    Stride (in dwords) of a single CRC notifier
 *                                entry.
 * \param[in]     entry_count     Expected count of notifier entries to read.
 * \param[in]     status_offset   Offset for Status flags header in CRC
 *                                notifier.
 * \param[in]     field_count     Number of fields to read from each CRC
 *                                notifier entry.
 * \param[in]     flag_count      Number of flags to read from the Status
 *                                Header.
 * \param[in,out] field_info      Specifies the offset/base/extent info for
 *                                each field.  Each 'field_info' contains an
 *                                output array for storing 'entry_count'
 *                                field values.
 * \param[in]     flag_info       Specifies the base/extent info for each
 *                                flag.  Each 'flag_info' contains a
 *                                'flag_type' for addressing error cases
 *                                related to the flags.
 *
 * \return        Returns the MIN(count, entry_count) of successfully
 *                read entries; 0 on CRC overflow.
 */
NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier,
                             NvU32 entry_stride,
                             NvU32 entry_count,
                             NvU32 status_offset,
                             NvU32 field_count,
                             NvU32 flag_count,
                             const CRC32NotifierEntryRec *field_info,
                             const CRC32NotifierEntryFlags *flag_info)
{
    NvU32 count = 0;
    NvU32 i, j, k;

    nvAssert(pCRC32Notifier != NULL);

    // Iterate over flags (unique at start of the CRC32Notifier Struct)
    for (k = 0; k < flag_count; k++) {
        CRC32NotifierEntryFlags info = flag_info[k];
        volatile NvU32 *pFlag = pCRC32Notifier + status_offset;
        NvU32 flag_mask =
            DRF_SHIFTMASK((info.flag_extent_bit):(info.flag_base_bit));
        NvU32 flag = (*pFlag & flag_mask) >> info.flag_base_bit;

        switch (info.flag_type)
        {
            case NVEvoCrc32NotifierFlagCount:
                /* HW-reported entry count, clamped to the caller's limit. */
                count = flag;
                // entry_count is max of each field_frame_values[i] array
                if (count > entry_count) {
                    nvEvoLog(EVO_LOG_WARN, "Too many CRC32 generated entries "
                             "(%d expected; %d found)", entry_count, count);
                    count = entry_count;
                }
                break;

            case NVEvoCrc32NotifierFlagCrcOverflow:
                /* On overflow no entries are trustworthy; bail with 0. */
                if (flag) {
                    count = 0;
                    /* Fixed typo in log message: "occured" -> "occurred". */
                    nvEvoLog(EVO_LOG_ERROR, "CRC Overflow occurred, "
                             "CRC value unable to be processed fast enough.\n"
                             "Failing flag index in status_info array: %d",
                             k);
                    return count;
                }
                break;
        }
    }

    // Iterate over each collection of fields, for count pairs of values
    for (i = 0; i < count; i++) {
        for (j = 0; j < field_count; j++) {
            CRC32NotifierEntryRec info = field_info[j];
            volatile NvU32 *pEntry = pCRC32Notifier + info.field_offset;
            NvU32 field_mask =
                DRF_SHIFTMASK((info.field_extent_bit):(info.field_base_bit));

            /* Results are written through the output array embedded in
             * each field_info record. */
            info.field_frame_values[i].value =
                (*pEntry & field_mask) >> info.field_base_bit;
            info.field_frame_values[i].supported = TRUE;
        }
        /* Advance to the next notifier entry. */
        pCRC32Notifier += entry_stride;
    }

    return count;
}
/*
 * Re-arm a CRC32 notifier: write 'reset_value', shifted into position at
 * 'reset_base_bit', to the notifier dword at 'offset'.
 */
void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier,
                             NvU32 offset,
                             NvU32 reset_base_bit,
                             NvU32 reset_value)
{
    nvAssert(pCRC32Notifier != NULL);

    EvoWriteNotifier(pCRC32Notifier + offset,
                     reset_value << reset_base_bit);
}
/*
 * Poll the CRC32 notifier status dword at 'offset' until bits
 * [done_extent_bit:done_base_bit] equal 'done_value'.
 *
 * Returns TRUE once the done value is observed, or FALSE if it does not
 * appear within NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC.
 */
NvBool nvEvoWaitForCRC32Notifier(volatile NvU32 *pCRC32Notifier,
                                 NvU32 offset,
                                 NvU32 done_base_bit,
                                 NvU32 done_extent_bit,
                                 NvU32 done_value)
{
    const NvU32 doneMask = DRF_SHIFTMASK(done_extent_bit:done_base_bit);
    const NvU32 doneVal = done_value << done_base_bit;
    volatile const NvU32 *pStatus;
    NvU64 startTime = 0;

    nvAssert(pCRC32Notifier != NULL);
    pStatus = pCRC32Notifier + offset;

    for (;;) {
        if ((*pStatus & doneMask) == doneVal) {
            return TRUE;
        }
        if (nvExceedsTimeoutUSec(&startTime,
                                 NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC)) {
            return FALSE;
        }
        nvkms_yield();
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,207 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvos.h"
#include "dp/nvdp-connector.h"
#include "nvkms-event.h"
#include "nvkms-rm.h"
#include "nvkms-types.h"
#include "nvkms-dpy.h"
#include "nvkms-rmapi.h"
#include "nvkms-utils.h"
#include "nvkms-private.h"
#include "nvkms-evo.h"
/*
 * Handle a display device hotplug event.
 *
 * What "hotplug" means is unclear, but it could mean any of the following:
 * - A display device is plugged in.
 * - A display device is unplugged.
 * - A display device was unplugged and then plugged back in.
 * - A display device was plugged in and then unplugged.
 * - An already connected display device is turned on.
 * - An already connected display device is turned off.
 * - A DisplayPort device needs its link status and RX Capabilities fields
 *   read and may need to be retrained ("long" hotplug event, > 2ms).
 *
 * DisplayPort "short" hotplug events, which are between 0.25ms and 2ms, are
 * handled separately by nvHandleDPIRQEventDeferredWork below.
 *
 * \param[in] dataPtr  The NVDispEvoPtr for which the event was queued.
 * \param[in] dataU32  Unused.
 */
void
nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32)
{
    NVDispEvoPtr pDispEvo = dataPtr;
    NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS hotplugParams = { 0 };
    NvU32 ret;
    NVDpyIdList hotplugged, unplugged, tmpUnplugged, changed;
    NVDpyIdList connectedDisplays;
    NVDpyEvoPtr pDpyEvo;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    // Get the hotplug state.
    hotplugParams.subDeviceInstance = pDispEvo->displayOwner;

    if ((ret = nvRmApiControl(
             nvEvoGlobal.clientHandle,
             pDevEvo->displayCommonHandle,
             NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE,
             &hotplugParams,
             sizeof(hotplugParams)))
        != NVOS_STATUS_SUCCESS) {
        nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "Failed to determine which "
                     "devices were hotplugged: 0x%x\n", ret);
        return;
    }

    /*
     * Work around an RM bug in hotplug notification when the GPU is in
     * GC6.  In this case, the RM will notify us of a hotplug event, but
     * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE returns both
     * hotPlugMask and hotUnplugMask as 0.
     * Bug 200528641 tracks finding a root cause.  Until that bug is
     * fixed, call NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE to get the
     * full list of connected dpys and construct hotplugged and
     * unplugged lists from that if we encounter this case.
     */
    if ((hotplugParams.hotPlugMask == 0) &&
        (hotplugParams.hotUnplugMask == 0)) {
        const NVDpyIdList updatedDisplayList = nvRmGetConnectedDpys(pDispEvo,
            pDispEvo->connectorIds);

        /* Diff the fresh connect state against the cached one. */
        hotplugged = nvDpyIdListMinusDpyIdList(updatedDisplayList,
                                               pDispEvo->connectedDisplays);
        unplugged = nvDpyIdListMinusDpyIdList(pDispEvo->connectedDisplays,
                                              updatedDisplayList);
    } else {
        hotplugged = nvNvU32ToDpyIdList(hotplugParams.hotPlugMask);
        unplugged = nvNvU32ToDpyIdList(hotplugParams.hotUnplugMask);
    }

    // The RM only reports the latest plug/unplug status of each dpy.
    nvAssert(nvDpyIdListIsEmpty(nvIntersectDpyIdListAndDpyIdList(hotplugged,
                                                                 unplugged)));
    nvAssert(nvDpyIdListIsASubSetofDpyIdList(hotplugged,
                                             pDispEvo->connectorIds));
    nvAssert(nvDpyIdListIsASubSetofDpyIdList(unplugged,
                                             pDispEvo->connectorIds));

    connectedDisplays = pDispEvo->connectedDisplays;

    // Ignore non-DP devices that were reported as unplugged while already
    // disconnected.  (DP connectors are always kept so the DP library can
    // process the event.)
    tmpUnplugged = nvEmptyDpyIdList();
    FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) {
        NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
        if (nvConnectorUsesDPLib(pConnectorEvo) ||
            nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) {
            tmpUnplugged =
                nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, tmpUnplugged);
        }
    }
    unplugged = tmpUnplugged;

    // Non-DP devices that were disconnected and connected again should generate an
    // unplug / plug pair.
    FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) {
        NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
        if (!nvConnectorUsesDPLib(pConnectorEvo) &&
            nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) {
            unplugged = nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, unplugged);
        }
    }

#if defined(DEBUG)
    if (!nvDpyIdListIsEmpty(hotplugged)) {
        char *str = nvGetDpyIdListStringEvo(pDispEvo, hotplugged);
        nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO,
                          "Received display hotplug event: %s",
                          nvSafeString(str, "unknown"));
        nvFree(str);
    }
    if (!nvDpyIdListIsEmpty(unplugged)) {
        char *str = nvGetDpyIdListStringEvo(pDispEvo, unplugged);
        nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO,
                          "Received display unplug event: %s",
                          nvSafeString(str, "unknown"));
        nvFree(str);
    }
#endif /* DEBUG */

    // First, the OR configuration of the connector should not change, but
    // re-query it to make sure.
    changed = nvAddDpyIdListToDpyIdList(hotplugged, unplugged);
    FOR_ALL_EVO_DPYS(pDpyEvo, changed, pDispEvo) {
        nvRmGetConnectorORInfo(pDpyEvo->pConnectorEvo, TRUE);
    }

    // Next, disconnect devices that are in the unplug mask.
    FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) {
        NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
        if (nvConnectorUsesDPLib(pConnectorEvo)) {
            nvDPNotifyLongPulse(pConnectorEvo, FALSE);
        } else {
            nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);
        }
    }

    // Finally, connect devices that are in the plug mask.
    FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) {
        NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
        if (nvConnectorUsesDPLib(pConnectorEvo)) {
            nvDPNotifyLongPulse(pConnectorEvo, TRUE);
        } else {
            nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);
        }
    }
}
/*
 * Deferred-work handler for DisplayPort "short pulse" (HPD IRQ) events.
 *
 * XXX[AGP]: ReceiveDPIRQEvent throws away the DisplayID of the device that
 * caused the event, so for now we have to poll all of the connected DP
 * devices to see which ones need attention.  When RM is fixed, this can be
 * improved.
 *
 * \param[in] dataPtr  The NVDispEvoPtr for which the event was queued.
 * \param[in] dataU32  Unused.
 */
void
nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32)
{
    NVDispEvoPtr pDispEvo = dataPtr;
    NVConnectorEvoPtr pConnectorEvo;

    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
        if (nvConnectorUsesDPLib(pConnectorEvo)) {
            /* Connectors managed by the DP library get a short-pulse
             * notification. */
            nvDPNotifyShortPulse(pConnectorEvo->pDpLibConnector);
        } else if (nvConnectorIsDPSerializer(pConnectorEvo)) {
            /* For a DP serializer connector, HPD_IRQ indicates loss of
             * clock/sync, so re-train the link. */
            nvDPSerializerHandleDPIRQ(pDispEvo, pConnectorEvo);
        }
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,539 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* This file contains implementations of the EVO HAL methods for display class
* 1.x, found in the Tesla and Fermi 1 (GF10x) chips.
*/
#include "nvkms-types.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-evo1.h"
#include "nvkms-prealloc.h"
#include "nvkms-utils.h"
#include <ctrl/ctrl5070/ctrl5070chnc.h> // NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS
/*!
 * Initialize the head-independent fields of an IMP param structure.
 *
 * IMP users should call this once on an
 * NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS structure, followed by per-head
 * calls to AssignPerHeadImpParams().
 *
 * \param[in] pImp  A pointer to a param structure.
 */
static void InitImpParams(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp)
{
    int index;

    nvkms_memset(pImp, 0, sizeof(*pImp));

    /* Assume the mode is not possible until IMP says otherwise. */
    pImp->IsPossible = NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_NO;

    /* Mark every head inactive; per-head setup will activate them. */
    for (index = 0; index < NV5070_CTRL_CMD_MAX_HEADS; index++) {
        pImp->Head[index].HeadActive =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_NO;
    }

    /* No OR has an owner yet. */
    for (index = 0; index < NV5070_CTRL_CMD_MAX_DACS; index++) {
        pImp->Dac[index].owner = NV5070_CTRL_CMD_OR_OWNER_NONE;
    }

    /* SOR ownership is expressed as a mask of heads. */
    pImp->bUseSorOwnerMask = TRUE;
    for (index = 0; index < NV5070_CTRL_CMD_MAX_SORS; index++) {
        pImp->Sor[index].ownerMask = NV5070_CTRL_CMD_SOR_OWNER_MASK_NONE;
    }

    for (index = 0; index < NV5070_CTRL_CMD_MAX_PIORS; index++) {
        pImp->Pior[index].owner = NV5070_CTRL_CMD_OR_OWNER_NONE;
    }
}
/*!
 * Initialize head-specific IMP param fields.
 *
 * Initialize the portion of the NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS
 * structure that applies to a specific head, and the OR driven by
 * that head.
 *
 * The param structure should be initialized by InitImpParams()
 * before calling this per-head function.
 *
 * \param[in]  pDevEvo   The device (currently unused in this function).
 * \param[out] pImp      The param structure to initialize.
 * \param[in]  pTimings  The rastering timings and viewport configuration.
 * \param[in]  pUsage    The usage bounds that will be used for this head.
 * \param[in]  head      The number of the head that will be driven.
 * \param[in]  orNumber  The number of the OR driven by the head.
 * \param[in]  orType    The type of the OR driven by the head.
 */
static void AssignPerHeadImpParams(const NVDevEvoRec *pDevEvo,
                                   NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp,
                                   const NVHwModeTimingsEvo *pTimings,
                                   const struct NvKmsUsageBounds *pUsage,
                                   const int head,
                                   const int orNumber,
                                   const int orType)
{
    const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort;
    NvU64 overlayFormats = 0;
    NvU32 protocol;

    nvkms_memset(&pImp->Head[head], 0, sizeof(pImp->Head[head]));

    nvAssert(head < NV5070_CTRL_CMD_MAX_HEADS);

    pImp->Head[head].HeadActive = TRUE;

    /* An OR number is required unless no OR is driven. */
    nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE ||
             orNumber != NV_INVALID_OR);

    /* raster timings */
    pImp->Head[head].PixelClock.Frequency = pTimings->pixelClock;
    pImp->Head[head].PixelClock.Adj1000Div1001 =
        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_NO;
    pImp->Head[head].RasterSize.Width = pTimings->rasterSize.x;
    pImp->Head[head].RasterSize.Height = pTimings->rasterSize.y;
    pImp->Head[head].RasterBlankStart.X = pTimings->rasterBlankStart.x;
    pImp->Head[head].RasterBlankStart.Y = pTimings->rasterBlankStart.y;
    pImp->Head[head].RasterBlankEnd.X = pTimings->rasterBlankEnd.x;
    pImp->Head[head].RasterBlankEnd.Y = pTimings->rasterBlankEnd.y;
    pImp->Head[head].RasterVertBlank2.YStart = pTimings->rasterVertBlank2Start;
    pImp->Head[head].RasterVertBlank2.YEnd = pTimings->rasterVertBlank2End;
    pImp->Head[head].Control.Structure =
        pTimings->interlaced ?
        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_INTERLACED :
        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_PROGRESSIVE;

    /*
     * Assign OR ownership and translate the NVKMS protocol into the
     * corresponding NV5070 control value, per OR type.
     */
    if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_DAC) {
        nvAssert(orNumber < ARRAY_LEN(pImp->Dac));
        nvAssert(pImp->Dac[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE);
        pImp->Dac[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head);
        nvAssert(pTimings->protocol == NVKMS_PROTOCOL_DAC_RGB);
        pImp->Dac[orNumber].protocol = NV5070_CTRL_CMD_DAC_PROTOCOL_RGB_CRT;
    } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
        nvAssert(orNumber < ARRAY_LEN(pImp->Sor));
        pImp->Sor[orNumber].ownerMask |= NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(head);
        switch (pTimings->protocol) {
        default:
            nvAssert(!"Unknown protocol");
            /* fall through */
        case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM:
            protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_LVDS_CUSTOM;
            break;
        case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A:
            protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A;
            break;
        case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B:
            protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B;
            break;
        case NVKMS_PROTOCOL_SOR_DUAL_TMDS:
            protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS;
            break;
        case NVKMS_PROTOCOL_SOR_DP_A:
            protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_A;
            break;
        case NVKMS_PROTOCOL_SOR_DP_B:
            protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_B;
            break;
        }
        pImp->Sor[orNumber].protocol = protocol;
        pImp->Sor[orNumber].pixelReplicateMode =
            NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF;
    } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) {
        nvAssert(orNumber < ARRAY_LEN(pImp->Pior));
        nvAssert(pImp->Pior[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE);
        pImp->Pior[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head);
        switch (pTimings->protocol) {
        default:
            nvAssert(!"Unknown protocol");
            /* fall through */
        case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC:
            protocol = NV5070_CTRL_CMD_PIOR_PROTOCOL_EXT_TMDS_ENC;
            break;
        }
        pImp->Pior[orNumber].protocol = protocol;
    } else {
        nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE);
    }

    /* viewport out (min == max == the configured output viewport) */
    pImp->Head[head].OutputScaler.VerticalTaps =
        NVEvoScalerTapsToNum(pViewPort->vTaps);
    pImp->Head[head].OutputScaler.HorizontalTaps =
        NVEvoScalerTapsToNum(pViewPort->hTaps);
    pImp->Head[head].ViewportSizeOut.Width = pViewPort->out.width;
    pImp->Head[head].ViewportSizeOut.Height = pViewPort->out.height;
    pImp->Head[head].ViewportSizeOutMin.Width =
        pImp->Head[head].ViewportSizeOut.Width;
    pImp->Head[head].ViewportSizeOutMin.Height =
        pImp->Head[head].ViewportSizeOut.Height;
    pImp->Head[head].ViewportSizeOutMax.Width =
        pImp->Head[head].ViewportSizeOut.Width;
    pImp->Head[head].ViewportSizeOutMax.Height =
        pImp->Head[head].ViewportSizeOut.Height;

    /* viewport in */
    pImp->Head[head].ViewportSizeIn.Width = pViewPort->in.width;
    pImp->Head[head].ViewportSizeIn.Height = pViewPort->in.height;

    /*
     * The actual format doesn't really matter, since RM just
     * converts it back to bits per pixel for its IMP calculation anyway.  The
     * hardware doesn't have a "usage bound" for core -- changing the format
     * of the core surface will always incur a supervisor interrupt and rerun
     * IMP (XXX if we change the core surface as part of a flip to one of a
     * different depth, should we force the pre/post IMP update path?).
     *
     * EVO2 hal uses surfaces of the same format in the core and base channels,
     * see needToReprogramCoreSurface() in nvkms-evo2.c.
     */
    if (pUsage->layer[NVKMS_MAIN_LAYER].usable) {
        /* Pick the deepest format the main layer's usage bounds allow. */
        if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
            NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) {
            pImp->Head[head].Params.Format =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_RF16_GF16_BF16_AF16;
        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) {
            pImp->Head[head].Params.Format =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8;
        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) {
            pImp->Head[head].Params.Format =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_R5G6B5;
        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) {
            pImp->Head[head].Params.Format =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_I8;
        } else { /* default to RGB 4BPP */
            nvAssert(!"Unknown core format");
            pImp->Head[head].Params.Format =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8;
        }
    } else {
        pImp->Head[head].Params.Format =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8;
    }
    pImp->Head[head].Params.SuperSample =
        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X1AA;

    /* base usage bounds */
    if (pUsage->layer[NVKMS_MAIN_LAYER].usable) {
        pImp->Head[head].BaseUsageBounds.Usable =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_YES;
        /* Deepest pixel depth allowed by the main layer's usage bounds. */
        if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
            NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) {
            pImp->Head[head].BaseUsageBounds.PixelDepth =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64;
        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) {
            pImp->Head[head].BaseUsageBounds.PixelDepth =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_32;
        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) {
            pImp->Head[head].BaseUsageBounds.PixelDepth =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_16;
        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) {
            pImp->Head[head].BaseUsageBounds.PixelDepth =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_8;
        } else { /* default to RGB 8BPP */
            nvAssert(!"Unknown base channel usage bound format");
            pImp->Head[head].BaseUsageBounds.PixelDepth =
                NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64;
        }
        pImp->Head[head].BaseUsageBounds.SuperSample =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X1AA;
    } else {
        pImp->Head[head].BaseUsageBounds.Usable =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_NO;
    }

    /* overlay usage bounds */
    pImp->Head[head].OverlayUsageBounds.Usable =
        pUsage->layer[NVKMS_OVERLAY_LAYER].usable
        ? NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_YES
        : NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_NO;
    /* Fall back to 2BPP when the overlay is not usable. */
    overlayFormats = pUsage->layer[NVKMS_OVERLAY_LAYER].usable ?
        pUsage->layer[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats :
        NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP;

    if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) {
        pImp->Head[head].OverlayUsageBounds.PixelDepth =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32;
    } else if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) {
        pImp->Head[head].OverlayUsageBounds.PixelDepth =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_16;
    } else {
        nvAssert(!"Unknown overlay channel usage bound format");
        pImp->Head[head].OverlayUsageBounds.PixelDepth =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32;
    }

    /* pixel depth */
    switch (pTimings->pixelDepth) {
    case NVKMS_PIXEL_DEPTH_18_444:
        pImp->Head[head].outputResourcePixelDepthBPP =
            NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444;
        break;
    case NVKMS_PIXEL_DEPTH_24_444:
        pImp->Head[head].outputResourcePixelDepthBPP =
            NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
        break;
    case NVKMS_PIXEL_DEPTH_30_444:
        pImp->Head[head].outputResourcePixelDepthBPP =
            NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444;
        break;
    }
}
/*
 * Query display IMP ("Is Mode Possible") through the NV5070 control
 * interface.
 *
 * Fills an NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS structure from
 * pInput (per-head parameters for each head with non-NULL timings,
 * plus display IDs for all heads), issues the QUERY operation to RM,
 * and stores in pOutput->possible whether the configuration is
 * achievable -- optionally also requiring that it be possible at boot
 * clocks (P8 or undefined pstate).
 */
void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo,
                          const NVEvoIsModePossibleDispInput *pInput,
                          NVEvoIsModePossibleDispOutput *pOutput)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp =
        nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp));
    NvBool possible = FALSE;
    NvU32 h;
    NvU32 status;

    InitImpParams(pImp);

    pImp->RequestedOperation =
        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY;

    for (h = 0; h < NVKMS_MAX_HEADS_PER_DISP; h++) {
        if (pInput->head[h].pTimings != NULL) {
            AssignPerHeadImpParams(pDevEvo, pImp,
                                   pInput->head[h].pTimings,
                                   pInput->head[h].pUsage,
                                   h,
                                   pInput->head[h].orIndex,
                                   pInput->head[h].orType);
        }
    }

    pImp->base.subdeviceIndex = pDispEvo->displayOwner;

    if (pInput->requireBootClocks) {
        // XXX TODO: IMP requires lock pin information if pstate information is
        // requested. For now, just assume no locking.
        pImp->MinPState = NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE;
    }

    for (h = 0; h < NVKMS_MAX_HEADS_PER_DISP; h++) {
        pImp->Head[h].displayId[0] = pInput->head[h].displayId;
    }

    status = nvRmApiControl(nvEvoGlobal.clientHandle,
                            pDevEvo->displayHandle,
                            NV5070_CTRL_CMD_IS_MODE_POSSIBLE,
                            pImp, sizeof(*pImp));

    if (status == NV_OK && pImp->IsPossible) {
        possible = TRUE;

        if (pInput->requireBootClocks &&
            // XXX TODO: With PStates 3.0, only a "v-pstate" is returned in
            // impParams.minPerfLevel. We need to correlate that with "boot
            // clocks" somehow.
            pImp->MinPState != NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_UNDEFINED &&
            // P8 = "boot clocks"
            pImp->MinPState < NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P8) {
            possible = FALSE;
        }
    }

    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS);

    pOutput->possible = possible;
}
/*
 * Notify RM IMP immediately before or after a modeset, using the
 * display state cache (USE_SC operations).
 *
 * When isPre is TRUE, the core channel is synced first so the state
 * cache reflects every method pushed so far, then the PRE_MODESET
 * operation is issued; otherwise the POST_MODESET operation is issued.
 * Failures are reported via assertions only.
 */
void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp =
        nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp));
    NvU32 status;

    if (isPre) {
        /*
         * Sync the core channel for pre-modeset IMP to ensure that the state
         * cache reflects all of the methods we've pushed.
         */
        status = nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
        if (!status) {
            nvAssert(!"nvRMSyncEvoChannel failed during PreModesetIMP");
        }
    }

    nvkms_memset(pImp, 0, sizeof(*pImp));

    if (isPre) {
        pImp->RequestedOperation =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC;
    } else {
        pImp->RequestedOperation =
            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC;
    }
    pImp->base.subdeviceIndex = pDispEvo->displayOwner;

    status = nvRmApiControl(nvEvoGlobal.clientHandle,
                            pDevEvo->displayHandle,
                            NV5070_CTRL_CMD_IS_MODE_POSSIBLE,
                            pImp, sizeof(*pImp));
    if ((status != NVOS_STATUS_SUCCESS) || !pImp->IsPossible) {
        nvAssert(!"NV5070_CTRL_CMD_IS_MODE_POSSIBLE failed");
    }

    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS);
}
/*!
 * Return the value to use for HEAD_SET_STORAGE_PITCH.
 *
 * Per dispClass_02.mfs, the HEAD_SET_STORAGE_PITCH "units are blocks
 * if the layout is BLOCKLINEAR, the units are multiples of 256 bytes
 * if the layout is PITCH."
 *
 * \return  Returns 0 if the pitch is invalid.  Otherwise returns the
 *          HEAD_SET_STORAGE_PITCH value.
 */
NvU32 nvEvoGetHeadSetStoragePitchValue(const NVDevEvoRec *pDevEvo,
                                       enum NvKmsSurfaceMemoryLayout layout,
                                       NvU32 pitch)
{
    if (layout != NvKmsSurfaceMemoryLayoutBlockLinear) {
        /*
         * Pitch layout: the incoming value is in bytes; it must be a
         * multiple of 256, and is converted to units of 256 bytes.
         */
        if ((pitch % 256) != 0) {
            return 0;
        }
        pitch /= 256;
    }
    /* Blocklinear layout: pitch is already in units of blocks. */

    /* Reject values larger than the display class can program. */
    return (pitch <= pDevEvo->caps.maxPitchValue) ? pitch : 0;
}
/*
 * Query the current hardware state of the given EVO channel on
 * subdevice 'sd' via NV5070_CTRL_CMD_GET_CHANNEL_INFO.
 *
 * On success, stores the channel state in *result and returns TRUE;
 * on RM failure, logs an error and returns FALSE.
 */
static NvBool GetChannelState(NVDevEvoPtr pDevEvo,
                              NVEvoChannelPtr pChan,
                              NvU32 sd,
                              NvU32 *result)
{
    NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { };
    NvU32 status;

    info.base.subdeviceIndex = sd;
    info.channelClass = pChan->hwclass;
    info.channelInstance = pChan->instance;

    status = nvRmApiControl(nvEvoGlobal.clientHandle,
                            pDevEvo->displayHandle,
                            NV5070_CTRL_CMD_GET_CHANNEL_INFO,
                            &info, sizeof(info));
    if (status != NVOS_STATUS_SUCCESS) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
            "Failed to query display engine channel state: 0x%08x:%d:%d:0x%08x",
            pChan->hwclass, pChan->instance, sd, status);
        return FALSE;
    }

    *result = info.channelState;

    return TRUE;
}
/*
 * Report whether the given EVO channel is in the IDLE state on
 * subdevice 'sd'.
 *
 * Returns FALSE if the channel state could not be queried; otherwise
 * returns TRUE with *result set to whether the channel is IDLE.
 */
NvBool nvEvo1IsChannelIdle(NVDevEvoPtr pDevEvo,
                           NVEvoChannelPtr pChan,
                           NvU32 sd,
                           NvBool *result)
{
    NvU32 state;

    if (!GetChannelState(pDevEvo, pChan, sd, &state)) {
        return FALSE;
    }

    *result = (state == NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE);

    return TRUE;
}
/*
 * Report whether an EVO channel still has methods pending.
 *
 * *result is set to FALSE if the channel state is any of the
 * NO_METHOD_PENDING states (a mask for EMPTY | WRTIDLE | IDLE) or
 * UNCONNECTED, and TRUE otherwise.
 *
 * If NVKMS hasn't grabbed the channel, it can be seen as UNCONNECTED.
 *
 * Returns FALSE if the channel state could not be queried.
 */
NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr pDevEvo,
                                    NVEvoChannelPtr pChan,
                                    NvU32 sd,
                                    NvBool *result)
{
    const NvU32 quiescentMask =
        NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING |
        NV5070_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED;
    NvU32 state;

    if (!GetChannelState(pDevEvo, pChan, sd, &state)) {
        return FALSE;
    }

    *result = ((state & quiescentMask) == 0);

    return TRUE;
}
/*
 * Set DSC (Display Stream Compression) parameters for the given head.
 *
 * This HAL implementation programs nothing: it only asserts that mode
 * validation did not enable DP DSC for these timings.  (Presumably the
 * EVO display classes served by this HAL have no DSC support -- confirm
 * against the HAL capability tables before relying on this.)
 */
void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo,
                        const NvU32 head,
                        const NVHwModeTimingsEvo *pTimings)
{
    nvAssert(!pTimings->dpDsc.enable);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,212 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-types.h"
#include "nvkms-cursor.h"
#include "nvkms-hal.h"
#include "nvkms-rm.h"
#include "class/cl9470.h" // NV9470_DISPLAY
#include "class/cl9570.h" // NV9570_DISPLAY
#include "class/cl9770.h" // NV9770_DISPLAY
#include "class/cl9870.h" // NV9870_DISPLAY
#include "class/clc370.h" // NVC370_DISPLAY
#include "class/clc570.h" // NVC570_DISPLAY
#include "class/clc670.h" // NVC670_DISPLAY
#include "class/cl947d.h" // NV947D_CORE_CHANNEL_DMA
#include "class/cl957d.h" // NV957D_CORE_CHANNEL_DMA
#include "class/cl977d.h" // NV977D_CORE_CHANNEL_DMA
#include "class/cl987d.h" // NV987D_CORE_CHANNEL_DMA
#include "class/clc37d.h" // NVC37D_CORE_CHANNEL_DMA
#include "class/clc37e.h" // NVC37E_WINDOW_CHANNEL_DMA
#include "class/clc57d.h" // NVC57D_CORE_CHANNEL_DMA
#include "class/clc57e.h" // NVC57E_WINDOW_CHANNEL_DMA
#include "class/clc67d.h" // NVC67D_CORE_CHANNEL_DMA
#include "class/clc67e.h" // NVC67E_WINDOW_CHANNEL_DMA
extern NVEvoHAL nvEvo94;
extern NVEvoHAL nvEvoC3;
extern NVEvoHAL nvEvoC5;
extern NVEvoHAL nvEvoC6;
/*
 * Identify which display class this device supports and assign the
 * matching HAL, capability record, and core channel DMA description.
 *
 * dispTable below is built with the ENTRY/ENTRY_EVO/ENTRY_NVD macros
 * and is ordered newest class first; the first class that
 * nvRmEvoClassListCheck() reports as supported wins.
 *
 * \return  The status of nvInitDispHalCursorEvo() for the matched
 *          class, or NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE
 *          if no class in the table is supported.
 */
enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo)
{
/*
 * Build one dispTable[] initializer for display class
 * NV<_classPrefix>70 with core channel class NV<_classPrefix>7D.
 */
#define ENTRY(_classPrefix,                                       \
              _pEvoHal,                                           \
              _supportsInbandStereoSignaling,                     \
              _supportsDP13,                                      \
              _supportsHDMI20,                                    \
              _inputLutAppliesToBase,                             \
              _genericPageKind,                                   \
              _validNIsoFormatMask,                               \
              _maxPitch,                                          \
              _maxWidthInBytes,                                   \
              _maxWidthInPixels,                                  \
              _maxHeight,                                         \
              _coreChannelDmaArmedOffset,                         \
              _dmaArmedSize)                                      \
    {                                                             \
        .class = NV ## _classPrefix ## 70_DISPLAY,                \
        .pEvoHal = _pEvoHal,                                      \
        .coreChannelDma = {                                       \
            .coreChannelClass =                                   \
                NV ## _classPrefix ## 7D_CORE_CHANNEL_DMA,        \
            .dmaArmedSize = _dmaArmedSize,                        \
            .dmaArmedOffset =                                     \
                _coreChannelDmaArmedOffset,                       \
        },                                                        \
        .evoCaps = {                                              \
            .supportsDP13 = _supportsDP13,                        \
            .supportsInbandStereoSignaling =                      \
                _supportsInbandStereoSignaling,                   \
            .supportsHDMI20 = _supportsHDMI20,                    \
            .validNIsoFormatMask = _validNIsoFormatMask,          \
            .inputLutAppliesToBase = _inputLutAppliesToBase,      \
            .maxPitchValue = _maxPitch,                           \
            .maxWidthInBytes = _maxWidthInBytes,                  \
            .maxWidthInPixels = _maxWidthInPixels,                \
            .maxHeight = _maxHeight,                              \
            .genericPageKind = _genericPageKind,                  \
            .maxRasterWidth = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_WIDTH), \
            .maxRasterHeight = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_HEIGHT),\
        }                                                         \
    }

#define EVO_CORE_CHANNEL_DMA_ARMED_OFFSET 0x0
#define EVO_CORE_CHANNEL_DMA_ARMED_SIZE 0x1000

/* Pre-NVDisplay EVO entries */
#define ENTRY_EVO(_classPrefix, ...)                              \
    ENTRY(_classPrefix, __VA_ARGS__,                              \
          ((1 << NVKMS_NISO_FORMAT_LEGACY) |                      \
           (1 << NVKMS_NISO_FORMAT_FOUR_WORD)),                   \
          DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH), \
          DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH) * \
          NVKMS_BLOCK_LINEAR_GOB_WIDTH,                           \
          DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_WIDTH), \
          DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_HEIGHT), \
          EVO_CORE_CHANNEL_DMA_ARMED_OFFSET,                      \
          EVO_CORE_CHANNEL_DMA_ARMED_SIZE)

/*
 * The file
 * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_display_withoffset.ref.txt
 * defines:
 *
 *  #define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE 0x00680000
 *  #define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE (0x00680000+32768)
 *
 * The NVD_CORE_CHANNEL_DMA_ARMED_OFFSET is calculated as
 * (NV_UDISP_FE_CHN_ARMED_BASEADR_CORE - NV_UDISP_FE_CHN_ASSY_BASEADR_CORE).
 */
#define NVD_CORE_CHANNEL_DMA_ARMED_OFFSET 0x8000

/*
 * From the above in dev_display_withoffset.ref.txt, ARMED is the upper
 * 32k of the core channel's 64k space.
 */
#define NVD_CORE_CHANNEL_DMA_ARMED_SIZE 0x8000

/*
 * The file
 * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/turing/tu104/dev_mmu.ref.txt
 * defines:
 *
 *  #define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x06
 *
 * The file
 * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt
 * defines:
 *
 *  #define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe
 *
 * Which correspond to the "generic" page kind used for non-compressed single-
 * sample blocklinear color images on Turing+ and pre-Turing GPUs respectively.
 * This is the only blocklinear memory layout display ever cares about.
 */
#define TURING_GENERIC_KIND 0x06
#define FERMI_GENERIC_KIND 0xfe

/* NVDisplay and later entries */
#define ENTRY_NVD(_classPrefix, ...)                              \
    ENTRY(_classPrefix, __VA_ARGS__,                              \
          (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY),           \
          DRF_MASK(NV ## _classPrefix ## 7E_SET_PLANAR_STORAGE_PITCH), \
          DRF_MASK(NV ## _classPrefix ## 7E_SET_PLANAR_STORAGE_PITCH) * \
          NVKMS_BLOCK_LINEAR_GOB_WIDTH,                           \
          DRF_MASK(NV ## _classPrefix ## 7E_SET_SIZE_IN_WIDTH),   \
          DRF_MASK(NV ## _classPrefix ## 7E_SET_SIZE_IN_WIDTH),   \
          NVD_CORE_CHANNEL_DMA_ARMED_OFFSET,                      \
          NVD_CORE_CHANNEL_DMA_ARMED_SIZE)

    static const struct {
        NvU32 class;
        const NVEvoHAL *pEvoHal;
        const NVEvoCoreChannelDmaRec coreChannelDma;
        const NVEvoCapsRec evoCaps;
    } dispTable[] = {
        /*
         *  genericPageKind--------------------+
         *  inputLutAppliesToBase --------+    |
         *  supportsHDMI20 ------------+  |    |
         *  supportsDP13 -----------+  |  |    |
         *  inbandStereoSignaling+  |  |  |    |
         *  pEvoHal ----------+  |  |  |  |    |
         *  classPrefix       |  |  |  |  |    |
         *      |             |  |  |  |  |    |
         */
        ENTRY_NVD(C6, &nvEvoC6, 1, 1, 1, 0, TURING_GENERIC_KIND),
        ENTRY_NVD(C5, &nvEvoC5, 1, 1, 1, 0, TURING_GENERIC_KIND),
        ENTRY_NVD(C3, &nvEvoC3, 1, 1, 1, 0, FERMI_GENERIC_KIND),
        ENTRY_EVO(98, &nvEvo94, 1, 1, 1, 1, FERMI_GENERIC_KIND),
        ENTRY_EVO(97, &nvEvo94, 1, 1, 1, 1, FERMI_GENERIC_KIND),
        ENTRY_EVO(95, &nvEvo94, 1, 0, 1, 1, FERMI_GENERIC_KIND),
        ENTRY_EVO(94, &nvEvo94, 1, 0, 0, 1, FERMI_GENERIC_KIND),
    };

    int i;

    /* Take the first (newest) display class RM reports as supported. */
    for (i = 0; i < ARRAY_LEN(dispTable); i++) {
        if (nvRmEvoClassListCheck(pDevEvo, dispTable[i].class)) {
            pDevEvo->hal = dispTable[i].pEvoHal;
            pDevEvo->dispClass = dispTable[i].class;
            pDevEvo->caps = dispTable[i].evoCaps;
            pDevEvo->coreChannelDma = dispTable[i].coreChannelDma;

            /*
             * The core channel class implied by the display class
             * should also be supported.
             */
            nvAssert(nvRmEvoClassListCheck(
                        pDevEvo,
                        pDevEvo->coreChannelDma.coreChannelClass));

            return nvInitDispHalCursorEvo(pDevEvo);
        }
    }

    return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,391 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-lut.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-dma.h"
#include "nvkms-utils.h"
#include "nvos.h"
#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
/*
 * Tear down a vidmem-backed LUT surface: unmap its CPU mappings, free
 * its display context DMA, release the RM memory object and handle,
 * and finally free the tracking structure.  A NULL pSurfEvo is
 * silently ignored.
 */
static void FreeLutSurfaceEvoInVidmem(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    nvRmEvoUnMapVideoMemory(pDevEvo, pSurfEvo->handle,
                            pSurfEvo->subDeviceAddress);

    /* Free display context dmas for the surface, if any. */
    nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma);

    /* Release the RM memory object and its handle. */
    if (pSurfEvo->handle != 0) {
        const NvU32 status = nvRmApiFree(nvEvoGlobal.clientHandle,
                                         pDevEvo->deviceHandle,
                                         pSurfEvo->handle);

        if (status != NVOS_STATUS_SUCCESS) {
            nvAssert(!"Freeing LUT surface failed");
        }

        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
        pSurfEvo->handle = 0;
    }

    nvFree(pSurfEvo);
}
/*
 * Allocate a LUT surface in video memory: an RM vidmem allocation big
 * enough for NVEvoLutDataRec, a display context DMA bound to it, and
 * CPU mappings on all subdevices.
 *
 * Returns the new surface, or NULL on failure; partially-constructed
 * state is torn down via FreeLutSurfaceEvoInVidmem().
 */
static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
{
    NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
    NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
    NvU32 attr = 0, attr2 = 0;
    NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
                       NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
    NvU64 size = 0, alignment = 4096;
    NVLutSurfaceEvoPtr pSurfEvo;

    pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
    if (pSurfEvo == NULL) {
        return NULL;
    }

    pSurfEvo->pDevEvo = pDevEvo;

    /* Round the LUT data size up to a 64-byte multiple. */
    size = (sizeof(NVEvoLutDataRec) + 63) & ~63;

    pSurfEvo->size = size;

    pSurfEvo->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
    if (pSurfEvo->handle == 0) {
        goto fail;
    }

    attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr);
    attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, attr2);

    alignment = NV_MAX(alignment, NV_EVO_SURFACE_ALIGNMENT);

    /*
     * NOTE(review): ALIGNMENT_FORCE is already in allocFlags above and
     * alignment starts at 4096, so this 'if' looks redundant -- confirm
     * before simplifying.
     */
    if (alignment != 0) {
        allocFlags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
    }

    memAllocParams.owner = NVKMS_RM_HEAP_ID;
    memAllocParams.type = NVOS32_TYPE_IMAGE;
    memAllocParams.size = size;
    memAllocParams.attr = attr;
    memAllocParams.attr2 = attr2;
    memAllocParams.flags = allocFlags;
    memAllocParams.alignment = alignment;

    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
                       pDevEvo->deviceHandle,
                       pSurfEvo->handle,
                       NV01_MEMORY_LOCAL_USER,
                       &memAllocParams);

    /* If we failed the allocation above, abort */
    if (ret != NVOS_STATUS_SUCCESS) {
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
        pSurfEvo->handle = 0;
        goto fail;
    }

    /* Allocate a display context dma */
    pSurfEvo->dispCtxDma =
        nvRmEvoAllocateAndBindDispContextDMA(pDevEvo,
                                             pSurfEvo->handle,
                                             NvKmsSurfaceMemoryLayoutPitch,
                                             pSurfEvo->size - 1);
    if (!pSurfEvo->dispCtxDma) {
        goto fail;
    }

    /* Map the surface for the CPU */
    if (!nvRmEvoMapVideoMemory(pSurfEvo->pDevEvo,
                               pSurfEvo->handle, pSurfEvo->size,
                               pSurfEvo->subDeviceAddress,
                               SUBDEVICE_MASK_ALL)) {
        goto fail;
    }

    return pSurfEvo;

fail:
    /* An error occurred -- free the surface */
    FreeLutSurfaceEvoInVidmem(pSurfEvo);

    return NULL;
}
/*
 * Tear down a sysmem-backed LUT surface: free its display context DMA,
 * unmap its (single) CPU mapping, release the RM memory object and
 * handle, and free the tracking structure.  A NULL pSurfEvo is
 * silently ignored.
 */
static void FreeLutSurfaceEvoInSysmem(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    /* Free display context dmas for the surface, if any */
    nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma);

    /* Free the surface */
    if (pSurfEvo->handle) {
        NvU32 result;

        if (pSurfEvo->subDeviceAddress[0] != NULL) {
            /*
             * SOC display devices should only have one subdevice
             * (and therefore it is safe to unmap only subDeviceAddress[0])
             * for reasons described in AllocLutSurfaceEvoInSysmem
             */
            nvAssert(pDevEvo->numSubDevices == 1);

            result = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
                                        pDevEvo->deviceHandle,
                                        pSurfEvo->handle,
                                        pSurfEvo->subDeviceAddress[0],
                                        0);
            if (result != NVOS_STATUS_SUCCESS) {
                nvAssert(!"Unmapping LUT surface failed");
            }

            pSurfEvo->subDeviceAddress[0] = NULL;
        }

        result = nvRmApiFree(nvEvoGlobal.clientHandle,
                             pDevEvo->deviceHandle, pSurfEvo->handle);
        if (result != NVOS_STATUS_SUCCESS) {
            nvAssert(!"Freeing LUT surface failed");
        }

        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
    }

    nvFree(pSurfEvo);
}
/*
 * Allocate a LUT surface in system memory (the path used when
 * pDevEvo->requiresAllAllocationsInSysmem is set, i.e. SOC display):
 * an ISO sysmem allocation big enough for NVEvoLutDataRec, a display
 * context DMA bound to it, and the CPU mapping returned by the
 * allocation itself.
 *
 * Returns the new surface, or NULL on failure; partially-constructed
 * state is torn down via FreeLutSurfaceEvoInSysmem().
 */
static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo)
{
    NvU32 memoryHandle = 0;
    void *pBase = NULL;
    NvU64 size = 0;
    NVLutSurfaceEvoPtr pSurfEvo;

    pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
    if (pSurfEvo == NULL) {
        return NULL;
    }

    pSurfEvo->pDevEvo = pDevEvo;

    /* Round the LUT data size up to a 64-byte multiple. */
    size = (sizeof(NVEvoLutDataRec) + 63) & ~63;

    pSurfEvo->size = size;

    memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
    if (memoryHandle == 0) {
        goto fail;
    }

    /* Allocate the LUT memory from sysmem */
    if (!nvRmAllocSysmem(pDevEvo, memoryHandle, NULL, &pBase, size,
                         NVKMS_MEMORY_ISO)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Unable to allocate LUT memory from sysmem");
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
        goto fail;
    }

    pSurfEvo->handle = memoryHandle;

    /* Allocate and bind a display context dma */
    pSurfEvo->dispCtxDma =
        nvRmEvoAllocateAndBindDispContextDMA(pDevEvo,
                                             pSurfEvo->handle,
                                             NvKmsSurfaceMemoryLayoutPitch,
                                             pSurfEvo->size - 1);
    if (!pSurfEvo->dispCtxDma) {
        goto fail;
    }

    /*
     * AllocLutSurfaceEvoInSysmem() will only be called if
     * pDevEvo->requiresAllAllocationsInSysmem is TRUE. NVKMS will only set this
     * cap bit for SOC display devices, and these devices should only have one
     * subdevice.
     */
    nvAssert(pDevEvo->numSubDevices == 1);
    pSurfEvo->subDeviceAddress[0] = pBase;

    return pSurfEvo;

fail:
    /* An error occurred -- free the surface */
    FreeLutSurfaceEvoInSysmem(pSurfEvo);

    return NULL;
}
/*
 * Free a LUT surface, dispatching to the sysmem or vidmem variant
 * depending on where the device must allocate.  NULL is ignored.
 */
static void FreeLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo)
{
    if (pSurfEvo == NULL) {
        return;
    }

    if (pSurfEvo->pDevEvo->requiresAllAllocationsInSysmem) {
        FreeLutSurfaceEvoInSysmem(pSurfEvo);
    } else {
        FreeLutSurfaceEvoInVidmem(pSurfEvo);
    }
}
/*
 * Allocate a LUT surface from sysmem or vidmem, depending on where the
 * device requires its allocations to live.
 */
static NVLutSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo)
{
    return pDevEvo->requiresAllAllocationsInSysmem ?
           AllocLutSurfaceEvoInSysmem(pDevEvo) :
           AllocLutSurfaceEvoInVidmem(pDevEvo);
}
/*
 * Allocate every LUT surface the device needs: the per-head LUT array
 * and, when the HAL asks for one, the device-wide default LUT (which
 * is then initialized through the HAL).
 *
 * On any allocation failure, everything allocated so far is released
 * via nvFreeLutSurfacesEvo() and FALSE is returned.
 */
NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo)
{
    NVDispEvoPtr pDispEvo;
    NvU32 head, dispIndex, lut;

    for (head = 0; head < pDevEvo->numHeads; head++) {
        for (lut = 0; lut < ARRAY_LEN(pDevEvo->lut.head[head].LUT); lut++) {
            pDevEvo->lut.head[head].LUT[lut] = AllocLutSurfaceEvo(pDevEvo);

            if (pDevEvo->lut.head[head].LUT[lut] == NULL) {
                nvFreeLutSurfacesEvo(pDevEvo);
                return FALSE;
            }
        }

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
            /* No palette has been loaded yet, so disable the LUT. */
            pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate =
                FALSE;
            pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = FALSE;
            pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled =
                FALSE;
        }
    }

    if (!pDevEvo->hal->caps.needDefaultLutSurface) {
        return TRUE;
    }

    /* Some HALs also require a device-wide default LUT surface. */
    pDevEvo->lut.defaultLut = AllocLutSurfaceEvo(pDevEvo);
    if (pDevEvo->lut.defaultLut == NULL) {
        nvFreeLutSurfacesEvo(pDevEvo);
        return FALSE;
    }

    pDevEvo->hal->InitDefaultLut(pDevEvo);

    return TRUE;
}
/*
 * Free all LUT surfaces owned by the device: cancel any pending LUT
 * update timers, sync the core channel so no outstanding LUT update
 * still references the memory, then release the default LUT and every
 * per-head LUT surface.
 */
void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo)
{
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, head, lut;

    /* Cancel any queued LUT update timers. */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        for (head = 0; head < pDevEvo->numHeads; head++) {
            nvCancelLutUpdateEvo(pDispEvo, head);
        }
    }

    /* Wait for any outstanding LUT updates before freeing the surfaces. */
    if (pDevEvo->core) {
        nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
    }

    if (pDevEvo->lut.defaultLut != NULL) {
        FreeLutSurfaceEvo(pDevEvo->lut.defaultLut);
        pDevEvo->lut.defaultLut = NULL;
    }

    for (head = 0; head < pDevEvo->numHeads; head++) {
        for (lut = 0; lut < ARRAY_LEN(pDevEvo->lut.head[head].LUT); lut++) {
            NVLutSurfaceEvoPtr pSurfEvo = pDevEvo->lut.head[head].LUT[lut];

            if (pSurfEvo != NULL) {
                FreeLutSurfaceEvo(pSurfEvo);
                pDevEvo->lut.head[head].LUT[lut] = NULL;
            }
        }
    }
}
/*
 * Copy LUT data into the CPU mapping of a LUT surface, one 32-bit word
 * at a time, for the subdevice that owns pDispEvo.
 *
 * A NULL pSurfEvo is asserted against and ignored.
 */
void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo,
                                 const NVEvoLutDataRec *pLUTBuffer,
                                 NVDispEvoPtr pDispEvo)
{
    const int sd = pDispEvo->displayOwner;
    const NvU32 *src = (const NvU32 *)pLUTBuffer;
    const size_t numDwords = sizeof(*pLUTBuffer) / 4;
    NvU32 *dst;
    size_t i;

    if (pSurfEvo == NULL) {
        nvAssert(pSurfEvo);
        return;
    }

    nvAssert(pSurfEvo->subDeviceAddress[sd]);

    /* The size to copy should not be larger than the surface. */
    nvAssert(sizeof(*pLUTBuffer) <= pSurfEvo->size);

    /* The source, destination, and size should be 4-byte aligned. */
    nvAssert((((NvUPtr)src) & 0x3) == 0);
    nvAssert((((NvUPtr)pSurfEvo->subDeviceAddress[sd]) & 0x3) == 0);
    nvAssert((sizeof(*pLUTBuffer) % 4) == 0);

    dst = (NvU32 *)pSurfEvo->subDeviceAddress[sd];

    for (i = 0; i < numDwords; i++) {
        dst[i] = src[i];
    }
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,146 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-types.h"
#include "nvkms-flip-workarea.h"
#include "nvkms-modeset-types.h"
#include "nvkms-modeset-workarea.h"
#include "nvkms-prealloc.h"
#include "nvkms-utils.h"
#include "nvkms-api.h"
#include <nvmisc.h>
/*
 * Return the allocation size, in bytes, for the given prealloc type.
 *
 * Returns 0 (after a debug log) for an unrecognized type;
 * nvPreallocAlloc() treats a zero size as failure.
 */
static size_t GetSizeForType(NVDevEvoPtr pDevEvo, enum NVPreallocType type)
{
    switch (type) {
    case PREALLOC_TYPE_IMP_PARAMS:
        /* The IMP parameter struct size is display-class-specific. */
        return pDevEvo->hal->caps.impStructSize;
    case PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE: /* fall through */
    case PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE:
        return sizeof(struct NvKmsSetModeParams);
    case PREALLOC_TYPE_MODE_SET_WORK_AREA:
        return sizeof(NVModeSetWorkArea);
    case PREALLOC_TYPE_FLIP_WORK_AREA:
        return sizeof(struct NvKmsFlipWorkArea);
    case PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE: /* fallthrough */
    case PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE:
        return sizeof(NVProposedModeSetHwState);
    case PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS:
        return sizeof(NVHwModeTimingsEvo);
    case PREALLOC_TYPE_MAX:
        /* Not a real option, but added for -Wswitch-enum */
        break;
    }

    nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
                     "Unknown prealloc type %d in GetSizeForType.", type);
    return 0;
}
/*
 * Claim the preallocated buffer of the given type.
 *
 * sizeCheck must match the size computed for 'type'; this catches
 * callers whose structure definition disagrees with the allocation.
 * Returns NULL (after an assertion or debug log) on a size mismatch or
 * if the buffer is already in use; otherwise marks the buffer used and
 * returns it.
 */
void *nvPreallocGet(
    NVDevEvoPtr pDevEvo,
    enum NVPreallocType type,
    size_t sizeCheck)
{
    struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc;
    const size_t size = GetSizeForType(pDevEvo, type);
    const NvU32 byteIndex = type / 8;
    const NvU8 bitMask = NVBIT(type % 8);

    if (size != sizeCheck) {
        nvAssert(size == sizeCheck);
        return NULL;
    }

    if ((pPrealloc->used[byteIndex] & bitMask) != 0) {
        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
            "Prealloc type %d already used in nvPreallocGet.", type);
        return NULL;
    }

    /* Since these are preallocated, they should not be NULL. */
    if (pPrealloc->ptr[type] == NULL) {
        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
            "Prealloc type %d NULL in nvPreallocGet.", type);
    }

    pPrealloc->used[byteIndex] |= bitMask;

    return pPrealloc->ptr[type];
}
/*
 * Return a buffer claimed via nvPreallocGet() to the pool, clearing
 * its "used" bit.  Logs if the buffer was not marked as in use.
 */
void nvPreallocRelease(
    NVDevEvoPtr pDevEvo,
    enum NVPreallocType type)
{
    struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc;
    const NvU32 byteIndex = type / 8;
    const NvU8 bitMask = NVBIT(type % 8);

    if ((pPrealloc->used[byteIndex] & bitMask) == 0) {
        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
            "Prealloc type %d not used in nvPreallocRelease.", type);
    }

    pPrealloc->used[byteIndex] &= ~bitMask;
}
/*
 * Allocate one buffer per prealloc type and mark all of them unused.
 *
 * On any failure (unknown type size, or out of memory) every buffer
 * allocated so far is released via nvPreallocFree() and FALSE is
 * returned.
 */
NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo)
{
    struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc;
    NvU32 i;

    for (i = 0; i < PREALLOC_TYPE_MAX; i++) {
        const size_t size = GetSizeForType(pDevEvo, i);

        if ((size == 0) ||
            ((pPrealloc->ptr[i] = nvAlloc(size)) == NULL)) {
            nvPreallocFree(pDevEvo);
            return FALSE;
        }
    }

    nvkms_memset(pPrealloc->used, 0, sizeof(pPrealloc->used));

    return TRUE;
}
/*
 * Free every preallocated buffer and NULL out its pointer.  Logs if
 * any buffer is still marked in use at teardown.
 */
void nvPreallocFree(NVDevEvoPtr pDevEvo)
{
    struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc;
    NvU32 i;

    for (i = 0; i < PREALLOC_TYPE_MAX; i++) {
        if ((pPrealloc->used[i / 8] & NVBIT(i % 8)) != 0) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
                "Prealloc type %d still used in nvPreallocFree.", i);
        }

        nvFree(pPrealloc->ptr[i]);
        pPrealloc->ptr[i] = NULL;
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,260 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-kernel-rmapi-ops.h"
#include "nvidia-modeset-os-interface.h"
#include "nvkms-rmapi.h"
/*
 * Allocate an RM object of class 'hClass' under 'hParent', with new
 * handle 'hObject', by forwarding NV04_ALLOC through nvkms_call_rm().
 * Returns the RM status.
 */
NvU32 nvRmApiAlloc(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hObject,
    NvU32 hClass,
    void *pAllocParams)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_ALLOC;
    rmOps.params.alloc.hRoot = hClient;
    rmOps.params.alloc.hObjectParent = hParent;
    rmOps.params.alloc.hObjectNew = hObject;
    rmOps.params.alloc.hClass = hClass;
    rmOps.params.alloc.pAllocParms = NV_PTR_TO_NvP64(pAllocParams);

    nvkms_call_rm(&rmOps);

    return rmOps.params.alloc.status;
}
/*
 * Allocate RM memory via NV01_ALLOC_MEMORY (64-bit variant).
 *
 * *ppAddress and *pLimit are both input and output: they are passed to
 * RM and overwritten with the values RM returns.  Returns the RM
 * status.
 */
NvU32 nvRmApiAllocMemory64(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hMemory,
    NvU32 hClass,
    NvU32 flags,
    void **ppAddress,
    NvU64 *pLimit)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV01_ALLOC_MEMORY;
    rmOps.params.allocMemory64.hRoot = hClient;
    rmOps.params.allocMemory64.hObjectParent = hParent;
    rmOps.params.allocMemory64.hObjectNew = hMemory;
    rmOps.params.allocMemory64.hClass = hClass;
    rmOps.params.allocMemory64.flags = flags;
    rmOps.params.allocMemory64.pMemory = NV_PTR_TO_NvP64(*ppAddress);
    rmOps.params.allocMemory64.limit = *pLimit;

    nvkms_call_rm(&rmOps);

    /* Propagate the values RM handed back. */
    *ppAddress = NvP64_VALUE(rmOps.params.allocMemory64.pMemory);
    *pLimit = rmOps.params.allocMemory64.limit;

    return rmOps.params.allocMemory64.status;
}
/*
 * Issue the RM control command 'cmd' against 'hObject' via NV04_CONTROL.
 * Returns the RM status.
 */
NvU32 nvRmApiControl(
    NvU32 hClient,
    NvU32 hObject,
    NvU32 cmd,
    void *pParams,
    NvU32 paramsSize)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_CONTROL;
    rmOps.params.control.hClient = hClient;
    rmOps.params.control.hObject = hObject;
    rmOps.params.control.cmd = cmd;
    rmOps.params.control.paramsSize = paramsSize;
    rmOps.params.control.params = NV_PTR_TO_NvP64(pParams);

    nvkms_call_rm(&rmOps);

    return rmOps.params.control.status;
}
/*
 * Duplicate RM object (hClientSrc, hObjectSrc) into this client as
 * hObjectDest under hParent, via NV04_DUP_OBJECT.  Returns the RM
 * status.
 */
NvU32 nvRmApiDupObject(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hObjectDest,
    NvU32 hClientSrc,
    NvU32 hObjectSrc,
    NvU32 flags)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_DUP_OBJECT;
    rmOps.params.dupObject.hClient = hClient;
    rmOps.params.dupObject.hParent = hParent;
    rmOps.params.dupObject.hObject = hObjectDest;
    rmOps.params.dupObject.hClientSrc = hClientSrc;
    rmOps.params.dupObject.hObjectSrc = hObjectSrc;
    rmOps.params.dupObject.flags = flags;

    nvkms_call_rm(&rmOps);

    return rmOps.params.dupObject.status;
}
/*
 * Free RM object 'hObject' (child of 'hParent') via NV01_FREE.
 * Returns the RM status.
 */
NvU32 nvRmApiFree(
    NvU32 hClient,
    NvU32 hParent,
    NvU32 hObject)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV01_FREE;
    rmOps.params.free.hRoot = hClient;
    rmOps.params.free.hObjectParent = hParent;
    rmOps.params.free.hObjectOld = hObject;

    nvkms_call_rm(&rmOps);

    return rmOps.params.free.status;
}
/*
 * Forward an NV04_VID_HEAP_CONTROL request (an NVOS32_PARAMETERS
 * structure) to RM.  RM writes the status into the caller's parameter
 * structure, which is also what this function returns.
 */
NvU32 nvRmApiVidHeapControl(
    void *pVidHeapControlParams)
{
    NVOS32_PARAMETERS *pParams = pVidHeapControlParams;
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_VID_HEAP_CONTROL;
    rmOps.params.pVidHeapControl = pParams;

    nvkms_call_rm(&rmOps);

    return pParams->status;
}
/*
 * Map 'length' bytes of 'hMemory' at 'offset' into the CPU's address
 * space via NV04_MAP_MEMORY; the resulting linear address is returned
 * in *ppLinearAddress.  Returns the RM status.
 */
NvU32 nvRmApiMapMemory(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hMemory,
    NvU64 offset,
    NvU64 length,
    void **ppLinearAddress,
    NvU32 flags)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_MAP_MEMORY;
    rmOps.params.mapMemory.hClient = hClient;
    rmOps.params.mapMemory.hDevice = hDevice;
    rmOps.params.mapMemory.hMemory = hMemory;
    rmOps.params.mapMemory.offset = offset;
    rmOps.params.mapMemory.length = length;
    rmOps.params.mapMemory.flags = flags;

    nvkms_call_rm(&rmOps);

    *ppLinearAddress = NvP64_VALUE(rmOps.params.mapMemory.pLinearAddress);

    return rmOps.params.mapMemory.status;
}
/*
 * Unmap a CPU mapping previously created by nvRmApiMapMemory(), via
 * NV04_UNMAP_MEMORY.  Returns the RM status.
 */
NvU32 nvRmApiUnmapMemory(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hMemory,
    const void *pLinearAddress,
    NvU32 flags)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_UNMAP_MEMORY;
    rmOps.params.unmapMemory.hClient = hClient;
    rmOps.params.unmapMemory.hDevice = hDevice;
    rmOps.params.unmapMemory.hMemory = hMemory;
    rmOps.params.unmapMemory.flags = flags;
    rmOps.params.unmapMemory.pLinearAddress = NV_PTR_TO_NvP64(pLinearAddress);

    nvkms_call_rm(&rmOps);

    return rmOps.params.unmapMemory.status;
}
/*!
 * Map memory into a DMA context via the RM API ops ioctl.
 *
 * \param[in]      hClient     Client that owns the objects.
 * \param[in]      hDevice     Device under which to map.
 * \param[in]      hDma        DMA object describing the address space.
 * \param[in]      hMemory     Memory object to map.
 * \param[in]      offset      Byte offset into the memory object.
 * \param[in]      length      Number of bytes to map.
 * \param[in]      flags       NV04_MAP_MEMORY_DMA flags.
 * \param[in,out]  pDmaOffset  In: requested DMA offset (flag-dependent);
 *                             out: the DMA offset chosen by RM.
 *
 * \return  The RM status code reported for the operation.
 */
NvU32 nvRmApiMapMemoryDma(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hDma,
    NvU32 hMemory,
    NvU64 offset,
    NvU64 length,
    NvU32 flags,
    NvU64 *pDmaOffset)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_MAP_MEMORY_DMA;

    rmOps.params.mapMemoryDma.hClient = hClient;
    rmOps.params.mapMemoryDma.hDevice = hDevice;
    rmOps.params.mapMemoryDma.hDma = hDma;
    rmOps.params.mapMemoryDma.hMemory = hMemory;
    rmOps.params.mapMemoryDma.offset = offset;
    rmOps.params.mapMemoryDma.length = length;
    rmOps.params.mapMemoryDma.flags = flags;
    rmOps.params.mapMemoryDma.dmaOffset = *pDmaOffset;

    nvkms_call_rm(&rmOps);

    /* dmaOffset is in/out: report back the offset RM actually used. */
    *pDmaOffset = rmOps.params.mapMemoryDma.dmaOffset;

    return rmOps.params.mapMemoryDma.status;
}
/*!
 * Unmap a DMA mapping created by nvRmApiMapMemoryDma().
 *
 * \param[in]  hClient    Client that owns the objects.
 * \param[in]  hDevice    Device under which the mapping was made.
 * \param[in]  hDma       DMA object describing the address space.
 * \param[in]  hMemory    Mapped memory object.
 * \param[in]  flags      NV04_UNMAP_MEMORY_DMA flags.
 * \param[in]  dmaOffset  DMA offset previously returned by the map.
 *
 * \return  The RM status code reported for the operation.
 */
NvU32 nvRmApiUnmapMemoryDma(
    NvU32 hClient,
    NvU32 hDevice,
    NvU32 hDma,
    NvU32 hMemory,
    NvU32 flags,
    NvU64 dmaOffset)
{
    nvidia_kernel_rmapi_ops_t rmOps = { 0 };

    rmOps.op = NV04_UNMAP_MEMORY_DMA;

    rmOps.params.unmapMemoryDma.hClient = hClient;
    rmOps.params.unmapMemoryDma.hDevice = hDevice;
    rmOps.params.unmapMemoryDma.hDma = hDma;
    rmOps.params.unmapMemoryDma.hMemory = hMemory;
    rmOps.params.unmapMemoryDma.flags = flags;
    rmOps.params.unmapMemoryDma.dmaOffset = dmaOffset;

    nvkms_call_rm(&rmOps);

    return rmOps.params.unmapMemoryDma.status;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,796 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-utils.h"
#include "nvkms-types.h"
#include "nv_mode_timings_utils.h"
#include "nv_vasprintf.h"
#include "nv_list.h" /* for nv_container_of() */
/*!
 * Common helper for the NVKMS logging entry points.
 *
 * Formats 'fmt'/'ap' into a heap-allocated string, prepends a "GPU:n: "
 * prefix when 'gpuLogIndex' identifies a GPU, and passes the result to
 * nvkms_log().  The message is silently dropped if allocation fails.
 */
void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex,
               const char *fmt, va_list ap)
{
    char prefixString[10];
    const char *prefix = "";
    char *message;
    int level;

    /* Map the EVO log type onto an NVKMS log level; default to INFO. */
    if (logType == EVO_LOG_ERROR) {
        level = NVKMS_LOG_LEVEL_ERROR;
    } else if (logType == EVO_LOG_WARN) {
        level = NVKMS_LOG_LEVEL_WARN;
    } else {
        level = NVKMS_LOG_LEVEL_INFO;
    }

    message = nv_vasprintf(fmt, ap);
    if (message == NULL) {
        return;
    }

    if (gpuLogIndex != NV_INVALID_GPU_LOG_INDEX) {
        nvkms_snprintf(prefixString, sizeof(prefixString),
                       "GPU:%d: ", gpuLogIndex);
        prefix = prefixString;
    }

    nvkms_log(level, prefix, message);

    nvFree(message);
}
/*! Log a message tagged with the given device's GPU log index. */
void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType,
                 const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, args);
    va_end(args);
}
/*! Log a message tagged with the given disp's GPU log index. */
void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType,
                  const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, args);
    va_end(args);
}
/*! Log a message not associated with any particular GPU. */
void nvEvoLog(NVEvoLogType logType, const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, args);
    va_end(args);
}
/*
 * Debug-only variants of the logging entry points above; these compile
 * away entirely in release builds.
 */
#if defined(DEBUG)
/*! Debug-only: log a message not associated with any GPU. */
void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, ap);
    va_end(ap);
}
/*! Debug-only: log a message tagged with the device's GPU log index. */
void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType,
                      const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, ap);
    va_end(ap);
}
/*! Debug-only: log a message tagged with the disp's GPU log index. */
void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType,
                       const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, ap);
    va_end(ap);
}
#endif /* DEBUG */
/*!
 * Initialize the given NVEvoInfoString.
 *
 * Zero the structure and point it at the caller-provided character
 * array 's' of 'totalLength' bytes.  's' may be NULL, in which case
 * later append operations are no-ops.
 */
void nvInitInfoString(NVEvoInfoStringPtr pInfoString,
                      char *s, NvU16 totalLength)
{
    nvkms_memset(pInfoString, 0, sizeof(*pInfoString));

    pInfoString->totalLength = totalLength;
    pInfoString->s = s;
}
/*!
 * Append the text, described by 'format' and 'ap', to the infoString.
 *
 * The stored string is always nul-terminated on return; if the
 * formatted text does not fit, it is truncated at the end of the
 * buffer (and an assertion fires in debug builds).
 */
static void LogInfoString(NVEvoInfoStringPtr pInfoString,
                          const char *format, va_list ap)
{
    char *s;
    size_t size = pInfoString->totalLength - pInfoString->length;
    int ret;
    if (pInfoString->s == NULL) {
        return;
    }
    /* Need room for at least one character plus the terminator. */
    if (size <= 1) {
        nvAssert(!"pInfoString too small");
        return;
    }
    s = pInfoString->s + pInfoString->length;
    ret = nvkms_vsnprintf(s, size, format, ap);
    if (ret > 0) {
        /* vsnprintf() writes at most size-1 characters plus '\0'. */
        pInfoString->length += NV_MIN((size_t)ret, size - 1);
    }
    /*
     * vsnprintf() returns the length the formatted string would have
     * had, excluding the terminator.  If ret >= size, the output was
     * truncated (ret characters plus '\0' need ret+1 bytes), and
     * totalLength may need to be increased to log everything sent to
     * this buffer.  Note: 'ret <= size' would miss the ret == size
     * truncation case and compare int against size_t; cast explicitly
     * and require strict inequality.
     */
    nvAssert(ret >= 0 && (size_t)ret < size);
    nvAssert(pInfoString->length < pInfoString->totalLength);
    pInfoString->s[pInfoString->length] = '\0';
}
/*!
 * Append formatted text to the infoString, with no trailing newline.
 */
void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString,
                           const char *format, ...)
{
    va_list args;

    va_start(args, format);
    LogInfoString(pInfoString, format, args);
    va_end(args);
}
/*!
 * Append formatted text to the infoString, followed by a newline.
 */
void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString,
                        const char *format, ...)
{
    va_list args;

    va_start(args, format);
    LogInfoString(pInfoString, format, args);
    va_end(args);

    /* Terminate the line. */
    nvEvoLogInfoStringRaw(pInfoString, "\n");
}
/*!
* The NVEvoApiHandlesRec-related functions below are used to manage
* sets of NvKms API handles. For the various NvKms objects (e.g.,
* devices, disps, connectors, surfaces) clients will specify the
* object by handle, and NVKMS will look up the corresponding object.
*
* We store a pointer to the object in a dynamically allocated array,
* and use the handle to look up the pointer in the array.
*
* Note that handles are 1-based (valid handles are in the range
* [1,numPointers], and 0 is an invalid handle), while indices to the
* corresponding pointers are 0-based (valid indices are in the range
* [0,numPointers-1]). Subtract 1 from the handle to get the index
* for the pointer.
*/
/*!
 * Increase the size of the NVEvoApiHandles::pointers array.
 *
 * Reallocate the pointers array, adding 'defaultSize' elements, and
 * zero-initialize the newly added region.
 *
 * \return TRUE on success; FALSE on size overflow or allocation failure.
 */
static NvBool GrowApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles)
{
    const NvU32 grownCount =
        pEvoApiHandles->numPointers + pEvoApiHandles->defaultSize;
    const size_t currentBytes = pEvoApiHandles->numPointers * sizeof(void *);
    const size_t grownBytes = grownCount * sizeof(void *);
    void **grown;

    /* Bail out if the element-count computation wrapped. */
    if (grownBytes <= currentBytes) {
        return FALSE;
    }

    grown = nvRealloc(pEvoApiHandles->pointers, grownBytes);
    if (grown == NULL) {
        return FALSE;
    }

    /* Zero only the newly added tail of the array. */
    nvkms_memset(&grown[pEvoApiHandles->numPointers], 0,
                 grownBytes - currentBytes);

    pEvoApiHandles->pointers = grown;
    pEvoApiHandles->numPointers = grownCount;

    return TRUE;
}
/*!
 * Attempt to shrink the NVEvoApiHandles::pointers array.
 *
 * If the high elements of the array are unused, reduce the array size,
 * keeping it a multiple of 'defaultSize'.  A failed shrink-realloc is
 * harmless: the larger array is simply kept.
 */
static void ShrinkApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles)
{
    NvU32 highest;
    NvU32 targetCount;
    void **shrunk;

    /* Nothing to do if the array is already at its minimum size. */
    if (pEvoApiHandles->numPointers == pEvoApiHandles->defaultSize) {
        return;
    }

    /* Find the highest in-use element (element 0 is never examined). */
    for (highest = pEvoApiHandles->numPointers - 1; highest > 0; highest--) {
        if (pEvoApiHandles->pointers[highest] != NULL) {
            break;
        }
    }

    /* Round the in-use count up to the next multiple of defaultSize. */
    targetCount = ((highest / pEvoApiHandles->defaultSize) + 1) *
        pEvoApiHandles->defaultSize;

    if (pEvoApiHandles->numPointers == targetCount) {
        return;
    }

    shrunk = nvRealloc(pEvoApiHandles->pointers,
                       targetCount * sizeof(void *));
    if (shrunk != NULL) {
        pEvoApiHandles->pointers = shrunk;
        pEvoApiHandles->numPointers = targetCount;
    }
}
/*!
 * Return TRUE if 'pointer' is already present in pEvoApiHandles.
 */
NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles,
                                      void *pointer)
{
    NvU32 i;

    /* Linear scan: the pointer array is not sorted. */
    for (i = 0; i < pEvoApiHandles->numPointers; i++) {
        if (pEvoApiHandles->pointers[i] == pointer) {
            return TRUE;
        }
    }

    return FALSE;
}
/*!
 * Create an NvKms API handle.
 *
 * Pick an available handle from pEvoApiHandles, growing the pointer
 * array if necessary, and associate 'pointer' with it.
 *
 * \return The new (1-based) handle, or 0 on failure or NULL 'pointer'.
 */
NvKmsGenericHandle
nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, void *pointer)
{
    NvU32 slot;

    if (pointer == NULL) {
        return 0;
    }

    /* Look for an unused slot in the current array. */
    for (slot = 0; slot < pEvoApiHandles->numPointers; slot++) {
        if (pEvoApiHandles->pointers[slot] == NULL) {
            break;
        }
    }

    if (slot == pEvoApiHandles->numPointers) {
        /*
         * No free slot: grow the array; 'slot' then indexes the first
         * newly added (zeroed) element.
         */
        if (!GrowApiHandlesPointersArray(pEvoApiHandles)) {
            return 0;
        }
    }

    nvAssert(slot < pEvoApiHandles->numPointers);
    nvAssert(pEvoApiHandles->pointers[slot] == NULL);

    pEvoApiHandles->pointers[slot] = pointer;

    /* Handles are 1-based: index 'slot' maps to handle 'slot + 1'. */
    return slot + 1;
}
/*!
 * Retrieve the pointer that maps to an NvKms API handle.
 *
 * \return The pointer nvEvoCreateApiHandle() associated with 'handle',
 *         or NULL for an invalid or out-of-range handle.
 */
void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles,
                                   NvKmsGenericHandle handle)
{
    NvU32 idx;

    /* Handle 0 is reserved as invalid. */
    if (handle == 0) {
        return NULL;
    }

    /* Handles are 1-based; convert to a 0-based array index. */
    idx = handle - 1;

    return (idx < pEvoApiHandles->numPointers) ?
        pEvoApiHandles->pointers[idx] : NULL;
}
/*!
 * Retrieve the pointer that maps to the next NvKms API handle.
 *
 * Intended for the FOR_ALL_POINTERS_IN_EVO_API_HANDLES() macro: with
 * *pHandle == 0 this returns the first pointer in the array, and on
 * return *pHandle is the position at which the next iteration should
 * resume.  Returns NULL once no non-NULL elements remain.
 */
void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles,
                                       NvKmsGenericHandle *pHandle)
{
    NvU32 idx = *pHandle;

    while (idx < pEvoApiHandles->numPointers) {
        void *p = pEvoApiHandles->pointers[idx];

        if (p != NULL) {
            /* Resume the next iteration just past this element. */
            *pHandle = idx + 1;
            return p;
        }
        idx++;
    }

    return NULL;
}
/*!
 * Remove an NvKms API handle.
 *
 * Clear the 'handle' entry, and its corresponding pointer, from
 * pEvoApiHandles; then opportunistically shrink the pointer array.
 * Invalid or out-of-range handles are ignored.
 */
void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles,
                           NvKmsGenericHandle handle)
{
    if (handle == 0) {
        return;
    }

    /* Handles are 1-based; convert to a 0-based array index. */
    if ((handle - 1) >= pEvoApiHandles->numPointers) {
        return;
    }

    pEvoApiHandles->pointers[handle - 1] = NULL;

    ShrinkApiHandlesPointersArray(pEvoApiHandles);
}
/* Only used in nvAssert, so only build into debug builds to avoid never-used
 * warnings */
#if defined(DEBUG)
/*!
 * Return the number of non-NULL pointers in the pointer array.
 *
 * Used to assert that all handles were destroyed before teardown.
 */
static NvU32
CountApiHandles(const NVEvoApiHandlesRec *pEvoApiHandles)
{
    NvU32 index, count = 0;
    for (index = 0; index < pEvoApiHandles->numPointers; index++) {
        if (pEvoApiHandles->pointers[index] != NULL) {
            count++;
        }
    }
    return count;
}
#endif /* DEBUG */
/*!
 * Initialize the NVEvoApiHandlesRec.
 *
 * This should be called before any
 * nvEvo{Create,GetPointerFrom,Destroy}ApiHandle() calls on this
 * pEvoApiHandles.
 *
 * The pointer array for the pEvoApiHandles will be managed in
 * multiples of 'defaultSize'.
 *
 * \return TRUE on success; FALSE if the initial array allocation fails.
 */
NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles, NvU32 defaultSize)
{
    nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles));
    pEvoApiHandles->defaultSize = defaultSize;
    /* Pre-allocate the first 'defaultSize' pointer slots. */
    return GrowApiHandlesPointersArray(pEvoApiHandles);
}
/*!
 * Free the NVEvoApiHandlesPtr resources.
 *
 * All handles are expected to have been destroyed already; the assert
 * catches leaked handles in debug builds.
 */
void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles)
{
    nvAssert(CountApiHandles(pEvoApiHandles) == 0);
    nvFree(pEvoApiHandles->pointers);
    nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles));
}
/*!
 * Return the bits per component for the given NVKMS pixel depth:
 * the 18-, 24- and 30-bit packed depths map to 6, 8 and 10 bits.
 * Returns 0 (after asserting) for an unrecognized depth.
 */
NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth)
{
    switch (pixelDepth) {
    case NVKMS_PIXEL_DEPTH_18_444:
        return 6;
    case NVKMS_PIXEL_DEPTH_24_444:
        return 8;
    case NVKMS_PIXEL_DEPTH_30_444:
        return 10;
    }
    nvAssert(!"Unknown NVKMS_PIXEL_DEPTH");
    return 0;
}
/*
 * Import function required by nvBuildModeName(): snprintf-style
 * formatting into 'str', backed by nvkms_vsnprintf().
 */
int nvBuildModeNameSnprintf(char *str, size_t size, const char *format, ...)
{
    va_list args;
    int written;

    va_start(args, format);
    written = nvkms_vsnprintf(str, size, format, args);
    va_end(args);

    return written;
}
/* Import functions required by nv_vasprintf() */
/*! Allocation callback for nv_vasprintf(): forwards to nvAlloc(). */
void *nv_vasprintf_alloc(size_t size)
{
    return nvAlloc(size);
}
/*! Free callback for nv_vasprintf(): forwards to nvFree(). */
void nv_vasprintf_free(void *ptr)
{
    nvFree(ptr);
}
/*! Formatting callback for nv_vasprintf(): forwards to nvkms_vsnprintf(). */
int nv_vasprintf_vsnprintf(char *str, size_t size,
                           const char *format, va_list ap)
{
    return nvkms_vsnprintf(str, size, format, ap);
}
/*
 * Track the size of each allocation, so that it can be passed to
 * nvkms_free().
 */
typedef struct {
    size_t size; /* includes sizeof(nvkms_memory_info_t) */
    /* The caller-visible allocation, 8-byte aligned; the header sits
     * immediately before it and is recovered via nv_container_of(). */
    char data[] __attribute__((aligned(8)));
} nvkms_memory_info_t;
void *nvInternalAlloc(size_t size, const NvBool zero)
{
size_t totalSize = size + sizeof(nvkms_memory_info_t);
nvkms_memory_info_t *p;
if (totalSize < size) { /* overflow in the above addition */
return NULL;
}
p = nvkms_alloc(totalSize, zero);
if (p == NULL) {
return NULL;
}
p->size = totalSize;
return p->data;
}
/*!
 * Resize an allocation made by nvInternalAlloc(), with realloc()-like
 * semantics: NULL 'ptr' behaves as alloc, size 0 behaves as free, and
 * on allocation failure the original block is left intact.
 */
void *nvInternalRealloc(void *ptr, size_t size)
{
    nvkms_memory_info_t *pMemInfo;
    void *pNew;

    if (ptr == NULL) {
        /* realloc(NULL, size) is equivalent to a plain allocation. */
        return nvInternalAlloc(size, FALSE);
    }

    if (size == 0) {
        /* realloc(ptr, 0) is equivalent to free. */
        nvInternalFree(ptr);
        return NULL;
    }

    /* Recover the hidden header to learn the old usable size. */
    pMemInfo = nv_container_of(ptr, nvkms_memory_info_t, data);

    pNew = nvInternalAlloc(size, FALSE);
    if (pNew != NULL) {
        const size_t oldSize = pMemInfo->size - sizeof(nvkms_memory_info_t);
        const size_t copySize = (size < oldSize) ? size : oldSize;

        nvkms_memcpy(pNew, ptr, copySize);
        nvInternalFree(ptr);
    }

    return pNew;
}
/*!
 * Free an allocation made by nvInternalAlloc(); NULL is a no-op.
 */
void nvInternalFree(void *ptr)
{
    nvkms_memory_info_t *pMemInfo;

    if (ptr == NULL) {
        return;
    }

    /* Recover the hidden header so the total size can be passed on. */
    pMemInfo = nv_container_of(ptr, nvkms_memory_info_t, data);

    nvkms_free(pMemInfo, pMemInfo->size);
}
/*!
 * Duplicate a nul-terminated string into nvInternalAlloc() memory.
 *
 * \return The copy, or NULL if 'str' is NULL or allocation fails.
 */
char *nvInternalStrDup(const char *str)
{
    size_t bytes;
    char *copy;

    if (str == NULL) {
        return NULL;
    }

    /* Copy includes the terminating nul byte. */
    bytes = nvkms_strlen(str) + 1;

    copy = nvInternalAlloc(bytes, FALSE);
    if (copy != NULL) {
        nvkms_memcpy(copy, str, bytes);
    }

    return copy;
}
/*!
 * Look up the value of a key in the set of registry keys provided at
 * device allocation time, copied from the client request during
 * nvAllocDevEvo().
 *
 * \param[in]   pDevEvo  The device with regkeys to be checked.
 * \param[in]   key      The name of the key to look up.
 * \param[out]  val      The value of the key, if the key was specified.
 *
 * \return Whether the key was specified in the registry.
 */
NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo,
                        const char *key, NvU32 *val)
{
    int i;

    for (i = 0; i < ARRAY_LEN(pDevEvo->registryKeys); i++) {
        if (nvkms_strcmp(key, pDevEvo->registryKeys[i].name) != 0) {
            continue;
        }
        *val = pDevEvo->registryKeys[i].value;
        return TRUE;
    }

    return FALSE;
}
/*
 * Debug builds route all NVKMS allocations through the memory tracker,
 * which records the allocation site (file/line) so leaks can be
 * reported at teardown via nvReportUnfreedAllocations().
 */
#if defined(DEBUG)
#include "nv_memory_tracker.h"
/*! Tracked malloc: records 'file'/'line' with the allocation. */
void *nvDebugAlloc(size_t size, int line, const char *file)
{
    return nvMemoryTrackerTrackedAlloc(&nvEvoGlobal.debugMemoryAllocationList,
                                       size, line, file);
}
/*! Tracked calloc: records 'file'/'line' with the allocation. */
void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file)
{
    return nvMemoryTrackerTrackedCalloc(&nvEvoGlobal.debugMemoryAllocationList,
                                        nmemb, size, line, file);
}
/*! Tracked realloc: records 'file'/'line' with the new allocation. */
void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file)
{
    return nvMemoryTrackerTrackedRealloc(&nvEvoGlobal.debugMemoryAllocationList,
                                         ptr, size, line, file);
}
/*! Free an allocation made through the tracked allocators. */
void nvDebugFree(void *ptr)
{
    nvMemoryTrackerTrackedFree(ptr);
}
/*! Tracked strdup built on nvDebugAlloc(). */
char *nvDebugStrDup(const char *str, int line, const char *file)
{
    size_t size = nvkms_strlen(str);
    char *newStr = nvDebugAlloc(size + 1, line, file);
    if (newStr == NULL) {
        return NULL;
    }
    nvkms_memcpy(newStr, str, size);
    newStr[size] = '\0';
    return newStr;
}
/*! Log any allocations that were never freed. */
void nvReportUnfreedAllocations(void)
{
    nvMemoryTrackerPrintUnfreedAllocations(
        &nvEvoGlobal.debugMemoryAllocationList);
}
/* Import functions required by the nv_memory_tracker library. */
void nvMemoryTrackerPrintf(const char *format, ...)
{
    va_list ap;
    va_start(ap, format);
    nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, format, ap);
    va_end(ap);
}
void *nvMemoryTrackerAlloc(size_t size)
{
    return nvkms_alloc(size, FALSE);
}
void nvMemoryTrackerFree(void *ptr, size_t size)
{
    nvkms_free(ptr, size);
}
void nvMemoryTrackerMemset(void *s, int c, size_t n)
{
    nvkms_memset(s, c, n);
}
void nvMemoryTrackerMemcpy(void *dest, const void *src, size_t n)
{
    nvkms_memcpy(dest, src, n);
}
#endif /* DEBUG */
/*
 * The C++ displayPort library source code introduces a reference to
 * __cxa_pure_virtual. This should never actually get called, so
 * simply assert.
 */
/* Forward declaration gives the definition a prototype. */
void __cxa_pure_virtual(void);
void __cxa_pure_virtual(void)
{
    nvAssert(!"Pure virtual function called");
}
/* Import functions required by unix_rm_handle */
#if defined(DEBUG)
/*! Assertion callback for unix_rm_handle: forwards to nvDebugAssert(). */
void nvUnixRmHandleDebugAssert(const char *expString,
                               const char *filenameString,
                               const char *funcString,
                               const unsigned lineNumber)
{
    nvDebugAssert(expString, filenameString, funcString, lineNumber);
}
/*! Logging callback for unix_rm_handle; verbose messages are dropped. */
void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    /* skip verbose messages */
    if (level < NV_UNIX_RM_HANDLE_DEBUG_VERBOSE) {
        nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, fmt, ap);
    }
    va_end(ap);
}
#endif /* DEBUG */
/*! Realloc callback for unix_rm_handle: forwards to nvRealloc(). */
void *nvUnixRmHandleReallocMem(void *oldPtr, NvLength newSize)
{
    return nvRealloc(oldPtr, newSize);
}
/*! Free callback for unix_rm_handle: forwards to nvFree(). */
void nvUnixRmHandleFreeMem(void *ptr)
{
    nvFree(ptr);
}
/* Import functions required by nv_assert */
#if defined(DEBUG)
/*!
 * Report a failed assertion: logs the location and expression at WARN
 * level; it does not halt execution.
 */
void nvDebugAssert(const char *expString, const char *filenameString,
                   const char *funcString, const unsigned int lineNumber)
{
    nvEvoLog(EVO_LOG_WARN, "NVKMS Assert @%s:%d:%s(): '%s'",
             filenameString, lineNumber, funcString, expString);
}
#endif /* DEBUG */

View File

@@ -0,0 +1,177 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvkms-dma.h"
#include "nvkms-evo.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-vrr.h"
#include "dp/nvdp-connector-event-sink.h"
#include "nvkms-hdmi.h"
#include "nvkms-dpy.h"
#include <ctrl/ctrl0000/ctrl0000unix.h>
/*!
 * Allocate the VRR semaphore surface.
 *
 * Only one array of VRR semaphores is needed per "head group", which
 * for our purposes means a pDevEvo.  The array is allocated when the
 * device is initialized and kept for the lifetime of the pDevEvo.
 * On failure, the error is logged and the handle released; the device
 * is left without a semaphore surface.
 */
void nvAllocVrrEvo(NVDevEvoPtr pDevEvo)
{
    NvU32 memHandle;

    /*
     * GPUs that support the HEAD_SET_DISPLAY_RATE method (nvdisplay)
     * do not need a VRR semaphore surface.
     */
    if (pDevEvo->hal->caps.supportsDisplayRate) {
        return;
    }

    memHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);

    if (!nvRmAllocSysmem(pDevEvo, memHandle, NULL, &pDevEvo->vrr.pSemaphores,
                         NVKMS_VRR_SEMAPHORE_SURFACE_SIZE,
                         NVKMS_MEMORY_NISO)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to allocate G-SYNC semaphore memory");
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memHandle);
        return;
    }

    pDevEvo->vrr.semaphoreHandle = memHandle;
}
/*!
 * Tear down the VRR semaphore surface allocated by nvAllocVrrEvo():
 * unmap the CPU mapping (if any), free the RM object, and release the
 * handle.  A no-op if no surface was allocated.
 */
void nvFreeVrrEvo(NVDevEvoPtr pDevEvo)
{
    const NvU32 memHandle = pDevEvo->vrr.semaphoreHandle;

    if (memHandle == 0) {
        return;
    }

    if (pDevEvo->vrr.pSemaphores != NULL) {
        nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
                           pDevEvo->deviceHandle,
                           memHandle,
                           pDevEvo->vrr.pSemaphores,
                           0);
        pDevEvo->vrr.pSemaphores = NULL;
    }

    nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle, memHandle);
    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memHandle);

    pDevEvo->vrr.semaphoreHandle = 0;
}
/*!
 * Export the VRR semaphore surface memory through the given file
 * descriptor, using the RM "export object to fd" control.
 *
 * \return TRUE on success; FALSE if there is no semaphore surface or
 *         the control call fails.
 */
NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd)
{
    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { };
    NvU32 status;

    if (pDevEvo->vrr.semaphoreHandle == 0) {
        return FALSE;
    }

    exportParams.fd = fd;
    exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
    exportParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle;
    exportParams.object.data.rmObject.hObject = pDevEvo->vrr.semaphoreHandle;

    status = nvRmApiControl(nvEvoGlobal.clientHandle,
                            nvEvoGlobal.clientHandle,
                            NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD,
                            &exportParams, sizeof(exportParams));

    return status == NVOS_STATUS_SUCCESS;
}
/*
 * The remaining VRR entry points are stubs in this file:
 * nvDispSupportsVrr() reports no VRR support, and the rest are no-ops.
 * Callers elsewhere in NVKMS can invoke them unconditionally.
 */
/*! Stub: VRR is never supported by this implementation. */
NvBool nvDispSupportsVrr(
    const NVDispEvoRec *pDispEvo)
{
    return FALSE;
}
/*! Stub: no-op. */
void nvDisableVrr(NVDevEvoPtr pDevEvo)
{
    return;
}
/*! Stub: no-op; output parameters are left untouched. */
void nvGetDpyMinRefreshRateValidValues(
    const NVHwModeTimingsEvo *pTimings,
    const enum NvKmsDpyVRRType vrrType,
    const NvU32 edidTimeoutMicroseconds,
    NvU32 *minMinRefreshRate,
    NvU32 *maxMinRefreshRate)
{
    return;
}
/*! Stub: no-op. */
void nvEnableVrr(
    NVDevEvoPtr pDevEvo,
    const struct NvKmsSetModeRequest *pRequest)
{
    return;
}
/*! Stub: no-op. */
void nvSetVrrActive(
    NVDevEvoPtr pDevEvo,
    NvBool active)
{
    return;
}
/*! Stub: no-op; pNew is left unmodified. */
void nvApplyVrrBaseFlipOverrides(
    const NVDispEvoRec *pDispEvo,
    NvU32 head,
    const NVFlipChannelEvoHwState *pOld,
    NVFlipChannelEvoHwState *pNew)
{
    return;
}
/*! Stub: no-op. */
void nvCancelVrrFrameReleaseTimers(
    NVDevEvoPtr pDevEvo)
{
    return;
}
/*! Stub: no-op; 'reply' is left unmodified. */
void nvSetNextVrrFlipTypeAndIndex(
    NVDevEvoPtr pDevEvo,
    struct NvKmsFlipReply *reply)
{
    return;
}
/*! Stub: no-op. */
void nvTriggerVrrUnstallMoveCursor(
    NVDispEvoPtr pDispEvo)
{
    return;
}
/*! Stub: no-op. */
void nvTriggerVrrUnstallSetCursorImage(
    NVDispEvoPtr pDispEvo,
    NvBool ctxDmaChanged)
{
    return;
}

File diff suppressed because it is too large Load Diff

181
src/nvidia-modeset/srcs.mk Normal file
View File

@@ -0,0 +1,181 @@
# Source lists for nv-modeset-kernel.o, consumed by the nvidia-modeset
# Makefile: SRCS holds C sources, SRCS_CXX holds C++ sources.
SRCS ?=
SRCS_CXX ?=
# NV status-code helpers shared across driver components.
SRCS += ../common/shared/nvstatus/nvstatus.c
# Berkeley SoftFloat: software floating-point for kernel code.  The
# 8086-SSE directory provides the NaN-handling specialization.
SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
SRCS += ../common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
SRCS += ../common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
SRCS += ../common/softfloat/source/f32_add.c
SRCS += ../common/softfloat/source/f32_div.c
SRCS += ../common/softfloat/source/f32_eq.c
SRCS += ../common/softfloat/source/f32_eq_signaling.c
SRCS += ../common/softfloat/source/f32_isSignalingNaN.c
SRCS += ../common/softfloat/source/f32_le.c
SRCS += ../common/softfloat/source/f32_le_quiet.c
SRCS += ../common/softfloat/source/f32_lt.c
SRCS += ../common/softfloat/source/f32_lt_quiet.c
SRCS += ../common/softfloat/source/f32_mul.c
SRCS += ../common/softfloat/source/f32_mulAdd.c
SRCS += ../common/softfloat/source/f32_rem.c
SRCS += ../common/softfloat/source/f32_roundToInt.c
SRCS += ../common/softfloat/source/f32_sqrt.c
SRCS += ../common/softfloat/source/f32_sub.c
SRCS += ../common/softfloat/source/f32_to_f16.c
SRCS += ../common/softfloat/source/f32_to_f64.c
SRCS += ../common/softfloat/source/f32_to_i32.c
SRCS += ../common/softfloat/source/f32_to_i32_r_minMag.c
SRCS += ../common/softfloat/source/f32_to_i64.c
SRCS += ../common/softfloat/source/f32_to_i64_r_minMag.c
SRCS += ../common/softfloat/source/f32_to_ui32.c
SRCS += ../common/softfloat/source/f32_to_ui32_r_minMag.c
SRCS += ../common/softfloat/source/f32_to_ui64.c
SRCS += ../common/softfloat/source/f32_to_ui64_r_minMag.c
SRCS += ../common/softfloat/source/f64_add.c
SRCS += ../common/softfloat/source/f64_div.c
SRCS += ../common/softfloat/source/f64_eq.c
SRCS += ../common/softfloat/source/f64_eq_signaling.c
SRCS += ../common/softfloat/source/f64_isSignalingNaN.c
SRCS += ../common/softfloat/source/f64_le.c
SRCS += ../common/softfloat/source/f64_le_quiet.c
SRCS += ../common/softfloat/source/f64_lt.c
SRCS += ../common/softfloat/source/f64_lt_quiet.c
SRCS += ../common/softfloat/source/f64_mul.c
SRCS += ../common/softfloat/source/f64_mulAdd.c
SRCS += ../common/softfloat/source/f64_rem.c
SRCS += ../common/softfloat/source/f64_roundToInt.c
SRCS += ../common/softfloat/source/f64_sqrt.c
SRCS += ../common/softfloat/source/f64_sub.c
SRCS += ../common/softfloat/source/f64_to_f32.c
SRCS += ../common/softfloat/source/f64_to_i32.c
SRCS += ../common/softfloat/source/f64_to_i32_r_minMag.c
SRCS += ../common/softfloat/source/f64_to_i64.c
SRCS += ../common/softfloat/source/f64_to_i64_r_minMag.c
SRCS += ../common/softfloat/source/f64_to_ui32.c
SRCS += ../common/softfloat/source/f64_to_ui32_r_minMag.c
SRCS += ../common/softfloat/source/f64_to_ui64.c
SRCS += ../common/softfloat/source/f64_to_ui64_r_minMag.c
SRCS += ../common/softfloat/source/i32_to_f32.c
SRCS += ../common/softfloat/source/i32_to_f64.c
SRCS += ../common/softfloat/source/i64_to_f32.c
SRCS += ../common/softfloat/source/i64_to_f64.c
SRCS += ../common/softfloat/source/s_addMagsF32.c
SRCS += ../common/softfloat/source/s_addMagsF64.c
SRCS += ../common/softfloat/source/s_approxRecipSqrt32_1.c
SRCS += ../common/softfloat/source/s_approxRecipSqrt_1Ks.c
SRCS += ../common/softfloat/source/s_countLeadingZeros64.c
SRCS += ../common/softfloat/source/s_countLeadingZeros8.c
SRCS += ../common/softfloat/source/s_mul64To128.c
SRCS += ../common/softfloat/source/s_mulAddF32.c
SRCS += ../common/softfloat/source/s_mulAddF64.c
SRCS += ../common/softfloat/source/s_normRoundPackToF32.c
SRCS += ../common/softfloat/source/s_normRoundPackToF64.c
SRCS += ../common/softfloat/source/s_normSubnormalF32Sig.c
SRCS += ../common/softfloat/source/s_normSubnormalF64Sig.c
SRCS += ../common/softfloat/source/s_roundPackToF16.c
SRCS += ../common/softfloat/source/s_roundPackToF32.c
SRCS += ../common/softfloat/source/s_roundPackToF64.c
SRCS += ../common/softfloat/source/s_roundToI32.c
SRCS += ../common/softfloat/source/s_roundToI64.c
SRCS += ../common/softfloat/source/s_roundToUI32.c
SRCS += ../common/softfloat/source/s_roundToUI64.c
SRCS += ../common/softfloat/source/s_shiftRightJam128.c
SRCS += ../common/softfloat/source/s_subMagsF32.c
SRCS += ../common/softfloat/source/s_subMagsF64.c
SRCS += ../common/softfloat/source/softfloat_state.c
SRCS += ../common/softfloat/source/ui32_to_f32.c
SRCS += ../common/softfloat/source/ui32_to_f64.c
SRCS += ../common/softfloat/source/ui64_to_f32.c
SRCS += ../common/softfloat/source/ui64_to_f64.c
# DisplayPort library (C++).
SRCS_CXX += ../common/displayport/src/dp_auxretry.cpp
SRCS_CXX += ../common/displayport/src/dp_bitstream.cpp
SRCS_CXX += ../common/displayport/src/dp_buffer.cpp
SRCS_CXX += ../common/displayport/src/dp_configcaps.cpp
SRCS_CXX += ../common/displayport/src/dp_connectorimpl.cpp
SRCS_CXX += ../common/displayport/src/dp_crc.cpp
SRCS_CXX += ../common/displayport/src/dp_deviceimpl.cpp
SRCS_CXX += ../common/displayport/src/dp_discovery.cpp
SRCS_CXX += ../common/displayport/src/dp_edid.cpp
SRCS_CXX += ../common/displayport/src/dp_evoadapter.cpp
SRCS_CXX += ../common/displayport/src/dp_groupimpl.cpp
SRCS_CXX += ../common/displayport/src/dp_guid.cpp
SRCS_CXX += ../common/displayport/src/dp_list.cpp
SRCS_CXX += ../common/displayport/src/dp_merger.cpp
SRCS_CXX += ../common/displayport/src/dp_messagecodings.cpp
SRCS_CXX += ../common/displayport/src/dp_messageheader.cpp
SRCS_CXX += ../common/displayport/src/dp_messages.cpp
SRCS_CXX += ../common/displayport/src/dp_mst_edid.cpp
SRCS_CXX += ../common/displayport/src/dp_splitter.cpp
SRCS_CXX += ../common/displayport/src/dp_sst_edid.cpp
SRCS_CXX += ../common/displayport/src/dp_timer.cpp
SRCS_CXX += ../common/displayport/src/dp_vrr.cpp
SRCS_CXX += ../common/displayport/src/dp_wardatabase.cpp
SRCS_CXX += ../common/displayport/src/dp_watermark.cpp
SRCS_CXX += ../common/displayport/src/dptestutil/dp_testmessage.cpp
# HDMI infoframe/packet library, one file per display hardware class.
SRCS += ../common/modeset/hdmipacket/nvhdmipkt.c
SRCS += ../common/modeset/hdmipacket/nvhdmipkt_0073.c
SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9171.c
SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9271.c
SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9471.c
SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9571.c
SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C371.c
SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C671.c
# Mode timing library: EDID/DisplayID parsing and CVT/GTF/DMT timings.
SRCS += ../common/modeset/timing/nvt_cvt.c
SRCS += ../common/modeset/timing/nvt_displayid20.c
SRCS += ../common/modeset/timing/nvt_dmt.c
SRCS += ../common/modeset/timing/nvt_dsc_pps.c
SRCS += ../common/modeset/timing/nvt_edid.c
SRCS += ../common/modeset/timing/nvt_edidext_861.c
SRCS += ../common/modeset/timing/nvt_edidext_displayid.c
SRCS += ../common/modeset/timing/nvt_edidext_displayid20.c
SRCS += ../common/modeset/timing/nvt_gtf.c
SRCS += ../common/modeset/timing/nvt_tv.c
SRCS += ../common/modeset/timing/nvt_util.c
# Unix common utilities (memory tracker, vasprintf, RM handle allocator).
SRCS += ../common/unix/common/utils/nv_memory_tracker.c
SRCS += ../common/unix/common/utils/nv_mode_timings_utils.c
SRCS += ../common/unix/common/utils/nv_vasprintf.c
SRCS += ../common/unix/common/utils/unix_rm_handle.c
# NVKMS kernel API (kapi) layer.
SRCS += kapi/src/nvkms-kapi-channelevent.c
SRCS += kapi/src/nvkms-kapi-notifiers.c
SRCS += kapi/src/nvkms-kapi.c
# NVKMS helper libraries.
SRCS += lib/nvkms-format.c
SRCS += lib/nvkms-sync.c
# NVKMS glue to the DisplayPort library (C++).
SRCS_CXX += src/dp/nvdp-connector-event-sink.cpp
SRCS_CXX += src/dp/nvdp-connector.cpp
SRCS_CXX += src/dp/nvdp-device.cpp
SRCS_CXX += src/dp/nvdp-evo-interface.cpp
SRCS_CXX += src/dp/nvdp-host.cpp
SRCS_CXX += src/dp/nvdp-timer.cpp
# NVKMS core implementation.
SRCS += src/g_nvkms-evo-states.c
SRCS += src/nvkms-3dvision.c
SRCS += src/nvkms-attributes.c
SRCS += src/nvkms-console-restore.c
SRCS += src/nvkms-cursor.c
SRCS += src/nvkms-cursor2.c
SRCS += src/nvkms-cursor3.c
SRCS += src/nvkms-dma.c
SRCS += src/nvkms-dpy.c
SRCS += src/nvkms-event.c
SRCS += src/nvkms-evo.c
SRCS += src/nvkms-evo1.c
SRCS += src/nvkms-evo2.c
SRCS += src/nvkms-evo3.c
SRCS += src/nvkms-flip.c
SRCS += src/nvkms-framelock.c
SRCS += src/nvkms-hal.c
SRCS += src/nvkms-hdmi.c
SRCS += src/nvkms-hw-states.c
SRCS += src/nvkms-lut.c
SRCS += src/nvkms-modepool.c
SRCS += src/nvkms-modeset.c
SRCS += src/nvkms-prealloc.c
SRCS += src/nvkms-rm.c
SRCS += src/nvkms-rmapi-dgpu.c
SRCS += src/nvkms-surface.c
SRCS += src/nvkms-utils.c
SRCS += src/nvkms-vrr.c
SRCS += src/nvkms.c