515.43.04

This commit is contained in:
Andy Ritger
2022-05-09 13:18:59 -07:00
commit 1739a20efc
2519 changed files with 1060036 additions and 0 deletions

View File

@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "nv-pci-table.h"
/*
 * Devices supported by RM: any NVIDIA PCI device whose class code is
 * "VGA-compatible display controller" or "3D controller".
 *
 * The class constants are shifted left by 8 because struct
 * pci_device_id.class holds the full 24-bit class code (base class /
 * sub-class / prog-if) while the PCI_CLASS_DISPLAY_* constants cover only
 * the upper 16 bits; class_mask ~0 requires an exact class-code match.
 */
struct pci_device_id nv_pci_table[] = {
    {
        .vendor     = PCI_VENDOR_ID_NVIDIA,
        .device     = PCI_ANY_ID,
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask = ~0
    },
    {
        .vendor     = PCI_VENDOR_ID_NVIDIA,
        .device     = PCI_ANY_ID,
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask = ~0
    },
    { }  /* zero-filled terminator required by the PCI core */
};
/*
 * Devices supported by all drivers in nvidia.ko.
 *
 * This is the table exported via MODULE_DEVICE_TABLE() below, i.e. it is
 * what udev/modprobe use for automatic module loading.  In addition to
 * the VGA and 3D display classes matched by nv_pci_table, it also matches
 * NVIDIA devices that report class PCI_CLASS_BRIDGE_OTHER.
 */
struct pci_device_id nv_module_device_table[] = {
    {
        .vendor     = PCI_VENDOR_ID_NVIDIA,
        .device     = PCI_ANY_ID,
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask = ~0
    },
    {
        .vendor     = PCI_VENDOR_ID_NVIDIA,
        .device     = PCI_ANY_ID,
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask = ~0
    },
    {
        .vendor     = PCI_VENDOR_ID_NVIDIA,
        .device     = PCI_ANY_ID,
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = (PCI_CLASS_BRIDGE_OTHER << 8),
        .class_mask = ~0
    },
    { }  /* zero-filled terminator required by the PCI core */
};

/* Publish the match table for module autoloading. */
MODULE_DEVICE_TABLE(pci, nv_module_device_table);

View File

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PCI_TABLE_H_
#define _NV_PCI_TABLE_H_

#include <linux/pci.h>

/* PCI device ID match table for RM-driven devices; defined in nv-pci-table.c. */
extern struct pci_device_id nv_pci_table[];

#endif /* _NV_PCI_TABLE_H_ */

View File

@@ -0,0 +1,121 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DMA_FENCE_HELPER_H__
#define __NVIDIA_DMA_FENCE_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
 * Fence headers are moved to file dma-fence.h and struct fence has
 * been renamed to dma_fence by commit -
 *
 * 2016-10-25 : f54d1867005c3323f5d8ad83eed823e84226c429
 */
#if defined(NV_LINUX_FENCE_H_PRESENT)
#include <linux/fence.h>
#else
#include <linux/dma-fence.h>
#endif

/*
 * Alias the kernel's fence types under one driver-local name so the rest
 * of the driver is independent of the struct fence -> struct dma_fence
 * rename referenced above.
 */
#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
#endif

/* Flag-bit index for "signaling enabled", under its old or new kernel name. */
#if defined(NV_LINUX_FENCE_H_PRESENT)
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
#else
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
#endif
/* True if @fence has already signaled; wraps the pre/post-rename API. */
static inline bool nv_dma_fence_is_signaled(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_is_signaled(fence);
#else
    return dma_fence_is_signaled(fence);
#endif
}

/* Take a reference on @fence; returns @fence for call chaining. */
static inline nv_dma_fence_t *nv_dma_fence_get(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_get(fence);
#else
    return dma_fence_get(fence);
#endif
}

/* Drop a reference on @fence. */
static inline void nv_dma_fence_put(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    fence_put(fence);
#else
    dma_fence_put(fence);
#endif
}

/*
 * Wait until @fence signals or @timeout expires; @intr selects an
 * interruptible wait.  Return value follows the kernel's
 * (dma_)fence_default_wait() convention.
 */
static inline signed long
nv_dma_fence_default_wait(nv_dma_fence_t *fence,
                          bool intr, signed long timeout) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_default_wait(fence, intr, timeout);
#else
    return dma_fence_default_wait(fence, intr, timeout);
#endif
}
/* Signal @fence; wraps fence_signal()/dma_fence_signal(). */
static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_signal(fence);
#else
    return dma_fence_signal(fence);
#endif
}

/* Allocate @num fence context ids; wraps (dma_)fence_context_alloc(). */
static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_context_alloc(num);
#else
    return dma_fence_context_alloc(num);
#endif
}

/*
 * Initialize @fence with the given @ops, protecting spinlock @lock,
 * fence @context id, and sequence number @seqno.
 */
static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
                  const nv_dma_fence_ops_t *ops,
                  spinlock_t *lock, u64 context, unsigned seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    fence_init(fence, ops, lock, context, seqno);
#else
    dma_fence_init(fence, ops, lock, context, seqno);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */

View File

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DMA_RESV_HELPER_H__
#define __NVIDIA_DMA_RESV_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
 * linux/reservation.h is renamed to linux/dma-resv.h, by commit
 * 52791eeec1d9 (dma-buf: rename reservation_object to dma_resv)
 * in v5.4.
 */
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#include <linux/dma-resv.h>
#else
#include <linux/reservation.h>
#endif

#include <nvidia-dma-fence-helper.h>

/*
 * Alias the kernel's reservation-object type under one driver-local name
 * so the rest of the driver is independent of the v5.4 rename above.
 */
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
typedef struct dma_resv nv_dma_resv_t;
#else
typedef struct reservation_object nv_dma_resv_t;
#endif
/* Initialize reservation object @obj before first use. */
static inline void nv_dma_resv_init(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
    dma_resv_init(obj);
#else
    reservation_object_init(obj);
#endif
}
/*
 * Finalize reservation object @obj, releasing its fence state.
 * Counterpart of nv_dma_resv_init().
 *
 * Bug fix: the pre-v5.4 branch previously called
 * reservation_object_init() instead of reservation_object_fini(),
 * re-initializing the object (and leaking the fences it still held)
 * rather than tearing it down.
 */
static inline void nv_dma_resv_fini(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
    dma_resv_fini(obj);
#else
    reservation_object_fini(obj);
#endif
}
/*
 * Install @fence as the exclusive (write) fence of @obj, replacing any
 * previous exclusive fence.  Per the kernel API, the caller must hold
 * the reservation lock of @obj.
 */
static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
                                              nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
    dma_resv_add_excl_fence(obj, fence);
#else
    reservation_object_add_excl_fence(obj, fence);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */

View File

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_CONFTEST_H__
#define __NVIDIA_DRM_CONFTEST_H__
#include "conftest.h"
/*
* NOTE: This file is expected to get included at the top before including any
* of linux/drm headers.
*
* The goal is to redefine refcount_dec_and_test and refcount_inc before
* including drm header files, so that the drm macro/inline calls to
* refcount_dec_and_test* and refcount_inc get redirected to
* alternate implementation in this file.
*/
/*
 * On kernels where refcount_inc() / refcount_dec_and_test() resolve to
 * GPL-only exported symbols, redefine them as plain atomic operations on
 * the underlying 'refs' counter, so inline/macro uses of them inside the
 * DRM headers do not pull GPL-only symbols into this module.
 *
 * NOTE(review): these substitutes bypass refcount_t's saturation /
 * overflow protection — they are a linking workaround, not a functional
 * equivalent of the real refcount API.
 */
#if NV_IS_EXPORT_SYMBOL_GPL_refcount_inc
#include <linux/refcount.h>

#define refcount_inc(__ptr)         \
    do {                            \
        atomic_inc(&(__ptr)->refs); \
    } while(0)
#endif

#if NV_IS_EXPORT_SYMBOL_GPL_refcount_dec_and_test
#include <linux/refcount.h>

#define refcount_dec_and_test(__ptr) atomic_dec_and_test(&(__ptr)->refs)
#endif

/* Fence support requires one of the per-GEM-object reservation hooks. */
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) || \
    defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#define NV_DRM_FENCE_AVAILABLE
#else
#undef NV_DRM_FENCE_AVAILABLE
#endif
#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */

View File

@@ -0,0 +1,467 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-helper.h"
#include "nvidia-drm-priv.h"
#include "nvidia-drm-connector.h"
#include "nvidia-drm-utils.h"
#include "nvidia-drm-encoder.h"
/*
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
* moves a number of helper function definitions from
* drm/drm_crtc_helper.h to a new drm_probe_helper.h.
*/
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
#include <drm/drm_probe_helper.h>
#endif
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
/*
 * drm_connector_funcs::destroy callback: unregister the connector from
 * userspace, release its DRM core state, then free the cached EDID copy
 * (if detection stored one) and the wrapper object itself.
 */
static void nv_drm_connector_destroy(struct drm_connector *connector)
{
    struct nv_drm_connector *nv_connector = to_nv_connector(connector);

    drm_connector_unregister(connector);

    drm_connector_cleanup(connector);

    if (nv_connector->edid != NULL) {
        nv_drm_free(nv_connector->edid);
    }

    nv_drm_free(nv_connector);
}
/*
 * Query NVKMS for the connection state of @encoder on @connector.
 *
 * Fills *pDetectParams with the forced connect/disconnect state and any
 * user EDID override from DRM, then calls nvKms->getDynamicDisplayInfo().
 * On a connected display, caches a copy of the returned EDID on the
 * connector.  Returns true iff a connected display was detected.
 */
static bool
__nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams,
                        struct drm_connector *connector,
                        struct drm_encoder *encoder)
{
    struct nv_drm_connector *nv_connector = to_nv_connector(connector);
    struct drm_device *dev = connector->dev;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_encoder *nv_encoder;

    /*
     * DVI-I connectors can drive both digital and analog
     * encoders. If a digital connection has been forced then
     * skip analog encoders.
     */
    if (connector->connector_type == DRM_MODE_CONNECTOR_DVII &&
        connector->force == DRM_FORCE_ON_DIGITAL &&
        encoder->encoder_type == DRM_MODE_ENCODER_DAC) {
        return false;
    }

    nv_encoder = to_nv_encoder(encoder);

    memset(pDetectParams, 0, sizeof(*pDetectParams));

    pDetectParams->handle = nv_encoder->hDisplay;

    /* Translate DRM's user-forced connector state into NVKMS terms. */
    switch (connector->force) {
        case DRM_FORCE_ON:
        case DRM_FORCE_ON_DIGITAL:
            pDetectParams->forceConnected = NV_TRUE;
            break;
        case DRM_FORCE_OFF:
            pDetectParams->forceDisconnected = NV_TRUE;
            break;
        case DRM_FORCE_UNSPECIFIED:
            break;
    }

    if (connector->override_edid) {
        const struct drm_property_blob *edid = connector->edid_blob_ptr;

        /* Only pass the user-supplied EDID down if it fits the buffer. */
        if (edid->length <= sizeof(pDetectParams->edid.buffer)) {
            memcpy(pDetectParams->edid.buffer, edid->data, edid->length);
            pDetectParams->edid.bufferSize = edid->length;
            pDetectParams->overrideEdid = NV_TRUE;
        } else {
            WARN_ON(edid->length >
                    sizeof(pDetectParams->edid.buffer));
        }
    }

    if (!nvKms->getDynamicDisplayInfo(nv_dev->pDevice, pDetectParams)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to detect display state");
        return false;
    }

    if (pDetectParams->connected) {
        /*
         * Cache a copy of the EDID read back from NVKMS (unless the user
         * overrode it).  Allocation failure is non-fatal: the display is
         * still reported connected, just without a cached EDID.
         */
        if (!pDetectParams->overrideEdid && pDetectParams->edid.bufferSize) {
            if ((nv_connector->edid = nv_drm_calloc(
                    1,
                    pDetectParams->edid.bufferSize)) != NULL) {

                memcpy(nv_connector->edid,
                       pDetectParams->edid.buffer,
                       pDetectParams->edid.bufferSize);
            } else {
                NV_DRM_LOG_ERR("Out of Memory");
            }
        }

        return true;
    }

    return false;
}
/*
 * Core detection routine shared by the .detect and .force hooks: probe
 * every encoder attached to @connector until one reports a connected
 * display.  Refreshes the connector's cached EDID, records the detected
 * encoder, and updates the DVI-I subconnector property.
 *
 * Caller must hold dev->mode_config.mutex (asserted below).
 */
static enum drm_connector_status __nv_drm_connector_detect_internal(
    struct drm_connector *connector)
{
    struct drm_device *dev = connector->dev;
    struct nv_drm_connector *nv_connector = to_nv_connector(connector);

    enum drm_connector_status status = connector_status_disconnected;
    struct drm_encoder *detected_encoder = NULL;
    struct nv_drm_encoder *nv_detected_encoder = NULL;

    struct drm_encoder *encoder;

    struct NvKmsKapiDynamicDisplayParams *pDetectParams = NULL;

    BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));

    /* Drop the EDID cached by a previous detect; re-read below. */
    if (nv_connector->edid != NULL) {
        nv_drm_free(nv_connector->edid);
        nv_connector->edid = NULL;
    }

    if ((pDetectParams = nv_drm_calloc(
            1,
            sizeof(*pDetectParams))) == NULL) {
        /* Allocation failed: report disconnected rather than crash. */
        WARN_ON(pDetectParams == NULL);
        goto done;
    }

    /* Stop at the first encoder whose display reports connected. */
    nv_drm_connector_for_each_possible_encoder(connector, encoder) {
        if (__nv_drm_detect_encoder(pDetectParams, connector, encoder)) {
            detected_encoder = encoder;
            break;
        }
    } nv_drm_connector_for_each_possible_encoder_end;

    if (detected_encoder == NULL) {
        goto done;
    }

    nv_detected_encoder = to_nv_encoder(detected_encoder);

    status = connector_status_connected;

    nv_connector->nv_detected_encoder = nv_detected_encoder;

    /* Report whether the analog or digital half of a DVI-I port is active. */
    if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DVI_I) {
        drm_object_property_set_value(
            &connector->base,
            dev->mode_config.dvi_i_subconnector_property,
            detected_encoder->encoder_type == DRM_MODE_ENCODER_DAC ?
                DRM_MODE_SUBCONNECTOR_DVIA :
                DRM_MODE_SUBCONNECTOR_DVID);
    }

done:

    nv_drm_free(pDetectParams);

    return status;
}
/*
 * drm_connector_funcs::force callback: re-run detection so that a
 * user-forced state (connector->force) is pushed down to NVKMS.
 */
static void __nv_drm_connector_force(struct drm_connector *connector)
{
    __nv_drm_connector_detect_internal(connector);
}

/*
 * drm_connector_funcs::detect callback.  The @force argument is unused;
 * detection always queries NVKMS via the shared internal helper.
 */
static enum drm_connector_status
nv_drm_connector_detect(struct drm_connector *connector, bool force)
{
    return __nv_drm_connector_detect_internal(connector);
}
static struct drm_connector_funcs nv_connector_funcs = {
#if defined NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT
.dpms = drm_atomic_helper_connector_dpms,
#endif
.destroy = nv_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.force = __nv_drm_connector_force,
.detect = nv_drm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/*
 * drm_connector_helper_funcs::get_modes: publish the cached EDID (if any)
 * to the connector's EDID property, then enumerate modes from NVKMS one
 * index at a time until end-of-list.  Returns the number of modes added
 * to the connector's probed-modes list.
 *
 * NOTE(review): unlike mode_valid(), there is no NULL check on
 * nv_detected_encoder here — presumably get_modes only runs after a
 * successful detect; confirm against the DRM probe-helper call sequence.
 */
static int nv_drm_connector_get_modes(struct drm_connector *connector)
{
    struct drm_device *dev = connector->dev;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_connector *nv_connector = to_nv_connector(connector);
    struct nv_drm_encoder *nv_detected_encoder =
                           nv_connector->nv_detected_encoder;
    NvU32 modeIndex = 0;
    int count = 0;

    if (nv_connector->edid != NULL) {
        nv_drm_connector_update_edid_property(connector, nv_connector->edid);
    }

    while (1) {
        struct drm_display_mode *mode;
        struct NvKmsKapiDisplayMode displayMode;
        NvBool valid = 0;
        NvBool preferredMode = NV_FALSE;
        int ret;

        /*
         * ret < 0: query error; ret == 0: end of mode list;
         * ret > 0: a mode was returned in displayMode.
         */
        ret = nvKms->getDisplayMode(nv_dev->pDevice,
                                    nv_detected_encoder->hDisplay,
                                    modeIndex++, &displayMode, &valid,
                                    &preferredMode);
        if (ret < 0) {
            NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Failed to get mode at modeIndex %d of NvKmsKapiDisplay 0x%08x",
                modeIndex, nv_detected_encoder->hDisplay);
            break;
        }

        /* Is end of mode-list */
        if (ret == 0) {
            break;
        }

        /* Ignore invalid modes */
        if (!valid) {
            continue;
        }

        mode = drm_mode_create(connector->dev);
        if (mode == NULL) {
            /* Failing to allocate one mode is non-fatal; keep enumerating. */
            NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Failed to create mode for NvKmsKapiDisplay 0x%08x",
                nv_detected_encoder->hDisplay);
            continue;
        }

        nvkms_display_mode_to_drm_mode(&displayMode, mode);

        if (preferredMode) {
            mode->type |= DRM_MODE_TYPE_PREFERRED;
        }

        /* Add a mode to a connector's probed_mode list */
        drm_mode_probed_add(connector, mode);

        count++;
    }

    return count;
}
/*
 * drm_connector_helper_funcs::mode_valid: ask NVKMS whether @mode can be
 * driven on the detected display.  Returns MODE_BAD if no encoder has
 * been detected or NVKMS rejects the mode, MODE_OK otherwise.
 */
static int nv_drm_connector_mode_valid(struct drm_connector *connector,
                                       struct drm_display_mode *mode)
{
    struct drm_device *dev = connector->dev;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_encoder *nv_detected_encoder =
                           to_nv_connector(connector)->nv_detected_encoder;
    struct NvKmsKapiDisplayMode displayMode;

    if (nv_detected_encoder == NULL) {
        return MODE_BAD;
    }

    drm_mode_to_nvkms_display_mode(mode, &displayMode);

    if (!nvKms->validateDisplayMode(nv_dev->pDevice,
                                    nv_detected_encoder->hDisplay,
                                    &displayMode)) {
        return MODE_BAD;
    }

    return MODE_OK;
}
static struct drm_encoder*
nv_drm_connector_best_encoder(struct drm_connector *connector)
{
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->nv_detected_encoder != NULL) {
return &nv_connector->nv_detected_encoder->base;
}
return NULL;
}
/* Probe/mode-query helper callbacks shared by all NVIDIA connectors. */
static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
    .get_modes = nv_drm_connector_get_modes,
    .mode_valid = nv_drm_connector_mode_valid,
    .best_encoder = nv_drm_connector_best_encoder,
};
/*
 * Allocate, initialize, and register a DRM connector wrapping the NVKMS
 * physical connector identified by @physicalIndex.  On failure, unwinds
 * all partially-acquired resources via the goto chain and returns an
 * ERR_PTR (allocation failures yield -ENOMEM; otherwise the error from
 * drm_connector_init()/drm_connector_register()).
 */
static struct drm_connector*
nv_drm_connector_new(struct drm_device *dev,
                     NvU32 physicalIndex, NvKmsConnectorType type,
                     NvBool internal,
                     char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_connector *nv_connector = NULL;
    int ret = -ENOMEM;

    if ((nv_connector = nv_drm_calloc(1, sizeof(*nv_connector))) == NULL) {
        goto failed;
    }

    /*
     * Pre-allocate the connector's initial atomic state by hand.
     * NOTE(review): presumably done instead of relying on the .reset
     * hook; confirm why manual allocation is required here.
     */
    if ((nv_connector->base.state =
            nv_drm_calloc(1, sizeof(*nv_connector->base.state))) == NULL) {
        goto failed_state_alloc;
    }

    nv_connector->base.state->connector = &nv_connector->base;

    nv_connector->physicalIndex = physicalIndex;
    nv_connector->type = type;
    nv_connector->internal = internal;

    /* dpAddress is a fixed-size buffer on both sides (caller contract). */
    strcpy(nv_connector->dpAddress, dpAddress);

    ret = drm_connector_init(
        dev,
        &nv_connector->base, &nv_connector_funcs,
        nvkms_connector_type_to_drm_connector_type(type, internal));
    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to initialize connector created from physical index %u",
            nv_connector->physicalIndex);
        goto failed_connector_init;
    }

    drm_connector_helper_add(&nv_connector->base, &nv_connector_helper_funcs);

    /* Hotplug-capable by default; VGA must be actively polled instead. */
    nv_connector->base.polled = DRM_CONNECTOR_POLL_HPD;

    if (nv_connector->type == NVKMS_CONNECTOR_TYPE_VGA) {
        nv_connector->base.polled =
            DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
    }

    /* Register connector with DRM subsystem */
    ret = drm_connector_register(&nv_connector->base);
    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to register connector created from physical index %u",
            nv_connector->physicalIndex);
        goto failed_connector_register;
    }

    return &nv_connector->base;

failed_connector_register:
    drm_connector_cleanup(&nv_connector->base);

failed_connector_init:
    nv_drm_free(nv_connector->base.state);

failed_state_alloc:
    nv_drm_free(nv_connector);

failed:
    return ERR_PTR(ret);
}
/*
 * Get connector with given physical index if one exists. Otherwise, create
 * and return a new connector.
 *
 * An existing connector is reused only when both the physical index and
 * the DP topology address match; the same index with a different
 * dpAddress gets a fresh connector.  May return ERR_PTR on
 * allocation/registration failure (propagated from nv_drm_connector_new()).
 */
struct drm_connector*
nv_drm_get_connector(struct drm_device *dev,
                     NvU32 physicalIndex, NvKmsConnectorType type,
                     NvBool internal,
                     char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
{
    struct drm_connector *connector = NULL;

#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    struct drm_connector_list_iter conn_iter;
    nv_drm_connector_list_iter_begin(dev, &conn_iter);
#else
    /*
     * Older kernels lack the connector-list iterator API; walk the list
     * under the mode_config mutex instead.  NOTE(review): conn_iter is
     * not declared on this path — presumably nv_drm_for_each_connector()
     * ignores its second argument here; confirm in nvidia-drm-helper.h.
     */
    struct drm_mode_config *config = &dev->mode_config;
    mutex_lock(&config->mutex);
#endif

    /* Lookup for existing connector with same physical index */
    nv_drm_for_each_connector(connector, &conn_iter, dev) {
        struct nv_drm_connector *nv_connector = to_nv_connector(connector);

        if (nv_connector->physicalIndex == physicalIndex) {
            /* The same physical connector must not change type/internal. */
            BUG_ON(nv_connector->type != type ||
                   nv_connector->internal != internal);

            if (strcmp(nv_connector->dpAddress, dpAddress) == 0) {
                goto done;
            }
        }
    }

    connector = NULL;

done:

#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_end(&conn_iter);
#else
    mutex_unlock(&config->mutex);
#endif

    if (!connector) {
        connector = nv_drm_connector_new(dev,
                                         physicalIndex, type, internal,
                                         dpAddress);
    }

    return connector;
}
#endif

View File

@@ -0,0 +1,89 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_CONNECTOR_H__
#define __NVIDIA_DRM_CONNECTOR_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
#include "nvtypes.h"
#include "nvkms-api-types.h"
/*
 * Driver-private connector object; embeds the DRM core connector as
 * @base (recover the wrapper with to_nv_connector()).
 */
struct nv_drm_connector {
    NvU32 physicalIndex;   /* NVKMS physical connector index */

    NvBool internal;       /* NOTE(review): presumably an internal panel
                            * (eDP/LVDS) flag — confirm against NVKMS */
    NvKmsConnectorType type;

    char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; /* DP topology address */

    struct nv_drm_encoder *nv_detected_encoder; /* set by the last detect */
    struct edid *edid;     /* EDID copy cached by detect; freed on destroy */

    atomic_t connection_status_dirty; /* hotplug "needs re-detect" flag */

    struct drm_connector base;
};
/*
 * Upcast from the embedded drm_connector to the wrapping
 * nv_drm_connector.  NULL-safe: a NULL DRM connector maps to NULL.
 */
static inline struct nv_drm_connector *to_nv_connector(
    struct drm_connector *connector)
{
    return (connector == NULL)
        ? NULL
        : container_of(connector, struct nv_drm_connector, base);
}
/*
 * Mark the connector's connection status dirty (e.g. after a hotplug
 * event).  Atomic false -> true transition; no-op if already dirty.
 */
static inline void nv_drm_connector_mark_connection_status_dirty(
    struct nv_drm_connector *nv_connector)
{
    atomic_cmpxchg(&nv_connector->connection_status_dirty, false, true);
}

/*
 * Atomically test-and-clear the dirty flag; returns true iff the flag
 * was set (i.e. exactly one caller observes each dirtying).
 */
static inline bool nv_drm_connector_check_connection_status_dirty_and_clear(
    struct nv_drm_connector *nv_connector)
{
    return atomic_cmpxchg(
        &nv_connector->connection_status_dirty,
        true,
        false) == true;
}

/*
 * Look up (or create) the connector with the given NVKMS physical index.
 * Defined in nvidia-drm-connector.c.
 */
struct drm_connector*
nv_drm_get_connector(struct drm_device *dev,
                     NvU32 physicalIndex, NvKmsConnectorType type,
                     NvBool internal,
                     char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_CONNECTOR_H__ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,296 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_CRTC_H__
#define __NVIDIA_DRM_CRTC_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-helper.h"
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#include <drm/drm_crtc.h>
#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_ROTATE_* , DRM_REFLECT_* */
#include <drm/drm_blend.h>
#endif
#if defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */
#include <uapi/drm/drm_mode.h>
#endif
#include "nvtypes.h"
#include "nvkms-kapi.h"
#if defined(NV_DRM_ROTATION_AVAILABLE)
/*
 * 19-05-2017 c2c446ad29437bb92b157423c632286608ebd3ec has added
 * DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to UAPI and removed
 * DRM_ROTATE_* and DRM_REFLECT_*
 *
 * Map the new names onto the old ones when only the old ones exist, so
 * the rest of the driver can use the new spelling unconditionally.
 */
#if !defined(DRM_MODE_ROTATE_0)
#define DRM_MODE_ROTATE_0    DRM_ROTATE_0
#define DRM_MODE_ROTATE_90   DRM_ROTATE_90
#define DRM_MODE_ROTATE_180  DRM_ROTATE_180
#define DRM_MODE_ROTATE_270  DRM_ROTATE_270
#define DRM_MODE_REFLECT_X   DRM_REFLECT_X
#define DRM_MODE_REFLECT_Y   DRM_REFLECT_Y
#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK
#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK
#endif

#endif //NV_DRM_ROTATION_AVAILABLE
/*
 * Driver-private CRTC object; embeds the DRM core CRTC as @base
 * (recover the wrapper with to_nv_crtc()).
 */
struct nv_drm_crtc {
    NvU32 head;   /* NVKMS head index driven by this CRTC */

    /**
     * @flip_list:
     *
     * List of flips pending to get processed by __nv_drm_handle_flip_event().
     * Protected by @flip_list_lock.
     */
    struct list_head flip_list;

    /**
     * @flip_list_lock:
     *
     * Spinlock to protect @flip_list.
     */
    spinlock_t flip_list_lock;

    struct drm_crtc base;
};

/**
 * struct nv_drm_flip - flip state
 *
 * This state is getting used to consume DRM completion event associated
 * with each crtc state from atomic commit.
 *
 * Function nv_drm_atomic_apply_modeset_config() consumes DRM completion
 * event, save it into flip state associated with crtc and queue flip state into
 * crtc's flip list and commits atomic update to hardware.
 */
struct nv_drm_flip {
    /**
     * @event:
     *
     * Optional pointer to a DRM event to signal upon completion of
     * the state update.
     */
    struct drm_pending_vblank_event *event;

    /**
     * @pending_events:
     *
     * Number of HW events pending to signal completion of the state
     * update.
     */
    uint32_t pending_events;

    /**
     * @list_entry:
     *
     * Entry on the per-CRTC &nv_drm_crtc.flip_list. Protected by
     * &nv_drm_crtc.flip_list_lock.
     */
    struct list_head list_entry;

    /**
     * @deferred_flip_list:
     *
     * List of flip objects whose processing is deferred until processing of
     * this flip object. Protected by &nv_drm_crtc.flip_list_lock.
     * nv_drm_atomic_commit() gets last flip object from
     * nv_drm_crtc:flip_list and add deferred flip objects into
     * @deferred_flip_list, __nv_drm_handle_flip_event() processes
     * @deferred_flip_list.
     */
    struct list_head deferred_flip_list;
};

/*
 * Driver-private CRTC atomic state; embeds the DRM core state as @base.
 */
struct nv_drm_crtc_state {
    /**
     * @base:
     *
     * Base DRM crtc state object for this.
     */
    struct drm_crtc_state base;

    /**
     * @req_config:
     *
     * Requested head's modeset configuration corresponding to this crtc state.
     */
    struct NvKmsKapiHeadRequestedConfig req_config;

    /**
     * @nv_flip:
     *
     * Flip state associated with this crtc state, gets allocated
     * by nv_drm_atomic_crtc_duplicate_state(), on successful commit it gets
     * consumed and queued into flip list by
     * nv_drm_atomic_apply_modeset_config() and finally gets destroyed
     * by __nv_drm_handle_flip_event() after getting processed.
     *
     * In case of failure of atomic commit, this flip state getting destroyed by
     * nv_drm_atomic_crtc_destroy_state().
     */
    struct nv_drm_flip *nv_flip;
};
/* Upcast drm_crtc_state to the wrapping nv_drm_crtc_state. */
static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state)
{
    return container_of(state, struct nv_drm_crtc_state, base);
}

/* Driver-private plane object; embeds the DRM core plane as @base. */
struct nv_drm_plane {
    /**
     * @base:
     *
     * Base DRM plane object for this plane.
     */
    struct drm_plane base;

    /**
     * @defaultCompositionMode:
     *
     * Default composition blending mode of this plane.
     */
    enum NvKmsCompositionBlendingMode defaultCompositionMode;

    /**
     * @layer_idx:
     *
     * Index of this plane in the per head array of layers.
     */
    uint32_t layer_idx;
};

/* Upcast drm_plane to the wrapping nv_drm_plane; NULL-safe. */
static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
{
    if (plane == NULL) {
        return NULL;
    }
    return container_of(plane, struct nv_drm_plane, base);
}

/* Driver-private plane atomic state; embeds the DRM core state as @base. */
struct nv_drm_plane_state {
    struct drm_plane_state base;
    /*
     * Userspace pointer to receive an s32 value.
     * NOTE(review): presumably a sync-file fd returned to userspace —
     * confirm against the ioctl that sets it.
     */
    s32 __user *fd_user_ptr;
};

/* Upcast drm_plane_state to the wrapping nv_drm_plane_state. */
static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state)
{
    return container_of(state, struct nv_drm_plane_state, base);
}

/* Upcast drm_crtc to the wrapping nv_drm_crtc; NULL-safe. */
static inline struct nv_drm_crtc *to_nv_crtc(struct drm_crtc *crtc)
{
    if (crtc == NULL) {
        return NULL;
    }
    return container_of(crtc, struct nv_drm_crtc, base);
}
/*
 * CRTCs are static objects, list does not change once after initialization and
 * before teardown of device. Initialization/teardown paths are single
 * threaded, so no locking required.
 */
static inline
struct nv_drm_crtc *nv_drm_crtc_lookup(struct nv_drm_device *nv_dev, NvU32 head)
{
    struct drm_crtc *crtc;

    /* Linear scan over the device's CRTCs; NULL if no CRTC drives @head. */
    nv_drm_for_each_crtc(crtc, nv_dev->dev) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);

        if (nv_crtc->head == head) {
            return nv_crtc;
        }
    }
    return NULL;
}
/**
 * nv_drm_crtc_enqueue_flip - Enqueue nv_drm_flip object to flip_list of crtc.
 *
 * Takes flip_list_lock; safe to call concurrently with the dequeue path.
 */
static inline void nv_drm_crtc_enqueue_flip(struct nv_drm_crtc *nv_crtc,
                                            struct nv_drm_flip *nv_flip)
{
    spin_lock(&nv_crtc->flip_list_lock);
    list_add(&nv_flip->list_entry, &nv_crtc->flip_list);
    spin_unlock(&nv_crtc->flip_list_lock);
}
/**
 * nv_drm_crtc_dequeue_flip - Dequeue nv_drm_flip object from flip_list of crtc.
 *
 * Decrements pending_events on the first queued flip. The flip is removed
 * from the list only when its pending_events count reaches zero, and is
 * returned to the caller only in that case; otherwise NULL is returned
 * (either events are still pending or — WARN_ON — the list was empty).
 */
static inline
struct nv_drm_flip *nv_drm_crtc_dequeue_flip(struct nv_drm_crtc *nv_crtc)
{
    struct nv_drm_flip *nv_flip = NULL;
    uint32_t pending_events = 0;
    spin_lock(&nv_crtc->flip_list_lock);
    nv_flip = list_first_entry_or_null(&nv_crtc->flip_list,
                                       struct nv_drm_flip, list_entry);
    if (likely(nv_flip != NULL)) {
        /*
         * Decrement pending_event count and dequeue flip object if
         * pending_event count becomes 0.
         */
        pending_events = --nv_flip->pending_events;
        if (!pending_events) {
            list_del(&nv_flip->list_entry);
        }
    }
    spin_unlock(&nv_crtc->flip_list_lock);
    /* Only hand back a flip whose last pending event was just consumed. */
    if (WARN_ON(nv_flip == NULL) || pending_events) {
        return NULL;
    }
    return nv_flip;
}
void nv_drm_enumerate_crtcs_and_planes(
struct nv_drm_device *nv_dev,
const struct NvKmsKapiDeviceResourcesInfo *pResInfo);
int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_CRTC_H__ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_DRV_H__
#define __NVIDIA_DRM_DRV_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
int nv_drm_probe_devices(void);
void nv_drm_remove_devices(void);
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_DRV_H__ */

View File

@@ -0,0 +1,352 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-encoder.h"
#include "nvidia-drm-utils.h"
#include "nvidia-drm-connector.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-helper.h"
#include "nvmisc.h"
/*
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
* moves a number of helper function definitions from
* drm/drm_crtc_helper.h to a new drm_probe_helper.h.
*/
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
#include <drm/drm_probe_helper.h>
#endif
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
/* DRM .destroy hook: detach from the core, then free the wrapper object. */
static void nv_drm_encoder_destroy(struct drm_encoder *encoder)
{
    struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);

    drm_encoder_cleanup(encoder);

    nv_drm_free(nv_encoder);
}
/* Core encoder vtable: only teardown is driver-specific. */
static const struct drm_encoder_funcs nv_encoder_funcs = {
    .destroy = nv_drm_encoder_destroy,
};
/* No driver-specific mode fixup needed; accept every mode unchanged. */
static bool nv_drm_encoder_mode_fixup(struct drm_encoder *encoder,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
    return true;
}
/* Intentionally empty: modeset work is handled through the atomic path. */
static void nv_drm_encoder_prepare(struct drm_encoder *encoder)
{
}
/* Intentionally empty: modeset work is handled through the atomic path. */
static void nv_drm_encoder_commit(struct drm_encoder *encoder)
{
}
/* Intentionally empty: modeset work is handled through the atomic path. */
static void nv_drm_encoder_mode_set(struct drm_encoder *encoder,
                                    struct drm_display_mode *mode,
                                    struct drm_display_mode *adjusted_mode)
{
}
/* Helper vtable: all hooks are stubs required by the legacy helper API. */
static const struct drm_encoder_helper_funcs nv_encoder_helper_funcs = {
    .mode_fixup = nv_drm_encoder_mode_fixup,
    .prepare    = nv_drm_encoder_prepare,
    .commit     = nv_drm_encoder_commit,
    .mode_set   = nv_drm_encoder_mode_set,
};
/* Translate an NVKMS head mask into the corresponding DRM CRTC mask. */
static uint32_t get_crtc_mask(struct drm_device *dev, uint32_t headMask)
{
    uint32_t mask = 0;
    struct drm_crtc *crtc = NULL;

    nv_drm_for_each_crtc(crtc, dev) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);

        if ((headMask & NVBIT(nv_crtc->head)) != 0) {
            mask |= drm_crtc_mask(crtc);
        }
    }

    return mask;
}
/*
 * Helper function to create new encoder for given NvKmsKapiDisplay
 * with given signal format.
 *
 * Returns the new encoder, or an ERR_PTR() on allocation or
 * drm_encoder_init() failure.
 */
static struct drm_encoder*
nv_drm_encoder_new(struct drm_device *dev,
                   NvKmsKapiDisplay hDisplay,
                   NvKmsConnectorSignalFormat format,
                   unsigned int crtc_mask)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_encoder *nv_encoder = NULL;
    int ret = 0;
    /* Allocate an NVIDIA encoder object */
    nv_encoder = nv_drm_calloc(1, sizeof(*nv_encoder));
    if (nv_encoder == NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to allocate memory for NVIDIA-DRM encoder object");
        return ERR_PTR(-ENOMEM);
    }
    nv_encoder->hDisplay = hDisplay;
    /* Initialize the base encoder object and add it to the drm subsystem */
    /* Newer kernels take an extra name argument; pass NULL for a default. */
    ret = drm_encoder_init(dev,
                           &nv_encoder->base, &nv_encoder_funcs,
                           nvkms_connector_signal_to_drm_encoder_signal(format)
#if defined(NV_DRM_ENCODER_INIT_HAS_NAME_ARG)
                           , NULL
#endif
                           );
    if (ret != 0) {
        nv_drm_free(nv_encoder);
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to initialize encoder created from NvKmsKapiDisplay 0x%08x",
            hDisplay);
        return ERR_PTR(ret);
    }
    nv_encoder->base.possible_crtcs = crtc_mask;
    drm_encoder_helper_add(&nv_encoder->base, &nv_encoder_helper_funcs);
    return &nv_encoder->base;
}
/*
 * nv_drm_add_encoder - Create a DRM encoder for the given NvKmsKapiDisplay
 * and attach it to the matching DRM connector.
 *
 * Returns the new encoder, or an ERR_PTR() on failure.
 */
struct drm_encoder*
nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
    struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
    struct drm_encoder *encoder = NULL;
    struct nv_drm_encoder *nv_encoder = NULL;
    struct drm_connector *connector = NULL;
    int ret = 0;

    /* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */

    if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
        ret = -ENOMEM;
        goto done;
    }

    if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
        ret = -EINVAL;
        goto done;
    }

    connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
                                             displayInfo->connectorHandle);

    if (IS_ERR(connectorInfo)) {
        ret = PTR_ERR(connectorInfo);
        /*
         * Clear the ERR_PTR so the cleanup path below does not pass an
         * encoded error value to nv_drm_free().
         */
        connectorInfo = NULL;
        goto done;
    }

    /* Create and add drm encoder */

    encoder = nv_drm_encoder_new(dev,
                                 displayInfo->handle,
                                 connectorInfo->signalFormat,
                                 get_crtc_mask(dev, connectorInfo->headMask));

    if (IS_ERR(encoder)) {
        ret = PTR_ERR(encoder);
        goto done;
    }

    /* Get connector from respective physical index */

    connector =
        nv_drm_get_connector(dev,
                             connectorInfo->physicalIndex,
                             connectorInfo->type,
                             displayInfo->internal, displayInfo->dpAddress);

    if (IS_ERR(connector)) {
        ret = PTR_ERR(connector);
        goto failed_connector_encoder_attach;
    }

    /* Attach encoder and connector */

    ret = nv_drm_connector_attach_encoder(connector, encoder);

    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to attach encoder created from NvKmsKapiDisplay 0x%08x "
            "to connector",
            hDisplay);
        goto failed_connector_encoder_attach;
    }

    nv_encoder = to_nv_encoder(encoder);

    /* mode_config.mutex serializes connection-status updates. */
    mutex_lock(&dev->mode_config.mutex);

    nv_encoder->nv_connector = to_nv_connector(connector);

    nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);

    mutex_unlock(&dev->mode_config.mutex);

    goto done;

failed_connector_encoder_attach:

    drm_encoder_cleanup(encoder);

    /*
     * 'encoder' points at nv_drm_encoder::base, which is not the first
     * member of struct nv_drm_encoder: free the containing allocation,
     * not the interior pointer.
     */
    nv_drm_free(to_nv_encoder(encoder));

done:

    nv_drm_free(displayInfo);

    nv_drm_free(connectorInfo);

    return ret != 0 ? ERR_PTR(ret) : encoder;
}
/* Return the encoder bound to the given NVKMS display, or NULL if none. */
static inline struct nv_drm_encoder*
get_nv_encoder_from_nvkms_display(struct drm_device *dev,
                                  NvKmsKapiDisplay hDisplay)
{
    struct nv_drm_encoder *found = NULL;
    struct drm_encoder *encoder;

    nv_drm_for_each_encoder(encoder, dev) {
        struct nv_drm_encoder *candidate = to_nv_encoder(encoder);

        if (candidate->hDisplay == hDisplay) {
            found = candidate;
            break;
        }
    }

    return found;
}
/*
 * Handle a connection-status change event for the given NVKMS display:
 * mark the associated connector's cached status stale and signal a
 * hotplug event to userspace.
 */
void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
                                  NvKmsKapiDisplay hDisplay)
{
    struct drm_device *dev = nv_dev->dev;
    struct nv_drm_encoder *nv_encoder = NULL;
    /* Encoder list walk is done under mode_config.mutex. */
    mutex_lock(&dev->mode_config.mutex);
    nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
    mutex_unlock(&dev->mode_config.mutex);
    if (nv_encoder == NULL) {
        return;
    }
    /*
     * NOTE(review): the status-dirty update happens after dropping
     * mode_config.mutex — confirm this is safe against concurrent
     * connector teardown.
     */
    nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
    drm_kms_helper_hotplug_event(dev);
}
/*
 * Handle hot-connect of a dynamic display: create an encoder/connector
 * pair for it (unless one already exists) and signal a hotplug event.
 */
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
                                             NvKmsKapiDisplay hDisplay)
{
    struct drm_device *dev = nv_dev->dev;
    struct drm_encoder *encoder = NULL;
    struct nv_drm_encoder *nv_encoder = NULL;
    /*
     * Look for an existing encoder with the same hDisplay and
     * use it if available.
     */
    nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
    if (nv_encoder != NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Encoder with NvKmsKapiDisplay 0x%08x already exists.",
            hDisplay);
        return;
    }
    encoder = nv_drm_add_encoder(dev, hDisplay);
    if (IS_ERR(encoder)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to add encoder for NvKmsKapiDisplay 0x%08x",
            hDisplay);
        return;
    }
    /*
     * On some kernels, DRM has the notion of a "primary group" that
     * tracks the global mode setting state for the device.
     *
     * On kernels where DRM has a primary group, we need to reinitialize
     * after adding encoders and connectors.
     */
#if defined(NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT)
    drm_reinit_primary_mode_group(dev);
#endif
    drm_kms_helper_hotplug_event(dev);
}
#endif

View File

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_ENCODER_H__
#define __NVIDIA_DRM_ENCODER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#if defined(NV_DRM_DRM_ENCODER_H_PRESENT)
#include <drm/drm_encoder.h>
#else
#include <drm/drmP.h>
#endif
#include "nvkms-kapi.h"
/*
 * struct nv_drm_encoder - NVIDIA private DRM encoder.
 *
 * @hDisplay: NVKMS display this encoder is created from.
 * @nv_connector: connector this encoder is attached to.
 * @base: embedded core DRM encoder. Note it is NOT the first member, so
 *     &nv_encoder->base must never be freed directly; use container_of()
 *     (to_nv_encoder()) to recover the allocation.
 */
struct nv_drm_encoder {
    NvKmsKapiDisplay hDisplay;
    struct nv_drm_connector *nv_connector;
    struct drm_encoder base;
};
/* Recover the wrapping nv_drm_encoder; a NULL DRM encoder maps to NULL. */
static inline struct nv_drm_encoder *to_nv_encoder(
    struct drm_encoder *encoder)
{
    return (encoder != NULL) ?
        container_of(encoder, struct nv_drm_encoder, base) : NULL;
}
struct drm_encoder*
nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay);
void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay);
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_ENCODER_H__ */

View File

@@ -0,0 +1,257 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-fb.h"
#include "nvidia-drm-utils.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-format.h"
#include <drm/drm_crtc_helper.h>
/* Drop the reference held on each per-plane GEM object, then free nv_fb. */
static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
{
    uint32_t plane;

    for (plane = 0; plane < ARRAY_SIZE(nv_fb->nv_gem); plane++) {
        struct nv_drm_gem_object *nv_gem = nv_fb->nv_gem[plane];

        if (nv_gem != NULL) {
            nv_drm_gem_object_unreference_unlocked(nv_gem);
        }
    }

    nv_drm_free(nv_fb);
}
/*
 * DRM .destroy hook. Teardown order matters: unregister from the DRM
 * core first, then release the NVKMS surface, then drop GEM references
 * and free the wrapper.
 */
static void nv_drm_framebuffer_destroy(struct drm_framebuffer *fb)
{
    struct nv_drm_device *nv_dev = to_nv_device(fb->dev);
    struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
    /* Cleaup core framebuffer object */
    drm_framebuffer_cleanup(fb);
    /* Free NvKmsKapiSurface associated with this framebuffer object */
    nvKms->destroySurface(nv_dev->pDevice, nv_fb->pSurface);
    __nv_drm_framebuffer_free(nv_fb);
}
static int
nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file, unsigned int *handle)
{
struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
return nv_drm_gem_handle_create(file,
nv_fb->nv_gem[0],
handle);
}
/* Framebuffer vtable wired into drm_framebuffer_init(). */
static struct drm_framebuffer_funcs nv_framebuffer_funcs = {
    .destroy       = nv_drm_framebuffer_destroy,
    .create_handle = nv_drm_framebuffer_create_handle,
};
static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_framebuffer *nv_fb;
const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
uint32_t i;
/* Allocate memory for the framebuffer object */
nv_fb = nv_drm_calloc(1, sizeof(*nv_fb));
if (nv_fb == NULL) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Failed to allocate memory for framebuffer object");
return ERR_PTR(-ENOMEM);
}
if (num_planes > ARRAY_SIZE(nv_fb->nv_gem)) {
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes");
goto failed;
}
for (i = 0; i < num_planes; i++) {
if ((nv_fb->nv_gem[i] = nv_drm_gem_object_lookup(
dev,
file,
cmd->handles[i])) == NULL) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Failed to find gem object of type nvkms memory");
goto failed;
}
}
return nv_fb;
failed:
__nv_drm_framebuffer_free(nv_fb);
return ERR_PTR(-ENOENT);
}
/*
 * Register nv_fb with the DRM core and create the backing NVKMS surface
 * from its per-plane GEM memory, offsets, and pitches.
 *
 * Returns 0 on success; a negative errno on failure (the DRM
 * registration is unwound if surface creation fails).
 */
static int nv_drm_framebuffer_init(struct drm_device *dev,
                                   struct nv_drm_framebuffer *nv_fb,
                                   enum NvKmsSurfaceMemoryFormat format,
                                   bool have_modifier,
                                   uint64_t modifier)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct NvKmsKapiCreateSurfaceParams params = { };
    uint32_t i;
    int ret;
    /* Initialize the base framebuffer object and add it to drm subsystem */
    ret = drm_framebuffer_init(dev, &nv_fb->base, &nv_framebuffer_funcs);
    if (ret != 0) {
        NV_DRM_DEV_DEBUG_DRIVER(
            nv_dev,
            "Failed to initialize framebuffer object");
        return ret;
    }
    /* Only planes populated by the earlier GEM lookup are described. */
    for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
        if (nv_fb->nv_gem[i] != NULL) {
            params.planes[i].memory = nv_fb->nv_gem[i]->pMemory;
            params.planes[i].offset = nv_fb->base.offsets[i];
            params.planes[i].pitch = nv_fb->base.pitches[i];
        }
    }
    params.height = nv_fb->base.height;
    params.width = nv_fb->base.width;
    params.format = format;
    if (have_modifier) {
        /*
         * Decode the format modifier: bit 4 selects block-linear vs
         * pitch layout, bits [3:0] carry log2GobsPerBlockY — presumably
         * the NVIDIA DRM_FORMAT_MOD_NVIDIA_* encoding; confirm against
         * drm_fourcc.h.
         */
        params.explicit_layout = true;
        params.layout = (modifier & 0x10) ?
            NvKmsSurfaceMemoryLayoutBlockLinear :
            NvKmsSurfaceMemoryLayoutPitch;
        params.log2GobsPerBlockY = modifier & 0xf;
    } else {
        params.explicit_layout = false;
    }
    /* Create NvKmsKapiSurface */
    nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
    if (nv_fb->pSurface == NULL) {
        NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface");
        drm_framebuffer_cleanup(&nv_fb->base);
        return -EINVAL;
    }
    return 0;
}
/*
 * Create a DRM framebuffer from a userspace DRM_IOCTL_MODE_ADDFB2
 * request: validate the pixel format and (optional) format modifier,
 * look up the backing GEM objects, and create the NVKMS surface.
 *
 * Returns the new framebuffer or an ERR_PTR() on failure.
 */
struct drm_framebuffer *nv_drm_internal_framebuffer_create(
    struct drm_device *dev,
    struct drm_file *file,
    struct drm_mode_fb_cmd2 *cmd)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_framebuffer *nv_fb;
    uint64_t modifier = 0;
    int ret;
    enum NvKmsSurfaceMemoryFormat format;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
    int i;
#endif
    bool have_modifier = false;
    /* Check whether NvKms supports the given pixel format */
    if (!nv_drm_format_to_nvkms_format(cmd->pixel_format, &format)) {
        NV_DRM_DEV_DEBUG_DRIVER(
            nv_dev,
            "Unsupported drm pixel format 0x%08x", cmd->pixel_format);
        return ERR_PTR(-EINVAL);
    }
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
    /* Only modifier[0] is honored; it must be in the device's list. */
    if (cmd->flags & DRM_MODE_FB_MODIFIERS) {
        have_modifier = true;
        modifier = cmd->modifier[0];
        for (i = 0; nv_dev->modifiers[i] != DRM_FORMAT_MOD_INVALID; i++) {
            if (nv_dev->modifiers[i] == modifier) {
                break;
            }
        }
        if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
            NV_DRM_DEV_DEBUG_DRIVER(
                nv_dev,
                "Invalid format modifier for framebuffer object: 0x%016llx",
                modifier);
            return ERR_PTR(-EINVAL);
        }
    }
#endif
    nv_fb = nv_drm_framebuffer_alloc(dev, file, cmd);
    if (IS_ERR(nv_fb)) {
        return (struct drm_framebuffer *)nv_fb;
    }
    /* Fill out framebuffer metadata from the userspace fb creation request */
    drm_helper_mode_fill_fb_struct(
#if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG)
        dev,
#endif
        &nv_fb->base,
        cmd);
    /*
     * Finish up FB initialization by creating the backing NVKMS surface and
     * publishing the DRM fb
     */
    ret = nv_drm_framebuffer_init(dev, nv_fb, format, have_modifier, modifier);
    if (ret != 0) {
        __nv_drm_framebuffer_free(nv_fb);
        return ERR_PTR(ret);
    }
    return &nv_fb->base;
}
#endif

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_FB_H__
#define __NVIDIA_DRM_FB_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT)
#include <drm/drm_framebuffer.h>
#endif
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvkms-kapi.h"
/*
 * struct nv_drm_framebuffer - NVIDIA private DRM framebuffer.
 *
 * @pSurface: backing NVKMS surface created at init time.
 * @nv_gem: per-plane GEM objects (referenced; entries may be NULL).
 * @base: embedded core DRM framebuffer (not the first member).
 */
struct nv_drm_framebuffer {
    struct NvKmsKapiSurface *pSurface;
    struct nv_drm_gem_object*
        nv_gem[NVKMS_MAX_PLANES_PER_SURFACE];
    struct drm_framebuffer base;
};
/* Recover the wrapping nv_drm_framebuffer; a NULL DRM fb maps to NULL. */
static inline struct nv_drm_framebuffer *to_nv_framebuffer(
    struct drm_framebuffer *fb)
{
    return (fb != NULL) ?
        container_of(fb, struct nv_drm_framebuffer, base) : NULL;
}
struct drm_framebuffer *nv_drm_internal_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_FB_H__ */

View File

@@ -0,0 +1,162 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include "nvidia-drm-format.h"
#include "nvidia-drm-os-interface.h"
/*
 * Sparse map from NvKmsSurfaceMemoryFormat to DRM fourcc codes. Formats
 * without a DRM equivalent here are left 0; iterators over this table
 * must skip 0 entries.
 */
static const u32 nvkms_to_drm_format[] = {
    /* RGB formats */
    [NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555,
    [NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555,
    [NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565,
    [NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888,
    [NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888,
    [NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010,
    [NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010,
    [NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888,
    /* Packed YUV 4:2:2 formats */
    [NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422] = DRM_FORMAT_YUYV,
    [NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422] = DRM_FORMAT_UYVY,
    /* YUV semi-planar formats
     *
     * NVKMS YUV semi-planar formats are MSB aligned. Yx__UxVx means
     * that the UV components are packed like UUUUUVVVVV (MSB to LSB)
     * and Yx_VxUx means VVVVVUUUUU (MSB to LSB).
     */
    /*
     * 2 plane YCbCr
     * index 0 = Y plane, [7:0] Y
     * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
     * or
     * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
     */
    [NvKmsSurfaceMemoryFormatY8___V8U8_N444] = DRM_FORMAT_NV24, /* non-subsampled Cr:Cb plane */
    [NvKmsSurfaceMemoryFormatY8___U8V8_N444] = DRM_FORMAT_NV42, /* non-subsampled Cb:Cr plane */
    [NvKmsSurfaceMemoryFormatY8___V8U8_N422] = DRM_FORMAT_NV16, /* 2x1 subsampled Cr:Cb plane */
    [NvKmsSurfaceMemoryFormatY8___U8V8_N422] = DRM_FORMAT_NV61, /* 2x1 subsampled Cb:Cr plane */
    [NvKmsSurfaceMemoryFormatY8___V8U8_N420] = DRM_FORMAT_NV12, /* 2x2 subsampled Cr:Cb plane */
    [NvKmsSurfaceMemoryFormatY8___U8V8_N420] = DRM_FORMAT_NV21, /* 2x2 subsampled Cb:Cr plane */
    /* 10/12-bit entries are guarded: the fourcc may be absent on old kernels. */
#if defined(DRM_FORMAT_P210)
    /*
     * 2 plane YCbCr MSB aligned
     * index 0 = Y plane, [15:0] Y:x [10:6] little endian
     * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
     *
     * 2x1 subsampled Cr:Cb plane, 10 bit per channel
     */
    [NvKmsSurfaceMemoryFormatY10___V10U10_N422] = DRM_FORMAT_P210,
#endif
#if defined(DRM_FORMAT_P010)
    /*
     * 2 plane YCbCr MSB aligned
     * index 0 = Y plane, [15:0] Y:x [10:6] little endian
     * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
     *
     * 2x2 subsampled Cr:Cb plane 10 bits per channel
     */
    [NvKmsSurfaceMemoryFormatY10___V10U10_N420] = DRM_FORMAT_P010,
#endif
#if defined(DRM_FORMAT_P012)
    /*
     * 2 plane YCbCr MSB aligned
     * index 0 = Y plane, [15:0] Y:x [12:4] little endian
     * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
     *
     * 2x2 subsampled Cr:Cb plane 12 bits per channel
     */
    [NvKmsSurfaceMemoryFormatY12___V12U12_N420] = DRM_FORMAT_P012,
#endif
};
/*
 * Map a DRM fourcc to its NvKmsSurfaceMemoryFormat. Returns true and
 * stores the result in *nvkms_format on success, false if the DRM
 * format has no NVKMS equivalent.
 */
bool nv_drm_format_to_nvkms_format(u32 format,
                                   enum NvKmsSurfaceMemoryFormat *nvkms_format)
{
    enum NvKmsSurfaceMemoryFormat fmt;

    for (fmt = 0; fmt < ARRAY_SIZE(nvkms_to_drm_format); fmt++) {
        const u32 drm_format = nvkms_to_drm_format[fmt];

        /*
         * nvkms_to_drm_format[] is sparsely populated; 0 marks an
         * unpopulated entry and must never be treated as a match.
         */
        if (drm_format == 0 || drm_format != format) {
            continue;
        }

        *nvkms_format = fmt;
        return true;
    }

    return false;
}
/*
 * Build a heap-allocated array of DRM fourcc codes for every format set
 * in the given NVKMS format bitmask that has a DRM equivalent. On
 * success, *count holds the number of entries and the caller owns the
 * returned array (free with nv_drm_free()). Returns NULL on allocation
 * failure or when no format maps to DRM.
 */
uint32_t *nv_drm_format_array_alloc(
    unsigned int *count,
    const long unsigned int nvkms_format_mask)
{
    enum NvKmsSurfaceMemoryFormat i;
    /* Upper bound: one entry per set bit; unmapped bits are skipped later. */
    unsigned int max_count = hweight64(nvkms_format_mask);
    uint32_t *array = nv_drm_calloc(1, sizeof(uint32_t) * max_count);
    if (array == NULL) {
        return NULL;
    }
    *count = 0;
    for_each_set_bit(i, &nvkms_format_mask,
                     sizeof(nvkms_format_mask) * BITS_PER_BYTE) {
        /* Bits beyond the mapping table can have no DRM equivalent. */
        if (i >= ARRAY_SIZE(nvkms_to_drm_format)) {
            break;
        }
        /*
         * Note nvkms_to_drm_format[] is sparsely populated: it doesn't
         * handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0
         * entries when iterating through it.
         */
        if (nvkms_to_drm_format[i] == 0) {
            continue;
        }
        array[(*count)++] = nvkms_to_drm_format[i];
    }
    if (*count == 0) {
        nv_drm_free(array);
        return NULL;
    }
    return array;
}
#endif

View File

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_FORMAT_H__
#define __NVIDIA_DRM_FORMAT_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include <drm/drm_fourcc.h>
#include "nvkms-format.h"
bool nv_drm_format_to_nvkms_format(u32 format,
enum NvKmsSurfaceMemoryFormat *nvkms_format);
uint32_t *nv_drm_format_array_alloc(
unsigned int *count,
const long unsigned int nvkms_format_mask);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_FORMAT_H__ */

View File

@@ -0,0 +1,228 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-ioctl.h"
#include "linux/dma-buf.h"
/*
 * GEM .free hook for dma-buf backed objects: release the NVKMS memory
 * handle (if one was created), drop the PRIME import, and free the
 * wrapper.
 *
 * NOTE(review): nv_dev is referenced only under
 * NV_DRM_ATOMIC_MODESET_AVAILABLE — may warn as unused otherwise.
 */
static inline
void __nv_drm_gem_dma_buf_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_device *nv_dev = nv_gem->nv_dev;
    struct nv_drm_gem_dma_buf *nv_dma_buf = to_nv_dma_buf(nv_gem);
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    if (nv_dma_buf->base.pMemory) {
        /* Free NvKmsKapiMemory handle associated with this gem object */
        nvKms->freeMemory(nv_dev->pDevice, nv_dma_buf->base.pMemory);
    }
#endif
    /* Releases the sg table and the dma-buf attachment/reference. */
    drm_prime_gem_destroy(&nv_gem->base, nv_dma_buf->sgt);
    nv_drm_free(nv_dma_buf);
}
/*
 * GEM .create_mmap_offset hook: the device argument is not needed for a
 * dma-buf backed object, so simply delegate to the common helper.
 */
static int __nv_drm_gem_dma_buf_create_mmap_offset(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_object *nv_gem,
    uint64_t *offset)
{
    (void)nv_dev;

    return nv_drm_gem_create_mmap_offset(nv_gem, offset);
}
/*
 * GEM .mmap hook for dma-buf backed objects: redirect the mapping to
 * the exporter by temporarily retargeting the vma at the dma-buf's file
 * (the same save/patch/restore pattern as the kernel's dma_buf_mmap()).
 *
 * Fixes: the failure path previously restored vm_file but left
 * vm_pgoff rebased, handing a corrupted vma back to the caller.
 */
static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
                                     struct vm_area_struct *vma)
{
    struct dma_buf_attachment *attach = nv_gem->base.import_attach;
    struct dma_buf *dma_buf = attach->dmabuf;
    struct file *old_file;
    unsigned long old_pgoff;
    int ret;

    /* check if buffer supports mmap */
    if (!dma_buf->file->f_op->mmap)
        return -EINVAL;

    /* readjust the vma so the exporter sees an offset into the dma-buf */
    get_file(dma_buf->file);
    old_file = vma->vm_file;
    old_pgoff = vma->vm_pgoff;
    vma->vm_file = dma_buf->file;
    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);

    ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);

    if (ret) {
        /* restore old parameters on failure */
        vma->vm_file = old_file;
        vma->vm_pgoff = old_pgoff;
        fput(dma_buf->file);
    } else {
        /* the vma now holds the dma-buf file reference; drop the old one */
        if (old_file)
            fput(old_file);
    }

    return ret;
}
/* GEM object vtable for dma-buf (PRIME-imported) objects. */
const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops = {
    .free = __nv_drm_gem_dma_buf_free,
    .create_mmap_offset = __nv_drm_gem_dma_buf_create_mmap_offset,
    .mmap = __nv_drm_gem_dma_buf_mmap,
};
/*
 * PRIME import hook: wrap an imported dma-buf (already attached, with its
 * sg_table) in a new nv_drm_gem_dma_buf object.  On modeset-capable
 * devices an NvKmsKapiMemory handle is also created so the buffer can be
 * used with NVKMS.  Returns the new GEM object or NULL on allocation
 * failure.
 */
struct drm_gem_object*
nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
                                 struct dma_buf_attachment *attach,
                                 struct sg_table *sgt)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct dma_buf *dma_buf = attach->dmabuf;
    struct nv_drm_gem_dma_buf *nv_dma_buf;
    struct NvKmsKapiMemory *pMemory;

    if ((nv_dma_buf =
            nv_drm_calloc(1, sizeof(*nv_dma_buf))) == NULL) {
        return NULL;
    }

    // dma_buf->size must be a multiple of PAGE_SIZE
    BUG_ON(dma_buf->size % PAGE_SIZE);

    pMemory = NULL;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
        /*
         * NOTE(review): the last argument appears to be an inclusive limit
         * (size - 1) — confirm against the NvKmsKapi interface.
         */
        pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice,
                                                         (NvP64)(NvUPtr)dma_buf,
                                                         dma_buf->size - 1);
    }
#endif

    nv_drm_gem_object_init(nv_dev, &nv_dma_buf->base,
                           &__nv_gem_dma_buf_ops, dma_buf->size, pMemory);

    nv_dma_buf->sgt = sgt;

    return &nv_dma_buf->base.base;
}
/*
 * DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY: export the memory backing a
 * dma-buf GEM object through NVKMS, using the caller-supplied params blob
 * (p->nvkms_params_ptr / p->nvkms_params_size).  If the object has no
 * NvKmsKapiMemory yet, a temporary one is derived from its sg_table.
 * Returns 0 on success or a negative errno.
 */
int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
                                          void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_export_dmabuf_memory_params *p = data;
    struct nv_drm_gem_dma_buf *nv_dma_buf = NULL;
    int ret = 0;
    struct NvKmsKapiMemory *pTmpMemory = NULL;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        goto done;
    }

    if (p->__pad != 0) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
        goto done;
    }

    /* Lookup takes a reference; dropped in the 'done' path below. */
    if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(
                    dev, filep, p->handle)) == NULL) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup DMA-BUF GEM object for export: 0x%08x",
            p->handle);
        goto done;
    }

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
        if (!nv_dma_buf->base.pMemory) {
            /*
             * Get RM system memory handle from SGT - RM will take a reference
             * on this GEM object to prevent the DMA-BUF from being unpinned
             * prematurely.
             */
            pTmpMemory = nvKms->getSystemMemoryHandleFromSgt(
                nv_dev->pDevice,
                (NvP64)(NvUPtr)nv_dma_buf->sgt,
                (NvP64)(NvUPtr)&nv_dma_buf->base.base,
                nv_dma_buf->base.base.size - 1);
        }
    }
#endif

    if (!nv_dma_buf->base.pMemory && !pTmpMemory) {
        ret = -ENOMEM;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to get memory to export from DMA-BUF GEM object: 0x%08x",
            p->handle);
        goto done;
    }

    /* Prefer the object's own memory handle; fall back to the temporary. */
    if (!nvKms->exportMemory(nv_dev->pDevice,
                             nv_dma_buf->base.pMemory ?
                                 nv_dma_buf->base.pMemory : pTmpMemory,
                             p->nvkms_params_ptr,
                             p->nvkms_params_size)) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to export memory from DMA-BUF GEM object: 0x%08x",
            p->handle);
        goto done;
    }

done:
    if (pTmpMemory) {
        /*
         * Release reference on RM system memory to prevent circular
         * refcounting. Another refcount will still be held by RM FD.
         */
        nvKms->freeMemory(nv_dev->pDevice, pTmpMemory);
    }

    if (nv_dma_buf != NULL) {
        nv_drm_gem_object_unreference_unlocked(&nv_dma_buf->base);
    }

    return ret;
}
#endif

View File

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_DMA_BUF_H__
#define __NVIDIA_DRM_GEM_DMA_BUF_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-gem.h"
/*
 * GEM wrapper for a PRIME-imported dma-buf: the base GEM state plus the
 * sg_table produced at import time (released in the object's free hook).
 */
struct nv_drm_gem_dma_buf {
    struct nv_drm_gem_object base;   /* must be first: container_of casts */
    struct sg_table *sgt;            /* import-time scatter/gather table */
};
extern const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops;
/* Downcast a base GEM object to its dma-buf wrapper; NULL-safe. */
static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf(
    struct nv_drm_gem_object *nv_gem)
{
    if (nv_gem == NULL) {
        return NULL;
    }

    return container_of(nv_gem, struct nv_drm_gem_dma_buf, base);
}
/*
 * Look up a GEM handle and return it as a dma-buf wrapper, or NULL if the
 * handle is invalid or refers to a different object type.  On success the
 * caller owns the reference taken by the lookup.
 */
static inline
struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup(
    struct drm_device *dev,
    struct drm_file *filp,
    u32 handle)
{
    struct nv_drm_gem_object *nv_gem =
        nv_drm_gem_object_lookup(dev, filp, handle);

    if (nv_gem == NULL) {
        return NULL;
    }

    if (nv_gem->ops != &__nv_gem_dma_buf_ops) {
        /* Wrong object type: drop the lookup reference. */
        nv_drm_gem_object_unreference_unlocked(nv_gem);
        return NULL;
    }

    return to_nv_dma_buf(nv_gem);
}
struct drm_gem_object*
nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif
#endif /* __NVIDIA_DRM_GEM_DMA_BUF_H__ */

View File

@@ -0,0 +1,585 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#include <linux/io.h>
#include "nv-mm.h"
/*
 * Release an NVKMS-memory GEM object: tear down the CPU mapping (iounmap
 * plus NVKMS user-mapping teardown) if one was made, release the backing
 * page list, free the NvKmsKapiMemory handle, and free the wrapper.
 */
static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_device *nv_dev = nv_gem->nv_dev;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);

    if (nv_nvkms_memory->physically_mapped) {
        /* The write-combined ioremap is optional; see __nv_drm_gem_nvkms_map. */
        if (nv_nvkms_memory->pWriteCombinedIORemapAddress != NULL) {
            iounmap(nv_nvkms_memory->pWriteCombinedIORemapAddress);
        }

        nvKms->unmapMemory(nv_dev->pDevice,
                           nv_nvkms_memory->base.pMemory,
                           NVKMS_KAPI_MAPPING_TYPE_USER,
                           nv_nvkms_memory->pPhysicalAddress);
    }

    if (nv_nvkms_memory->pages_count != 0) {
        /* 'pages' came from nvKms->getMemoryPages() as an NvU64 array. */
        nvKms->freeMemoryPages((NvU64 *)nv_nvkms_memory->pages);
    }

    /* Free NvKmsKapiMemory handle associated with this gem object */
    nvKms->freeMemory(nv_dev->pDevice, nv_nvkms_memory->base.pMemory);

    nv_drm_free(nv_nvkms_memory);
}
static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma)
{
return drm_gem_mmap_obj(&nv_gem->base,
drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
}
/*
 * Page-fault handler for mmap'd NVKMS-memory objects.  When the object
 * has no backing page list (pages_count == 0), the pfn is derived from
 * the NVKMS physical mapping address; otherwise it comes from the page
 * array.
 *
 * Fix: the bounds check used '>', which permitted page_offset ==
 * pages_count and an out-of-bounds read of pages[]; valid indices are
 * [0, pages_count), so '>=' is required.
 */
static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
    struct nv_drm_gem_object *nv_gem,
    struct vm_area_struct *vma,
    struct vm_fault *vmf)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);
    unsigned long address = nv_page_fault_va(vmf);
    struct drm_gem_object *gem = vma->vm_private_data;
    unsigned long page_offset, pfn;
    vm_fault_t ret;

    /* Offset of the faulting page within the object. */
    page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);

    if (nv_nvkms_memory->pages_count == 0) {
        pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
        pfn >>= PAGE_SHIFT;
        pfn += page_offset;
    } else {
        BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
        pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
    }

#if defined(NV_VMF_INSERT_PFN_PRESENT)
    ret = vmf_insert_pfn(vma, address, pfn);
#else
    /* Older kernels: translate the errno from vm_insert_pfn to a fault code. */
    ret = vm_insert_pfn(vma, address, pfn);
    switch (ret) {
        case 0:
        case -EBUSY:
            /*
             * EBUSY indicates that another thread already handled
             * the faulted range.
             */
            ret = VM_FAULT_NOPAGE;
            break;
        case -ENOMEM:
            ret = VM_FAULT_OOM;
            break;
        default:
            WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
            ret = VM_FAULT_SIGBUS;
            break;
    }
#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */

    return ret;
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */

    return VM_FAULT_SIGBUS;
}
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
struct drm_device *dev,
const struct nv_drm_gem_object *nv_gem_src);
/*
 * Establish CPU access to NVKMS memory: create a user-type mapping via
 * NVKMS (yielding pPhysicalAddress) and attempt a write-combined ioremap
 * of it.  An ioremap failure is logged but non-fatal; physically_mapped
 * is set either way.  No-op (returns 0) on devices without video memory.
 * Returns 0 on success or -ENOMEM if the NVKMS mapping fails.
 */
static int __nv_drm_gem_nvkms_map(
    struct nv_drm_device *nv_dev,
    struct NvKmsKapiMemory *pMemory,
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory,
    uint64_t size)
{
    if (!nv_dev->hasVideoMemory) {
        return 0;
    }

    if (!nvKms->mapMemory(nv_dev->pDevice,
                          pMemory,
                          NVKMS_KAPI_MAPPING_TYPE_USER,
                          &nv_nvkms_memory->pPhysicalAddress)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to map NvKmsKapiMemory 0x%p",
            pMemory);
        return -ENOMEM;
    }

    nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
        (uintptr_t)nv_nvkms_memory->pPhysicalAddress,
        size);

    if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) {
        /* Non-fatal: fault handling only needs pPhysicalAddress. */
        NV_DRM_DEV_LOG_INFO(
            nv_dev,
            "Failed to ioremap_wc NvKmsKapiMemory 0x%p",
            pMemory);
    }

    nv_nvkms_memory->physically_mapped = true;

    return 0;
}
/*
 * Create the mmap offset for an NVKMS-memory object, lazily establishing
 * the CPU mapping the first time an offset is requested.
 */
static int __nv_drm_gem_map_nvkms_memory_offset(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_object *nv_gem,
    uint64_t *offset)
{
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);
    int status = 0;

    if (!nv_nvkms_memory->physically_mapped) {
        status = __nv_drm_gem_nvkms_map(nv_dev,
                                        nv_nvkms_memory->base.pMemory,
                                        nv_nvkms_memory,
                                        nv_nvkms_memory->base.base.size);
    }

    if (status != 0) {
        return status;
    }

    return nv_drm_gem_create_mmap_offset(&nv_nvkms_memory->base, offset);
}
/*
 * PRIME get_sg_table hook: build an sg_table over the object's backing
 * pages.  Fails (NULL) when there is no page list, e.g. video memory.
 */
static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_device *nv_dev = nv_gem->nv_dev;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);

    if (nv_nvkms_memory->pages_count == 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Cannot create sg_table for NvKmsKapiMemory 0x%p",
            nv_gem->pMemory);
        return NULL;
    }

    return nv_drm_prime_pages_to_sg(nv_dev->dev,
                                    nv_nvkms_memory->pages,
                                    nv_nvkms_memory->pages_count);
}
/* GEM object vtable for NVKMS-allocated/imported memory objects. */
const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
    .free = __nv_drm_gem_nvkms_memory_free,
    .prime_dup = __nv_drm_gem_nvkms_prime_dup,
    .mmap = __nv_drm_gem_nvkms_mmap,
    .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
    .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,
    .prime_get_sg_table = __nv_drm_gem_nvkms_memory_prime_get_sg_table,
};
/*
 * Common initialization for NVKMS-backed GEM objects: fetch the backing
 * page list and initialize the base GEM object with the NVKMS memory ops.
 * getMemoryPages() may legitimately fail for video memory, so the failure
 * is fatal (-ENOMEM) only on devices without video memory, where sysmem
 * allocations are expected to have pages.  Returns 0 on success.
 */
static int __nv_drm_nvkms_gem_obj_init(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory,
    struct NvKmsKapiMemory *pMemory,
    uint64_t size)
{
    NvU64 *pages = NULL;
    NvU32 numPages = 0;

    nv_nvkms_memory->pPhysicalAddress = NULL;
    nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
    nv_nvkms_memory->physically_mapped = false;

    if (!nvKms->getMemoryPages(nv_dev->pDevice,
                               pMemory,
                               &pages,
                               &numPages) &&
        !nv_dev->hasVideoMemory) {
        /* GetMemoryPages may fail for vidmem allocations,
         * but it should not fail for sysmem allocations. */
        NV_DRM_DEV_LOG_ERR(nv_dev,
            "Failed to get memory pages for NvKmsKapiMemory 0x%p",
            pMemory);
        return -ENOMEM;
    }
    nv_nvkms_memory->pages_count = numPages;
    /* NvU64 page list reinterpreted as struct page pointers; freed with
     * nvKms->freeMemoryPages() in the object's free hook. */
    nv_nvkms_memory->pages = (struct page **)pages;

    nv_drm_gem_object_init(nv_dev,
                           &nv_nvkms_memory->base,
                           &nv_gem_nvkms_memory_ops,
                           size,
                           pMemory);

    return 0;
}
int nv_drm_dumb_create(
struct drm_file *file_priv,
struct drm_device *dev, struct drm_mode_create_dumb *args)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
uint8_t compressible = 0;
struct NvKmsKapiMemory *pMemory;
int ret = 0;
args->pitch = roundup(args->width * ((args->bpp + 7) >> 3),
nv_dev->pitchAlignment);
args->size = args->height * args->pitch;
/* Core DRM requires gem object size to be aligned with PAGE_SIZE */
args->size = roundup(args->size, PAGE_SIZE);
if ((nv_nvkms_memory =
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
ret = -ENOMEM;
goto fail;
}
if (nv_dev->hasVideoMemory) {
pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
NvKmsSurfaceMemoryLayoutPitch,
args->size,
&compressible);
} else {
pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
NvKmsSurfaceMemoryLayoutPitch,
args->size,
&compressible);
}
if (pMemory == NULL) {
ret = -ENOMEM;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to allocate NvKmsKapiMemory for dumb object of size %llu",
args->size);
goto nvkms_alloc_memory_failed;
}
ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, args->size);
if (ret) {
goto nvkms_gem_obj_init_failed;
}
/* Always map dumb buffer memory up front. Clients are only expected
* to use dumb buffers for software rendering, so they're not much use
* without a CPU mapping.
*/
ret = __nv_drm_gem_nvkms_map(nv_dev, pMemory, nv_nvkms_memory, args->size);
if (ret) {
nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
goto fail;
}
return nv_drm_gem_handle_create_drop_reference(file_priv,
&nv_nvkms_memory->base,
&args->handle);
nvkms_gem_obj_init_failed:
nvKms->freeMemory(nv_dev->pDevice, pMemory);
nvkms_alloc_memory_failed:
nv_drm_free(nv_nvkms_memory);
fail:
return ret;
}
/*
 * DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY: import memory described by
 * the caller's NVKMS params blob into a new GEM object; the new handle is
 * returned in p->handle.  Returns 0 on success or a negative errno.
 */
int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
                                         void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_import_nvkms_memory_params *p = data;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
    struct NvKmsKapiMemory *pMemory;
    int ret;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        goto failed;
    }

    if ((nv_nvkms_memory =
            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
        ret = -ENOMEM;
        goto failed;
    }

    pMemory = nvKms->importMemory(nv_dev->pDevice,
                                  p->mem_size,
                                  p->nvkms_params_ptr,
                                  p->nvkms_params_size);

    if (pMemory == NULL) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to import NVKMS memory to GEM object");
        goto nvkms_import_memory_failed;
    }

    ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, p->mem_size);
    if (ret) {
        goto nvkms_gem_obj_init_failed;
    }

    /* Creates the userspace handle and drops this function's reference. */
    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_nvkms_memory->base,
                                                   &p->handle);

nvkms_gem_obj_init_failed:
    nvKms->freeMemory(nv_dev->pDevice, pMemory);

nvkms_import_memory_failed:
    nv_drm_free(nv_nvkms_memory);

failed:
    return ret;
}
/*
 * DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY: export the NvKmsKapiMemory of
 * an NVKMS GEM object through NVKMS, using the caller-supplied params
 * blob.  Returns 0 on success or a negative errno.
 */
int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
                                         void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_export_nvkms_memory_params *p = data;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
    int ret = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        goto done;
    }

    if (p->__pad != 0) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
        goto done;
    }

    /* Lookup takes a reference; dropped in the 'done' path below. */
    if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
                    dev,
                    filep,
                    p->handle)) == NULL) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup NVKMS gem object for export: 0x%08x",
            p->handle);
        goto done;
    }

    if (!nvKms->exportMemory(nv_dev->pDevice,
                             nv_nvkms_memory->base.pMemory,
                             p->nvkms_params_ptr,
                             p->nvkms_params_size)) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to export memory from NVKMS GEM object: 0x%08x", p->handle);
        goto done;
    }

done:
    if (nv_nvkms_memory != NULL) {
        nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
    }

    return ret;
}
/*
 * DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY: allocate NVKMS-managed memory
 * (video memory when present, otherwise system memory) with the requested
 * layout and wrap it in a new GEM object; the handle is returned in
 * p->handle and p->compressible reports the allocation's compressibility.
 *
 * Fix: the padding-field check previously jumped to 'failed' with ret
 * still 0, reporting success to userspace without creating a handle; it
 * now returns -EINVAL.
 */
int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
                                        void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
    struct NvKmsKapiMemory *pMemory;
    enum NvKmsSurfaceMemoryLayout layout;
    int ret = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        goto failed;
    }

    if (p->__pad != 0) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
        goto failed;
    }

    if ((nv_nvkms_memory =
            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
        ret = -ENOMEM;
        goto failed;
    }

    layout = p->block_linear ?
        NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch;

    if (nv_dev->hasVideoMemory) {
        pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
                                             layout,
                                             p->memory_size,
                                             &p->compressible);
    } else {
        pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
                                              layout,
                                              p->memory_size,
                                              &p->compressible);
    }

    if (pMemory == NULL) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev,
                           "Failed to allocate NVKMS memory for GEM object");
        goto nvkms_alloc_memory_failed;
    }

    ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory,
                                      p->memory_size);
    if (ret) {
        goto nvkms_gem_obj_init_failed;
    }

    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_nvkms_memory->base,
                                                   &p->handle);

nvkms_gem_obj_init_failed:
    nvKms->freeMemory(nv_dev->pDevice, pMemory);

nvkms_alloc_memory_failed:
    nv_drm_free(nv_nvkms_memory);

failed:
    return ret;
}
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
struct drm_device *dev,
const struct nv_drm_gem_object *nv_gem_src)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
const struct nv_drm_device *nv_dev_src;
const struct nv_drm_gem_nvkms_memory *nv_nvkms_memory_src;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
struct NvKmsKapiMemory *pMemory;
BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops);
nv_dev_src = to_nv_device(nv_gem_src->base.dev);
nv_nvkms_memory_src = to_nv_nvkms_memory_const(nv_gem_src);
if ((nv_nvkms_memory =
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
return NULL;
}
pMemory = nvKms->dupMemory(nv_dev->pDevice,
nv_dev_src->pDevice, nv_gem_src->pMemory);
if (pMemory == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to import NVKMS memory to GEM object");
goto nvkms_dup_memory_failed;
}
if (__nv_drm_nvkms_gem_obj_init(nv_dev,
nv_nvkms_memory,
pMemory,
nv_gem_src->base.size)) {
goto nvkms_gem_obj_init_failed;
}
return &nv_nvkms_memory->base.base;
nvkms_gem_obj_init_failed:
nvKms->freeMemory(nv_dev->pDevice, pMemory);
nvkms_dup_memory_failed:
nv_drm_free(nv_nvkms_memory);
return NULL;
}
/*
 * DRM dumb-buffer map hook: resolve the handle and produce the mmap
 * offset for the object, establishing the CPU mapping if needed.
 * Returns 0 on success or a negative errno.
 */
int nv_drm_dumb_map_offset(struct drm_file *file,
                           struct drm_device *dev, uint32_t handle,
                           uint64_t *offset)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        nv_drm_gem_object_nvkms_memory_lookup(dev, file, handle);
    int status;

    if (nv_nvkms_memory == NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for mapping: 0x%08x",
            handle);
        return -EINVAL;
    }

    status = __nv_drm_gem_map_nvkms_memory_offset(nv_dev,
                                                  &nv_nvkms_memory->base,
                                                  offset);

    /* Drop the reference taken by the lookup. */
    nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);

    return status;
}
/*
 * DRM dumb-buffer destroy hook: drop the userspace handle via the core
 * GEM helper.  Returns 0 on success or a negative errno.
 */
int nv_drm_dumb_destroy(struct drm_file *file,
                        struct drm_device *dev,
                        uint32_t handle)
{
    return drm_gem_handle_delete(file, handle);
}
#endif

View File

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__
#define __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-gem.h"
/* GEM wrapper for NVKMS-managed memory (vidmem or sysmem). */
struct nv_drm_gem_nvkms_memory {
    struct nv_drm_gem_object base;       /* must be first: container_of casts */

    bool physically_mapped;              /* CPU mapping established? */
    void *pPhysicalAddress;              /* NVKMS user-mapping address */
    void *pWriteCombinedIORemapAddress;  /* optional ioremap_wc of the above */

    struct page **pages;                 /* backing pages (may be NULL for vidmem) */
    unsigned long pages_count;           /* number of entries in 'pages' */
};
extern const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops;
/* Downcast a base GEM object to its NVKMS-memory wrapper; NULL-safe. */
static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory(
    struct nv_drm_gem_object *nv_gem)
{
    if (nv_gem == NULL) {
        return NULL;
    }

    return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base);
}
/* const-input variant of to_nv_nvkms_memory(); NULL-safe. */
static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const(
    const struct nv_drm_gem_object *nv_gem)
{
    if (nv_gem == NULL) {
        return NULL;
    }

    return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base);
}
/*
 * Look up a GEM handle and return it as an NVKMS-memory wrapper, or NULL
 * if the handle is invalid or refers to a different object type.  On
 * success the caller owns the reference taken by the lookup.
 */
static inline
struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup(
    struct drm_device *dev,
    struct drm_file *filp,
    u32 handle)
{
    struct nv_drm_gem_object *nv_gem =
        nv_drm_gem_object_lookup(dev, filp, handle);

    if (nv_gem == NULL) {
        return NULL;
    }

    if (nv_gem->ops != &nv_gem_nvkms_memory_ops) {
        /* Wrong object type: drop the lookup reference. */
        nv_drm_gem_object_unreference_unlocked(nv_gem);
        return NULL;
    }

    return to_nv_nvkms_memory(nv_gem);
}
int nv_drm_dumb_create(
struct drm_file *file_priv,
struct drm_device *dev, struct drm_mode_create_dumb *args);
int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_dumb_map_offset(struct drm_file *file,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
int nv_drm_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
struct drm_gem_object *nv_drm_gem_nvkms_prime_import(
struct drm_device *dev,
struct drm_gem_object *gem);
#endif
#endif /* __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ */

View File

@@ -0,0 +1,217 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"
#include "linux/dma-buf.h"
#include "linux/mm.h"
#include "nv-mm.h"
/*
 * Release a userspace-memory GEM object: unpin the user pages locked at
 * import time, then free the wrapper.  Called via nv_gem->ops->free.
 */
static inline
void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);

    nv_drm_unlock_user_pages(nv_user_memory->pages_count,
                             nv_user_memory->pages);

    nv_drm_free(nv_user_memory);
}
/* PRIME get_sg_table hook: build an sg_table over the pinned user pages. */
static struct sg_table *__nv_drm_gem_user_memory_prime_get_sg_table(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);

    return nv_drm_prime_pages_to_sg(nv_gem->base.dev,
                                    nv_user_memory->pages,
                                    nv_user_memory->pages_count);
}
/*
 * PRIME vmap hook: map the pinned user pages into a contiguous kernel
 * virtual range.  Returns the kernel address, or NULL on failure.
 */
static void *__nv_drm_gem_user_memory_prime_vmap(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_user_memory *nv_user_memory;

    nv_user_memory = to_nv_user_memory(nv_gem);

    return nv_drm_vmap(nv_user_memory->pages, nv_user_memory->pages_count);
}
/* PRIME vunmap hook: tear down a mapping made by the vmap hook above. */
static void __nv_drm_gem_user_memory_prime_vunmap(
    struct nv_drm_gem_object *gem,
    void *address)
{
    nv_drm_vunmap(address);
}
/*
 * mmap handler for userspace-memory GEM objects.  Pages are inserted on
 * fault via vm_insert_page(), so the vma is switched to VM_MIXEDMAP and
 * the PFN-map/IO flags are cleared.  MAP_PRIVATE mappings are rejected.
 */
static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
                                         struct vm_area_struct *vma)
{
    int ret = drm_gem_mmap_obj(&nv_gem->base,
                               drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);

    if (ret < 0) {
        return ret;
    }

    /*
     * Enforce that user-memory GEM mappings are MAP_SHARED, to prevent COW
     * with MAP_PRIVATE and VM_MIXEDMAP
     */
    if (!(vma->vm_flags & VM_SHARED)) {
        /*
         * NOTE(review): drm_gem_mmap_obj has already succeeded here;
         * confirm the kernel's mmap error path releases the reference it
         * took on the GEM object.
         */
        return -EINVAL;
    }

    vma->vm_flags &= ~VM_PFNMAP;
    vma->vm_flags &= ~VM_IO;
    vma->vm_flags |= VM_MIXEDMAP;

    return 0;
}
/*
 * Page-fault handler for mmap'd userspace-memory objects: insert the
 * corresponding pinned page into the faulting vma.
 *
 * Fix: the bounds check used '>', which permitted page_offset ==
 * pages_count and an out-of-bounds read of pages[]; valid indices are
 * [0, pages_count), so '>=' is required.
 */
static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
    struct nv_drm_gem_object *nv_gem,
    struct vm_area_struct *vma,
    struct vm_fault *vmf)
{
    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
    unsigned long address = nv_page_fault_va(vmf);
    struct drm_gem_object *gem = vma->vm_private_data;
    unsigned long page_offset;
    vm_fault_t ret;

    /* Offset of the faulting page within the object. */
    page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);

    BUG_ON(page_offset >= nv_user_memory->pages_count);

    /* Translate the errno from vm_insert_page() to a fault code. */
    ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
    switch (ret) {
        case 0:
        case -EBUSY:
            /*
             * EBUSY indicates that another thread already handled
             * the faulted range.
             */
            ret = VM_FAULT_NOPAGE;
            break;
        case -ENOMEM:
            ret = VM_FAULT_OOM;
            break;
        default:
            WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
            ret = VM_FAULT_SIGBUS;
            break;
    }

    return ret;
}
/*
 * Create (or look up) the fake mmap offset for a user-memory GEM object.
 * The device argument is unused; the generic GEM helper does the work.
 */
static int __nv_drm_gem_user_create_mmap_offset(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_object *nv_gem,
    uint64_t *offset)
{
    int status;

    (void)nv_dev;

    status = nv_drm_gem_create_mmap_offset(nv_gem, offset);

    return status;
}
/* GEM object vtable for objects backed by pinned userspace memory. */
const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops = {
    .free = __nv_drm_gem_user_memory_free,
    .prime_get_sg_table = __nv_drm_gem_user_memory_prime_get_sg_table,
    .prime_vmap = __nv_drm_gem_user_memory_prime_vmap,
    .prime_vunmap = __nv_drm_gem_user_memory_prime_vunmap,
    .mmap = __nv_drm_gem_user_memory_mmap,
    .handle_vma_fault = __nv_drm_gem_user_memory_handle_vma_fault,
    .create_mmap_offset = __nv_drm_gem_user_create_mmap_offset,
};
/*
 * DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY: pin a page-aligned range
 * of the caller's address space and wrap it in a new GEM object; the
 * handle is returned in params->handle.  Returns 0 on success or a
 * negative errno.
 */
int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
                                             void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_import_userspace_memory_params *params = data;

    struct nv_drm_gem_user_memory *nv_user_memory;

    struct page **pages = NULL;
    unsigned long pages_count = 0;

    int ret = 0;

    if ((params->size % PAGE_SIZE) != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Userspace memory 0x%llx size should be in a multiple of page "
            "size to create a gem object",
            params->address);
        return -EINVAL;
    }

    pages_count = params->size / PAGE_SIZE;

    /* Pins the pages; they remain pinned until the object's free hook. */
    ret = nv_drm_lock_user_pages(params->address, pages_count, &pages);

    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lock user pages for address 0x%llx: %d",
            params->address, ret);
        return ret;
    }

    if ((nv_user_memory =
            nv_drm_calloc(1, sizeof(*nv_user_memory))) == NULL) {
        ret = -ENOMEM;
        goto failed;
    }

    nv_user_memory->pages = pages;
    nv_user_memory->pages_count = pages_count;

    /* No NvKmsKapiMemory handle for user memory (pMemory == NULL). */
    nv_drm_gem_object_init(nv_dev,
                           &nv_user_memory->base,
                           &__nv_gem_user_memory_ops,
                           params->size,
                           NULL /* pMemory */);

    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_user_memory->base,
                                                   &params->handle);

failed:
    nv_drm_unlock_user_pages(pages_count, pages);

    return ret;
}
#endif

View File

@@ -0,0 +1,72 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_USER_MEMORY_H__
#define __NVIDIA_DRM_GEM_USER_MEMORY_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-gem.h"
/* GEM wrapper for pinned userspace memory. */
struct nv_drm_gem_user_memory {
    struct nv_drm_gem_object base;   /* must be first: container_of casts */
    struct page **pages;             /* pages pinned at import time */
    unsigned long pages_count;       /* number of entries in 'pages' */
};
extern const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops;
/* Downcast a base GEM object to its user-memory wrapper; NULL-safe. */
static inline struct nv_drm_gem_user_memory *to_nv_user_memory(
    struct nv_drm_gem_object *nv_gem)
{
    if (nv_gem == NULL) {
        return NULL;
    }

    return container_of(nv_gem, struct nv_drm_gem_user_memory, base);
}
int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
/*
 * Look up a GEM handle and return it as a user-memory wrapper, or NULL
 * if the handle is invalid or refers to a different object type.  On
 * success the caller owns the reference taken by the lookup.
 */
static inline
struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup(
    struct drm_device *dev,
    struct drm_file *filp,
    u32 handle)
{
    struct nv_drm_gem_object *nv_gem =
        nv_drm_gem_object_lookup(dev, filp, handle);

    if (nv_gem == NULL) {
        return NULL;
    }

    if (nv_gem->ops != &__nv_gem_user_memory_ops) {
        /* Wrong object type: drop the lookup reference. */
        nv_drm_gem_object_unreference_unlocked(nv_gem);
        return NULL;
    }

    return to_nv_user_memory(nv_gem);
}
#endif
#endif /* __NVIDIA_DRM_GEM_USER_MEMORY_H__ */

View File

@@ -0,0 +1,399 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-prime-fence.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-dma-resv-helper.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#endif
#include "linux/dma-buf.h"
#include "nv-mm.h"
/*
 * Common free callback for all nvidia-drm GEM object types (installed
 * as .free in nv_drm_gem_funcs below).
 *
 * Releases the DRM core bookkeeping first, then hands off to the
 * object's type-specific ops->free(), which is expected to release the
 * containing structure itself.
 */
void nv_drm_gem_free(struct drm_gem_object *gem)
{
    struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);

    /* Cleanup core gem object */
    drm_gem_object_release(&nv_gem->base);

#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
    /*
     * The reservation object is embedded in nv_gem, so it must be torn
     * down before ops->free() releases the memory containing it.
     */
    nv_dma_resv_fini(&nv_gem->resv);
#endif

    /* Type-specific teardown (presumably frees nv_gem itself). */
    nv_gem->ops->free(nv_gem);
}
#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) && \
defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
/*
* The 'dma_buf_map' structure is renamed to 'iosys_map' by the commit
* 7938f4218168 ("dma-buf-map: Rename to iosys-map").
*/
#if defined(NV_LINUX_IOSYS_MAP_H_PRESENT)
typedef struct iosys_map nv_sysio_map_t;
#else
typedef struct dma_buf_map nv_sysio_map_t;
#endif
/*
 * drm_gem_object_funcs::vmap hook for kernels whose vmap callback takes
 * a map-structure argument.
 *
 * Bug fix: nv_drm_gem_prime_vmap() signals "unsupported" with an
 * ERR_PTR() value (see its ERR_PTR(-ENOTSUPP) return in this file), not
 * NULL, and a type-specific prime_vmap() callback may do the same.  The
 * original code only tested for NULL, so an ERR_PTR() result was stored
 * in map->vaddr and reported as success (return 0), leaving the caller
 * to dereference an invalid pointer.  Check both failure encodings
 * before publishing the mapping.
 */
static int nv_drm_gem_vmap(struct drm_gem_object *gem,
                           nv_sysio_map_t *map)
{
    void *vaddr = nv_drm_gem_prime_vmap(gem);

    if (vaddr == NULL) {
        return -ENOMEM;
    }

    if (IS_ERR(vaddr)) {
        /*
         * IS_ERR()/PTR_ERR() come from <linux/err.h>, already in scope
         * via the DRM headers (ERR_PTR() is used elsewhere in this file).
         */
        return PTR_ERR(vaddr);
    }

    map->vaddr = vaddr;
    map->is_iomem = true;
    return 0;
}
/*
 * drm_gem_object_funcs::vunmap hook for kernels whose vunmap callback
 * takes a map-structure argument: tear down the mapping recorded in
 * @map and clear its vaddr.
 */
static void nv_drm_gem_vunmap(struct drm_gem_object *gem,
                              nv_sysio_map_t *map)
{
    void *vaddr = map->vaddr;

    nv_drm_gem_prime_vunmap(gem, vaddr);
    map->vaddr = NULL;
}
#endif
#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) || \
    !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
/*
 * Per-object callback table, used on kernels where the drm_driver
 * either has no .gem_free_object hook or no driver-wide PRIME
 * callbacks.  In the latter case the PRIME/vmap/vm_ops hooks are
 * provided here instead.
 */
static struct drm_gem_object_funcs nv_drm_gem_funcs = {
    .free = nv_drm_gem_free,
    .get_sg_table = nv_drm_gem_prime_get_sg_table,
#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
    .export = drm_gem_prime_export,
#if defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
    /* Newer kernels pass a map structure to vmap/vunmap. */
    .vmap = nv_drm_gem_vmap,
    .vunmap = nv_drm_gem_vunmap,
#else
    .vmap = nv_drm_gem_prime_vmap,
    .vunmap = nv_drm_gem_prime_vunmap,
#endif
    .vm_ops = &nv_drm_gem_vma_ops,
#endif
};
#endif
/*
 * Initialize the common portion of an nvidia-drm GEM object.
 *
 * @nv_dev:  Owning nvidia-drm device.
 * @nv_gem:  Embedded base object to initialize (caller allocated).
 * @ops:     Type-specific callback table, stored for later dispatch.
 * @size:    Object size passed to drm_gem_private_object_init().
 * @pMemory: Backing NvKmsKapiMemory handle (stored; may be consumed by
 *           the type-specific code — not used directly here).
 */
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
                            struct nv_drm_gem_object *nv_gem,
                            const struct nv_drm_gem_object_funcs * const ops,
                            size_t size,
                            struct NvKmsKapiMemory *pMemory)
{
    struct drm_device *dev = nv_dev->dev;

    nv_gem->nv_dev = nv_dev;
    nv_gem->ops = ops;
    nv_gem->pMemory = pMemory;

    /* Initialize the gem object */
#if defined(NV_DRM_FENCE_AVAILABLE)
    nv_dma_resv_init(&nv_gem->resv);
#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
    /* Point the core GEM object at our embedded reservation object. */
    nv_gem->base.resv = &nv_gem->resv;
#endif
#endif

#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
    /* No driver-wide free hook: install the per-object funcs table. */
    nv_gem->base.funcs = &nv_drm_gem_funcs;
#endif

    drm_gem_private_object_init(dev, &nv_gem->base, size);
}
/*
 * Import a dma-buf into @dev.
 *
 * When the dma-buf was exported by this same module (possibly on a
 * different NV device), try the exporting object's prime_dup() fast
 * path before falling back to the generic dma-buf attach path.
 */
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
                                               struct dma_buf *dma_buf)
{
#if defined(NV_DMA_BUF_OWNER_PRESENT)
    struct drm_gem_object *gem_dst;
    struct nv_drm_gem_object *nv_gem_src;

    /* A matching module owner means the dma-buf came from nvidia-drm. */
    if (dma_buf->owner == dev->driver->fops->owner) {
        nv_gem_src = to_nv_gem_object(dma_buf->priv);

        if (nv_gem_src->base.dev != dev &&
            nv_gem_src->ops->prime_dup != NULL) {
            /*
             * If we're importing from another NV device, try to handle the
             * import internally rather than attaching through the dma-buf
             * mechanisms. Importing from the same device is even easier,
             * and drm_gem_prime_import() handles that just fine.
             */
            gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src);
            if (gem_dst)
                return gem_dst;
        }
    }
#endif /* NV_DMA_BUF_OWNER_PRESENT */

    return drm_gem_prime_import(dev, dma_buf);
}
/*
 * PRIME export: build a scatter-gather table describing @gem's backing
 * memory.  Returns ERR_PTR(-ENOTSUPP) when the object type provides no
 * such operation.
 */
struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem)
{
    struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);

    if (nv_gem->ops->prime_get_sg_table == NULL) {
        return ERR_PTR(-ENOTSUPP);
    }

    return nv_gem->ops->prime_get_sg_table(nv_gem);
}
/*
 * PRIME export: map @gem into kernel address space via the object's
 * prime_vmap() callback.  Returns ERR_PTR(-ENOTSUPP) when the object
 * type provides no such operation.
 */
void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem)
{
    struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);

    if (nv_gem->ops->prime_vmap == NULL) {
        return ERR_PTR(-ENOTSUPP);
    }

    return nv_gem->ops->prime_vmap(nv_gem);
}
/*
 * PRIME export: undo a prime_vmap() mapping of @gem.  Silently ignored
 * when the object type provides no prime_vunmap() operation.
 */
void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address)
{
    struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);

    if (nv_gem->ops->prime_vunmap == NULL) {
        return;
    }

    nv_gem->ops->prime_vunmap(nv_gem, address);
}
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
/*
 * Return the reservation object embedded in the nvidia-drm GEM object;
 * used on kernels where the drm_driver supplies a gem_prime_res_obj hook.
 */
nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj)
{
    struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj);

    return &nv_gem->resv;
}
#endif
/*
 * Ioctl handler: resolve params->handle to a GEM object and ask the
 * object type to produce a fake mmap offset in params->offset.
 *
 * Returns -EINVAL when the handle is unknown or the object type does
 * not support mapping; otherwise the create_mmap_offset() result.
 */
int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_map_offset_params *params = data;
    struct nv_drm_gem_object *nv_gem;
    int ret = -EINVAL;

    nv_gem = nv_drm_gem_object_lookup(dev, filep, params->handle);

    if (nv_gem == NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for map: 0x%08x",
            params->handle);
        return -EINVAL;
    }

    if (nv_gem->ops->create_mmap_offset == NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Gem object type does not support mapping: 0x%08x",
            params->handle);
    } else {
        ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &params->offset);
    }

    /* Drop the reference acquired by the lookup above. */
    nv_drm_gem_object_unreference_unlocked(nv_gem);

    return ret;
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
 * file_operations mmap handler.
 *
 * Mirrors the structure of the DRM core's GEM mmap path: resolve the
 * VMA page offset to a GEM object while holding the offset-manager
 * lookup lock, take a reference under that lock, then delegate the
 * actual mapping to the object's ops->mmap() callback.
 */
int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct drm_file *priv = file->private_data;
    struct drm_device *dev = priv->minor->dev;
    struct drm_gem_object *obj = NULL;
    struct drm_vma_offset_node *node;
    int ret = 0;
    struct nv_drm_gem_object *nv_gem;

    drm_vma_offset_lock_lookup(dev->vma_offset_manager);
    node = nv_drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                 vma->vm_pgoff, vma_pages(vma));
    if (likely(node)) {
        obj = container_of(node, struct drm_gem_object, vma_node);
        /*
         * When the object is being freed, after it hits 0-refcnt it proceeds
         * to tear down the object. In the process it will attempt to remove
         * the VMA offset and so acquire this mgr->vm_lock. Therefore if we
         * find an object with a 0-refcnt that matches our range, we know it is
         * in the process of being destroyed and will be freed as soon as we
         * release the lock - so we have to check for the 0-refcnted object and
         * treat it as invalid.
         */
        if (!kref_get_unless_zero(&obj->refcount))
            obj = NULL;
    }
    drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

    if (!obj)
        return -EINVAL;

    nv_gem = to_nv_gem_object(obj);

    /* Object types without an mmap() callback cannot be mapped. */
    if (nv_gem->ops->mmap == NULL) {
        ret = -EINVAL;
        goto done;
    }

    /* Reject callers that never obtained a handle to this object. */
    if (!nv_drm_vma_node_is_allowed(node, file)) {
        ret = -EACCES;
        goto done;
    }

#if defined(NV_DRM_VMA_OFFSET_NODE_HAS_READONLY)
    /* Honor read-only offset nodes: refuse writable mappings. */
    if (node->readonly) {
        if (vma->vm_flags & VM_WRITE) {
            ret = -EINVAL;
            goto done;
        }

        vma->vm_flags &= ~VM_MAYWRITE;
    }
#endif

    ret = nv_gem->ops->mmap(nv_gem, vma);

done:
    /* Drop the reference taken during the locked lookup above. */
    nv_drm_gem_object_unreference_unlocked(nv_gem);

    return ret;
}
#endif
/*
 * Ioctl handler: classify the GEM object behind p->handle by attempting
 * each type-checked lookup in turn (dma-buf, NVKMS memory, user
 * memory).  Reports the result through p->object_type, falling back to
 * NV_GEM_OBJECT_UNKNOWN when no lookup matched.
 *
 * Returns -EINVAL when the device lacks DRIVER_MODESET, 0 otherwise.
 */
int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *filep)
{
    struct drm_nvidia_gem_identify_object_params *p = data;
    struct nv_drm_gem_dma_buf *nv_dma_buf;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
    struct nv_drm_gem_user_memory *nv_user_memory;
    struct nv_drm_gem_object *nv_gem = NULL;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -EINVAL;
    }

    nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle);
    if (nv_dma_buf) {
        p->object_type = NV_GEM_OBJECT_DMABUF;
        nv_gem = &nv_dma_buf->base;
        goto done;
    }

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(dev, filep, p->handle);
    if (nv_nvkms_memory) {
        p->object_type = NV_GEM_OBJECT_NVKMS;
        nv_gem = &nv_nvkms_memory->base;
        goto done;
    }
#endif

    nv_user_memory = nv_drm_gem_object_user_memory_lookup(dev, filep, p->handle);
    if (nv_user_memory) {
        p->object_type = NV_GEM_OBJECT_USERMEMORY;
        nv_gem = &nv_user_memory->base;
        goto done;
    }

    p->object_type = NV_GEM_OBJECT_UNKNOWN;

done:
    /* Each successful lookup returned a reference; release it. */
    if (nv_gem) {
        nv_drm_gem_object_unreference_unlocked(nv_gem);
    }

    return 0;
}
/* XXX Move these vma operations to os layer */
static vm_fault_t __nv_drm_vma_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct drm_gem_object *gem = vma->vm_private_data;
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
if (!nv_gem) {
return VM_FAULT_SIGBUS;
}
return nv_gem->ops->handle_vma_fault(nv_gem, vma, vmf);
}
/*
 * Note that nv_drm_vma_fault() can be called for different or same
 * ranges of the same drm_gem_object simultaneously.
 */
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
/* Kernels where vm_operations_struct::fault lost its vma argument. */
static vm_fault_t nv_drm_vma_fault(struct vm_fault *vmf)
{
    return __nv_drm_vma_fault(vmf->vma, vmf);
}
#else
static vm_fault_t nv_drm_vma_fault(struct vm_area_struct *vma,
                                   struct vm_fault *vmf)
{
    return __nv_drm_vma_fault(vma, vmf);
}
#endif
/*
 * VMA operations shared by all nvidia-drm GEM mappings: open/close use
 * the DRM core's per-VMA GEM reference handlers, and faults are
 * dispatched to the object's handle_vma_fault() callback.
 */
const struct vm_operations_struct nv_drm_gem_vma_ops = {
    .open  = drm_gem_vm_open,
    .close = drm_gem_vm_close,
    .fault = nv_drm_vma_fault,
};
#endif /* NV_DRM_AVAILABLE */

View File

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_GEM_H__
#define __NVIDIA_DRM_GEM_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#include "nvkms-kapi.h"
#include "nv-mm.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#include "nvidia-dma-resv-helper.h"
#endif
struct nv_drm_gem_object;

/*
 * Per-type operations table for nvidia-drm GEM objects.  Optional
 * callbacks may be NULL; the dispatchers in nvidia-drm-gem.c return
 * -ENOTSUPP / -EINVAL (or no-op) in that case.
 */
struct nv_drm_gem_object_funcs {
    /* Final teardown; invoked from nv_drm_gem_free(). */
    void (*free)(struct nv_drm_gem_object *nv_gem);

    /* PRIME export: scatter-gather table of the backing memory. */
    struct sg_table *(*prime_get_sg_table)(struct nv_drm_gem_object *nv_gem);

    /* PRIME export: kernel CPU mapping of the object. */
    void *(*prime_vmap)(struct nv_drm_gem_object *nv_gem);
    void (*prime_vunmap)(struct nv_drm_gem_object *nv_gem, void *address);

    /* Optional fast path used by nv_drm_gem_prime_import() to duplicate
     * the object onto another NV device without dma-buf attachment. */
    struct drm_gem_object *(*prime_dup)(struct drm_device *dev,
                                        const struct nv_drm_gem_object *nv_gem_src);

    /* mmap support; dispatched from nv_drm_mmap(). */
    int (*mmap)(struct nv_drm_gem_object *nv_gem, struct vm_area_struct *vma);

    /* Page-fault handler for mapped ranges; see __nv_drm_vma_fault(). */
    vm_fault_t (*handle_vma_fault)(struct nv_drm_gem_object *nv_gem,
                                   struct vm_area_struct *vma,
                                   struct vm_fault *vmf);

    /* Produce the fake mmap offset reported by the map-offset ioctl. */
    int (*create_mmap_offset)(struct nv_drm_device *nv_dev,
                              struct nv_drm_gem_object *nv_gem,
                              uint64_t *offset);
};

/*
 * Base object embedded in every nvidia-drm GEM object type.
 */
struct nv_drm_gem_object {
    struct drm_gem_object base;                /* embedded DRM core object */

    struct nv_drm_device *nv_dev;              /* owning device */
    const struct nv_drm_gem_object_funcs *ops; /* per-type callbacks */

    struct NvKmsKapiMemory *pMemory;           /* backing NVKMS memory handle */

#if defined(NV_DRM_FENCE_AVAILABLE)
    /* Embedded reservation object; on kernels where drm_gem_object has
     * its own resv pointer, base.resv is pointed at this field. */
    nv_dma_resv_t resv;
#endif
};
/*
 * Convert a core drm_gem_object pointer into the nv_drm_gem_object that
 * embeds it.  A NULL input maps to a NULL result.
 */
static inline struct nv_drm_gem_object *to_nv_gem_object(
    struct drm_gem_object *gem)
{
    if (gem == NULL) {
        return NULL;
    }

    return container_of(gem, struct nv_drm_gem_object, base);
}
/*
 * drm_gem_object_{get/put}() added by commit
 * e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and
 * drm_gem_object_{reference/unreference}() removed by commit
 * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
 */

/*
 * Drop a reference to @nv_gem without any locking requirement on the
 * caller, picking whichever put/unreference flavor this kernel offers.
 */
static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)

#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT)
    drm_gem_object_put_unlocked(&nv_gem->base);
#else
    drm_gem_object_put(&nv_gem->base);
#endif

#else
    drm_gem_object_unreference_unlocked(&nv_gem->base);
#endif
}
/*
 * Drop a reference to @nv_gem, using whichever put/unreference API this
 * kernel provides.
 */
static inline void
nv_drm_gem_object_unreference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
    drm_gem_object_put(&nv_gem->base);
#else
    drm_gem_object_unreference(&nv_gem->base);
#endif
}
/*
 * Create a userspace handle for @nv_gem and unconditionally drop the
 * caller's allocation reference: on success the new handle holds a
 * reference, on failure the object is released.
 */
static inline int nv_drm_gem_handle_create_drop_reference(
    struct drm_file *file_priv,
    struct nv_drm_gem_object *nv_gem,
    uint32_t *handle)
{
    int ret;

    ret = drm_gem_handle_create(file_priv, &nv_gem->base, handle);

    /* drop reference from allocate - handle holds it now */
    nv_drm_gem_object_unreference_unlocked(nv_gem);

    return ret;
}
/*
 * Allocate a fake mmap offset for @nv_gem and return it through
 * @offset.  Logs and propagates any error from the DRM core.
 */
static inline int nv_drm_gem_create_mmap_offset(
    struct nv_drm_gem_object *nv_gem,
    uint64_t *offset)
{
    int ret = drm_gem_create_mmap_offset(&nv_gem->base);

    if (ret < 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_gem->nv_dev,
            "drm_gem_create_mmap_offset failed with error code %d",
            ret);
        return ret;
    }

    *offset = drm_vma_node_offset_addr(&nv_gem->base.vma_node);

    return ret;
}
void nv_drm_gem_free(struct drm_gem_object *gem);
/*
 * Resolve @handle to its nvidia-drm GEM object, taking a reference the
 * caller must drop.  drm_gem_object_lookup()'s signature changed across
 * kernel versions (with or without the dev argument); conftest reports
 * which variant applies.  Returns NULL when the handle is not found.
 */
static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup(
    struct drm_device *dev,
    struct drm_file *filp,
    u32 handle)
{
#if (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 3)
    return to_nv_gem_object(drm_gem_object_lookup(dev, filp, handle));
#elif (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 2)
    return to_nv_gem_object(drm_gem_object_lookup(filp, handle));
#else
#error "Unknown argument count of drm_gem_object_lookup()"
#endif
}
/*
 * Create a userspace handle for @nv_gem; unlike the _drop_reference
 * variant above, the caller keeps its own reference to the object.
 */
static inline int nv_drm_gem_handle_create(struct drm_file *filp,
                                           struct nv_drm_gem_object *nv_gem,
                                           uint32_t *handle)
{
    struct drm_gem_object *gem = &nv_gem->base;

    return drm_gem_handle_create(filp, gem, handle);
}
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,
const struct nv_drm_gem_object_funcs * const ops,
size_t size,
struct NvKmsKapiMemory *pMemory);
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem);
void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem);
void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address);
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj);
#endif
extern const struct vm_operations_struct nv_drm_gem_vma_ops;
int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_mmap(struct file *file, struct vm_area_struct *vma);
int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_GEM_H__ */

View File

@@ -0,0 +1,191 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* This file contains snapshots of DRM helper functions from the
* Linux kernel which are used by nvidia-drm.ko if the target kernel
* predates the helper function. Having these functions consistently
* present simplifies nvidia-drm.ko source.
*/
#include "nvidia-drm-helper.h"
#include "nvmisc.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
#include <drm/drm_atomic_uapi.h>
#endif
/*
 * Drop a framebuffer reference, using drm_framebuffer_put() when the
 * kernel has it and the older drm_framebuffer_unreference() otherwise.
 */
static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb)
{
#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
    drm_framebuffer_put(fb);
#else
    drm_framebuffer_unreference(fb);
#endif
}
/*
* drm_atomic_helper_disable_all() has been added by commit
* 1494276000db789c6d2acd85747be4707051c801, which is Signed-off-by:
* Thierry Reding <treding@nvidia.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* drm_atomic_helper_disable_all() is copied from
* linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use
* nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs,
* use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to loop
* over all modeset object states, and use drm_atomic_state_free() if
* drm_atomic_state_put() is not available.
*
* drm_atomic_helper_disable_all() is copied from
* linux/drivers/gpu/drm/drm_atomic_helper.c @
* 49d70aeaeca8f62b72b7712ecd1e29619a445866, which has the following
* copyright and license information:
*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
/*
 * Snapshot of drm_atomic_helper_disable_all(): build and commit an
 * atomic state that deactivates every CRTC and detaches all affected
 * planes and connectors.  See the provenance comment above for the
 * upstream origin and local modifications.
 */
int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
                                     struct drm_modeset_acquire_ctx *ctx)
{
    struct drm_atomic_state *state;
    struct drm_connector_state *conn_state;
    struct drm_connector *conn;
    struct drm_plane_state *plane_state;
    struct drm_plane *plane;
    struct drm_crtc_state *crtc_state;
    struct drm_crtc *crtc;
    unsigned plane_mask = 0;
    int ret, i;

    state = drm_atomic_state_alloc(dev);
    if (!state)
        return -ENOMEM;

    state->acquire_ctx = ctx;

    /* Deactivate every CRTC and pull its planes/connectors into @state. */
    nv_drm_for_each_crtc(crtc, dev) {
        crtc_state = drm_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state)) {
            ret = PTR_ERR(crtc_state);
            goto free;
        }

        crtc_state->active = false;

        ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
        if (ret < 0)
            goto free;

        ret = drm_atomic_add_affected_planes(state, crtc);
        if (ret < 0)
            goto free;

        ret = drm_atomic_add_affected_connectors(state, crtc);
        if (ret < 0)
            goto free;
    }

    /* Detach every affected connector from its CRTC. */
    nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
        ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
        if (ret < 0)
            goto free;
    }

    /* Detach every affected plane and drop its framebuffer; record the
     * touched planes so their legacy fb/crtc pointers can be fixed up
     * after the commit. */
    nv_drm_for_each_plane_in_state(state, plane, plane_state, i) {
        ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
        if (ret < 0)
            goto free;

        drm_atomic_set_fb_for_plane(plane_state, NULL);
        plane_mask |= NVBIT(drm_plane_index(plane));
        plane->old_fb = plane->fb;
    }

    ret = drm_atomic_commit(state);

free:
    /* On successful commit, sync the legacy plane->fb/crtc pointers and
     * release the framebuffer references the planes previously held. */
    if (plane_mask) {
        drm_for_each_plane_mask(plane, dev, plane_mask) {
            if (ret == 0) {
                plane->fb = NULL;
                plane->crtc = NULL;

                WARN_ON(plane->state->fb);
                WARN_ON(plane->state->crtc);

                if (plane->old_fb)
                    __nv_drm_framebuffer_put(plane->old_fb);
            }
            plane->old_fb = NULL;
        }
    }

#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
    drm_atomic_state_put(state);
#else
    if (ret != 0) {
        drm_atomic_state_free(state);
    } else {
        /*
         * In case of success, drm_atomic_commit() takes care to cleanup and
         * free @state.
         *
         * Comment placed above drm_atomic_commit() says: The caller must not
         * free or in any other way access @state. If the function fails then
         * the caller must clean up @state itself.
         */
    }
#endif

    return ret;
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

View File

@@ -0,0 +1,584 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_HELPER_H__
#define __NVIDIA_DRM_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
/*
* drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
* (2017-09-26) and drm_dev_unref() is removed by
* ba1d345401476a5f7fbad622607c5a1f95e59b31 (2018-11-15).
*
* drm_dev_unref() has been added and drm_dev_free() removed by commit -
*
* 2014-01-29: 099d1c290e2ebc3b798961a6c177c3aef5f0b789
*/
/*
 * Release a drm_device reference using whichever of drm_dev_put() /
 * drm_dev_unref() / drm_dev_free() this kernel provides (see the
 * provenance comment above).
 */
static inline void nv_drm_dev_free(struct drm_device *dev)
{
#if defined(NV_DRM_DEV_PUT_PRESENT)
    drm_dev_put(dev);
#elif defined(NV_DRM_DEV_UNREF_PRESENT)
    drm_dev_unref(dev);
#else
    drm_dev_free(dev);
#endif
}
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
/*
 * Wrap drm_prime_pages_to_sg(), which gained a struct drm_device
 * argument in newer kernels; @dev is ignored on older ones.
 */
static inline struct sg_table*
nv_drm_prime_pages_to_sg(struct drm_device *dev,
                         struct page **pages, unsigned int nr_pages)
{
#if defined(NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG)
    return drm_prime_pages_to_sg(dev, pages, nr_pages);
#else
    return drm_prime_pages_to_sg(pages, nr_pages);
#endif
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
* drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
* drm_for_each_encoder and drm_for_each_plane() were added by kernel
* commit 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 which was
* Signed-off-by:
* Daniel Vetter <daniel.vetter@intel.com>
* drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
* drm_for_each_encoder and drm_for_each_plane() are copied from
* include/drm/drm_crtc @
* 6295d607ad34ee4e43aab3f20714c2ef7a6adea1
* which has the following copyright and license information:
*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_crtc.h>
/*
 * nv_drm_for_each_*() iteration wrappers: defer to the kernel's
 * drm_for_each_*() macros when present, otherwise fall back to
 * open-coded iteration over the mode_config lists.
 */
#if defined(drm_for_each_plane)
#define nv_drm_for_each_plane(plane, dev) \
    drm_for_each_plane(plane, dev)
#else
#define nv_drm_for_each_plane(plane, dev) \
    list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
#endif

#if defined(drm_for_each_crtc)
#define nv_drm_for_each_crtc(crtc, dev) \
    drm_for_each_crtc(crtc, dev)
#else
#define nv_drm_for_each_crtc(crtc, dev) \
    list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
#endif

/*
 * Connector iteration: the conn_iter argument is only consumed by the
 * drm_connector_list_iter flavor; the older flavors ignore it.
 */
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
    drm_for_each_connector_iter(connector, conn_iter)
#elif defined(drm_for_each_connector)
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
    drm_for_each_connector(connector, dev)
#else
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
    WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); \
    list_for_each_entry(connector, &(dev)->mode_config.connector_list, head)
#endif

#if defined(drm_for_each_encoder)
#define nv_drm_for_each_encoder(encoder, dev) \
    drm_for_each_encoder(encoder, dev)
#else
#define nv_drm_for_each_encoder(encoder, dev) \
    list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
#endif

#if defined(drm_for_each_fb)
#define nv_drm_for_each_fb(fb, dev) \
    drm_for_each_fb(fb, dev)
#else
#define nv_drm_for_each_fb(fb, dev) \
    list_for_each_entry(fb, &(dev)->mode_config.fb_list, head)
#endif
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
/*
* for_each_connector_in_state(), for_each_crtc_in_state() and
* for_each_plane_in_state() were added by kernel commit
* df63b9994eaf942afcdb946d27a28661d7dfbf2a which was Signed-off-by:
* Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* for_each_connector_in_state(), for_each_crtc_in_state() and
* for_each_plane_in_state() were copied from
* include/drm/drm_atomic.h @
* 21a01abbe32a3cbeb903378a24e504bfd9fe0648
* which has the following copyright and license information:
*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
/**
* nv_drm_for_each_connector_in_state - iterate over all connectors in an
* atomic update
* @__state: &struct drm_atomic_state pointer
* @connector: &struct drm_connector iteration cursor
* @connector_state: &struct drm_connector_state iteration cursor
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all connectors in an atomic update. Note that before the
* software state is committed (by calling drm_atomic_helper_swap_state(), this
* points to the new state, while afterwards it points to the old state. Due to
* this tricky confusion this macro is deprecated.
*/
#if !defined(for_each_connector_in_state)
/* Open-coded fallback for kernels without for_each_connector_in_state(). */
#define nv_drm_for_each_connector_in_state(__state, \
        connector, connector_state, __i) \
    for ((__i) = 0; \
         (__i) < (__state)->num_connector && \
         ((connector) = (__state)->connectors[__i].ptr, \
          (connector_state) = (__state)->connectors[__i].state, 1); \
         (__i)++) \
        for_each_if (connector)
#else
/* Kernel provides the iterator; defer to it. */
#define nv_drm_for_each_connector_in_state(__state, \
        connector, connector_state, __i) \
    for_each_connector_in_state(__state, connector, connector_state, __i)
#endif

/**
 * nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @crtc: &struct drm_crtc iteration cursor
 * @crtc_state: &struct drm_crtc_state iteration cursor
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all CRTCs in an atomic update. Note that before the
 * software state is committed (by calling drm_atomic_helper_swap_state(), this
 * points to the new state, while afterwards it points to the old state. Due to
 * this tricky confusion this macro is deprecated.
 */
#if !defined(for_each_crtc_in_state)
/* Open-coded fallback for kernels without for_each_crtc_in_state(). */
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
    for ((__i) = 0; \
         (__i) < (__state)->dev->mode_config.num_crtc && \
         ((crtc) = (__state)->crtcs[__i].ptr, \
          (crtc_state) = (__state)->crtcs[__i].state, 1); \
         (__i)++) \
        for_each_if (crtc_state)
#else
/* Kernel provides the iterator; defer to it. */
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
    for_each_crtc_in_state(__state, crtc, crtc_state, __i)
#endif

/**
 * nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @plane: &struct drm_plane iteration cursor
 * @plane_state: &struct drm_plane_state iteration cursor
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all planes in an atomic update. Note that before the
 * software state is committed (by calling drm_atomic_helper_swap_state(), this
 * points to the new state, while afterwards it points to the old state. Due to
 * this tricky confusion this macro is deprecated.
 */
#if !defined(for_each_plane_in_state)
/* Open-coded fallback for kernels without for_each_plane_in_state(). */
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
    for ((__i) = 0; \
         (__i) < (__state)->dev->mode_config.num_total_plane && \
         ((plane) = (__state)->planes[__i].ptr, \
          (plane_state) = (__state)->planes[__i].state, 1); \
         (__i)++) \
        for_each_if (plane_state)
#else
/* Kernel provides the iterator; defer to it. */
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
    for_each_plane_in_state(__state, plane, plane_state, __i)
#endif
/*
 * Wrap drm_crtc_find(): newer kernels take an additional file_priv
 * argument, for which NULL is passed here.
 */
static inline struct drm_crtc *nv_drm_crtc_find(struct drm_device *dev,
    uint32_t id)
{
#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
    return drm_crtc_find(dev, NULL /* file_priv */, id);
#else
    return drm_crtc_find(dev, id);
#endif
}
/*
 * Wrap drm_encoder_find(): newer kernels take an additional file_priv
 * argument, for which NULL is passed here.
 */
static inline struct drm_encoder *nv_drm_encoder_find(struct drm_device *dev,
    uint32_t id)
{
#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
    return drm_encoder_find(dev, NULL /* file_priv */, id);
#else
    return drm_encoder_find(dev, id);
#endif
}
/*
* drm_connector_for_each_possible_encoder() is added by commit
* 83aefbb887b59df0b3520965c3701e01deacfc52 which was Signed-off-by:
* Ville Syrjälä <ville.syrjala@linux.intel.com>
*
* drm_connector_for_each_possible_encoder() is copied from
* include/drm/drm_connector.h and modified to use nv_drm_encoder_find()
* instead of drm_encoder_find().
*
* drm_connector_for_each_possible_encoder() is copied from
* include/drm/drm_connector.h @
* 83aefbb887b59df0b3520965c3701e01deacfc52
* which has the following copyright and license information:
*
* Copyright (c) 2016 Intel Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
/**
* nv_drm_connector_for_each_possible_encoder - iterate connector's possible
* encoders
* @connector: &struct drm_connector pointer
* @encoder: &struct drm_encoder pointer used as cursor
* @__i: int iteration cursor, for macro-internal use
*/
#if !defined(drm_connector_for_each_possible_encoder)

#if !defined(for_each_if)
/* Execute the loop body only when @condition holds. */
#define for_each_if(condition) if (!(condition)) {} else
#endif

/*
 * Older kernels: connectors carry a fixed-size encoder_ids[] array.
 * Iterate the non-zero entries and resolve each id to an encoder.
 */
#define __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) \
    for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
         (connector)->encoder_ids[(__i)] != 0; (__i)++) \
        for_each_if((encoder) = \
                    nv_drm_encoder_find((connector)->dev, \
                                        (connector)->encoder_ids[(__i)]))

/*
 * Open a scope so callers need not declare the index cursor; every use
 * must be closed with nv_drm_connector_for_each_possible_encoder_end.
 */
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
    { \
        unsigned int __i; \
        __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i)

#define nv_drm_connector_for_each_possible_encoder_end \
    }

#else
#if NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT == 3
/* Kernel iterator still takes an index cursor; hide it inside a scope. */
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
    { \
        unsigned int __i; \
        drm_connector_for_each_possible_encoder(connector, encoder, __i)

#define nv_drm_connector_for_each_possible_encoder_end \
    }
#else
/* Two-argument kernel iterator: no extra scope or end marker needed. */
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
    drm_connector_for_each_possible_encoder(connector, encoder)

#define nv_drm_connector_for_each_possible_encoder_end
#endif
#endif
/*
 * Attach @encoder to @connector, abstracting the rename of
 * drm_mode_connector_attach_encoder() -> drm_connector_attach_encoder().
 * Returns 0 on success or a negative error code from the DRM core.
 */
static inline int
nv_drm_connector_attach_encoder(struct drm_connector *connector,
                                struct drm_encoder *encoder)
{
#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
    return drm_mode_connector_attach_encoder(connector, encoder);
#else
    return drm_connector_attach_encoder(connector, encoder);
#endif
}
/*
 * Update the connector's EDID property, abstracting the rename of
 * drm_mode_connector_update_edid_property() ->
 * drm_connector_update_edid_property(). @edid may be NULL to clear it.
 * Returns 0 on success or a negative error code from the DRM core.
 */
static inline int
nv_drm_connector_update_edid_property(struct drm_connector *connector,
                                      const struct edid *edid)
{
#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
    return drm_mode_connector_update_edid_property(connector, edid);
#else
    return drm_connector_update_edid_property(connector, edid);
#endif
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
#include <drm/drm_connector.h>
/*
 * Begin a connector list iteration, abstracting the rename of
 * drm_connector_list_iter_get() -> drm_connector_list_iter_begin().
 * Must be paired with nv_drm_connector_list_iter_end().
 */
static inline
void nv_drm_connector_list_iter_begin(struct drm_device *dev,
                                      struct drm_connector_list_iter *iter)
{
#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
    drm_connector_list_iter_begin(dev, iter);
#else
    drm_connector_list_iter_get(dev, iter);
#endif
}

/*
 * End a connector list iteration, abstracting the rename of
 * drm_connector_list_iter_put() -> drm_connector_list_iter_end().
 */
static inline
void nv_drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
    drm_connector_list_iter_end(iter);
#else
    drm_connector_list_iter_put(iter);
#endif
}
#endif
/*
* The drm_format_num_planes() function was added by commit d0d110e09629 drm:
* Add drm_format_num_planes() utility function in v3.3 (2011-12-20). Prototype
* was moved from drm_crtc.h to drm_fourcc.h by commit ae4df11a0f53 (drm: Move
* format-related helpers to drm_fourcc.c) in v4.8 (2016-06-09).
* drm_format_num_planes() has been removed by commit 05c452c115bf (drm: Remove
* users of drm_format_num_planes) in v5.3 (2019-05-16).
*
* drm_format_info() is available only from v4.10 (2016-10-18), added by commit
* 84770cc24f3a (drm: Centralize format information).
*/
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
/*
 * Return the number of planes for DRM fourcc @format. On kernels without
 * drm_format_num_planes(), falls back to drm_format_info(); an
 * unrecognized format yields 1 rather than an error.
 */
static inline int nv_drm_format_num_planes(uint32_t format)
{
#if defined(NV_DRM_FORMAT_NUM_PLANES_PRESENT)
    return drm_format_num_planes(format);
#else
    const struct drm_format_info *info = drm_format_info(format);

    /* Unknown format: assume a single plane. */
    return info != NULL ? info->num_planes : 1;
#endif
}
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
/*
* DRM_FORMAT_MOD_LINEAR was also defined after the original modifier support
* was added to the kernel, as a more explicit alias of DRM_FORMAT_MOD_NONE
*/
#if !defined(DRM_FORMAT_MOD_VENDOR_NONE)
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#endif
#if !defined(DRM_FORMAT_MOD_LINEAR)
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
#endif
/*
* DRM_FORMAT_MOD_INVALID was defined after the original modifier support was
* added to the kernel, for use as a sentinel value.
*/
#if !defined(DRM_FORMAT_RESERVED)
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#endif
#if !defined(DRM_FORMAT_MOD_INVALID)
#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
#endif
/*
* DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called
* DRM_FORMAT_MOD_VNEDOR_NV.
*/
#if !defined(DRM_FORMAT_MOD_VENDOR_NVIDIA)
#define DRM_FORMAT_MOD_VENDOR_NVIDIA DRM_FORMAT_MOD_VENDOR_NV
#endif
/*
* DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D is a relatively new addition to the
* upstream kernel headers compared to the other format modifiers.
*/
#if !defined(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D)
#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
fourcc_mod_code(NVIDIA, (0x10 | \
((h) & 0xf) | \
(((k) & 0xff) << 12) | \
(((g) & 0x3) << 20) | \
(((s) & 0x1) << 22) | \
(((c) & 0x7) << 23)))
#endif
#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */
/*
* drm_vma_offset_exact_lookup_locked() were added
* by kernel commit 2225cfe46bcc which was Signed-off-by:
* Daniel Vetter <daniel.vetter@intel.com>
*
* drm_vma_offset_exact_lookup_locked() were copied from
* include/drm/drm_vma_manager.h @ 2225cfe46bcc
* which has the following copyright and license information:
*
* Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_vma_manager.h>
/**
* nv_drm_vma_offset_exact_lookup_locked() - Look up node by exact address
* @mgr: Manager object
* @start: Start address (page-based, not byte-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
* It only returns the exact object with the given start address.
*
* RETURNS:
* Node at exact start address @start.
*/
/*
 * Look up the vma offset node whose range starts exactly at @start
 * (page-based). On kernels without drm_vma_offset_exact_lookup_locked(),
 * emulate it: do a normal range lookup, then reject nodes whose start
 * differs from @start (i.e. @start points into the middle of the node).
 * Caller must hold the manager's lock. Returns NULL when no exact match.
 */
static inline struct drm_vma_offset_node *
nv_drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
                                      unsigned long start,
                                      unsigned long pages)
{
#if defined(NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT)
    return drm_vma_offset_exact_lookup_locked(mgr, start, pages);
#else
    struct drm_vma_offset_node *node;
    node = drm_vma_offset_lookup_locked(mgr, start, pages);
    return (node && node->vm_node.start == start) ? node : NULL;
#endif
}

/*
 * Check whether @filp is allowed to map @node, abstracting the kernel API
 * change where drm_vma_node_is_allowed() started taking a drm_file "tag"
 * (filp->private_data) instead of the struct file itself.
 */
static inline bool
nv_drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
                           struct file *filp)
{
#if defined(NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG)
    return drm_vma_node_is_allowed(node, filp->private_data);
#else
    return drm_vma_node_is_allowed(node, filp);
#endif
}
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_HELPER_H__ */

View File

@@ -0,0 +1,232 @@
/*
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _UAPI_NVIDIA_DRM_IOCTL_H_
#define _UAPI_NVIDIA_DRM_IOCTL_H_
#include <drm/drm.h>
/*
* We should do our best to keep these values constant. Any change to these will
* be backwards incompatible with client applications that might be using them
*/
/*
 * Per-driver ioctl numbers (offsets from DRM_COMMAND_BASE).
 * NOTE(review): 0x07 is skipped — presumably a retired ioctl; confirm
 * before reusing the slot, since these values are userspace ABI.
 */
#define DRM_NVIDIA_GET_CRTC_CRC32                   0x00
#define DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY          0x01
#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY      0x02
#define DRM_NVIDIA_GET_DEV_INFO                     0x03
#define DRM_NVIDIA_FENCE_SUPPORTED                  0x04
#define DRM_NVIDIA_FENCE_CONTEXT_CREATE             0x05
#define DRM_NVIDIA_GEM_FENCE_ATTACH                 0x06
#define DRM_NVIDIA_GET_CLIENT_CAPABILITY            0x08
#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY          0x09
#define DRM_NVIDIA_GEM_MAP_OFFSET                   0x0a
#define DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY           0x0b
#define DRM_NVIDIA_GET_CRTC_CRC32_V2                0x0c
#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY         0x0d
#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT              0x0e
/*
 * Full ioctl request codes: direction + size are encoded from the payload
 * struct, so changing any struct below changes the ioctl number too.
 */
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY                           \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY),      \
             struct drm_nvidia_gem_import_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY                       \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY),  \
             struct drm_nvidia_gem_import_userspace_memory_params)

#define DRM_IOCTL_NVIDIA_GET_DEV_INFO                                      \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DEV_INFO),                 \
             struct drm_nvidia_get_dev_info_params)

/*
 * XXX Solaris compiler has issues with DRM_IO. None of this is supported on
 * Solaris anyway, so just skip it.
 *
 * 'warning: suggest parentheses around arithmetic in operand of |'
 */
#if defined(NV_LINUX)
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED                                   \
    DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
#else
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0
#endif

#define DRM_IOCTL_NVIDIA_FENCE_CONTEXT_CREATE                              \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_CONTEXT_CREATE),         \
             struct drm_nvidia_fence_context_create_params)

#define DRM_IOCTL_NVIDIA_GEM_FENCE_ATTACH                                  \
    DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_FENCE_ATTACH),              \
            struct drm_nvidia_gem_fence_attach_params)

#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY                             \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY),        \
             struct drm_nvidia_get_client_capability_params)

#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32                                    \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32),               \
             struct drm_nvidia_get_crtc_crc32_params)

#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2                                 \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2),            \
             struct drm_nvidia_get_crtc_crc32_v2_params)

#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY                           \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY),      \
             struct drm_nvidia_gem_export_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET                                    \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET),               \
             struct drm_nvidia_gem_map_offset_params)

#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY                            \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY),       \
             struct drm_nvidia_gem_alloc_nvkms_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY                          \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY),     \
             struct drm_nvidia_gem_export_dmabuf_memory_params)

#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT                               \
    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT),          \
             struct drm_nvidia_gem_identify_object_params)
/* Payload for DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY. */
struct drm_nvidia_gem_import_nvkms_memory_params {
    uint64_t mem_size;               /* IN */

    uint64_t nvkms_params_ptr;       /* IN: userspace pointer to NVKMS params */
    uint64_t nvkms_params_size;      /* IN */

    uint32_t handle;                 /* OUT */

    uint32_t __pad;                  /* keep 8-byte size multiple */
};

/* Payload for DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY.
 * NOTE(review): no explicit trailing pad after 'handle'; struct size may
 * differ between ABIs with 4- vs 8-byte uint64_t alignment — confirm. */
struct drm_nvidia_gem_import_userspace_memory_params {
    uint64_t size;                   /* IN Size of memory in bytes */
    uint64_t address;                /* IN Virtual address of userspace memory */
    uint32_t handle;                 /* OUT Handle to gem object */
};

/* Payload for DRM_IOCTL_NVIDIA_GET_DEV_INFO. */
struct drm_nvidia_get_dev_info_params {
    uint32_t gpu_id;                 /* OUT */
    uint32_t primary_index;          /* OUT; the "card%d" value */

    /* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these */
    uint32_t generic_page_kind;      /* OUT */
    uint32_t page_kind_generation;   /* OUT */
    uint32_t sector_layout;          /* OUT */
};
/* Payload for DRM_IOCTL_NVIDIA_FENCE_CONTEXT_CREATE. */
struct drm_nvidia_fence_context_create_params {
    uint32_t handle;                      /* OUT GEM handle to fence context */

    uint32_t index;                       /* IN Index of semaphore to use for fencing */
    uint64_t size;                        /* IN Size of semaphore surface in bytes */

    /* Params for importing userspace semaphore surface */
    uint64_t import_mem_nvkms_params_ptr; /* IN */
    uint64_t import_mem_nvkms_params_size; /* IN */

    /* Params for creating software signaling event */
    uint64_t event_nvkms_params_ptr;      /* IN */
    uint64_t event_nvkms_params_size;     /* IN */
};

/* Payload for DRM_IOCTL_NVIDIA_GEM_FENCE_ATTACH (write-only: DRM_IOW). */
struct drm_nvidia_gem_fence_attach_params {
    uint32_t handle;                /* IN GEM handle to attach fence to */
    uint32_t fence_context_handle;  /* IN GEM handle to fence context on which fence is run on */
    uint32_t sem_thresh;            /* IN Semaphore value to reach before signal */
};

/* Payload for DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY. */
struct drm_nvidia_get_client_capability_params {
    uint64_t capability;            /* IN Client capability enum */
    uint64_t value;                 /* OUT Client capability value */
};
/* Struct that stores Crc value and if it is supported by hardware */
struct drm_nvidia_crtc_crc32 {
    uint32_t value;     /* Read value, undefined if supported is false */
    uint8_t supported;  /* Supported boolean, true if readable by hardware */
};

/* Aggregated CRC readback for the three hardware tap points. */
struct drm_nvidia_crtc_crc32_v2_out {
    struct drm_nvidia_crtc_crc32 compositorCrc32;      /* OUT compositor hardware CRC32 value */
    struct drm_nvidia_crtc_crc32 rasterGeneratorCrc32; /* OUT raster generator CRC32 value */
    struct drm_nvidia_crtc_crc32 outputCrc32;          /* OUT SF/SOR CRC32 value */
};

/* Payload for DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2. */
struct drm_nvidia_get_crtc_crc32_v2_params {
    uint32_t crtc_id;                          /* IN CRTC identifier */
    struct drm_nvidia_crtc_crc32_v2_out crc32; /* OUT Crc32 output structure */
};

/* Payload for the legacy DRM_IOCTL_NVIDIA_GET_CRTC_CRC32. */
struct drm_nvidia_get_crtc_crc32_params {
    uint32_t crtc_id;  /* IN CRTC identifier */
    uint32_t crc32;    /* OUT CRC32 value */
};
/* Payload for DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY. */
struct drm_nvidia_gem_export_nvkms_memory_params {
    uint32_t handle;            /* IN */
    uint32_t __pad;             /* align nvkms_params_ptr to 8 bytes */

    uint64_t nvkms_params_ptr;  /* IN */
    uint64_t nvkms_params_size; /* IN */
};

/* Payload for DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET. */
struct drm_nvidia_gem_map_offset_params {
    uint32_t handle;            /* IN Handle to gem object */
    uint32_t __pad;             /* align offset to 8 bytes */

    uint64_t offset;            /* OUT Fake offset */
};

/* Payload for DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY. */
struct drm_nvidia_gem_alloc_nvkms_memory_params {
    uint32_t handle;            /* OUT */
    uint8_t  block_linear;      /* IN */
    uint8_t  compressible;      /* IN/OUT */
    uint16_t __pad;             /* align memory_size to 8 bytes */

    uint64_t memory_size;       /* IN */
};

/* Payload for DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY. */
struct drm_nvidia_gem_export_dmabuf_memory_params {
    uint32_t handle;            /* IN GEM Handle*/
    uint32_t __pad;             /* align nvkms_params_ptr to 8 bytes */

    uint64_t nvkms_params_ptr;  /* IN */
    uint64_t nvkms_params_size; /* IN */
};
/* Kinds of GEM object this driver can create; reported by IDENTIFY_OBJECT. */
typedef enum {
    NV_GEM_OBJECT_NVKMS,
    NV_GEM_OBJECT_DMABUF,
    NV_GEM_OBJECT_USERMEMORY,

    NV_GEM_OBJECT_UNKNOWN = 0x7fffffff /* Force size of 32-bits. */
} drm_nvidia_gem_object_type;

/* Payload for DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT. */
struct drm_nvidia_gem_identify_object_params {
    uint32_t handle;                         /* IN GEM handle*/
    drm_nvidia_gem_object_type object_type;  /* OUT GEM object type */
};
#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */

View File

@@ -0,0 +1,189 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include "nvidia-drm-os-interface.h"
#include "nvidia-drm.h"
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#include <linux/vmalloc.h>
#include "nv-mm.h"
/*
 * Atomic modesetting is opt-in via nvidia-drm.modeset=1. Permissions 0400:
 * readable by root in sysfs, not changeable after load.
 */
MODULE_PARM_DESC(
    modeset,
    "Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
bool nv_drm_modeset_module_param = false;
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
/*
 * Allocate zero-initialized memory for an array of @nmemb elements of
 * @size bytes each (calloc-style). Returns NULL on failure or if the
 * requested size overflows.
 */
void *nv_drm_calloc(size_t nmemb, size_t size)
{
    /*
     * Guard against nmemb * size wrapping: an overflowed product would
     * silently allocate a buffer smaller than the caller expects.
     */
    if (size != 0 && nmemb > SIZE_MAX / size) {
        return NULL;
    }
    return kzalloc(nmemb * size, GFP_KERNEL);
}
/*
 * Free memory obtained from nv_drm_calloc()/kernel allocators.
 * ERR_PTR-encoded values are not real allocations and are ignored;
 * kfree(NULL) is already a no-op.
 */
void nv_drm_free(void *ptr)
{
    if (!IS_ERR(ptr)) {
        kfree(ptr);
    }
}
/*
 * printf-style formatting into a freshly kmalloc'd string.
 * Returns NULL on allocation failure; caller frees with nv_drm_free().
 */
char *nv_drm_asprintf(const char *fmt, ...)
{
    char *str;
    va_list args;

    va_start(args, fmt);
    str = kvasprintf(GFP_KERNEL, fmt, args);
    va_end(args);

    return str;
}
/*
 * Per-architecture barrier that flushes pending write-combined stores to
 * memory. NOTE(review): there is no fallback #else branch — on an
 * architecture outside this list, nv_drm_write_combine_flush() fails to
 * compile; presumably such configs are excluded elsewhere — confirm.
 */
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
  #define WRITE_COMBINE_FLUSH()    asm volatile("sfence":::"memory")
#elif defined(NVCPU_FAMILY_ARM)
  #if defined(NVCPU_ARM)
    #define WRITE_COMBINE_FLUSH()  { dsb(); outer_sync(); }
  #elif defined(NVCPU_AARCH64)
    #define WRITE_COMBINE_FLUSH()  mb()
  #endif
#elif defined(NVCPU_PPC64LE)
  #define WRITE_COMBINE_FLUSH()    asm volatile("sync":::"memory")
#endif

/* Flush write-combined stores so the display hardware sees them. */
void nv_drm_write_combine_flush(void)
{
    WRITE_COMBINE_FLUSH();
}
/*
 * Pin @pages_count pages of user memory starting at @address for write
 * access. On success returns 0 and stores a kmalloc'd page array in
 * *pages (release with nv_drm_unlock_user_pages()). On failure returns a
 * negative errno; any partially pinned pages are released here.
 */
int nv_drm_lock_user_pages(unsigned long address,
                           unsigned long pages_count, struct page ***pages)
{
    struct mm_struct *mm = current->mm;
    struct page **user_pages;
    const int write = 1;    /* pages will be written (e.g. by the GPU) */
    const int force = 0;    /* do not override protection */
    int pages_pinned;

    user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));

    if (user_pages == NULL) {
        return -ENOMEM;
    }

    /* get_user_pages() requires mmap_lock held for read. */
    nv_mmap_read_lock(mm);

    pages_pinned = NV_GET_USER_PAGES(address, pages_count, write, force,
                                     user_pages, NULL);
    nv_mmap_read_unlock(mm);

    /* All-or-nothing: a partial pin is treated as failure. */
    if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
        goto failed;
    }

    *pages = user_pages;

    return 0;

failed:

    /* Drop any pages that were pinned before the call fell short. */
    if (pages_pinned > 0) {
        int i;

        for (i = 0; i < pages_pinned; i++) {
            put_page(user_pages[i]);
        }
    }

    nv_drm_free(user_pages);

    return (pages_pinned < 0) ? pages_pinned : -EINVAL;
}
/*
 * Release pages pinned by nv_drm_lock_user_pages(): mark each one dirty
 * (the hardware may have written to it while pinned), drop the pin, then
 * free the page array itself.
 */
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
{
    unsigned long idx;

    for (idx = 0; idx < pages_count; idx++) {
        struct page *page = pages[idx];

        set_page_dirty_lock(page);
        put_page(page);
    }

    nv_drm_free(pages);
}
/*
 * Map an array of pages into a contiguous kernel virtual range.
 * VM_USERMAP allows the mapping to be later exposed to userspace.
 * Returns NULL on failure; undo with nv_drm_vunmap().
 */
void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
{
    return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
}

/* Tear down a mapping created by nv_drm_vmap(). */
void nv_drm_vunmap(void *address)
{
    vunmap(address);
}
#endif /* NV_DRM_AVAILABLE */
/*************************************************************************
* Linux loading support code.
*************************************************************************/
/* Module entry point: delegate to the OS-independent init. */
static int __init nv_linux_drm_init(void)
{
    return nv_drm_init();
}

/* Module exit point: delegate to the OS-independent teardown. */
static void __exit nv_linux_drm_exit(void)
{
    nv_drm_exit();
}

module_init(nv_linux_drm_init);
module_exit(nv_linux_drm_exit);
#if defined(MODULE_LICENSE)
MODULE_LICENSE("Dual MIT/GPL");
#endif
#if defined(MODULE_INFO)
MODULE_INFO(supported, "external");
#endif
#if defined(MODULE_VERSION)
MODULE_VERSION(NV_VERSION_STRING);
#endif

View File

@@ -0,0 +1,577 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-modeset.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-os-interface.h"
#include "nvidia-drm-helper.h"
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
/*
 * Driver-private atomic state: embeds the NVKMS request that is rebuilt
 * from the DRM state on every check/commit, alongside the base DRM state.
 */
struct nv_drm_atomic_state {
    struct NvKmsKapiRequestedModeSetConfig config;
    struct drm_atomic_state base;
};

/* Downcast from the embedded base state to the driver wrapper. */
static inline struct nv_drm_atomic_state *to_nv_atomic_state(
    struct drm_atomic_state *state)
{
    return container_of(state, struct nv_drm_atomic_state, base);
}
/*
 * drm_mode_config_funcs::atomic_state_alloc hook: allocate the driver's
 * wrapped atomic state and initialize the embedded base state.
 * Returns NULL on allocation or init failure.
 */
struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev)
{
    struct nv_drm_atomic_state *nv_state;

    nv_state = nv_drm_calloc(1, sizeof(*nv_state));
    if (nv_state == NULL) {
        return NULL;
    }

    if (drm_atomic_state_init(dev, &nv_state->base) < 0) {
        nv_drm_free(nv_state);
        return NULL;
    }

    return &nv_state->base;
}
/* drm_mode_config_funcs::atomic_state_clear hook. */
void nv_drm_atomic_state_clear(struct drm_atomic_state *state)
{
    drm_atomic_state_default_clear(state);
}

/*
 * drm_mode_config_funcs::atomic_state_free hook: release the base state's
 * resources, then free the enclosing driver wrapper.
 */
void nv_drm_atomic_state_free(struct drm_atomic_state *state)
{
    struct nv_drm_atomic_state *nv_state =
                    to_nv_atomic_state(state);
    drm_atomic_state_default_release(state);
    nv_drm_free(nv_state);
}
/**
 * __will_generate_flip_event - Check whether event is going to be generated by
 * hardware when it flips from old crtc/plane state to current one. This
 * function is called after drm_atomic_helper_swap_state(), therefore new state
 * is swapped into current state.
 *
 * Side effect: increments nv_flip->pending_events once per non-cursor
 * plane on this crtc that was previously active.
 */
static bool __will_generate_flip_event(struct drm_crtc *crtc,
                                       struct drm_crtc_state *old_crtc_state)
{
    struct drm_crtc_state *new_crtc_state = crtc->state;
    struct nv_drm_crtc_state *nv_new_crtc_state =
        to_nv_crtc_state(new_crtc_state);
    struct drm_plane_state *old_plane_state = NULL;
    struct drm_plane *plane = NULL;
    int i;

    if (!old_crtc_state->active && !new_crtc_state->active) {
        /*
         * crtc is not active in old and new states therefore all planes are
         * disabled, hardware can not generate flip events.
         */
        return false;
    }

    /*
     * Count the flip done events the hardware will generate: one for each
     * non-cursor plane on this crtc that was active before the swap.
     */
    nv_drm_for_each_plane_in_state(old_crtc_state->state,
                                   plane, old_plane_state, i) {
        if (old_plane_state->crtc != crtc) {
            continue;
        }

        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
            continue;
        }

        /*
         * Hardware generates flip event for only those
         * planes which were active previously.
         */
        if (old_crtc_state->active && old_plane_state->fb != NULL) {
            nv_new_crtc_state->nv_flip->pending_events++;
        }
    }

    return nv_new_crtc_state->nv_flip->pending_events != 0;
}
/*
 * Copy the post-syncpt fd from the NVKMS layer reply back to the
 * userspace pointer the client supplied via set_property. Returns 0 on
 * success (or when there is nothing to deliver), -EFAULT if the copy to
 * userspace fails.
 */
static int __nv_drm_put_back_post_fence_fd(
    struct nv_drm_plane_state *plane_state,
    const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
{
    int fd = layer_reply_config->postSyncptFd;

    if (fd < 0 || plane_state->fd_user_ptr == NULL) {
        return 0;
    }

    if (put_user(fd, plane_state->fd_user_ptr)) {
        return -EFAULT;
    }

    /* Clear the pointer; set_property must specify it again next time. */
    plane_state->fd_user_ptr = NULL;

    return 0;
}
/*
 * After a commit, walk this crtc's planes and push each layer's
 * post-syncpt fd from @reply_config back to userspace. Returns 0 on
 * success or the first -EFAULT from __nv_drm_put_back_post_fence_fd().
 *
 * NOTE(review): @requested_config is accepted but never read in this
 * body — presumably kept for interface symmetry; confirm.
 */
static int __nv_drm_get_syncpt_data(
    struct nv_drm_device *nv_dev,
    struct drm_crtc *crtc,
    struct drm_crtc_state *old_crtc_state,
    struct NvKmsKapiRequestedModeSetConfig *requested_config,
    struct NvKmsKapiModeSetReplyConfig *reply_config)
{
    struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
    struct NvKmsKapiHeadReplyConfig *head_reply_config;
    struct nv_drm_plane_state *plane_state;
    struct drm_crtc_state *new_crtc_state = crtc->state;
    struct drm_plane_state *old_plane_state = NULL;
    struct drm_plane_state *new_plane_state = NULL;
    struct drm_plane *plane = NULL;
    int i, ret;

    if (!old_crtc_state->active && !new_crtc_state->active) {
        /*
         * crtc is not active in old and new states therefore all planes are
         * disabled, exit early.
         */
        return 0;
    }

    head_reply_config = &reply_config->headReplyConfig[nv_crtc->head];

    /* Only planes attached to this crtc in BOTH old and new state count. */
    nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
        struct nv_drm_plane *nv_plane = to_nv_plane(plane);

        if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) {
            continue;
        }

        new_plane_state = plane->state;

        if (new_plane_state->crtc != crtc) {
            continue;
        }

        plane_state = to_nv_drm_plane_state(new_plane_state);

        ret = __nv_drm_put_back_post_fence_fd(
                plane_state,
                &head_reply_config->layerReplyConfig[nv_plane->layer_idx]);
        if (ret != 0) {
            return ret;
        }
    }

    return 0;
}
/**
 * nv_drm_atomic_commit - validate/commit modeset config
 * @dev: DRM device
 * @state: atomic state tracking atomic update
 * @commit: commit/check modeset config associated with atomic update
 *
 * @state tracks atomic update and modeset objects affected
 * by the atomic update, but the state of the modeset objects it contains
 * depends on the current stage of the update.
 * At the commit stage, the proposed state is already stored in the current
 * state, and @state contains old state for all affected modeset objects.
 * At the check/validation stage, @state contains the proposed state for
 * all affected objects.
 *
 * Sequence of atomic update -
 *   1. The check/validation of proposed atomic state,
 *   2. Do any other steps that might fail,
 *   3. Put the proposed state into the current state pointers,
 *   4. Actually commit the hardware state,
 *   5. Cleanup old state.
 *
 * The function nv_drm_atomic_apply_modeset_config() is getting called
 * at stages (1) and (4) after drm_atomic_helper_swap_state().
 */
static int
nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool commit)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct NvKmsKapiRequestedModeSetConfig *requested_config =
        &(to_nv_atomic_state(state)->config);
    struct NvKmsKapiModeSetReplyConfig reply_config = { };
    struct drm_crtc *crtc;
    struct drm_crtc_state *crtc_state;
    int i;
    int ret;

    /* Start from a clean request; it is rebuilt on every check/commit. */
    memset(requested_config, 0, sizeof(*requested_config));

    /* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
        /*
         * When committing a state, the new state is already stored in
         * crtc->state. When checking a proposed state, the proposed state is
         * stored in crtc_state.
         */
        struct drm_crtc_state *new_crtc_state =
            commit ? crtc->state : crtc_state;
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);

        requested_config->headRequestedConfig[nv_crtc->head] =
            to_nv_crtc_state(new_crtc_state)->req_config;

        requested_config->headsMask |= 1 << nv_crtc->head;

        if (commit) {
            struct drm_crtc_state *old_crtc_state = crtc_state;
            struct nv_drm_crtc_state *nv_new_crtc_state =
                to_nv_crtc_state(new_crtc_state);

            /*
             * Take ownership of the completion event (if any) away from
             * the DRM core; it is delivered via the nv_flip machinery.
             */
            nv_new_crtc_state->nv_flip->event = new_crtc_state->event;
            nv_new_crtc_state->nv_flip->pending_events = 0;
            new_crtc_state->event = NULL;

            /*
             * If flip event will be generated by hardware
             * then defer flip object processing to flip event from hardware.
             */
            if (__will_generate_flip_event(crtc, old_crtc_state)) {
                nv_drm_crtc_enqueue_flip(nv_crtc,
                                         nv_new_crtc_state->nv_flip);
                nv_new_crtc_state->nv_flip = NULL;
            }
        }
    }

    if (commit && nvKms->systemInfo.bAllowWriteCombining) {
        /*
         * XXX This call is required only if dumb buffer is going
         * to be presented.
         */
        nv_drm_write_combine_flush();
    }

    /* Hand the aggregated request to NVKMS to validate or program. */
    if (!nvKms->applyModeSetConfig(nv_dev->pDevice,
                                   requested_config,
                                   &reply_config,
                                   commit)) {
        return -EINVAL; /* NVKMS rejected the proposed configuration */
    }

    if (commit && nv_dev->supportsSyncpts) {
        nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
            /*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */
            ret = __nv_drm_get_syncpt_data(
                    nv_dev, crtc, crtc_state, requested_config, &reply_config);
            if (ret != 0) {
                return ret;
            }
        }
    }

    return 0;
}
/*
 * drm_mode_config_funcs::atomic_check hook: run the core DRM helper
 * checks first, then validate the configuration with NVKMS (commit=false
 * performs validation only). Returns 0 if the proposed state is valid.
 */
int nv_drm_atomic_check(struct drm_device *dev,
                        struct drm_atomic_state *state)
{
    int ret = drm_atomic_helper_check(dev, state);

    if (ret != 0) {
        return ret;
    }

    return nv_drm_atomic_apply_modeset_config(dev, state, false /* commit */);
}
/**
 * __nv_drm_handle_flip_event - handle flip occurred event
 * @nv_crtc: crtc on which flip has been occurred
 *
 * This handler dequeues the first nv_drm_flip from the crtc's flip_list,
 * generates an event if requested at flip time, and frees the nv_drm_flip.
 */
static void __nv_drm_handle_flip_event(struct nv_drm_crtc *nv_crtc)
{
    struct drm_device *dev = nv_crtc->base.dev;
    struct nv_drm_device *nv_dev = to_nv_device(dev);

    struct nv_drm_flip *nv_flip;

    /*
     * Acquire event_lock before nv_flip object dequeue, otherwise immediate
     * flip event delivery from nv_drm_atomic_commit() races ahead and
     * messes up with event delivery order.
     */
    spin_lock(&dev->event_lock);
    nv_flip = nv_drm_crtc_dequeue_flip(nv_crtc);
    if (likely(nv_flip != NULL)) {
        struct nv_drm_flip *nv_deferred_flip, *nv_next_deferred_flip;

        /* Deliver this flip's own completion event, if one was requested. */
        if (nv_flip->event != NULL) {
            drm_crtc_send_vblank_event(&nv_crtc->base, nv_flip->event);
        }

        /*
         * Process flips that were deferred until processing of this nv_flip
         * object.
         */
        list_for_each_entry_safe(nv_deferred_flip,
                                 nv_next_deferred_flip,
                                 &nv_flip->deferred_flip_list, list_entry) {
            if (nv_deferred_flip->event != NULL) {
                drm_crtc_send_vblank_event(&nv_crtc->base,
                                           nv_deferred_flip->event);
            }
            list_del(&nv_deferred_flip->list_entry);

            nv_drm_free(nv_deferred_flip);
        }
    }
    spin_unlock(&dev->event_lock);

    wake_up_all(&nv_dev->flip_event_wq);

    /* Safe when nv_flip is NULL: nv_drm_free() -> kfree(NULL) is a no-op. */
    nv_drm_free(nv_flip);
}
/**
 * nv_drm_atomic_commit - drm_mode_config_funcs::atomic_commit() hook
 * @dev: DRM device
 * @state: atomic state to commit
 * @nonblock: if true, fail with -EBUSY rather than waiting on pending flips
 *
 * Swaps in the new state, applies it through NVKMS, and either delivers or
 * defers per-crtc flip events so they are observed in submission order.
 *
 * Note: per the comment below, this function deliberately returns 0 once
 * drm_atomic_helper_swap_state() has been called, even if applying the
 * modeset config failed (the failure is only logged).
 */
int nv_drm_atomic_commit(struct drm_device *dev,
                         struct drm_atomic_state *state,
                         bool nonblock)
{
    int ret = -EBUSY;
    int i;
    struct drm_crtc *crtc = NULL;
    struct drm_crtc_state *crtc_state = NULL;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    /*
     * drm_mode_config_funcs::atomic_commit() mandates returning -EBUSY
     * for a nonblocking commit if previous updates (commit tasks/flip events)
     * are pending. For blocking commits it mandates waiting for previous
     * updates to complete.
     */
    if (nonblock) {
        nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
            struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
            /*
             * nv_drm_crtc::flip_list_lock is not required here: the core DRM
             * driver locks all affected crtcs before calling ->commit(), so
             * no other thread can be enqueuing flip objects for these crtcs
             * (enqueue only happens via this path:
             *
             *   nv_drm_atomic_commit_internal()
             *     -> nv_drm_atomic_apply_modeset_config(commit=true)
             *       -> nv_drm_crtc_enqueue_flip()).
             *
             * The only possible race is with the dequeue path
             * (__nv_drm_handle_flip_event() -> nv_drm_crtc_dequeue_flip()),
             * and that race cannot make list_empty() report empty when the
             * list is not.
             */
            if (!list_empty(&nv_crtc->flip_list)) {
                return -EBUSY;
            }
        }
    }
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG)
    /*
     * Blocking/non-blocking commit is implemented with
     * nv_drm_crtc::flip_list; no help is needed from the core DRM helpers to
     * stall commit processing, so 'stall' is false. In this context a failure
     * from drm_atomic_helper_swap_state() is not expected.
     */
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT)
    ret = drm_atomic_helper_swap_state(state, false /* stall */);
    if (WARN_ON(ret != 0)) {
        return ret;
    }
#else
    drm_atomic_helper_swap_state(state, false /* stall */);
#endif
#else
    drm_atomic_helper_swap_state(dev, state);
#endif
    /*
     * This function must not return failure after calling
     * drm_atomic_helper_swap_state().
     */
    if ((ret = nv_drm_atomic_apply_modeset_config(
                    dev,
                    state, true /* commit */)) != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to apply atomic modeset.  Error code: %d",
            ret);
        goto done;
    }
    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
        struct nv_drm_crtc_state *nv_new_crtc_state =
            to_nv_crtc_state(crtc->state);
        /*
         * If nv_drm_atomic_apply_modeset_config() hasn't consumed the flip
         * object, no event will be generated for this flip and it must be
         * processed here:
         */
        if (nv_new_crtc_state->nv_flip != NULL) {
            /*
             * First, defer processing of this flip until the last flip
             * already queued for this crtc has been processed, to keep event
             * delivery in order.
             */
            spin_lock(&nv_crtc->flip_list_lock);
            if (!list_empty(&nv_crtc->flip_list)) {
                struct nv_drm_flip *nv_last_flip =
                    list_last_entry(&nv_crtc->flip_list,
                                    struct nv_drm_flip, list_entry);
                list_add(&nv_new_crtc_state->nv_flip->list_entry,
                         &nv_last_flip->deferred_flip_list);
                nv_new_crtc_state->nv_flip = NULL;
            }
            spin_unlock(&nv_crtc->flip_list_lock);
        }
        if (nv_new_crtc_state->nv_flip != NULL) {
            /*
             * Otherwise, no flips are pending on this crtc: deliver the
             * event for the current flip immediately.
             */
            if (nv_new_crtc_state->nv_flip->event != NULL) {
                spin_lock(&dev->event_lock);
                drm_crtc_send_vblank_event(crtc,
                                           nv_new_crtc_state->nv_flip->event);
                spin_unlock(&dev->event_lock);
            }
            nv_drm_free(nv_new_crtc_state->nv_flip);
            nv_new_crtc_state->nv_flip = NULL;
        }
        if (!nonblock) {
            /*
             * Blocking commit: wait for all pending flips on this crtc to be
             * processed. As above, nv_drm_crtc::flip_list_lock is not needed
             * for the list_empty() check — the core DRM driver holds the
             * crtc locks, so only the dequeue path
             * (__nv_drm_handle_flip_event() -> nv_drm_crtc_dequeue_flip())
             * can race, and that race cannot make list_empty() report empty
             * when the list is not.
             */
            if (wait_event_timeout(
                    nv_dev->flip_event_wq,
                    list_empty(&nv_crtc->flip_list),
                    3 * HZ /* 3 second */) == 0) {
                /* Timed out; log and fall through (do not fail the commit). */
                NV_DRM_DEV_LOG_ERR(
                    nv_dev,
                    "Flip event timeout on head %u", nv_crtc->head);
            }
        }
    }
done:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
    /*
     * If ref counting is present, state will be freed when the caller
     * drops its reference after we return.
     */
#else
    drm_atomic_state_free(state);
#endif
    /* Deliberate: failures after swap_state are logged, not propagated. */
    return 0;
}
/*
 * nv_drm_handle_flip_occurred - dispatch a flip-occurred notification.
 *
 * Looks up the crtc for @head and forwards the event to
 * __nv_drm_handle_flip_event(); warns and ignores the event if no crtc is
 * found. @plane is currently unused here.
 */
void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
                                 NvU32 head, NvU32 plane)
{
    struct nv_drm_crtc *nv_crtc;

    nv_crtc = nv_drm_crtc_lookup(nv_dev, head);

    if (!NV_DRM_WARN(nv_crtc == NULL)) {
        __nv_drm_handle_flip_event(nv_crtc);
    }
}
#endif

View File

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_MODESET_H__
#define __NVIDIA_DRM_MODESET_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include "nvkms-kapi.h"
struct drm_device;
struct drm_atomic_state;
struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev);
void nv_drm_atomic_state_clear(struct drm_atomic_state *state);
void nv_drm_atomic_state_free(struct drm_atomic_state *state);
int nv_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int nv_drm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
NvU32 head, NvU32 plane);
int nv_drm_shut_down_all_crtcs(struct drm_device *dev);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_MODESET_H__ */

View File

@@ -0,0 +1,56 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_OS_INTERFACE_H__
#define __NVIDIA_DRM_OS_INTERFACE_H__
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#include "nvtypes.h"
#if defined(NV_DRM_AVAILABLE)
struct page;
/* Set to true when the atomic modeset feature is enabled. */
extern bool nv_drm_modeset_module_param;
void *nv_drm_calloc(size_t nmemb, size_t size);
void nv_drm_free(void *ptr);
char *nv_drm_asprintf(const char *fmt, ...);
void nv_drm_write_combine_flush(void);
int nv_drm_lock_user_pages(unsigned long address,
unsigned long pages_count, struct page ***pages);
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
void nv_drm_vunmap(void *address);
#endif
#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */

View File

@@ -0,0 +1,518 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-prime-fence.h"
#include "nvidia-dma-resv-helper.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
/*
 * Per-context state for prime fences: one dma-fence timeline backed by a
 * mapped semaphore surface whose value is polled on NVKMS channel events.
 */
struct nv_drm_fence_context {
    struct nv_drm_device *nv_dev;
    /* dma-fence context id, allocated via nv_dma_fence_context_alloc() */
    uint32_t context;
    NvU64 fenceSemIndex; /* Index into semaphore surface */
    /* Mapped semaphore surface */
    struct NvKmsKapiMemory *pSemSurface;
    NvU32 *pLinearAddress;
    /* Protects nv_drm_fence_context::{pending, last_seqno} */
    spinlock_t lock;
    /*
     * Software signaling structures. __nv_drm_fence_context_new()
     * allocates the channel event and __nv_drm_fence_context_destroy() frees
     * it. There is no simultaneous read/write access to 'cb', therefore it
     * does not require spin-lock protection.
     */
    struct NvKmsKapiChannelEvent *cb;
    /* List of pending fences which are not yet signaled */
    struct list_head pending;
    /* Sequence number of the most recently created fence */
    unsigned last_seqno;
};
/* A single pending prime fence; linked into nv_drm_fence_context::pending. */
struct nv_drm_prime_fence {
    struct list_head list_entry;
    nv_dma_fence_t base;
    /* Per-fence lock passed to nv_dma_fence_init() */
    spinlock_t lock;
};
/* Convert an embedded dma-fence back to its containing nv_drm_prime_fence. */
static inline
struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence)
{
    return container_of(fence, struct nv_drm_prime_fence, base);
}
/* dma_fence_ops::get_driver_name: fixed driver name for this timeline. */
static const char*
nv_drm_gem_prime_fence_op_get_driver_name(nv_dma_fence_t *fence)
{
    return "NVIDIA";
}
/* dma_fence_ops::get_timeline_name: fixed timeline name for prime fences. */
static const char*
nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence)
{
    return "nvidia.prime";
}
/*
 * dma_fence_ops::enable_signaling: nothing to do — signaling is driven
 * unconditionally by the channel-event callback, so just report success.
 */
static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence)
{
    // DO NOTHING
    return true;
}
/* dma_fence_ops::release: free the containing nv_drm_prime_fence. */
static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence)
{
    nv_drm_free(to_nv_drm_prime_fence(fence));
}
/*
 * dma_fence_ops::wait: default wait, but with an upper bound on "infinite"
 * waits.
 */
static signed long
nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
                               bool intr, signed long timeout)
{
    signed long bounded_timeout = timeout;

    /*
     * If the waiter requests to wait with no timeout, force a timeout to
     * ensure that it won't get stuck forever in the kernel if something were
     * to go wrong with signaling, such as a malicious userspace not releasing
     * the semaphore.
     *
     * 96 ms (roughly 6 frames @ 60 Hz) is arbitrarily chosen to be long
     * enough that it should never get hit during normal operation, but not
     * so long that the system becomes unresponsive.
     */
    if (timeout == MAX_SCHEDULE_TIMEOUT) {
        bounded_timeout = msecs_to_jiffies(96);
    }

    return nv_dma_fence_default_wait(fence, intr, bounded_timeout);
}
/* dma-fence ops table for prime fences created on a semaphore timeline. */
static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = {
    .get_driver_name = nv_drm_gem_prime_fence_op_get_driver_name,
    .get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
    .enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
    .release = nv_drm_gem_prime_fence_op_release,
    .wait = nv_drm_gem_prime_fence_op_wait,
};
/*
 * Signal one pending fence: unlink it from the pending list, signal its
 * dma-fence, and drop the list's reference. Caller must hold
 * nv_drm_fence_context::lock (the list is protected by it).
 */
static inline void
__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
{
    list_del(&nv_fence->list_entry);
    nv_dma_fence_signal(&nv_fence->base);
    nv_dma_fence_put(&nv_fence->base);
}
/*
 * Force-signal every pending fence on the context, emptying the pending
 * list. Caller must hold nv_drm_fence_context::lock.
 *
 * NOTE(review): spin_is_locked() is documented to always return 0 on
 * uniprocessor (!CONFIG_SMP) builds, so this WARN_ON may fire spuriously
 * there — confirm against the target kernel configs.
 */
static void nv_drm_gem_prime_force_fence_signal(
    struct nv_drm_fence_context *nv_fence_context)
{
    WARN_ON(!spin_is_locked(&nv_fence_context->lock));
    while (!list_empty(&nv_fence_context->pending)) {
        struct nv_drm_prime_fence *nv_fence = list_first_entry(
            &nv_fence_context->pending,
            typeof(*nv_fence),
            list_entry);
        __nv_drm_prime_fence_signal(nv_fence);
    }
}
/*
 * Channel-event callback: read the current semaphore value and signal every
 * pending fence whose sequence number has been reached.
 *
 * @dataPtr:  the nv_drm_fence_context registered with allocateChannelEvent()
 * @dataU32:  unused here
 */
static void nv_drm_gem_prime_fence_event
(
    void *dataPtr,
    NvU32 dataU32
)
{
    struct nv_drm_fence_context *nv_fence_context = dataPtr;
    spin_lock(&nv_fence_context->lock);
    while (!list_empty(&nv_fence_context->pending)) {
        struct nv_drm_prime_fence *nv_fence = list_first_entry(
            &nv_fence_context->pending,
            typeof(*nv_fence),
            list_entry);
        /* Index into surface with 16 byte stride (4 NvU32s per slot) */
        unsigned int seqno = *((nv_fence_context->pLinearAddress) +
                               (nv_fence_context->fenceSemIndex * 4));
        if (nv_fence->base.seqno > seqno) {
            /*
             * Fences in the list are placed in increasing order of sequence
             * number; break the loop once the first fence not yet ready to
             * signal is found.
             */
            break;
        }
        __nv_drm_prime_fence_signal(nv_fence);
    }
    spin_unlock(&nv_fence_context->lock);
}
/*
 * Create a fence context: import and map the semaphore surface described by
 * @p, allocate the context object, and register the channel event callback
 * that drives signaling.
 *
 * Returns the new context, or NULL on failure (resources are unwound via
 * the reverse-order goto cleanup chain below).
 */
static inline struct nv_drm_fence_context *__nv_drm_fence_context_new(
    struct nv_drm_device *nv_dev,
    struct drm_nvidia_fence_context_create_params *p)
{
    struct nv_drm_fence_context *nv_fence_context;
    struct NvKmsKapiMemory *pSemSurface;
    NvU32 *pLinearAddress;
    /* Import the semaphore surface backing this timeline */
    pSemSurface = nvKms->importMemory(nv_dev->pDevice,
                                      p->size,
                                      p->import_mem_nvkms_params_ptr,
                                      p->import_mem_nvkms_params_size);
    if (!pSemSurface) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to import fence semaphore surface");
        goto failed;
    }
    /* Map it into the kernel so the event callback can poll the value */
    if (!nvKms->mapMemory(nv_dev->pDevice,
                          pSemSurface,
                          NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                          (void **) &pLinearAddress)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to map fence semaphore surface");
        goto failed_to_map_memory;
    }
    /*
     * Allocate a fence context object, initialize it and allocate a channel
     * event for it.
     */
    if ((nv_fence_context = nv_drm_calloc(
                    1,
                    sizeof(*nv_fence_context))) == NULL) {
        goto failed_alloc_fence_context;
    }
    /*
     * nv_dma_fence_context_alloc() cannot fail, so we do not need
     * to check a return value.
     */
    *nv_fence_context = (struct nv_drm_fence_context) {
        .nv_dev = nv_dev,
        .context = nv_dma_fence_context_alloc(1),
        .pSemSurface = pSemSurface,
        .pLinearAddress = pLinearAddress,
        .fenceSemIndex = p->index,
    };
    INIT_LIST_HEAD(&nv_fence_context->pending);
    spin_lock_init(&nv_fence_context->lock);
    /*
     * Except for 'cb', the fence context must be completely initialized
     * before channel event allocation because the fence context may start
     * receiving events immediately after allocation.
     *
     * There is no simultaneous read/write access to 'cb', therefore it does
     * not require spin-lock protection.
     */
    nv_fence_context->cb =
        nvKms->allocateChannelEvent(nv_dev->pDevice,
                                    nv_drm_gem_prime_fence_event,
                                    nv_fence_context,
                                    p->event_nvkms_params_ptr,
                                    p->event_nvkms_params_size);
    if (!nv_fence_context->cb) {
        NV_DRM_DEV_LOG_ERR(nv_dev,
                           "Failed to allocate fence signaling event");
        goto failed_to_allocate_channel_event;
    }
    return nv_fence_context;
failed_to_allocate_channel_event:
    nv_drm_free(nv_fence_context);
failed_alloc_fence_context:
    nvKms->unmapMemory(nv_dev->pDevice,
                       pSemSurface,
                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                       (void *) pLinearAddress);
failed_to_map_memory:
    nvKms->freeMemory(nv_dev->pDevice, pSemSurface);
failed:
    return NULL;
}
/*
 * Tear down a fence context: stop event delivery, force-signal everything
 * still pending, release the semaphore surface mapping/import, and free the
 * context object.
 */
static void __nv_drm_fence_context_destroy(
    struct nv_drm_fence_context *nv_fence_context)
{
    struct nv_drm_device *nv_dev = nv_fence_context->nv_dev;
    /*
     * Free the channel event before destroying the fence context, otherwise
     * the event callback would continue to be called.
     */
    nvKms->freeChannelEvent(nv_dev->pDevice, nv_fence_context->cb);
    /* Force signal all pending fences and empty pending list */
    spin_lock(&nv_fence_context->lock);
    nv_drm_gem_prime_force_fence_signal(nv_fence_context);
    spin_unlock(&nv_fence_context->lock);
    /* Free nvkms resources */
    nvKms->unmapMemory(nv_dev->pDevice,
                       nv_fence_context->pSemSurface,
                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                       (void *) nv_fence_context->pLinearAddress);
    nvKms->freeMemory(nv_dev->pDevice, nv_fence_context->pSemSurface);
    nv_drm_free(nv_fence_context);
}
/*
 * Allocate a new fence with sequence number @seqno on the context's
 * timeline and append it to the pending list.
 *
 * Fences are expected to be created with non-decreasing seqno; a smaller
 * seqno is treated as a wrap and force-signals everything pending so no
 * fence gets stuck.
 *
 * Returns the new dma-fence, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static nv_dma_fence_t *__nv_drm_fence_context_create_fence(
    struct nv_drm_fence_context *nv_fence_context,
    unsigned int seqno)
{
    struct nv_drm_prime_fence *nv_fence;
    int ret = 0;
    if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) {
        ret = -ENOMEM;
        goto out;
    }
    spin_lock(&nv_fence_context->lock);
    /*
     * If seqno wrapped, force signal fences to make sure none of them
     * get stuck.
     */
    if (seqno < nv_fence_context->last_seqno) {
        nv_drm_gem_prime_force_fence_signal(nv_fence_context);
    }
    INIT_LIST_HEAD(&nv_fence->list_entry);
    spin_lock_init(&nv_fence->lock);
    nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
                      &nv_fence->lock, nv_fence_context->context,
                      seqno);
    /* The pending list holds the initial reference to the fence. */
    list_add_tail(&nv_fence->list_entry, &nv_fence_context->pending);
    nv_fence_context->last_seqno = seqno;
    spin_unlock(&nv_fence_context->lock);
out:
    return ret != 0 ? ERR_PTR(ret) : &nv_fence->base;
}
int nv_drm_fence_supported_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
return nv_dev->pDevice ? 0 : -EINVAL;
}
/* GEM wrapper exposing a fence context to userspace via a GEM handle. */
struct nv_drm_gem_fence_context {
    struct nv_drm_gem_object base;
    struct nv_drm_fence_context *nv_fence_context;
};
/*
 * Convert a GEM object pointer to its containing fence-context wrapper;
 * passes NULL through unchanged.
 */
static inline struct nv_drm_gem_fence_context *to_gem_fence_context(
    struct nv_drm_gem_object *nv_gem)
{
    if (nv_gem == NULL) {
        return NULL;
    }

    return container_of(nv_gem, struct nv_drm_gem_fence_context, base);
}
/*
 * GEM free callback for fence-context objects.
 *
 * Tear-down of a 'struct nv_drm_gem_fence_context' must not happen from a
 * worker thread: the tear-down sequence flushes all existing worker
 * threads, which would deadlock.
 */
static void __nv_drm_gem_fence_context_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_fence_context *nv_gem_fence_context =
        to_gem_fence_context(nv_gem);
    __nv_drm_fence_context_destroy(nv_gem_fence_context->nv_fence_context);
    nv_drm_free(nv_gem_fence_context);
}
/* GEM object ops for fence-context objects; also used as a type tag by
 * __nv_drm_gem_object_fence_context_lookup(). */
const struct nv_drm_gem_object_funcs nv_gem_fence_context_ops = {
    .free = __nv_drm_gem_fence_context_free,
};
/*
 * Look up a GEM handle and verify it is a fence-context object (by
 * comparing its ops table). Returns NULL on lookup failure or type
 * mismatch; on mismatch the looked-up reference is dropped. On success the
 * caller owns a reference and must release it.
 */
static inline
struct nv_drm_gem_fence_context *__nv_drm_gem_object_fence_context_lookup(
    struct drm_device *dev,
    struct drm_file *filp,
    u32 handle)
{
    struct nv_drm_gem_object *nv_gem =
            nv_drm_gem_object_lookup(dev, filp, handle);
    if (nv_gem != NULL && nv_gem->ops != &nv_gem_fence_context_ops) {
        nv_drm_gem_object_unreference_unlocked(nv_gem);
        return NULL;
    }
    return to_gem_fence_context(nv_gem);
}
/*
 * DRM_IOCTL_NVIDIA_FENCE_CONTEXT_CREATE: create a fence context, wrap it in
 * a GEM object, and return a handle for it in p->handle.
 *
 * NOTE(review): every failure path here returns -ENOMEM, including failure
 * of __nv_drm_fence_context_new(), which can also fail for non-memory
 * reasons (import/map/event allocation) — confirm whether a more specific
 * error code is warranted.
 */
int nv_drm_fence_context_create_ioctl(struct drm_device *dev,
                                      void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_fence_context_create_params *p = data;
    struct nv_drm_gem_fence_context *nv_gem_fence_context = NULL;
    if ((nv_gem_fence_context = nv_drm_calloc(
                1,
                sizeof(struct nv_drm_gem_fence_context))) == NULL) {
        goto done;
    }
    if ((nv_gem_fence_context->nv_fence_context =
            __nv_drm_fence_context_new(nv_dev, p)) == NULL) {
        goto fence_context_new_failed;
    }
    nv_drm_gem_object_init(nv_dev,
                           &nv_gem_fence_context->base,
                           &nv_gem_fence_context_ops,
                           0 /* size */,
                           NULL /* pMemory */);
    /* Hands the GEM reference over to the returned userspace handle. */
    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_gem_fence_context->base,
                                                   &p->handle);
fence_context_new_failed:
    nv_drm_free(nv_gem_fence_context);
done:
    return -ENOMEM;
}
/*
 * DRM_IOCTL_NVIDIA_GEM_FENCE_ATTACH: create a fence with threshold
 * p->sem_thresh on the fence context identified by p->fence_context_handle,
 * and attach it as the exclusive fence of the GEM object p->handle.
 *
 * Returns 0 on success or a negative error code; both GEM references taken
 * here are dropped before returning.
 */
int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev,
                                  void *data, struct drm_file *filep)
{
    int ret = -EINVAL;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_fence_attach_params *p = data;
    struct nv_drm_gem_object *nv_gem;
    struct nv_drm_gem_fence_context *nv_gem_fence_context;
    nv_dma_fence_t *fence;
    nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
    if (!nv_gem) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for fence attach: 0x%08x",
            p->handle);
        goto done;
    }
    if((nv_gem_fence_context = __nv_drm_gem_object_fence_context_lookup(
                nv_dev->dev,
                filep,
                p->fence_context_handle)) == NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for fence context: 0x%08x",
            p->fence_context_handle);
        goto fence_context_lookup_failed;
    }
    if (IS_ERR(fence = __nv_drm_fence_context_create_fence(
                    nv_gem_fence_context->nv_fence_context,
                    p->sem_thresh))) {
        ret = PTR_ERR(fence);
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to allocate fence: 0x%08x", p->handle);
        goto fence_context_create_fence_failed;
    }
    /* Attach as the exclusive (write) fence of the target GEM object. */
    nv_dma_resv_add_excl_fence(&nv_gem->resv, fence);
    ret = 0;
fence_context_create_fence_failed:
    nv_drm_gem_object_unreference_unlocked(&nv_gem_fence_context->base);
fence_context_lookup_failed:
    nv_drm_gem_object_unreference_unlocked(nv_gem);
done:
    return ret;
}
#endif /* NV_DRM_FENCE_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_PRIME_FENCE_H__
#define __NVIDIA_DRM_PRIME_FENCE_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_AVAILABLE)
struct drm_file;
struct drm_device;
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_fence_supported_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_fence_context_create_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif /* NV_DRM_FENCE_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_PRIME_FENCE_H__ */

View File

@@ -0,0 +1,139 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_PRIV_H__
#define __NVIDIA_DRM_PRIV_H__
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
#include <drm/drm_device.h>
#endif
#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#include "nvidia-drm-os-interface.h"
#include "nvkms-kapi.h"
/* Driver-tagged logging wrappers around the core DRM log macros. */
#define NV_DRM_LOG_ERR(__fmt, ...) \
    DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#define NV_DRM_LOG_INFO(__fmt, ...) \
    DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
/* Per-device variants prefixing the message with the GPU ID. */
#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
    NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
    NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_WARN(__condition) WARN_ON((__condition))
#define NV_DRM_DEBUG_DRIVER(__fmt, ...) \
    DRM_DEBUG_DRIVER("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#define NV_DRM_DEV_DEBUG_DRIVER(__dev, __fmt, ...) \
    DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
                     __dev->gpu_info.gpu_id, ##__VA_ARGS__)
/* Per-GPU driver state linking a DRM device to its NVKMS device. */
struct nv_drm_device {
    nv_gpu_info_t gpu_info;
    struct drm_device *dev;
    struct NvKmsKapiDevice *pDevice;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    /*
     * Lock to protect the drm-subsystem and fields of this structure
     * from concurrent access.
     *
     * Do not take this lock while some lock from the core drm-subsystem
     * is already held; the locking order must be -
     *
     *    mutex_lock(nv_drm_device::lock);
     *    ....
     *    mutex_lock(drm_device::mode_config::lock);
     *    ....
     *    .......
     *    mutex_unlock(drm_device::mode_config::lock);
     *    ........
     *    ..
     *    mutex_lock(drm_device::struct_mutex);
     *    ....
     *    ........
     *    mutex_unlock(drm_device::struct_mutex);
     *    ..
     *    mutex_unlock(nv_drm_device::lock);
     */
    struct mutex lock;
    /* Surface parameters reported by NVKMS — presumably used for
     * framebuffer validation/modifier setup; confirm at the query site. */
    NvU32 pitchAlignment;
    NvU8 genericPageKind;
    NvU8 pageKindGeneration;
    NvU8 sectorLayout;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
    NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
#endif
    atomic_t enable_event_handling;
    /**
     * @flip_event_wq:
     *
     * The wait queue on which nv_drm_atomic_commit_internal() sleeps until
     * the next flip event occurs.
     */
    wait_queue_head_t flip_event_wq;
#endif
    NvBool hasVideoMemory;
    NvBool supportsSyncpts;
    struct drm_property *nv_out_fence_property;
    /* Intrusive singly-linked list of registered nv_drm_device instances. */
    struct nv_drm_device *next;
};
/* Retrieve the nv_drm_device stored in drm_device::dev_private. */
static inline struct nv_drm_device *to_nv_device(
    struct drm_device *dev)
{
    return dev->dev_private;
}
extern const struct NvKmsKapiFunctionsTable* const nvKms;
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_PRIV_H__ */

View File

@@ -0,0 +1,231 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_PLANE_H_PRESENT)
#include <drm/drm_plane.h>
#endif
#include <drm/drm_modes.h>
#include <uapi/drm/drm_fourcc.h>
#include "nvidia-drm-priv.h"
#include "nvidia-drm-utils.h"
/*
 * Allocate and populate an NvKmsKapiConnectorInfo for @hConnector.
 *
 * Returns the info on success; ERR_PTR(-ENOMEM) if allocation fails,
 * ERR_PTR(-EINVAL) if the NVKMS query fails. The caller owns the returned
 * allocation and must release it with nv_drm_free().
 */
struct NvKmsKapiConnectorInfo*
nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice,
                         NvKmsKapiConnector hConnector)
{
    struct NvKmsKapiConnectorInfo *connectorInfo;

    connectorInfo = nv_drm_calloc(1, sizeof(*connectorInfo));
    if (connectorInfo == NULL) {
        return ERR_PTR(-ENOMEM);
    }

    if (nvKms->getConnectorInfo(pDevice, hConnector, connectorInfo)) {
        return connectorInfo;
    }

    nv_drm_free(connectorInfo);
    return ERR_PTR(-EINVAL);
}
/*
 * Map an NVKMS connector signal format to the corresponding DRM encoder
 * type. Unknown/unhandled formats map to DRM_MODE_ENCODER_NONE.
 */
int
nvkms_connector_signal_to_drm_encoder_signal(NvKmsConnectorSignalFormat format)
{
    if (format == NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS ||
        format == NVKMS_CONNECTOR_SIGNAL_FORMAT_DP) {
        return DRM_MODE_ENCODER_TMDS;
    }

    if (format == NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS) {
        return DRM_MODE_ENCODER_LVDS;
    }

    if (format == NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA) {
        return DRM_MODE_ENCODER_DAC;
    }

    if (format == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) {
        return DRM_MODE_ENCODER_DSI;
    }

    /* NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN and anything unrecognized. */
    return DRM_MODE_ENCODER_NONE;
}
/*
 * Map an NVKMS connector type to the corresponding DRM connector type.
 * @internal selects eDP over DisplayPort for internal DP panels.
 * Unknown/unhandled types map to DRM_MODE_CONNECTOR_Unknown.
 */
int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type,
                                               NvBool internal)
{
    if (type == NVKMS_CONNECTOR_TYPE_DP) {
        return internal ?
               DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort;
    }

    if (type == NVKMS_CONNECTOR_TYPE_HDMI) {
        return DRM_MODE_CONNECTOR_HDMIA;
    }

    if (type == NVKMS_CONNECTOR_TYPE_DVI_D) {
        return DRM_MODE_CONNECTOR_DVID;
    }

    if (type == NVKMS_CONNECTOR_TYPE_DVI_I) {
        return DRM_MODE_CONNECTOR_DVII;
    }

    if (type == NVKMS_CONNECTOR_TYPE_LVDS) {
        return DRM_MODE_CONNECTOR_LVDS;
    }

    if (type == NVKMS_CONNECTOR_TYPE_VGA) {
        return DRM_MODE_CONNECTOR_VGA;
    }

    if (type == NVKMS_CONNECTOR_TYPE_DSI) {
        return DRM_MODE_CONNECTOR_DSI;
    }

    if (type == NVKMS_CONNECTOR_TYPE_DP_SERIALIZER) {
        return DRM_MODE_CONNECTOR_DisplayPort;
    }

    /* NVKMS_CONNECTOR_TYPE_UNKNOWN and anything unrecognized. */
    return DRM_MODE_CONNECTOR_Unknown;
}
/*
 * Translate an NvKmsKapiDisplayMode into a drm_display_mode.
 *
 * NVKMS stores refreshRate in millihertz and pixelClockHz in Hz; DRM wants
 * vrefresh in Hz and clock in kHz, hence the rounded /1000 conversions.
 */
void
nvkms_display_mode_to_drm_mode(const struct NvKmsKapiDisplayMode *displayMode,
                               struct drm_display_mode *mode)
{
#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH)
    mode->vrefresh    = (displayMode->timings.refreshRate + 500) / 1000; /* In Hz */
#endif
    mode->clock       = (displayMode->timings.pixelClockHz + 500) / 1000; /* In kHz */
    mode->hdisplay    = displayMode->timings.hVisible;
    mode->hsync_start = displayMode->timings.hSyncStart;
    mode->hsync_end   = displayMode->timings.hSyncEnd;
    mode->htotal      = displayMode->timings.hTotal;
    mode->hskew       = displayMode->timings.hSkew;
    mode->vdisplay    = displayMode->timings.vVisible;
    mode->vsync_start = displayMode->timings.vSyncStart;
    mode->vsync_end   = displayMode->timings.vSyncEnd;
    mode->vtotal      = displayMode->timings.vTotal;
    if (displayMode->timings.flags.interlaced) {
        mode->flags |= DRM_MODE_FLAG_INTERLACE;
    }
    if (displayMode->timings.flags.doubleScan) {
        mode->flags |= DRM_MODE_FLAG_DBLSCAN;
    }
    if (displayMode->timings.flags.hSyncPos) {
        mode->flags |= DRM_MODE_FLAG_PHSYNC;
    }
    if (displayMode->timings.flags.hSyncNeg) {
        mode->flags |= DRM_MODE_FLAG_NHSYNC;
    }
    if (displayMode->timings.flags.vSyncPos) {
        mode->flags |= DRM_MODE_FLAG_PVSYNC;
    }
    if (displayMode->timings.flags.vSyncNeg) {
        mode->flags |= DRM_MODE_FLAG_NVSYNC;
    }
    mode->width_mm  = displayMode->timings.widthMM;
    mode->height_mm = displayMode->timings.heightMM;
    /* Copy the NVKMS-provided name if any, always NUL-terminating;
     * otherwise let DRM synthesize a name from the resolution. */
    if (strlen(displayMode->name) != 0) {
        memcpy(
            mode->name, displayMode->name,
            min(sizeof(mode->name), sizeof(displayMode->name)));
        mode->name[sizeof(mode->name) - 1] = '\0';
    } else {
        drm_mode_set_name(mode);
    }
}
/*
 * Translate a DRM mode into the equivalent NVKMS mode description.
 *
 * The inverse of nvkms_display_mode_to_drm_mode(): DRM's Hz refresh
 * rate and kHz pixel clock are scaled up to NVKMS's mHz/Hz units, and
 * timing fields are copied one-for-one.
 */
void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src,
                                    struct NvKmsKapiDisplayMode *dst)
{
#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH)
    dst->timings.refreshRate  = src->vrefresh * 1000;          /* Hz -> mHz */
#else
    dst->timings.refreshRate  = drm_mode_vrefresh(src) * 1000; /* Hz -> mHz */
#endif

    dst->timings.pixelClockHz = src->clock * 1000;             /* kHz -> Hz */

    dst->timings.hVisible   = src->hdisplay;
    dst->timings.hSyncStart = src->hsync_start;
    dst->timings.hSyncEnd   = src->hsync_end;
    dst->timings.hTotal     = src->htotal;
    dst->timings.hSkew      = src->hskew;

    dst->timings.vVisible   = src->vdisplay;
    dst->timings.vSyncStart = src->vsync_start;
    dst->timings.vSyncEnd   = src->vsync_end;
    dst->timings.vTotal     = src->vtotal;

    /* Derive each NVKMS boolean flag directly from the DRM flag bit. */
    dst->timings.flags.interlaced =
        (src->flags & DRM_MODE_FLAG_INTERLACE) ? NV_TRUE : NV_FALSE;
    dst->timings.flags.doubleScan =
        (src->flags & DRM_MODE_FLAG_DBLSCAN) ? NV_TRUE : NV_FALSE;
    dst->timings.flags.hSyncPos =
        (src->flags & DRM_MODE_FLAG_PHSYNC) ? NV_TRUE : NV_FALSE;
    dst->timings.flags.hSyncNeg =
        (src->flags & DRM_MODE_FLAG_NHSYNC) ? NV_TRUE : NV_FALSE;
    dst->timings.flags.vSyncPos =
        (src->flags & DRM_MODE_FLAG_PVSYNC) ? NV_TRUE : NV_FALSE;
    dst->timings.flags.vSyncNeg =
        (src->flags & DRM_MODE_FLAG_NVSYNC) ? NV_TRUE : NV_FALSE;

    dst->timings.widthMM  = src->width_mm;
    dst->timings.heightMM = src->height_mm;

    memcpy(dst->name, src->name, min(sizeof(dst->name), sizeof(src->name)));
    /*
     * Guarantee NUL termination, mirroring the DRM-direction converter:
     * if src->name fills dst->name exactly, the memcpy alone would leave
     * dst->name unterminated.
     */
    dst->name[sizeof(dst->name) - 1] = '\0';
}
#endif

View File

@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Helpers that convert between NVKMS KAPI descriptions (connectors,
 * display modes) and their DRM equivalents.  Only meaningful when
 * atomic modesetting support is available.
 */

#ifndef __NVIDIA_DRM_UTILS_H__
#define __NVIDIA_DRM_UTILS_H__

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

#include "nvkms-kapi.h"

enum drm_plane_type;
struct drm_display_mode;

/*
 * Query NVKMS for information about the given connector handle.
 * NOTE(review): ownership of the returned object (caller-freed vs.
 * callee-owned) is not visible here -- confirm against the definition
 * in nvidia-drm-utils.c before relying on it.
 */
struct NvKmsKapiConnectorInfo*
nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice,
NvKmsKapiConnector hConnector);

/* Map an NVKMS connector signal format to a DRM encoder signal type. */
int nvkms_connector_signal_to_drm_encoder_signal(
NvKmsConnectorSignalFormat format);

/*
 * Map an NVKMS connector type to a DRM_MODE_CONNECTOR_* value;
 * 'internal' distinguishes internal panels (e.g. eDP vs. DP).
 */
int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type,
NvBool internal);

/* Fill a DRM display mode from an NVKMS mode description. */
void nvkms_display_mode_to_drm_mode(
const struct NvKmsKapiDisplayMode *displayMode,
struct drm_display_mode *mode);

/* Fill an NVKMS mode description from a DRM display mode (inverse). */
void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src,
struct NvKmsKapiDisplayMode *dst);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#endif /* __NVIDIA_DRM_UTILS_H__ */

View File

@@ -0,0 +1,117 @@
###########################################################################
# Kbuild fragment for nvidia-drm.ko
###########################################################################

#
# Define NVIDIA_DRM_{SOURCES,OBJECTS}
#

NVIDIA_DRM_SOURCES =
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-prime-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c

# One object per source file; all of them are linked into nvidia-drm.ko.
NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES))

obj-m += nvidia-drm.o
nvidia-drm-y := $(NVIDIA_DRM_OBJECTS)

NVIDIA_DRM_KO = nvidia-drm/nvidia-drm.ko

NV_KERNEL_MODULE_TARGETS += $(NVIDIA_DRM_KO)

#
# Define nvidia-drm.ko-specific CFLAGS.
#

NVIDIA_DRM_CFLAGS += -I$(src)/nvidia-drm
NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0

$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS))

#
# Register the conftests needed by nvidia-drm.ko
#
# The conftest results feed generated nvidia-drm-conftest.h macros; the
# objects are marked as depending on them so a changed result rebuilds them.
#

NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS)

# Whole-feature availability checks (is DRM / atomic KMS present at all?).
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available

# Function-presence checks: does the target kernel provide this function?
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack

# Type/signature checks: does a struct field or prototype take this form?
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm.h"

#if defined(NV_DRM_AVAILABLE)

#include "nvidia-drm-priv.h"
#include "nvidia-drm-drv.h"

/*
 * Storage for the NVKMS KAPI function table.  versionString is
 * pre-filled with this module's version so nvKmsKapiGetFunctionsTable()
 * can reject a mismatched nvidia-modeset.ko (see nv_drm_init()'s check).
 */
static struct NvKmsKapiFunctionsTable nvKmsFuncsTable = {
    .versionString = NV_VERSION_STRING,
};

/* Read-only view of the table exported to the rest of nvidia-drm. */
const struct NvKmsKapiFunctionsTable* const nvKms = &nvKmsFuncsTable;

#endif
/*
 * Module-wide init: obtain nvidia-modeset's KAPI function table and
 * probe for devices.  Returns 0 on success or a negative errno; builds
 * without DRM support compile to a no-op that reports success.
 */
int nv_drm_init(void)
{
#if defined(NV_DRM_AVAILABLE)
    if (nvKmsKapiGetFunctionsTable(&nvKmsFuncsTable)) {
        return nv_drm_probe_devices();
    }

    /* Table lookup failed: nvidia-modeset.ko was built from a different
     * driver version than this module. */
    NV_DRM_LOG_ERR(
        "Version mismatch: nvidia-modeset.ko(%s) nvidia-drm.ko(%s)",
        nvKmsFuncsTable.versionString, NV_VERSION_STRING);

    return -EINVAL;
#else
    return 0;
#endif
}
/*
 * Module-wide teardown: remove the devices created by
 * nv_drm_probe_devices() in nv_drm_init().  No-op when DRM support was
 * compiled out.
 */
void nv_drm_exit(void)
{
#if defined(NV_DRM_AVAILABLE)
    nv_drm_remove_devices();
#endif
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DRM_H__
#define __NVIDIA_DRM_H__

#include "nvidia-drm-conftest.h"

/*
 * Init/teardown entry points for the nvidia-drm component.
 * NOTE(review): presumably invoked from the module's load/unload
 * handlers -- confirm against the module_init/module_exit call sites.
 */
int nv_drm_init(void);
void nv_drm_exit(void);

#endif /* __NVIDIA_DRM_H__ */