545.23.06

commit b5bf85a8e3
parent f59818b751
Author: Andy Ritger
Date: 2023-10-17 09:25:29 -07:00

917 changed files with 132480 additions and 110015 deletions


@@ -0,0 +1,334 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-kthread-q.h"
#include "nv-list-helpers.h"
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
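//
// A minimal usage sketch (illustrative only; the queue, item, and callback
// names below are hypothetical, but the calls are the public nv-kthread-q
// API declared in nv-kthread-q.h):
//
//     static nv_kthread_q_t example_q;
//     static nv_kthread_q_item_t example_item;
//
//     static void example_callback(void *args)
//     {
//         // Runs in the context of example_q's kthread.
//     }
//
//     static int example_setup(void)
//     {
//         int ret = nv_kthread_q_init(&example_q, "example_q");
//         if (ret != 0)
//             return ret;
//
//         nv_kthread_q_item_init(&example_item, example_callback, NULL);
//
//         // Non-zero if scheduled, 0 if the item was already pending.
//         nv_kthread_q_schedule_q_item(&example_q, &example_item);
//
//         nv_kthread_q_flush(&example_q); // wait for queued items to run
//         nv_kthread_q_stop(&example_q);  // flushes again, then stops the kthread
//         return 0;
//     }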
#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
printk(KERN_ERR format); \
unlikely(__ret_warn_on); \
})
#endif
#define NVQ_WARN(fmt, ...) \
do { \
if (in_interrupt()) { \
WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
##__VA_ARGS__); \
} \
else { \
WARN(1, "nv_kthread_q: task: %s: " fmt, \
current->comm, \
##__VA_ARGS__); \
} \
} while (0)
static int _main_loop(void *args)
{
nv_kthread_q_t *q = (nv_kthread_q_t *)args;
nv_kthread_q_item_t *q_item = NULL;
unsigned long flags;
while (1) {
// Normally this thread is never interrupted. However,
// down_interruptible (instead of down) is called here,
// in order to avoid being classified as a potentially
// hung task by the kernel watchdog.
while (down_interruptible(&q->q_sem))
NVQ_WARN("Interrupted during semaphore wait\n");
if (atomic_read(&q->main_loop_should_exit))
break;
spin_lock_irqsave(&q->q_lock, flags);
// The q_sem semaphore prevents us from getting here unless there is
// at least one item in the list, so an empty list indicates a bug.
if (unlikely(list_empty(&q->q_list_head))) {
spin_unlock_irqrestore(&q->q_lock, flags);
NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
continue;
}
// Consume one item from the queue
q_item = list_first_entry(&q->q_list_head,
nv_kthread_q_item_t,
q_list_node);
list_del_init(&q_item->q_list_node);
spin_unlock_irqrestore(&q->q_lock, flags);
// Run the item
q_item->function_to_run(q_item->function_args);
// Make debugging a little simpler by clearing this between runs:
q_item = NULL;
}
while (!kthread_should_stop())
schedule();
return 0;
}
void nv_kthread_q_stop(nv_kthread_q_t *q)
{
// check if queue has been properly initialized
if (unlikely(!q->q_kthread))
return;
nv_kthread_q_flush(q);
// If this assertion fires, then a caller likely either broke the API rules
// by adding items after calling nv_kthread_q_stop, or failed to adequately
// flush self-rescheduling q_items.
if (unlikely(!list_empty(&q->q_list_head)))
NVQ_WARN("list not empty after flushing\n");
if (likely(!atomic_read(&q->main_loop_should_exit))) {
atomic_set(&q->main_loop_should_exit, 1);
// Wake up the kthread so that it can see that it needs to stop:
up(&q->q_sem);
kthread_stop(q->q_kthread);
q->q_kthread = NULL;
}
}
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads:
// - If the first thread's stack is resident on the preferred node, return
//   this thread.
// - Otherwise, create a second thread. If its stack is resident on the
//   preferred node, stop the first thread and return this one.
// - Otherwise, create a third thread. The stack allocator does not find a
//   cached stack, and so falls back to vmalloc, which takes the NUMA hint into
//   consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
nv_kthread_q_t *q,
int preferred_node,
const char *q_name)
{
unsigned i, j;
const static unsigned attempts = 3;
struct task_struct *thread[3];
for (i = 0;; i++) {
struct page *stack;
thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);
if (unlikely(IS_ERR(thread[i]))) {
// Instead of failing, pick the previous thread, even if its
// stack is not allocated on the preferred node.
if (i > 0)
i--;
break;
}
// vmalloc is not used to allocate the stack, so simply return the
// thread, even if its stack may not be allocated on the preferred node
if (!is_vmalloc_addr(thread[i]->stack))
break;
// Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node
if ((i == (attempts - 1)))
break;
// Get the NUMA node where the first page of the stack is resident. If
// it is the preferred node, select this thread.
stack = vmalloc_to_page(thread[i]->stack);
if (page_to_nid(stack) == preferred_node)
break;
}
for (j = i; j > 0; j--)
kthread_stop(thread[j - 1]);
return thread[i];
}
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
memset(q, 0, sizeof(*q));
INIT_LIST_HEAD(&q->q_list_head);
spin_lock_init(&q->q_lock);
sema_init(&q->q_sem, 0);
if (preferred_node == NV_KTHREAD_NO_NODE) {
q->q_kthread = kthread_create(_main_loop, q, q_name);
}
else {
q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
}
if (IS_ERR(q->q_kthread)) {
int err = PTR_ERR(q->q_kthread);
// Clear q_kthread before returning so that nv_kthread_q_stop() can be
// safely called on it, making error handling easier.
q->q_kthread = NULL;
return err;
}
wake_up_process(q->q_kthread);
return 0;
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&q->q_lock, flags);
if (likely(list_empty(&q_item->q_list_node)))
list_add_tail(&q_item->q_list_node, &q->q_list_head);
else
ret = 0;
spin_unlock_irqrestore(&q->q_lock, flags);
if (likely(ret))
up(&q->q_sem);
return ret;
}
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
nv_q_func_t function_to_run,
void *function_args)
{
INIT_LIST_HEAD(&q_item->q_list_node);
q_item->function_to_run = function_to_run;
q_item->function_args = function_args;
}
// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
nv_kthread_q_item_t *q_item)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
"called with a non-alive q: 0x%p\n", q);
return 0;
}
return _raw_q_schedule(q, q_item);
}
static void _q_flush_function(void *args)
{
struct completion *completion = (struct completion *)args;
complete(completion);
}
static void _raw_q_flush(nv_kthread_q_t *q)
{
nv_kthread_q_item_t q_item;
DECLARE_COMPLETION_ONSTACK(completion);
nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);
_raw_q_schedule(q, &q_item);
// Wait for the flush item to run. Once it has run, then all of the
// previously queued items in front of it will have run, so that means
// the flush is complete.
wait_for_completion(&completion);
}
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
"nv_kthread_q_stop. q: 0x%p\n", q);
return;
}
// This 2x flush is not a typing mistake. The queue really does have to be
// flushed twice, in order to take care of the case of a q_item that
// reschedules itself.
_raw_q_flush(q);
_raw_q_flush(q);
}
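//
// Illustrative sketch (hypothetical names) of the self-rescheduling q_item
// case that the double flush above is designed to handle: the first flush may
// run the callback, which re-adds the item behind the flush marker, and the
// second flush then waits for that rescheduled instance to finish.
//
//     struct self_resched_ctx {
//         nv_kthread_q_t *q;
//         nv_kthread_q_item_t item;
//         int remaining;
//     };
//
//     static void self_resched_callback(void *args)
//     {
//         struct self_resched_ctx *ctx = args;
//
//         if (ctx->remaining-- > 0)
//             nv_kthread_q_schedule_q_item(ctx->q, &ctx->item);
//     }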


@@ -43,9 +43,13 @@
#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
typedef struct fence_cb nv_dma_fence_cb_t;
typedef fence_func_t nv_dma_fence_func_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
typedef struct dma_fence_cb nv_dma_fence_cb_t;
typedef dma_fence_func_t nv_dma_fence_func_t;
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
@@ -97,6 +101,14 @@ static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#endif
}
static inline int nv_dma_fence_signal_locked(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal_locked(fence);
#else
return dma_fence_signal_locked(fence);
#endif
}
static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_context_alloc(num);
@@ -108,7 +120,7 @@ static inline u64 nv_dma_fence_context_alloc(unsigned num) {
static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
const nv_dma_fence_ops_t *ops,
spinlock_t *lock, u64 context, unsigned seqno) {
spinlock_t *lock, u64 context, uint64_t seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_init(fence, ops, lock, context, seqno);
#else
@@ -116,6 +128,29 @@ nv_dma_fence_init(nv_dma_fence_t *fence,
#endif
}
static inline void
nv_dma_fence_set_error(nv_dma_fence_t *fence,
int error) {
#if defined(NV_DMA_FENCE_SET_ERROR_PRESENT)
return dma_fence_set_error(fence, error);
#elif defined(NV_FENCE_SET_ERROR_PRESENT)
return fence_set_error(fence, error);
#else
fence->status = error;
#endif
}
static inline int
nv_dma_fence_add_callback(nv_dma_fence_t *fence,
nv_dma_fence_cb_t *cb,
nv_dma_fence_func_t func) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_add_callback(fence, cb, func);
#else
return dma_fence_add_callback(fence, cb, func);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */


@@ -121,6 +121,20 @@ static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
#endif
}
static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
#else
dma_resv_add_shared_fence(obj, fence);
#endif
#else
reservation_object_add_shared_fence(obj, fence);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */


@@ -61,4 +61,15 @@
#undef NV_DRM_FENCE_AVAILABLE
#endif
/*
* We can support color management if either drm_helper_crtc_enable_color_mgmt()
* or drm_crtc_enable_color_mgmt() exist.
*/
#if defined(NV_DRM_HELPER_CRTC_ENABLE_COLOR_MGMT_PRESENT) || \
defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
#define NV_DRM_COLOR_MGMT_AVAILABLE
#else
#undef NV_DRM_COLOR_MGMT_AVAILABLE
#endif
#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */


@@ -349,10 +349,125 @@ nv_drm_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
static const NvU32 __nv_drm_connector_supported_colorspaces =
BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
#endif
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
static int
__nv_drm_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *new_connector_state =
drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_connector_state =
drm_atomic_get_old_connector_state(state, connector);
struct nv_drm_device *nv_dev = to_nv_device(connector->dev);
struct drm_crtc *crtc = new_connector_state->crtc;
struct drm_crtc_state *crtc_state;
struct nv_drm_crtc_state *nv_crtc_state;
struct NvKmsKapiHeadRequestedConfig *req_config;
if (!crtc) {
return 0;
}
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
nv_crtc_state = to_nv_crtc_state(crtc_state);
req_config = &nv_crtc_state->req_config;
/*
* Override metadata for the entire head instead of allowing NVKMS to derive
* it from the layers' metadata.
*
* This is the metadata that will be sent to the display, and if applicable,
* layers will be tone mapped to this metadata rather than that of the
* display.
*/
req_config->flags.hdrInfoFrameChanged =
!drm_connector_atomic_hdr_metadata_equal(old_connector_state,
new_connector_state);
if (new_connector_state->hdr_output_metadata &&
new_connector_state->hdr_output_metadata->data) {
/*
* Note that HDMI definitions are used here even though we might not
* be using HDMI. While that seems odd, it is consistent with
* upstream behavior.
*/
struct hdr_output_metadata *hdr_metadata =
new_connector_state->hdr_output_metadata->data;
struct hdr_metadata_infoframe *info_frame =
&hdr_metadata->hdmi_metadata_type1;
unsigned int i;
if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) {
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i++) {
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].x =
info_frame->display_primaries[i].x;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].y =
info_frame->display_primaries[i].y;
}
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.x =
info_frame->white_point.x;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.y =
info_frame->white_point.y;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxDisplayMasteringLuminance =
info_frame->max_display_mastering_luminance;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.minDisplayMasteringLuminance =
info_frame->min_display_mastering_luminance;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxCLL =
info_frame->max_cll;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxFALL =
info_frame->max_fall;
req_config->modeSetConfig.hdrInfoFrame.eotf = info_frame->eotf;
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_TRUE;
} else {
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_FALSE;
}
req_config->flags.colorimetryChanged =
(old_connector_state->colorspace != new_connector_state->colorspace);
// When adding a case here, also add to __nv_drm_connector_supported_colorspaces
switch (new_connector_state->colorspace) {
case DRM_MODE_COLORIMETRY_DEFAULT:
req_config->modeSetConfig.colorimetry =
NVKMS_OUTPUT_COLORIMETRY_DEFAULT;
break;
case DRM_MODE_COLORIMETRY_BT2020_RGB:
case DRM_MODE_COLORIMETRY_BT2020_YCC:
// Ignore RGB/YCC
// See https://patchwork.freedesktop.org/patch/525496/?series=111865&rev=4
req_config->modeSetConfig.colorimetry =
NVKMS_OUTPUT_COLORIMETRY_BT2100;
break;
default:
// XXX HDR TODO: Add support for more color spaces
NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported color space");
return -EINVAL;
}
return 0;
}
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
.get_modes = nv_drm_connector_get_modes,
.mode_valid = nv_drm_connector_mode_valid,
.best_encoder = nv_drm_connector_best_encoder,
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
.atomic_check = __nv_drm_connector_atomic_check,
#endif
};
static struct drm_connector*
@@ -405,6 +520,32 @@ nv_drm_connector_new(struct drm_device *dev,
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_HDMI) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
if (drm_mode_create_hdmi_colorspace_property(
&nv_connector->base,
__nv_drm_connector_supported_colorspaces) == 0) {
#else
if (drm_mode_create_hdmi_colorspace_property(&nv_connector->base) == 0) {
#endif
drm_connector_attach_colorspace_property(&nv_connector->base);
}
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
} else if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DP) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
if (drm_mode_create_dp_colorspace_property(
&nv_connector->base,
__nv_drm_connector_supported_colorspaces) == 0) {
#else
if (drm_mode_create_dp_colorspace_property(&nv_connector->base) == 0) {
#endif
drm_connector_attach_colorspace_property(&nv_connector->base);
}
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
}
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
/* Register connector with DRM subsystem */
ret = drm_connector_register(&nv_connector->base);


@@ -48,6 +48,11 @@
#include <linux/host1x-next.h>
#endif
#if defined(NV_DRM_DRM_COLOR_MGMT_H_PRESENT)
#include <drm/drm_color_mgmt.h>
#endif
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
static int
nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
@@ -399,27 +404,25 @@ plane_req_config_update(struct drm_plane *plane,
}
for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i ++) {
req_config->config.hdrMetadata.displayPrimaries[i].x =
req_config->config.hdrMetadata.val.displayPrimaries[i].x =
info_frame->display_primaries[i].x;
req_config->config.hdrMetadata.displayPrimaries[i].y =
req_config->config.hdrMetadata.val.displayPrimaries[i].y =
info_frame->display_primaries[i].y;
}
req_config->config.hdrMetadata.whitePoint.x =
req_config->config.hdrMetadata.val.whitePoint.x =
info_frame->white_point.x;
req_config->config.hdrMetadata.whitePoint.y =
req_config->config.hdrMetadata.val.whitePoint.y =
info_frame->white_point.y;
req_config->config.hdrMetadata.maxDisplayMasteringLuminance =
req_config->config.hdrMetadata.val.maxDisplayMasteringLuminance =
info_frame->max_display_mastering_luminance;
req_config->config.hdrMetadata.minDisplayMasteringLuminance =
req_config->config.hdrMetadata.val.minDisplayMasteringLuminance =
info_frame->min_display_mastering_luminance;
req_config->config.hdrMetadata.maxCLL =
req_config->config.hdrMetadata.val.maxCLL =
info_frame->max_cll;
req_config->config.hdrMetadata.maxFALL =
req_config->config.hdrMetadata.val.maxFALL =
info_frame->max_fall;
req_config->config.hdrMetadataSpecified = true;
switch (info_frame->eotf) {
case HDMI_EOTF_SMPTE_ST2084:
req_config->config.tf = NVKMS_OUTPUT_TF_PQ;
@@ -432,10 +435,21 @@ plane_req_config_update(struct drm_plane *plane,
NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported EOTF");
return -1;
}
req_config->config.hdrMetadata.enabled = true;
} else {
req_config->config.hdrMetadataSpecified = false;
req_config->config.hdrMetadata.enabled = false;
req_config->config.tf = NVKMS_OUTPUT_TF_NONE;
}
req_config->flags.hdrMetadataChanged =
((old_config.hdrMetadata.enabled !=
req_config->config.hdrMetadata.enabled) ||
memcmp(&old_config.hdrMetadata.val,
&req_config->config.hdrMetadata.val,
sizeof(struct NvKmsHDRStaticMetadata)));
req_config->flags.tfChanged = (old_config.tf != req_config->config.tf);
#endif
/*
@@ -692,9 +706,11 @@ static inline void __nv_drm_plane_atomic_destroy_state(
#endif
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(state);
drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
{
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(state);
drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
}
#endif
}
@@ -800,6 +816,9 @@ nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
&(to_nv_crtc_state(crtc->state)->req_config),
&nv_state->req_config);
nv_state->ilut_ramps = NULL;
nv_state->olut_ramps = NULL;
return &nv_state->base;
}
@@ -823,6 +842,9 @@ static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
__nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base);
nv_drm_free(nv_state->ilut_ramps);
nv_drm_free(nv_state->olut_ramps);
nv_drm_free(nv_state);
}
@@ -833,6 +855,9 @@ static struct drm_crtc_funcs nv_crtc_funcs = {
.destroy = nv_drm_crtc_destroy,
.atomic_duplicate_state = nv_drm_atomic_crtc_duplicate_state,
.atomic_destroy_state = nv_drm_atomic_crtc_destroy_state,
#if defined(NV_DRM_ATOMIC_HELPER_LEGACY_GAMMA_SET_PRESENT)
.gamma_set = drm_atomic_helper_legacy_gamma_set,
#endif
};
/*
@@ -866,6 +891,198 @@ static int head_modeset_config_attach_connector(
return 0;
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
static int color_mgmt_config_copy_lut(struct NvKmsLutRamps *nvkms_lut,
struct drm_color_lut *drm_lut,
uint64_t lut_len)
{
uint64_t i = 0;
if (lut_len != NVKMS_LUT_ARRAY_SIZE) {
return -EINVAL;
}
/*
* Both NvKms and drm LUT values are 16-bit linear values. NvKms LUT ramps
* are in arrays in a single struct while drm LUT ramps are an array of
* structs.
*/
for (i = 0; i < lut_len; i++) {
nvkms_lut->red[i] = drm_lut[i].red;
nvkms_lut->green[i] = drm_lut[i].green;
nvkms_lut->blue[i] = drm_lut[i].blue;
}
return 0;
}
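/*
 * Layout sketch for the copy above (field names from the kernel's UAPI
 * struct drm_color_lut; the NvKms side is paraphrased from its use here):
 * DRM hands us an array of structs,
 *     struct drm_color_lut { __u16 red, green, blue, reserved; } lut[N];
 * while NvKmsLutRamps holds three parallel 16-bit arrays (red[], green[],
 * blue[]), so the loop transposes array-of-structs into struct-of-arrays.
 */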
static void color_mgmt_config_ctm_to_csc(struct NvKmsCscMatrix *nvkms_csc,
struct drm_color_ctm *drm_ctm)
{
int y;
/* CTM is a 3x3 matrix while ours is 3x4. Zero out the last column. */
nvkms_csc->m[0][3] = nvkms_csc->m[1][3] = nvkms_csc->m[2][3] = 0;
for (y = 0; y < 3; y++) {
int x;
for (x = 0; x < 3; x++) {
/*
* Values in the CTM are encoded in S31.32 sign-magnitude fixed-
* point format, while NvKms CSC values are signed 2's-complement
* S15.16 (Ssign-extend12-3.16?) fixed-point format.
*/
NvU64 ctmVal = drm_ctm->matrix[y*3 + x];
NvU64 signBit = ctmVal & (1ULL << 63);
NvU64 magnitude = ctmVal & ~signBit;
/*
* Drop the low 16 bits of the fractional part and the high 17 bits
* of the integral part. Drop 17 bits to avoid corner cases where
* the highest resulting bit is a 1, causing the `cscVal = -cscVal`
* line to result in a positive number.
*/
NvS32 cscVal = (magnitude >> 16) & ((1ULL << 31) - 1);
if (signBit) {
cscVal = -cscVal;
}
nvkms_csc->m[y][x] = cscVal;
}
}
}
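/*
 * Worked example of the conversion above (illustrative values):
 *   CTM entry  1.5  -> S31.32 word 0x0000000180000000;
 *                      magnitude >> 16 = 0x00018000 =  98304 =  1.5 in S15.16
 *   CTM entry -0.25 -> S31.32 word 0x8000000040000000 (sign bit set);
 *                      magnitude >> 16 = 0x00004000, negated = -0.25 in S15.16
 */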
static int color_mgmt_config_set(struct nv_drm_crtc_state *nv_crtc_state,
struct NvKmsKapiHeadRequestedConfig *req_config)
{
struct NvKmsKapiHeadModeSetConfig *modeset_config =
&req_config->modeSetConfig;
struct drm_crtc_state *crtc_state = &nv_crtc_state->base;
int ret = 0;
struct drm_color_lut *degamma_lut = NULL;
struct drm_color_ctm *ctm = NULL;
struct drm_color_lut *gamma_lut = NULL;
uint64_t degamma_len = 0;
uint64_t gamma_len = 0;
int i;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
/*
* According to the comment in the Linux kernel's
* drivers/gpu/drm/drm_color_mgmt.c, if any of these properties are NULL,
* that LUT or CTM needs to be changed to a linear LUT or identity matrix
* respectively.
*/
req_config->flags.lutChanged = NV_TRUE;
if (crtc_state->degamma_lut) {
nv_crtc_state->ilut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->ilut_ramps));
if (!nv_crtc_state->ilut_ramps) {
ret = -ENOMEM;
goto fail;
}
degamma_lut = (struct drm_color_lut *)crtc_state->degamma_lut->data;
degamma_len = crtc_state->degamma_lut->length /
sizeof(struct drm_color_lut);
if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->ilut_ramps,
degamma_lut,
degamma_len)) != 0) {
goto fail;
}
modeset_config->lut.input.specified = NV_TRUE;
modeset_config->lut.input.depth = 30; /* specify the full LUT */
modeset_config->lut.input.start = 0;
modeset_config->lut.input.end = degamma_len - 1;
modeset_config->lut.input.pRamps = nv_crtc_state->ilut_ramps;
} else {
/* setting input.end to 0 is equivalent to disabling the LUT, which
* should be equivalent to a linear LUT */
modeset_config->lut.input.specified = NV_TRUE;
modeset_config->lut.input.depth = 30; /* specify the full LUT */
modeset_config->lut.input.start = 0;
modeset_config->lut.input.end = 0;
modeset_config->lut.input.pRamps = NULL;
}
nv_drm_for_each_new_plane_in_state(crtc_state->state, plane,
plane_state, i) {
struct nv_drm_plane *nv_plane = to_nv_plane(plane);
uint32_t layer = nv_plane->layer_idx;
struct NvKmsKapiLayerRequestedConfig *layer_config;
if (layer == NVKMS_KAPI_LAYER_INVALID_IDX || plane_state->crtc != crtc_state->crtc) {
continue;
}
layer_config = &req_config->layerRequestedConfig[layer];
if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX && crtc_state->ctm) {
ctm = (struct drm_color_ctm *)crtc_state->ctm->data;
color_mgmt_config_ctm_to_csc(&layer_config->config.csc, ctm);
layer_config->config.cscUseMain = NV_FALSE;
} else {
/* When crtc_state->ctm is unset, this also sets the main layer to
* the identity matrix.
*/
layer_config->config.csc = NVKMS_IDENTITY_CSC_MATRIX;
}
layer_config->flags.cscChanged = NV_TRUE;
}
if (crtc_state->gamma_lut) {
nv_crtc_state->olut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->olut_ramps));
if (!nv_crtc_state->olut_ramps) {
ret = -ENOMEM;
goto fail;
}
gamma_lut = (struct drm_color_lut *)crtc_state->gamma_lut->data;
gamma_len = crtc_state->gamma_lut->length /
sizeof(struct drm_color_lut);
if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->olut_ramps,
gamma_lut,
gamma_len)) != 0) {
goto fail;
}
modeset_config->lut.output.specified = NV_TRUE;
modeset_config->lut.output.enabled = NV_TRUE;
modeset_config->lut.output.pRamps = nv_crtc_state->olut_ramps;
} else {
/* disabling the output LUT should be equivalent to setting a linear
* LUT */
modeset_config->lut.output.specified = NV_TRUE;
modeset_config->lut.output.enabled = NV_FALSE;
modeset_config->lut.output.pRamps = NULL;
}
return 0;
fail:
/* free allocated state */
nv_drm_free(nv_crtc_state->ilut_ramps);
nv_drm_free(nv_crtc_state->olut_ramps);
/* remove dangling pointers */
nv_crtc_state->ilut_ramps = NULL;
nv_crtc_state->olut_ramps = NULL;
modeset_config->lut.input.pRamps = NULL;
modeset_config->lut.output.pRamps = NULL;
/* prevent attempts at reading NULLs */
modeset_config->lut.input.specified = NV_FALSE;
modeset_config->lut.output.specified = NV_FALSE;
return ret;
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
/**
* nv_drm_crtc_atomic_check() can fail after it has modified
* the 'nv_drm_crtc_state::req_config', that is fine because 'nv_drm_crtc_state'
@@ -887,6 +1104,9 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
struct NvKmsKapiHeadRequestedConfig *req_config =
&nv_crtc_state->req_config;
int ret = 0;
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
struct nv_drm_device *nv_dev = to_nv_device(crtc_state->crtc->dev);
#endif
if (crtc_state->mode_changed) {
drm_mode_to_nvkms_display_mode(&crtc_state->mode,
@@ -925,6 +1145,25 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
req_config->flags.activeChanged = NV_TRUE;
}
#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
req_config->modeSetConfig.vrrEnabled = crtc_state->vrr_enabled;
#endif
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (nv_dev->drmMasterChangedSinceLastAtomicCommit &&
(crtc_state->degamma_lut ||
crtc_state->ctm ||
crtc_state->gamma_lut)) {
crtc_state->color_mgmt_changed = NV_TRUE;
}
if (crtc_state->color_mgmt_changed) {
if ((ret = color_mgmt_config_set(nv_crtc_state, req_config)) != 0) {
return ret;
}
}
#endif
return ret;
}
@@ -1156,6 +1395,8 @@ nv_drm_plane_create(struct drm_device *dev,
plane,
validLayerRRTransforms);
nv_drm_free(formats);
return plane;
failed_plane_init:
@@ -1220,6 +1461,22 @@ static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,
drm_crtc_helper_add(&nv_crtc->base, &nv_crtc_helper_funcs);
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
#if defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
drm_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE, true,
NVKMS_LUT_ARRAY_SIZE);
#else
drm_helper_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE,
NVKMS_LUT_ARRAY_SIZE);
#endif
ret = drm_mode_crtc_set_gamma_size(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE);
if (ret != 0) {
NV_DRM_DEV_LOG_WARN(
nv_dev,
"Failed to initialize legacy gamma support for head %u", head);
}
#endif
return &nv_crtc->base;
failed_init_crtc:
@@ -1328,10 +1585,16 @@ static void NvKmsKapiCrcsToDrm(const struct NvKmsKapiCrcs *crcs,
{
drmCrcs->outputCrc32.value = crcs->outputCrc32.value;
drmCrcs->outputCrc32.supported = crcs->outputCrc32.supported;
drmCrcs->outputCrc32.__pad0 = 0;
drmCrcs->outputCrc32.__pad1 = 0;
drmCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value;
drmCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported;
drmCrcs->rasterGeneratorCrc32.__pad0 = 0;
drmCrcs->rasterGeneratorCrc32.__pad1 = 0;
drmCrcs->compositorCrc32.value = crcs->compositorCrc32.value;
drmCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported;
drmCrcs->compositorCrc32.__pad0 = 0;
drmCrcs->compositorCrc32.__pad1 = 0;
}
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,


@@ -129,6 +129,9 @@ struct nv_drm_crtc_state {
*/
struct NvKmsKapiHeadRequestedConfig req_config;
struct NvKmsLutRamps *ilut_ramps;
struct NvKmsLutRamps *olut_ramps;
/**
* @nv_flip:
*


@@ -44,6 +44,10 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
#include <drm/drm_atomic_uapi.h>
#endif
#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif
@@ -60,6 +64,15 @@
#include <drm/drm_ioctl.h>
#endif
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
#include <drm/drm_aperture.h>
#include <drm/drm_fb_helper.h>
#endif
#if defined(NV_DRM_DRM_FBDEV_GENERIC_H_PRESENT)
#include <drm/drm_fbdev_generic.h>
#endif
#include <linux/pci.h>
/*
@@ -84,6 +97,11 @@
#include <drm/drm_atomic_helper.h>
#endif
static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
struct drm_file *filep,
NvU32 dpyId);
static int nv_drm_revoke_sub_ownership(struct drm_device *dev);
static struct nv_drm_device *dev_list = NULL;
static const char* nv_get_input_colorspace_name(
@@ -460,6 +478,11 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_dev->supportsSyncpts = resInfo.caps.supportsSyncpts;
nv_dev->semsurf_stride = resInfo.caps.semsurf.stride;
nv_dev->semsurf_max_submitted_offset =
resInfo.caps.semsurf.maxSubmittedOffset;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
gen = nv_dev->pageKindGeneration;
kind = nv_dev->genericPageKind;
@@ -546,6 +569,8 @@ static void __nv_drm_unload(struct drm_device *dev)
mutex_lock(&nv_dev->lock);
WARN_ON(nv_dev->subOwnershipGranted);
/* Disable event handling */
atomic_set(&nv_dev->enable_event_handling, false);
@@ -595,9 +620,15 @@ static int __nv_drm_master_set(struct drm_device *dev,
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
if (!nvKms->grabOwnership(nv_dev->pDevice)) {
/*
* If this device is driving a framebuffer, then nvidia-drm already has
* modeset ownership. Otherwise, grab ownership now.
*/
if (!nv_dev->hasFramebufferConsole &&
!nvKms->grabOwnership(nv_dev->pDevice)) {
return -EINVAL;
}
nv_dev->drmMasterChangedSinceLastAtomicCommit = NV_TRUE;
return 0;
}
@@ -631,6 +662,9 @@ void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
struct nv_drm_device *nv_dev = to_nv_device(dev);
int err;
nv_drm_revoke_modeset_permission(dev, file_priv, 0);
nv_drm_revoke_sub_ownership(dev);
/*
* After dropping nvkms modeset ownership, it is not guaranteed that
* drm and nvkms modeset state will remain in sync. Therefore, disable
@@ -655,7 +689,9 @@ void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
drm_modeset_unlock_all(dev);
nvKms->releaseOwnership(nv_dev->pDevice);
if (!nv_dev->hasFramebufferConsole) {
nvKms->releaseOwnership(nv_dev->pDevice);
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
@@ -693,15 +729,24 @@ static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
params->gpu_id = nv_dev->gpu_info.gpu_id;
params->primary_index = dev->primary->index;
params->generic_page_kind = 0;
params->page_kind_generation = 0;
params->sector_layout = 0;
params->supports_sync_fd = false;
params->supports_semsurf = false;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
params->generic_page_kind = nv_dev->genericPageKind;
params->page_kind_generation = nv_dev->pageKindGeneration;
params->sector_layout = nv_dev->sectorLayout;
#else
params->generic_page_kind = 0;
params->page_kind_generation = 0;
params->sector_layout = 0;
#endif
/* Semaphore surfaces are only supported if the modeset = 1 parameter is set */
if ((nv_dev->pDevice) != NULL && (nv_dev->semsurf_stride != 0)) {
params->supports_semsurf = true;
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
params->supports_sync_fd = true;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
return 0;
}
@@ -833,10 +878,10 @@ static NvU32 nv_drm_get_head_bit_from_connector(struct drm_connector *connector)
return 0;
}
static int nv_drm_grant_permission_ioctl(struct drm_device *dev, void *data,
struct drm_file *filep)
static int nv_drm_grant_modeset_permission(struct drm_device *dev,
struct drm_nvidia_grant_permissions_params *params,
struct drm_file *filep)
{
struct drm_nvidia_grant_permissions_params *params = data;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_connector *target_nv_connector = NULL;
struct nv_drm_crtc *target_nv_crtc = NULL;
@@ -958,26 +1003,102 @@ done:
return ret;
}
static bool nv_drm_revoke_connector(struct nv_drm_device *nv_dev,
struct nv_drm_connector *nv_connector)
static int nv_drm_grant_sub_ownership(struct drm_device *dev,
struct drm_nvidia_grant_permissions_params *params)
{
bool ret = true;
if (nv_connector->modeset_permission_crtc) {
if (nv_connector->nv_detected_encoder) {
ret = nvKms->revokePermissions(
nv_dev->pDevice, nv_connector->modeset_permission_crtc->head,
nv_connector->nv_detected_encoder->hDisplay);
}
nv_connector->modeset_permission_crtc->modeset_permission_filep = NULL;
nv_connector->modeset_permission_crtc = NULL;
int ret = -EINVAL;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_modeset_acquire_ctx *pctx;
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
ret);
pctx = &ctx;
#else
mutex_lock(&dev->mode_config.mutex);
pctx = dev->mode_config.acquire_ctx;
#endif
if (nv_dev->subOwnershipGranted ||
!nvKms->grantSubOwnership(params->fd, nv_dev->pDevice)) {
goto done;
}
nv_connector->modeset_permission_filep = NULL;
return ret;
/*
* When creating an ownership grant, shut down all heads and disable flip
* notifications.
*/
ret = nv_drm_atomic_helper_disable_all(dev, pctx);
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"nv_drm_atomic_helper_disable_all failed with error code %d!",
ret);
}
atomic_set(&nv_dev->enable_event_handling, false);
nv_dev->subOwnershipGranted = NV_TRUE;
ret = 0;
done:
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
mutex_unlock(&dev->mode_config.mutex);
#endif
return 0;
}
static int nv_drm_revoke_permission(struct drm_device *dev,
struct drm_file *filep, NvU32 dpyId)
static int nv_drm_grant_permission_ioctl(struct drm_device *dev, void *data,
struct drm_file *filep)
{
struct drm_nvidia_grant_permissions_params *params = data;
if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
return nv_drm_grant_modeset_permission(dev, params, filep);
} else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
return nv_drm_grant_sub_ownership(dev, params);
}
return -EINVAL;
}
static int
nv_drm_atomic_disable_connector(struct drm_atomic_state *state,
struct nv_drm_connector *nv_connector)
{
struct drm_crtc_state *crtc_state;
struct drm_connector_state *connector_state;
int ret = 0;
if (nv_connector->modeset_permission_crtc) {
crtc_state = drm_atomic_get_crtc_state(
state, &nv_connector->modeset_permission_crtc->base);
if (!crtc_state) {
return -EINVAL;
}
crtc_state->active = false;
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
if (ret < 0) {
return ret;
}
}
connector_state = drm_atomic_get_connector_state(state, &nv_connector->base);
if (!connector_state) {
return -EINVAL;
}
return drm_atomic_set_crtc_for_connector(connector_state, NULL);
}
static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
struct drm_file *filep, NvU32 dpyId)
{
struct drm_modeset_acquire_ctx *pctx;
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_crtc *crtc;
int ret = 0;
@@ -988,10 +1109,19 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
ret);
pctx = &ctx;
#else
mutex_lock(&dev->mode_config.mutex);
pctx = dev->mode_config.acquire_ctx;
#endif
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto done;
}
state->acquire_ctx = pctx;
/*
* If dpyId is set, only revoke those specific resources. Otherwise,
* it is from closing the file so revoke all resources for that filep.
@@ -1003,10 +1133,13 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->modeset_permission_filep == filep &&
(!dpyId || nv_drm_connector_is_dpy_id(connector, dpyId))) {
if (!nv_drm_connector_revoke_permissions(dev, nv_connector)) {
ret = -EINVAL;
// Continue trying to revoke as much as possible.
ret = nv_drm_atomic_disable_connector(state, nv_connector);
if (ret < 0) {
goto done;
}
// Continue trying to revoke as much as possible.
nv_drm_connector_revoke_permissions(dev, nv_connector);
}
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
@@ -1020,6 +1153,25 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
}
}
ret = drm_atomic_commit(state);
done:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
if (ret != 0) {
drm_atomic_state_free(state);
} else {
/*
* In case of success, drm_atomic_commit() takes care to cleanup and
* free @state.
*
* Comment placed above drm_atomic_commit() says: The caller must not
* free or in any other way access @state. If the function fails then
* the caller must clean up @state itself.
*/
}
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
@@ -1029,14 +1181,55 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
return ret;
}
static int nv_drm_revoke_sub_ownership(struct drm_device *dev)
{
int ret = -EINVAL;
struct nv_drm_device *nv_dev = to_nv_device(dev);
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
ret);
#else
mutex_lock(&dev->mode_config.mutex);
#endif
if (!nv_dev->subOwnershipGranted) {
goto done;
}
if (!nvKms->revokeSubOwnership(nv_dev->pDevice)) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to revoke sub-ownership from NVKMS");
goto done;
}
nv_dev->subOwnershipGranted = NV_FALSE;
atomic_set(&nv_dev->enable_event_handling, true);
ret = 0;
done:
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
mutex_unlock(&dev->mode_config.mutex);
#endif
return ret;
}
static int nv_drm_revoke_permission_ioctl(struct drm_device *dev, void *data,
struct drm_file *filep)
{
struct drm_nvidia_revoke_permissions_params *params = data;
if (!params->dpyId) {
return -EINVAL;
if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
if (!params->dpyId) {
return -EINVAL;
}
return nv_drm_revoke_modeset_permission(dev, filep, params->dpyId);
} else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
return nv_drm_revoke_sub_ownership(dev);
}
return nv_drm_revoke_permission(dev, filep, params->dpyId);
return -EINVAL;
}
static void nv_drm_postclose(struct drm_device *dev, struct drm_file *filep)
@@ -1051,7 +1244,7 @@ static void nv_drm_postclose(struct drm_device *dev, struct drm_file *filep)
dev->mode_config.num_connector > 0 &&
dev->mode_config.connector_list.next != NULL &&
dev->mode_config.connector_list.prev != NULL) {
nv_drm_revoke_permission(dev, filep, 0);
nv_drm_revoke_modeset_permission(dev, filep, 0);
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
@@ -1310,6 +1503,18 @@ static const struct drm_ioctl_desc nv_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(NVIDIA_GEM_PRIME_FENCE_ATTACH,
nv_drm_gem_prime_fence_attach_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CTX_CREATE,
nv_drm_semsurf_fence_ctx_create_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CREATE,
nv_drm_semsurf_fence_create_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_WAIT,
nv_drm_semsurf_fence_wait_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_ATTACH,
nv_drm_semsurf_fence_attach_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif
DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY,
@@ -1513,6 +1718,30 @@ static void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
goto failed_drm_register;
}
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
if (nv_drm_fbdev_module_param &&
drm_core_check_feature(dev, DRIVER_MODESET)) {
if (!nvKms->grabOwnership(nv_dev->pDevice)) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to grab NVKMS modeset ownership");
goto failed_grab_ownership;
}
if (device->bus == &pci_bus_type) {
struct pci_dev *pdev = to_pci_dev(device);
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_HAS_DRIVER_ARG)
drm_aperture_remove_conflicting_pci_framebuffers(pdev, &nv_drm_driver);
#else
drm_aperture_remove_conflicting_pci_framebuffers(pdev, nv_drm_driver.name);
#endif
}
drm_fbdev_generic_setup(dev, 32);
nv_dev->hasFramebufferConsole = NV_TRUE;
}
#endif /* defined(NV_DRM_FBDEV_GENERIC_AVAILABLE) */
/* Add NVIDIA-DRM device into list */
nv_dev->next = dev_list;
@@ -1520,6 +1749,12 @@ static void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
return; /* Success */
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
failed_grab_ownership:
drm_dev_unregister(dev);
#endif
failed_drm_register:
nv_drm_dev_free(dev);
@@ -1582,9 +1817,16 @@ void nv_drm_remove_devices(void)
{
while (dev_list != NULL) {
struct nv_drm_device *next = dev_list->next;
struct drm_device *dev = dev_list->dev;
drm_dev_unregister(dev_list->dev);
nv_drm_dev_free(dev_list->dev);
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
if (dev_list->hasFramebufferConsole) {
drm_atomic_helper_shutdown(dev);
nvKms->releaseOwnership(dev_list->pDevice);
}
#endif
drm_dev_unregister(dev);
nv_drm_dev_free(dev);
nv_drm_free(dev_list);

File diff suppressed because it is too large.


@@ -41,6 +41,22 @@ int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
#endif /* NV_DRM_FENCE_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */


@@ -465,7 +465,7 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
goto failed;
}
if (p->__pad != 0) {
if ((p->__pad0 != 0) || (p->__pad1 != 0)) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
goto failed;


@@ -95,6 +95,16 @@ static inline struct nv_drm_gem_object *to_nv_gem_object(
* 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
*/
static inline void
nv_drm_gem_object_reference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
drm_gem_object_get(&nv_gem->base);
#else
drm_gem_object_reference(&nv_gem->base);
#endif
}
static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{


@@ -306,6 +306,36 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
for_each_plane_in_state(__state, plane, plane_state, __i)
#endif
/*
* for_each_new_plane_in_state() was added by kernel commit
* 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by:
* Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* This commit also added the old_state and new_state pointers to
* __drm_planes_state. Because of this, the best that can be done on kernel
* versions without this macro is for_each_plane_in_state.
*/
/**
* nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
* @new_plane_state: &struct drm_plane_state iteration cursor for the new state
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all planes in an atomic update, tracking only the new
* state. This is useful in enable functions, where we need the new state the
* hardware should be in when the atomic commit operation has completed.
*/
#if !defined(for_each_new_plane_in_state)
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
#else
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
#endif
static inline struct drm_connector *
nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
uint32_t id)


@@ -48,6 +48,10 @@
#define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID 0x11
#define DRM_NVIDIA_GRANT_PERMISSIONS 0x12
#define DRM_NVIDIA_REVOKE_PERMISSIONS 0x13
#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14
#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15
#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16
#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH 0x17
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \
@@ -133,6 +137,26 @@
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS), \
struct drm_nvidia_revoke_permissions_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE \
DRM_IOWR((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE), \
struct drm_nvidia_semsurf_fence_ctx_create_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE \
DRM_IOWR((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_CREATE), \
struct drm_nvidia_semsurf_fence_create_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT \
DRM_IOW((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_WAIT), \
struct drm_nvidia_semsurf_fence_wait_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH \
DRM_IOW((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_ATTACH), \
struct drm_nvidia_semsurf_fence_attach_params)
struct drm_nvidia_gem_import_nvkms_memory_params {
uint64_t mem_size; /* IN */
@@ -158,6 +182,8 @@ struct drm_nvidia_get_dev_info_params {
uint32_t generic_page_kind; /* OUT */
uint32_t page_kind_generation; /* OUT */
uint32_t sector_layout; /* OUT */
uint32_t supports_sync_fd; /* OUT */
uint32_t supports_semsurf; /* OUT */
};
struct drm_nvidia_prime_fence_context_create_params {
@@ -179,6 +205,7 @@ struct drm_nvidia_gem_prime_fence_attach_params {
uint32_t handle; /* IN GEM handle to attach fence to */
uint32_t fence_context_handle; /* IN GEM handle to fence context on which fence is run on */
uint32_t sem_thresh; /* IN Semaphore value to reach before signal */
uint32_t __pad;
};
struct drm_nvidia_get_client_capability_params {
@@ -190,6 +217,8 @@ struct drm_nvidia_get_client_capability_params {
struct drm_nvidia_crtc_crc32 {
uint32_t value; /* Read value, undefined if supported is false */
uint8_t supported; /* Supported boolean, true if readable by hardware */
uint8_t __pad0;
uint16_t __pad1;
};
struct drm_nvidia_crtc_crc32_v2_out {
@@ -229,10 +258,11 @@ struct drm_nvidia_gem_alloc_nvkms_memory_params {
uint32_t handle; /* OUT */
uint8_t block_linear; /* IN */
uint8_t compressible; /* IN/OUT */
uint16_t __pad;
uint16_t __pad0;
uint64_t memory_size; /* IN */
uint32_t flags; /* IN */
uint32_t __pad1;
};
struct drm_nvidia_gem_export_dmabuf_memory_params {
@@ -266,13 +296,90 @@ struct drm_nvidia_get_connector_id_for_dpy_id_params {
uint32_t connectorId; /* OUT */
};
enum drm_nvidia_permissions_type {
NV_DRM_PERMISSIONS_TYPE_MODESET = 2,
NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
};
struct drm_nvidia_grant_permissions_params {
int32_t fd; /* IN */
uint32_t dpyId; /* IN */
uint32_t type; /* IN */
};
struct drm_nvidia_revoke_permissions_params {
uint32_t dpyId; /* IN */
uint32_t type; /* IN */
};
struct drm_nvidia_semsurf_fence_ctx_create_params {
uint64_t index; /* IN Index of the desired semaphore in the
* fence context's semaphore surface */
/* Params for importing userspace semaphore surface */
uint64_t nvkms_params_ptr; /* IN */
uint64_t nvkms_params_size; /* IN */
uint32_t handle; /* OUT GEM handle to fence context */
uint32_t __pad;
};
struct drm_nvidia_semsurf_fence_create_params {
uint32_t fence_context_handle; /* IN GEM handle to fence context on which
* fence is run on */
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
* after which the fence will be signaled
* with its error status set to -ETIMEDOUT.
* Default timeout value is 5000ms */
uint64_t wait_value; /* IN Semaphore value to reach before signal */
int32_t fd; /* OUT sync FD object representing the
* semaphore at the specified index reaching
* a value >= wait_value */
uint32_t __pad;
};
/*
* Note there is no provision for timeouts in this ioctl. The kernel
* documentation asserts timeouts should be handled by fence producers, and
* that waiters should not second-guess their logic, as it is producers rather
* than consumers that have better information when it comes to determining a
* reasonable timeout for a given workload.
*/
struct drm_nvidia_semsurf_fence_wait_params {
uint32_t fence_context_handle; /* IN GEM handle to fence context which will
* be used to wait on the sync FD. Need not
* be the fence context used to create the
* sync FD. */
int32_t fd; /* IN sync FD object to wait on */
uint64_t pre_wait_value; /* IN Wait for the semaphore represented by
* fence_context to reach this value before
* waiting for the sync file. */
uint64_t post_wait_value; /* IN Signal the semaphore represented by
* fence_context to this value after waiting
* for the sync file */
};
struct drm_nvidia_semsurf_fence_attach_params {
uint32_t handle; /* IN GEM handle of buffer */
uint32_t fence_context_handle; /* IN GEM handle of fence context */
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
* after which the fence will be signaled
* with its error status set to -ETIMEDOUT.
* Default timeout value is 5000ms */
uint32_t shared; /* IN If true, fence will reserve shared
* access to the buffer, otherwise it will
* reserve exclusive access */
uint64_t wait_value; /* IN Semaphore value to reach before signal */
};
#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */
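/*
 * Illustrative userspace sketch (hypothetical helper; assumes libdrm's
 * drmIoctl() and a fence context GEM handle previously created with
 * DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE): create a sync FD that signals
 * once the context's semaphore reaches wait_value.
 *
 *     int create_semsurf_fence_fd(int drm_fd, uint32_t ctx_handle,
 *                                 uint64_t wait_value)
 *     {
 *         struct drm_nvidia_semsurf_fence_create_params p = {
 *             .fence_context_handle = ctx_handle,
 *             .timeout_value_ms     = 5000,  // matches the documented default
 *             .wait_value           = wait_value,
 *         };
 *
 *         if (drmIoctl(drm_fd, DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE, &p) != 0)
 *             return -1;
 *
 *         return p.fd;  // sync FD for the semaphore reaching wait_value
 *     }
 */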


@@ -35,7 +35,13 @@
#include <drm/drmP.h>
#endif
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
#include <linux/file.h>
#include <linux/sync_file.h>
#endif
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include "nv-mm.h"
@@ -45,6 +51,14 @@ MODULE_PARM_DESC(
bool nv_drm_modeset_module_param = false;
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
MODULE_PARM_DESC(
fbdev,
"Create a framebuffer device (1 = enable, 0 = disable (default)) (EXPERIMENTAL)");
bool nv_drm_fbdev_module_param = false;
module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
#endif
void *nv_drm_calloc(size_t nmemb, size_t size)
{
size_t total_size = nmemb * size;
@@ -81,14 +95,10 @@ char *nv_drm_asprintf(const char *fmt, ...)
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_FAMILY_ARM)
#if defined(NVCPU_ARM)
#define WRITE_COMBINE_FLUSH() { dsb(); outer_sync(); }
#elif defined(NVCPU_AARCH64)
#define WRITE_COMBINE_FLUSH() mb()
#endif
#elif defined(NVCPU_PPC64LE)
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
#else
#define WRITE_COMBINE_FLUSH() mb()
#endif
void nv_drm_write_combine_flush(void)
@@ -160,6 +170,122 @@ void nv_drm_vunmap(void *address)
vunmap(address);
}
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
{
worker->shutting_down = false;
if (nv_kthread_q_init(&worker->q, name)) {
return false;
}
spin_lock_init(&worker->lock);
return true;
}
void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
{
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
worker->shutting_down = true;
spin_unlock_irqrestore(&worker->lock, flags);
nv_kthread_q_stop(&worker->q);
}
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg)
{
nv_kthread_q_item_init(work, callback, arg);
}
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&worker->lock, flags);
if (!worker->shutting_down) {
ret = nv_kthread_q_schedule_q_item(&worker->q, work);
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
{
nv_timer_setup(timer, callback);
}
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
{
mod_timer(&timer->kernel_timer, timeout_native);
}
unsigned long nv_drm_timer_now(void)
{
return jiffies;
}
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
{
return jiffies + msecs_to_jiffies(relative_timeout_ms);
}
bool nv_drm_del_timer_sync(nv_drm_timer *timer)
{
if (del_timer_sync(&timer->kernel_timer)) {
return true;
} else {
return false;
}
}
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
struct sync_file *sync;
int fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
return fd;
}
/* sync_file_create() generates its own reference to the fence */
sync = sync_file_create(fence);
if (IS_ERR(sync)) {
put_unused_fd(fd);
return PTR_ERR(sync);
}
fd_install(fd, sync->file);
return fd;
#else /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
return -EINVAL;
#endif /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
}
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd)
{
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
return sync_file_get_fence(fd);
#else /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
return NULL;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void)
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
#endif /* NV_DRM_AVAILABLE */
/*************************************************************************


@@ -237,6 +237,14 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
int i;
int ret;
/*
* If sub-owner permission was granted to another NVKMS client, disallow
* modesets through the DRM interface.
*/
if (nv_dev->subOwnershipGranted) {
return -EINVAL;
}
memset(requested_config, 0, sizeof(*requested_config));
/* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
@@ -274,9 +282,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
nv_new_crtc_state->nv_flip = NULL;
}
#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
requested_config->headRequestedConfig[nv_crtc->head].modeSetConfig.vrrEnabled = new_crtc_state->vrr_enabled;
#endif
}
}
@@ -292,7 +297,9 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
requested_config,
&reply_config,
commit)) {
if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
return -EINVAL;
}
}
if (commit && nv_dev->supportsSyncpts) {
@@ -388,42 +395,56 @@ int nv_drm_atomic_commit(struct drm_device *dev,
struct nv_drm_device *nv_dev = to_nv_device(dev);
/*
 * XXX: drm_mode_config_funcs::atomic_commit() mandates returning -EBUSY
 * for a nonblocking commit if the commit would need to wait for previous
 * updates (commit tasks/flip events) to complete. For blocking commits, it
 * mandates waiting for previous updates to complete. However, the kernel
 * DRM-KMS documentation does explicitly allow maintaining a queue of
 * outstanding commits.
 *
 * Our system already implements such a queue, but due to
 * bug 4054608, it is currently not used.
 */
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
/*
 * It is not necessary to hold nv_drm_crtc::flip_list_lock here, because:
 *
 * The core DRM driver acquires locks for all affected CRTCs before calling
 * into the ->commit() hook, so no other thread can enter ->commit() for the
 * same CRTCs and enqueue flip objects onto flip_list -
 *
 *   nv_drm_atomic_commit_internal()
 *     |-> nv_drm_atomic_apply_modeset_config(commit=true)
 *           |-> nv_drm_crtc_enqueue_flip()
 *
 * The only remaining possibility is that the list_empty() check races with
 * the path that dequeues a flip object -
 *
 *   __nv_drm_handle_flip_event()
 *     |-> nv_drm_crtc_dequeue_flip()
 *
 * But that race cannot cause list_empty() to return an incorrect result:
 * nv_drm_crtc_dequeue_flip() in the middle of updating the list cannot make
 * a non-empty list appear empty.
 */
if (nonblock) {
if (!list_empty(&nv_crtc->flip_list)) {
return -EBUSY;
}
} else {
if (wait_event_timeout(
nv_dev->flip_event_wq,
list_empty(&nv_crtc->flip_list),
3 * HZ /* 3 seconds */) == 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Flip event timeout on head %u", nv_crtc->head);
}
}
}
@@ -467,6 +488,7 @@ int nv_drm_atomic_commit(struct drm_device *dev,
goto done;
}
nv_dev->drmMasterChangedSinceLastAtomicCommit = NV_FALSE;
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);


@@ -29,10 +29,47 @@
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#endif
#if defined(NV_LINUX)
#include "nv-kthread-q.h"
#include "linux/spinlock.h"
typedef struct nv_drm_workthread {
spinlock_t lock;
struct nv_kthread_q q;
bool shutting_down;
} nv_drm_workthread;
typedef nv_kthread_q_item_t nv_drm_work;
#else /* defined(NV_LINUX) */
#error "Need to define deferred work primitives for this OS"
#endif /* else defined(NV_LINUX) */
#if defined(NV_LINUX)
#include "nv-timer.h"
typedef struct nv_timer nv_drm_timer;
#else /* defined(NV_LINUX) */
#error "Need to define kernel timer callback primitives for this OS"
#endif /* else defined(NV_LINUX) */
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
struct page;
/* Set to true when the atomic modeset feature is enabled. */
extern bool nv_drm_modeset_module_param;
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
/* Set to true when the nvidia-drm driver should install a framebuffer device */
extern bool nv_drm_fbdev_module_param;
#endif
void *nv_drm_calloc(size_t nmemb, size_t size);
@@ -51,6 +88,37 @@ void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
void nv_drm_vunmap(void *address);
#endif
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name);
/* Can be called concurrently with nv_drm_workthread_add_work() */
void nv_drm_workthread_shutdown(nv_drm_workthread *worker);
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg);
/* Can be called concurrently with nv_drm_workthread_shutdown() */
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work);
void nv_drm_timer_setup(nv_drm_timer *timer,
void (*callback)(nv_drm_timer *nv_drm_timer));
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native);
bool nv_drm_del_timer_sync(nv_drm_timer *timer);
unsigned long nv_drm_timer_now(void);
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence);
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd);
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void);
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */


@@ -46,12 +46,33 @@
#define NV_DRM_LOG_ERR(__fmt, ...) \
DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
/*
 * DRM_WARN() was added in v4.9 by kernel commit
 * 30b0da8d556e65ff935a56cd82c05ba0516d3e4a
 *
 * Before that commit, only DRM_INFO and DRM_ERROR were defined, and
 * DRM_INFO(fmt, ...) expanded to
 * printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__). So, if DRM_WARN
 * is undefined, NV_DRM_LOG_WARN is defined below following the same
 * pattern as DRM_INFO.
 */
#ifdef DRM_WARN
#define NV_DRM_LOG_WARN(__fmt, ...) \
DRM_WARN("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#else
#define NV_DRM_LOG_WARN(__fmt, ...) \
printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#endif
#define NV_DRM_LOG_INFO(__fmt, ...) \
DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_WARN(__dev, __fmt, ...) \
NV_DRM_LOG_WARN("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
@@ -117,9 +138,26 @@ struct nv_drm_device {
#endif
#if defined(NV_DRM_FENCE_AVAILABLE)
NvU64 semsurf_stride;
NvU64 semsurf_max_submitted_offset;
#endif
NvBool hasVideoMemory;
NvBool supportsSyncpts;
NvBool subOwnershipGranted;
NvBool hasFramebufferConsole;
/**
* @drmMasterChangedSinceLastAtomicCommit:
*
* This flag is set in nv_drm_master_set and reset after a completed atomic
* commit. It is used to restore or recommit state that is lost by the
* NvKms modeset owner change, such as the CRTC color management
* properties.
*/
NvBool drmMasterChangedSinceLastAtomicCommit;
struct drm_property *nv_out_fence_property;
struct drm_property *nv_input_colorspace_property;


@@ -19,6 +19,7 @@ NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
@@ -79,6 +80,17 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_helper_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
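# Each function compile test listed above is surfaced to the C sources as an
# NV_<NAME>_PRESENT define; for example, sync_file_get_fence corresponds to
# the NV_SYNC_FILE_GET_FENCE_PRESENT guard used by nv_drm_sync_file_get_fence().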
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
@@ -133,3 +145,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg