560.28.03

This commit is contained in:
Gaurav Juvekar
2024-07-19 15:45:15 -07:00
parent 5fdf5032fb
commit 448d5cc656
859 changed files with 165424 additions and 91129 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -176,7 +176,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
{
unsigned i, j;
const static unsigned attempts = 3;
static const unsigned attempts = 3;
struct task_struct *thread[3];
for (i = 0;; i++) {

View File

@@ -42,12 +42,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#include <linux/nvhost.h>
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
#include <linux/host1x-next.h>
#endif
#if defined(NV_DRM_DRM_COLOR_MGMT_H_PRESENT)
#include <drm/drm_color_mgmt.h>
#endif
@@ -264,7 +258,6 @@ plane_req_config_update(struct drm_plane *plane,
{
struct nv_drm_plane *nv_plane = to_nv_plane(plane);
struct NvKmsKapiLayerConfig old_config = req_config->config;
struct nv_drm_device *nv_dev = to_nv_device(plane->dev);
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(plane_state);
@@ -392,49 +385,16 @@ plane_req_config_update(struct drm_plane *plane,
req_config->config.inputColorSpace =
nv_drm_plane_state->input_colorspace;
req_config->config.syncptParams.preSyncptSpecified = false;
req_config->config.syncptParams.postSyncptRequested = false;
req_config->config.syncParams.preSyncptSpecified = false;
req_config->config.syncParams.postSyncptRequested = false;
req_config->config.syncParams.semaphoreSpecified = false;
if (plane_state->fence != NULL || nv_drm_plane_state->fd_user_ptr) {
if (!nv_dev->supportsSyncpts) {
if (nv_drm_plane_state->fd_user_ptr) {
if (to_nv_device(plane->dev)->supportsSyncpts) {
req_config->config.syncParams.postSyncptRequested = true;
} else {
return -1;
}
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT)
if (plane_state->fence != NULL) {
int ret = nvhost_dma_fence_unpack(
plane_state->fence,
&req_config->config.syncptParams.preSyncptId,
&req_config->config.syncptParams.preSyncptValue);
if (ret != 0) {
return ret;
}
req_config->config.syncptParams.preSyncptSpecified = true;
}
#endif
if (nv_drm_plane_state->fd_user_ptr) {
req_config->config.syncptParams.postSyncptRequested = true;
}
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
if (plane_state->fence != NULL) {
int ret = host1x_fence_extract(
plane_state->fence,
&req_config->config.syncptParams.preSyncptId,
&req_config->config.syncptParams.preSyncptValue);
if (ret != 0) {
return ret;
}
req_config->config.syncptParams.preSyncptSpecified = true;
}
if (nv_drm_plane_state->fd_user_ptr) {
req_config->config.syncptParams.postSyncptRequested = true;
}
#else
return -1;
#endif
}
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
@@ -857,7 +817,7 @@ __nv_drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
#endif
}
static inline void nv_drm_crtc_duplicate_req_head_modeset_config(
static inline bool nv_drm_crtc_duplicate_req_head_modeset_config(
const struct NvKmsKapiHeadRequestedConfig *old,
struct NvKmsKapiHeadRequestedConfig *new)
{
@@ -876,6 +836,34 @@ static inline void nv_drm_crtc_duplicate_req_head_modeset_config(
new->layerRequestedConfig[i].config =
old->layerRequestedConfig[i].config;
}
if (old->modeSetConfig.lut.input.pRamps) {
new->modeSetConfig.lut.input.pRamps =
nv_drm_calloc(1, sizeof(*new->modeSetConfig.lut.input.pRamps));
if (!new->modeSetConfig.lut.input.pRamps) {
return false;
}
*new->modeSetConfig.lut.input.pRamps =
*old->modeSetConfig.lut.input.pRamps;
}
if (old->modeSetConfig.lut.output.pRamps) {
new->modeSetConfig.lut.output.pRamps =
nv_drm_calloc(1, sizeof(*new->modeSetConfig.lut.output.pRamps));
if (!new->modeSetConfig.lut.output.pRamps) {
/*
* new->modeSetConfig.lut.input.pRamps is either NULL or it was
* just allocated
*/
nv_drm_free(new->modeSetConfig.lut.input.pRamps);
new->modeSetConfig.lut.input.pRamps = NULL;
return false;
}
*new->modeSetConfig.lut.output.pRamps =
*old->modeSetConfig.lut.output.pRamps;
}
return true;
}
static inline struct nv_drm_crtc_state *nv_drm_crtc_state_alloc(void)
@@ -947,17 +935,24 @@ nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
return NULL;
}
__drm_atomic_helper_crtc_duplicate_state(crtc, &nv_state->base);
INIT_LIST_HEAD(&nv_state->nv_flip->list_entry);
INIT_LIST_HEAD(&nv_state->nv_flip->deferred_flip_list);
nv_drm_crtc_duplicate_req_head_modeset_config(
&(to_nv_crtc_state(crtc->state)->req_config),
&nv_state->req_config);
/*
* nv_drm_crtc_duplicate_req_head_modeset_config potentially allocates
* nv_state->req_config.modeSetConfig.lut.{in,out}put.pRamps, so they should
* be freed in any following failure paths.
*/
if (!nv_drm_crtc_duplicate_req_head_modeset_config(
&(to_nv_crtc_state(crtc->state)->req_config),
&nv_state->req_config)) {
nv_state->ilut_ramps = NULL;
nv_state->olut_ramps = NULL;
nv_drm_free(nv_state->nv_flip);
nv_drm_free(nv_state);
return NULL;
}
__drm_atomic_helper_crtc_duplicate_state(crtc, &nv_state->base);
return &nv_state->base;
}
@@ -982,8 +977,8 @@ static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
__nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base);
nv_drm_free(nv_state->ilut_ramps);
nv_drm_free(nv_state->olut_ramps);
nv_drm_free(nv_state->req_config.modeSetConfig.lut.input.pRamps);
nv_drm_free(nv_state->req_config.modeSetConfig.lut.output.pRamps);
nv_drm_free(nv_state);
}
@@ -1066,94 +1061,82 @@ static int color_mgmt_config_set_luts(struct nv_drm_crtc_state *nv_crtc_state,
* According to the comment in the Linux kernel's
* drivers/gpu/drm/drm_color_mgmt.c, if either property is NULL, that LUT
* needs to be changed to a linear LUT
*
* On failure, any LUT ramps allocated in this function are freed when the
* subsequent atomic state cleanup calls nv_drm_atomic_crtc_destroy_state.
*/
req_config->flags.lutChanged = NV_TRUE;
if (crtc_state->degamma_lut) {
struct drm_color_lut *degamma_lut = NULL;
uint64_t degamma_len = 0;
nv_crtc_state->ilut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->ilut_ramps));
if (!nv_crtc_state->ilut_ramps) {
ret = -ENOMEM;
goto fail;
if (!modeset_config->lut.input.pRamps) {
modeset_config->lut.input.pRamps =
nv_drm_calloc(1, sizeof(*modeset_config->lut.input.pRamps));
if (!modeset_config->lut.input.pRamps) {
return -ENOMEM;
}
}
degamma_lut = (struct drm_color_lut *)crtc_state->degamma_lut->data;
degamma_len = crtc_state->degamma_lut->length /
sizeof(struct drm_color_lut);
if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->ilut_ramps,
if ((ret = color_mgmt_config_copy_lut(modeset_config->lut.input.pRamps,
degamma_lut,
degamma_len)) != 0) {
goto fail;
return ret;
}
modeset_config->lut.input.specified = NV_TRUE;
modeset_config->lut.input.depth = 30; /* specify the full LUT */
modeset_config->lut.input.start = 0;
modeset_config->lut.input.end = degamma_len - 1;
modeset_config->lut.input.pRamps = nv_crtc_state->ilut_ramps;
} else {
/* setting input.end to 0 is equivalent to disabling the LUT, which
* should be equivalent to a linear LUT */
modeset_config->lut.input.specified = NV_TRUE;
modeset_config->lut.input.depth = 30; /* specify the full LUT */
modeset_config->lut.input.start = 0;
modeset_config->lut.input.end = 0;
modeset_config->lut.input.pRamps = NULL;
nv_drm_free(modeset_config->lut.input.pRamps);
modeset_config->lut.input.pRamps = NULL;
}
req_config->flags.ilutChanged = NV_TRUE;
if (crtc_state->gamma_lut) {
struct drm_color_lut *gamma_lut = NULL;
uint64_t gamma_len = 0;
nv_crtc_state->olut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->olut_ramps));
if (!nv_crtc_state->olut_ramps) {
ret = -ENOMEM;
goto fail;
if (!modeset_config->lut.output.pRamps) {
modeset_config->lut.output.pRamps =
nv_drm_calloc(1, sizeof(*modeset_config->lut.output.pRamps));
if (!modeset_config->lut.output.pRamps) {
return -ENOMEM;
}
}
gamma_lut = (struct drm_color_lut *)crtc_state->gamma_lut->data;
gamma_len = crtc_state->gamma_lut->length /
sizeof(struct drm_color_lut);
if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->olut_ramps,
if ((ret = color_mgmt_config_copy_lut(modeset_config->lut.output.pRamps,
gamma_lut,
gamma_len)) != 0) {
goto fail;
return ret;
}
modeset_config->lut.output.specified = NV_TRUE;
modeset_config->lut.output.enabled = NV_TRUE;
modeset_config->lut.output.pRamps = nv_crtc_state->olut_ramps;
} else {
/* disabling the output LUT should be equivalent to setting a linear
* LUT */
modeset_config->lut.output.specified = NV_TRUE;
modeset_config->lut.output.enabled = NV_FALSE;
nv_drm_free(modeset_config->lut.output.pRamps);
modeset_config->lut.output.pRamps = NULL;
}
req_config->flags.olutChanged = NV_TRUE;
return 0;
fail:
/* free allocated state */
nv_drm_free(nv_crtc_state->ilut_ramps);
nv_drm_free(nv_crtc_state->olut_ramps);
/* remove dangling pointers */
nv_crtc_state->ilut_ramps = NULL;
nv_crtc_state->olut_ramps = NULL;
modeset_config->lut.input.pRamps = NULL;
modeset_config->lut.output.pRamps = NULL;
/* prevent attempts at reading NULLs */
modeset_config->lut.input.specified = NV_FALSE;
modeset_config->lut.output.specified = NV_FALSE;
return ret;
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
@@ -1178,9 +1161,6 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
struct NvKmsKapiHeadRequestedConfig *req_config =
&nv_crtc_state->req_config;
int ret = 0;
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
struct nv_drm_device *nv_dev = to_nv_device(crtc_state->crtc->dev);
#endif
if (crtc_state->mode_changed) {
drm_mode_to_nvkms_display_mode(&crtc_state->mode,
@@ -1224,13 +1204,6 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
#endif
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (nv_dev->drmMasterChangedSinceLastAtomicCommit &&
(crtc_state->degamma_lut ||
crtc_state->ctm ||
crtc_state->gamma_lut)) {
crtc_state->color_mgmt_changed = NV_TRUE;
}
if (crtc_state->color_mgmt_changed) {
if ((ret = color_mgmt_config_set_luts(nv_crtc_state, req_config)) != 0) {
return ret;
@@ -1256,7 +1229,7 @@ static const struct drm_crtc_helper_funcs nv_crtc_helper_funcs = {
static void nv_drm_plane_install_properties(
struct drm_plane *plane,
NvBool supportsHDR)
NvBool supportsICtCp)
{
struct nv_drm_device *nv_dev = to_nv_device(plane->dev);
@@ -1272,7 +1245,7 @@ static void nv_drm_plane_install_properties(
}
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
if (supportsHDR && nv_dev->nv_hdr_output_metadata_property) {
if (supportsICtCp && nv_dev->nv_hdr_output_metadata_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_hdr_output_metadata_property, 0);
}
@@ -1458,7 +1431,7 @@ nv_drm_plane_create(struct drm_device *dev,
if (plane_type != DRM_PLANE_TYPE_CURSOR) {
nv_drm_plane_install_properties(
plane,
pResInfo->supportsHDR[layer_idx]);
pResInfo->supportsICtCp[layer_idx]);
}
__nv_drm_plane_create_alpha_blending_properties(
@@ -1681,7 +1654,7 @@ int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
struct NvKmsKapiCrcs crc32;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -ENOENT;
return -EOPNOTSUPP;
}
crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);
@@ -1709,7 +1682,7 @@ int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
struct NvKmsKapiCrcs crc32;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -ENOENT;
return -EOPNOTSUPP;
}
crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);

View File

@@ -129,9 +129,6 @@ struct nv_drm_crtc_state {
*/
struct NvKmsKapiHeadRequestedConfig req_config;
struct NvKmsLutRamps *ilut_ramps;
struct NvKmsLutRamps *olut_ramps;
/**
* @nv_flip:
*

View File

@@ -430,7 +430,7 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
struct NvKmsKapiAllocateDeviceParams allocateDeviceParams;
struct NvKmsKapiDeviceResourcesInfo resInfo;
#endif
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
NvU64 kind;
NvU64 gen;
@@ -517,6 +517,12 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_dev->semsurf_max_submitted_offset =
resInfo.caps.semsurf.maxSubmittedOffset;
nv_dev->display_semaphores.count =
resInfo.caps.numDisplaySemaphores;
nv_dev->display_semaphores.next_index = 0;
nv_dev->requiresVrrSemaphores = resInfo.caps.requiresVrrSemaphores;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
gen = nv_dev->pageKindGeneration;
kind = nv_dev->genericPageKind;
@@ -673,7 +679,6 @@ static int __nv_drm_master_set(struct drm_device *dev,
!nvKms->grabOwnership(nv_dev->pDevice)) {
return -EINVAL;
}
nv_dev->drmMasterChangedSinceLastAtomicCommit = NV_TRUE;
return 0;
}
@@ -863,13 +868,18 @@ static int nv_drm_get_dpy_id_for_connector_id_ioctl(struct drm_device *dev,
struct drm_file *filep)
{
struct drm_nvidia_get_dpy_id_for_connector_id_params *params = data;
struct drm_connector *connector;
struct nv_drm_connector *nv_connector;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EOPNOTSUPP;
}
// Importantly, drm_connector_lookup (with filep) will only return the
// connector if we are master, a lessee with the connector, or not master at
// all. It will return NULL if we are a lessee with other connectors.
struct drm_connector *connector =
nv_drm_connector_lookup(dev, filep, params->connectorId);
struct nv_drm_connector *nv_connector;
int ret = 0;
connector = nv_drm_connector_lookup(dev, filep, params->connectorId);
if (!connector) {
return -EINVAL;
@@ -902,6 +912,11 @@ static int nv_drm_get_connector_id_for_dpy_id_ioctl(struct drm_device *dev,
int ret = -EINVAL;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
#endif
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EOPNOTSUPP;
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
@@ -1114,6 +1129,10 @@ static int nv_drm_grant_permission_ioctl(struct drm_device *dev, void *data,
{
struct drm_nvidia_grant_permissions_params *params = data;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EOPNOTSUPP;
}
if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
return nv_drm_grant_modeset_permission(dev, params, filep);
} else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
@@ -1279,6 +1298,10 @@ static int nv_drm_revoke_permission_ioctl(struct drm_device *dev, void *data,
{
struct drm_nvidia_revoke_permissions_params *params = data;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EOPNOTSUPP;
}
if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
if (!params->dpyId) {
return -EINVAL;

View File

@@ -463,10 +463,15 @@ int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_prime_fence_context_create_params *p = data;
struct nv_drm_prime_fence_context *nv_prime_fence_context =
__nv_drm_prime_fence_context_new(nv_dev, p);
struct nv_drm_prime_fence_context *nv_prime_fence_context;
int err;
if (nv_dev->pDevice == NULL) {
return -EOPNOTSUPP;
}
nv_prime_fence_context = __nv_drm_prime_fence_context_new(nv_dev, p);
if (!nv_prime_fence_context) {
goto done;
}
@@ -521,6 +526,11 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
struct nv_drm_fence_context *nv_fence_context;
nv_dma_fence_t *fence;
if (nv_dev->pDevice == NULL) {
ret = -EOPNOTSUPP;
goto done;
}
if (p->__pad != 0) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
goto done;
@@ -1308,6 +1318,10 @@ int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
struct nv_drm_semsurf_fence_ctx *ctx;
int err;
if (nv_dev->pDevice == NULL) {
return -EOPNOTSUPP;
}
if (p->__pad != 0) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
return -EINVAL;
@@ -1469,6 +1483,11 @@ int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
int ret = -EINVAL;
int fd;
if (nv_dev->pDevice == NULL) {
ret = -EOPNOTSUPP;
goto done;
}
if (p->__pad != 0) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
goto done;
@@ -1631,6 +1650,10 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
unsigned long flags;
int ret = -EINVAL;
if (nv_dev->pDevice == NULL) {
return -EOPNOTSUPP;
}
if (p->pre_wait_value >= p->post_wait_value) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
@@ -1739,6 +1762,11 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
nv_dma_fence_t *fence;
int ret = -EINVAL;
if (nv_dev->pDevice == NULL) {
ret = -EOPNOTSUPP;
goto done;
}
nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
if (!nv_gem) {

View File

@@ -380,7 +380,7 @@ int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
ret = -EOPNOTSUPP;
goto failed;
}
@@ -430,7 +430,7 @@ int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
ret = -EOPNOTSUPP;
goto done;
}
@@ -483,7 +483,7 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
ret = -EOPNOTSUPP;
goto failed;
}

View File

@@ -319,7 +319,7 @@ int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
struct nv_drm_gem_object *nv_gem = NULL;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EINVAL;
return -EOPNOTSUPP;
}
nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle);

View File

@@ -42,6 +42,16 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#include <linux/nvhost.h>
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
#include <linux/host1x-next.h>
#endif
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#endif
struct nv_drm_atomic_state {
struct NvKmsKapiRequestedModeSetConfig config;
struct drm_atomic_state base;
@@ -146,6 +156,159 @@ static int __nv_drm_put_back_post_fence_fd(
return ret;
}
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
 * Bookkeeping for one plane in-fence that is being converted into a display
 * semaphore wait.  Allocated in __nv_drm_convert_in_fences and freed by
 * __nv_drm_plane_fence_cb once the fence completes.
 */
struct nv_drm_plane_fence_cb_data {
/* Embedded DMA fence callback node; container_of() recovers this struct. */
nv_dma_fence_cb_t dma_fence_cb;
/* Device whose display semaphore gets signaled when the fence fires. */
struct nv_drm_device *nv_dev;
/* Index of the display semaphore slot to signal. */
NvU32 semaphore_index;
};
/*
 * DMA fence callback registered for a plane's in-fence.
 *
 * Runs once the fence completes: drops the fence reference that was handed
 * to the callback, signals the display semaphore slot recorded at
 * registration time, and releases the per-fence bookkeeping allocated by
 * the registrant.
 */
static void
__nv_drm_plane_fence_cb(
    nv_dma_fence_t *fence,
    nv_dma_fence_cb_t *cb_data
)
{
    struct nv_drm_plane_fence_cb_data *data =
        container_of(cb_data, typeof(*data), dma_fence_cb);

    /* The fence reference belongs to this callback; release it first. */
    nv_dma_fence_put(fence);

    /* Unblock the display wait tied to this plane's semaphore slot. */
    nvKms->signalDisplaySemaphore(data->nv_dev->pDevice,
                                  data->semaphore_index);

    nv_drm_free(data);
}
/*
 * Convert the DRM in-fences attached to this CRTC's planes into NvKms
 * pre-sync waits.
 *
 * For every non-cursor plane in @state that targets @crtc and carries a
 * fence, first try to extract a Tegra syncpoint (id, value) pair from the
 * fence when the device supports syncpoints.  If extraction is unavailable
 * or fails, fall back to a display semaphore: reserve a slot, mark the
 * layer config to wait on it, and register a DMA fence callback
 * (__nv_drm_plane_fence_cb) that signals the semaphore when the fence
 * completes.
 *
 * Returns 0 on success, -EINVAL if a syncpt and a semaphore would be mixed
 * in the same request, -EAGAIN if a semaphore slot could not be
 * initialized, -ENOMEM on allocation failure, or the error from fence
 * callback registration.
 */
static int __nv_drm_convert_in_fences(
struct nv_drm_device *nv_dev,
struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct drm_plane *plane = NULL;
struct drm_plane_state *plane_state = NULL;
struct nv_drm_plane *nv_plane = NULL;
struct NvKmsKapiLayerRequestedConfig *plane_req_config = NULL;
struct NvKmsKapiHeadRequestedConfig *head_req_config =
&to_nv_crtc_state(crtc_state)->req_config;
struct nv_drm_plane_fence_cb_data *fence_data;
uint32_t semaphore_index;
int ret, i;
/* Nothing to wait on when the CRTC is not active. */
if (!crtc_state->active) {
return 0;
}
/* Presumably iterates the new plane states of this atomic update —
 * TODO confirm against the nv_drm_for_each_new_plane_in_state wrapper. */
nv_drm_for_each_new_plane_in_state(state, plane, plane_state, i) {
/* Only non-cursor planes on this CRTC that carry an in-fence matter. */
if ((plane->type == DRM_PLANE_TYPE_CURSOR) ||
(plane_state->crtc != crtc) ||
(plane_state->fence == NULL)) {
continue;
}
nv_plane = to_nv_plane(plane);
plane_req_config =
&head_req_config->layerRequestedConfig[nv_plane->layer_idx];
/* Fast path: express the fence as a syncpoint (id, value) wait. */
if (nv_dev->supportsSyncpts) {
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT)
int ret =
nvhost_dma_fence_unpack(
plane_state->fence,
&plane_req_config->config.syncParams.u.syncpt.preSyncptId,
&plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
if (ret == 0) {
plane_req_config->config.syncParams.preSyncptSpecified = true;
continue;
}
#endif
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
int ret =
host1x_fence_extract(
plane_state->fence,
&plane_req_config->config.syncParams.u.syncpt.preSyncptId,
&plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
if (ret == 0) {
plane_req_config->config.syncParams.preSyncptSpecified = true;
continue;
}
#endif
}
/*
* Syncpt extraction failed, or syncpts are not supported.
* Use general DRM fence support with semaphores instead.
*/
if (plane_req_config->config.syncParams.postSyncptRequested) {
// Can't mix Syncpts and semaphores in a given request.
return -EINVAL;
}
/* Reserve the next semaphore slot in round-robin order. */
semaphore_index = nv_drm_next_display_semaphore(nv_dev);
if (!nvKms->resetDisplaySemaphore(nv_dev->pDevice, semaphore_index)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to initialize semaphore for plane fence");
/*
* This should only happen if the semaphore pool was somehow
* exhausted. Waiting a bit and retrying may help in that case.
*/
return -EAGAIN;
}
/* Tell NvKms to wait on this semaphore before flipping the layer. */
plane_req_config->config.syncParams.semaphoreSpecified = true;
plane_req_config->config.syncParams.u.semaphore.index = semaphore_index;
fence_data = nv_drm_calloc(1, sizeof(*fence_data));
if (!fence_data) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to allocate callback data for plane fence");
/* Undo the reservation so the slot is not left pending. */
nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
return -ENOMEM;
}
fence_data->nv_dev = nv_dev;
fence_data->semaphore_index = semaphore_index;
/* __nv_drm_plane_fence_cb signals the semaphore and frees fence_data. */
ret = nv_dma_fence_add_callback(plane_state->fence,
&fence_data->dma_fence_cb,
__nv_drm_plane_fence_cb);
switch (ret) {
case -ENOENT:
/* The fence is already signaled */
__nv_drm_plane_fence_cb(plane_state->fence,
&fence_data->dma_fence_cb);
#if defined(fallthrough)
fallthrough;
#else
/* Fallthrough */
#endif
case 0:
/*
* The plane state's fence reference has either been consumed or
* belongs to the outstanding callback now.
*/
plane_state->fence = NULL;
break;
default:
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed plane fence callback registration");
/* Fence callback registration failed */
nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
nv_drm_free(fence_data);
return ret;
}
}
return 0;
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
static int __nv_drm_get_syncpt_data(
struct nv_drm_device *nv_dev,
struct drm_crtc *crtc,
@@ -258,11 +421,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
commit ? crtc->state : crtc_state;
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
requested_config->headRequestedConfig[nv_crtc->head] =
to_nv_crtc_state(new_crtc_state)->req_config;
requested_config->headsMask |= 1 << nv_crtc->head;
if (commit) {
struct drm_crtc_state *old_crtc_state = crtc_state;
struct nv_drm_crtc_state *nv_new_crtc_state =
@@ -282,7 +440,27 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
nv_new_crtc_state->nv_flip = NULL;
}
#if defined(NV_DRM_FENCE_AVAILABLE)
ret = __nv_drm_convert_in_fences(nv_dev,
state,
crtc,
new_crtc_state);
if (ret != 0) {
return ret;
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
}
/*
* Do this deep copy after calling __nv_drm_convert_in_fences,
* which modifies the new CRTC state's req_config member
*/
requested_config->headRequestedConfig[nv_crtc->head] =
to_nv_crtc_state(new_crtc_state)->req_config;
requested_config->headsMask |= 1 << nv_crtc->head;
}
if (commit && nvKms->systemInfo.bAllowWriteCombining) {
@@ -313,6 +491,10 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
}
}
if (commit && nv_dev->requiresVrrSemaphores && reply_config.vrrFlip) {
nvKms->signalVrrSemaphore(nv_dev->pDevice, reply_config.vrrSemaphoreIndex);
}
return 0;
}
@@ -506,7 +688,6 @@ int nv_drm_atomic_commit(struct drm_device *dev,
goto done;
}
nv_dev->drmMasterChangedSinceLastAtomicCommit = NV_FALSE;
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);

View File

@@ -147,22 +147,18 @@ struct nv_drm_device {
NvBool hasVideoMemory;
NvBool supportsSyncpts;
NvBool requiresVrrSemaphores;
NvBool subOwnershipGranted;
NvBool hasFramebufferConsole;
/**
* @drmMasterChangedSinceLastAtomicCommit:
*
* This flag is set in nv_drm_master_set and reset after a completed atomic
* commit. It is used to restore or recommit state that is lost by the
* NvKms modeset owner change, such as the CRTC color management
* properties.
*/
NvBool drmMasterChangedSinceLastAtomicCommit;
struct drm_property *nv_out_fence_property;
struct drm_property *nv_input_colorspace_property;
struct {
NvU32 count;
NvU32 next_index;
} display_semaphores;
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct drm_property *nv_hdr_output_metadata_property;
#endif
@@ -170,6 +166,19 @@ struct nv_drm_device {
struct nv_drm_device *next;
};
/*
 * Hand out display semaphore indices in round-robin order.
 *
 * Returns the current next_index and advances it, wrapping back to zero
 * once it reaches display_semaphores.count.  No locking is done here;
 * presumably callers serialize access — TODO confirm.
 */
static inline NvU32 nv_drm_next_display_semaphore(
    struct nv_drm_device *nv_dev)
{
    const NvU32 idx = nv_dev->display_semaphores.next_index;
    const NvU32 limit = nv_dev->display_semaphores.count;

    /* Advance, wrapping to the start of the pool. */
    nv_dev->display_semaphores.next_index =
        (idx + 1 >= limit) ? 0 : idx + 1;

    return idx;
}
static inline struct nv_drm_device *to_nv_device(
struct drm_device *dev)
{