580.65.06

Maneet Singh
2025-08-04 11:15:02 -07:00
parent d890313300
commit 307159f262
1315 changed files with 477791 additions and 279973 deletions

View File

@@ -29,12 +29,7 @@
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
#include <linux/bug.h>
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:

View File

@@ -1,156 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DMA_FENCE_HELPER_H__
#define __NVIDIA_DMA_FENCE_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
* The fence headers moved to dma-fence.h, and struct fence was
* renamed to dma_fence, by commit:
*
* 2016-10-25 : f54d1867005c3323f5d8ad83eed823e84226c429
*/
#if defined(NV_LINUX_FENCE_H_PRESENT)
#include <linux/fence.h>
#else
#include <linux/dma-fence.h>
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
typedef struct fence_cb nv_dma_fence_cb_t;
typedef fence_func_t nv_dma_fence_func_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
typedef struct dma_fence_cb nv_dma_fence_cb_t;
typedef dma_fence_func_t nv_dma_fence_func_t;
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
#else
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
#endif
static inline bool nv_dma_fence_is_signaled(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_is_signaled(fence);
#else
return dma_fence_is_signaled(fence);
#endif
}
static inline nv_dma_fence_t *nv_dma_fence_get(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_get(fence);
#else
return dma_fence_get(fence);
#endif
}
static inline void nv_dma_fence_put(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_put(fence);
#else
dma_fence_put(fence);
#endif
}
static inline signed long
nv_dma_fence_default_wait(nv_dma_fence_t *fence,
bool intr, signed long timeout) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_default_wait(fence, intr, timeout);
#else
return dma_fence_default_wait(fence, intr, timeout);
#endif
}
static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal(fence);
#else
return dma_fence_signal(fence);
#endif
}
static inline int nv_dma_fence_signal_locked(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal_locked(fence);
#else
return dma_fence_signal_locked(fence);
#endif
}
static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_context_alloc(num);
#else
return dma_fence_context_alloc(num);
#endif
}
static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
const nv_dma_fence_ops_t *ops,
spinlock_t *lock, u64 context, uint64_t seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_init(fence, ops, lock, context, seqno);
#else
dma_fence_init(fence, ops, lock, context, seqno);
#endif
}
static inline void
nv_dma_fence_set_error(nv_dma_fence_t *fence,
int error) {
#if defined(NV_DMA_FENCE_SET_ERROR_PRESENT)
return dma_fence_set_error(fence, error);
#elif defined(NV_FENCE_SET_ERROR_PRESENT)
return fence_set_error(fence, error);
#else
fence->status = error;
#endif
}
static inline int
nv_dma_fence_add_callback(nv_dma_fence_t *fence,
nv_dma_fence_cb_t *cb,
nv_dma_fence_func_t func) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_add_callback(fence, cb, func);
#else
return dma_fence_add_callback(fence, cb, func);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */
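
With this header deleted, the rest of the commit converts call sites from the nv_dma_fence_* wrappers to the upstream dma_fence API. A minimal before/after sketch at a hypothetical call site (the example_* functions are illustrative, not from this commit):

    #include <linux/dma-fence.h>

    /* Before: the wrappers above resolved to fence_*() or dma_fence_*()
     * at build time, depending on NV_LINUX_FENCE_H_PRESENT. */
    static void example_signal_old(nv_dma_fence_t *fence)
    {
        nv_dma_fence_signal(fence);
        nv_dma_fence_put(fence);
    }

    /* After: every supported kernel ships linux/dma-fence.h, so the
     * wrappers collapse into direct calls. */
    static void example_signal_new(struct dma_fence *fence)
    {
        dma_fence_signal(fence);
        dma_fence_put(fence);
    }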

View File

@@ -25,8 +25,6 @@
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
* linux/reservation.h was renamed to linux/dma-resv.h by commit
* 52791eeec1d9 (dma-buf: rename reservation_object to dma_resv)
@@ -39,7 +37,7 @@
#include <linux/reservation.h>
#endif
#include <nvidia-dma-fence-helper.h>
#include <linux/dma-fence.h>
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
typedef struct dma_resv nv_dma_resv_t;
@@ -108,7 +106,7 @@ static inline int nv_dma_resv_reserve_fences(nv_dma_resv_t *obj,
}
static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
nv_dma_fence_t *fence)
struct dma_fence *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
@@ -122,7 +120,7 @@ static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
}
static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
nv_dma_fence_t *fence)
struct dma_fence *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
@@ -135,6 +133,4 @@ static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */
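
The NV_DMA_RESV_ADD_FENCE_PRESENT branch above targets the current reservation-object API. A standalone sketch of that path, assuming a kernel with dma_resv_reserve_fences() and dma_resv_add_fence(); the function name is illustrative:

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    static int example_attach_fence(struct dma_resv *obj,
                                    struct dma_fence *fence)
    {
        int ret = dma_resv_lock(obj, NULL);

        if (ret)
            return ret;
        /* dma_resv_add_fence() may not allocate, so a slot must be
         * reserved while the object is held locked. */
        ret = dma_resv_reserve_fences(obj, 1);
        if (ret == 0)
            dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
        dma_resv_unlock(obj);
        return ret;
    }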

View File

@@ -55,11 +55,10 @@
#endif
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) || \
defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#define NV_DRM_FENCE_AVAILABLE
#else
#undef NV_DRM_FENCE_AVAILABLE
#include <linux/kconfig.h> // for IS_ENABLED()
#if IS_ENABLED(CONFIG_DRM) || defined(__FreeBSD__)
#define NV_DRM_AVAILABLE
#endif
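
A note on the IS_ENABLED() test above: it folds built-in and modular configurations into a single check, which is what lets one macro gate the whole driver. A minimal illustration (the EXAMPLE_* macro is made up):

    #include <linux/kconfig.h>

    /* IS_ENABLED(CONFIG_DRM) expands to 1 for CONFIG_DRM=y or
     * CONFIG_DRM=m, and to 0 when DRM is not configured. */
    #if IS_ENABLED(CONFIG_DRM)
    #define EXAMPLE_HAVE_DRM_CORE 1
    #else
    #define EXAMPLE_HAVE_DRM_CORE 0
    #endif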
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && \
@@ -87,17 +86,6 @@
#endif
#endif
/*
* We can support color management if either drm_helper_crtc_enable_color_mgmt()
* or drm_crtc_enable_color_mgmt() exist.
*/
#if defined(NV_DRM_HELPER_CRTC_ENABLE_COLOR_MGMT_PRESENT) || \
defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
#define NV_DRM_COLOR_MGMT_AVAILABLE
#else
#undef NV_DRM_COLOR_MGMT_AVAILABLE
#endif
/*
* Adapt to quirks in FreeBSD's Linux kernel compatibility layer.
*/

View File

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-helper.h"
#include "nvidia-drm-priv.h"
@@ -228,9 +228,6 @@ nv_drm_connector_detect(struct drm_connector *connector, bool force)
}
static struct drm_connector_funcs nv_connector_funcs = {
#if defined NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT
.dpms = drm_atomic_helper_connector_dpms,
#endif
.destroy = nv_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.force = __nv_drm_connector_force,
@@ -588,16 +585,11 @@ nv_drm_get_connector(struct drm_device *dev,
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
{
struct drm_connector *connector = NULL;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#else
struct drm_mode_config *config = &dev->mode_config;
mutex_lock(&config->mutex);
#endif
drm_connector_list_iter_begin(dev, &conn_iter);
/* Lookup for existing connector with same physical index */
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_for_each_connector_iter(connector, &conn_iter) {
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->physicalIndex == physicalIndex) {
@@ -612,11 +604,7 @@ nv_drm_get_connector(struct drm_device *dev,
connector = NULL;
done:
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#else
mutex_unlock(&config->mutex);
#endif
drm_connector_list_iter_end(&conn_iter);
if (!connector) {
connector = nv_drm_connector_new(dev,

View File

@@ -25,15 +25,13 @@
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
#include "nvtypes.h"
#include "nvkms-api-types.h"
@@ -101,6 +99,6 @@ nv_drm_get_connector(struct drm_device *dev,
bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
struct nv_drm_connector *nv_connector);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_CONNECTOR_H__ */

View File

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-helper.h"
#include "nvidia-drm-priv.h"
@@ -42,10 +42,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#if defined(NV_DRM_DRM_COLOR_MGMT_H_PRESENT)
#include <drm/drm_color_mgmt.h>
#endif
/*
* The two arrays below specify the PQ EOTF transfer function that's used to
@@ -150,15 +147,15 @@ nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
if ((expected_size > 0) &&
(new_blob->length != expected_size)) {
nv_drm_property_blob_put(new_blob);
drm_property_blob_put(new_blob);
return -EINVAL;
}
}
if (old_blob != new_blob) {
nv_drm_property_blob_put(old_blob);
drm_property_blob_put(old_blob);
if (new_blob) {
nv_drm_property_blob_get(new_blob);
drm_property_blob_get(new_blob);
}
*blob = new_blob;
*replaced = true;
@@ -166,7 +163,7 @@ nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
*replaced = false;
}
nv_drm_property_blob_put(new_blob);
drm_property_blob_put(new_blob);
return 0;
}
@@ -204,6 +201,15 @@ plane_req_config_disable(struct NvKmsKapiLayerRequestedConfig *req_config)
req_config->flags.srcWHChanged = NV_TRUE;
req_config->flags.dstXYChanged = NV_TRUE;
req_config->flags.dstWHChanged = NV_TRUE;
req_config->flags.cscChanged = NV_TRUE;
req_config->flags.inputTfChanged = NV_TRUE;
req_config->flags.outputTfChanged = NV_TRUE;
req_config->flags.inputColorSpaceChanged = NV_TRUE;
req_config->flags.inputColorRangeChanged = NV_TRUE;
req_config->flags.hdrMetadataChanged = NV_TRUE;
req_config->flags.matrixOverridesChanged = NV_TRUE;
req_config->flags.ilutChanged = NV_TRUE;
req_config->flags.tmoChanged = NV_TRUE;
}
static inline void
@@ -244,7 +250,6 @@ static NvU64 ctm_val_to_csc_val(NvU64 ctm_val)
return csc_val;
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
static void ctm_to_csc(struct NvKmsCscMatrix *nvkms_csc,
struct drm_color_ctm *drm_ctm)
{
@@ -261,7 +266,6 @@ static void ctm_to_csc(struct NvKmsCscMatrix *nvkms_csc,
}
}
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
static void ctm_3x4_to_csc(struct NvKmsCscMatrix *nvkms_csc,
struct drm_color_ctm_3x4 *drm_ctm_3x4)
@@ -394,6 +398,14 @@ static int init_drm_nvkms_surface(struct nv_drm_device *nv_dev,
struct NvKmsKapiDevice *pDevice = nv_dev->pDevice;
NvU8 compressible = 0; // No compression
struct NvKmsKapiAllocateMemoryParams allocParams = {
.layout = NvKmsSurfaceMemoryLayoutPitch,
.type = NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
.size = surface_params->surface_size,
.useVideoMemory = nv_dev->hasVideoMemory,
.compressible = &compressible,
};
struct NvKmsKapiCreateSurfaceParams params = {};
struct NvKmsKapiMemory *surface_mem;
struct NvKmsKapiSurface *surface;
@@ -404,21 +416,7 @@ static int init_drm_nvkms_surface(struct nv_drm_device *nv_dev,
params.height = surface_params->height;
/* Allocate displayable memory. */
if (nv_dev->hasVideoMemory) {
surface_mem =
nvKms->allocateVideoMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
surface_params->surface_size,
&compressible);
} else {
surface_mem =
nvKms->allocateSystemMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
surface_params->surface_size,
&compressible);
}
surface_mem = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
if (surface_mem == NULL) {
return -ENOMEM;
}
@@ -1187,7 +1185,6 @@ plane_req_config_update(struct drm_plane *plane,
req_config->config.csc = old_config.csc;
#if defined(NV_DRM_ROTATION_AVAILABLE)
/*
* plane_state->rotation is only valid when plane->rotation_property
* is non-NULL.
@@ -1225,7 +1222,6 @@ plane_req_config_update(struct drm_plane *plane,
break;
}
}
#endif
#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE)
if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) {
@@ -1630,7 +1626,6 @@ static int nv_drm_plane_atomic_check(struct drm_plane *plane,
return ret;
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (crtc_state->color_mgmt_changed) {
/*
* According to the comment in the Linux kernel's
@@ -1646,7 +1641,6 @@ static int nv_drm_plane_atomic_check(struct drm_plane *plane,
plane_requested_config->config.cscUseMain = NV_FALSE;
plane_requested_config->flags.cscChanged = NV_TRUE;
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
if (__is_async_flip_requested(plane, crtc_state)) {
/*
@@ -1666,7 +1660,6 @@ static int nv_drm_plane_atomic_check(struct drm_plane *plane,
return 0;
}
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
static bool nv_drm_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -1674,7 +1667,6 @@ static bool nv_drm_plane_format_mod_supported(struct drm_plane *plane,
/* All supported modifiers are compatible with all supported formats */
return true;
}
#endif
static int nv_drm_atomic_crtc_get_property(
struct drm_crtc *crtc,
@@ -1961,34 +1953,34 @@ nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
nv_plane_state->hdr_output_metadata = nv_old_plane_state->hdr_output_metadata;
if (nv_plane_state->hdr_output_metadata) {
nv_drm_property_blob_get(nv_plane_state->hdr_output_metadata);
drm_property_blob_get(nv_plane_state->hdr_output_metadata);
}
#endif
nv_plane_state->lms_ctm = nv_old_plane_state->lms_ctm;
if (nv_plane_state->lms_ctm) {
nv_drm_property_blob_get(nv_plane_state->lms_ctm);
drm_property_blob_get(nv_plane_state->lms_ctm);
}
nv_plane_state->lms_to_itp_ctm = nv_old_plane_state->lms_to_itp_ctm;
if (nv_plane_state->lms_to_itp_ctm) {
nv_drm_property_blob_get(nv_plane_state->lms_to_itp_ctm);
drm_property_blob_get(nv_plane_state->lms_to_itp_ctm);
}
nv_plane_state->itp_to_lms_ctm = nv_old_plane_state->itp_to_lms_ctm;
if (nv_plane_state->itp_to_lms_ctm) {
nv_drm_property_blob_get(nv_plane_state->itp_to_lms_ctm);
drm_property_blob_get(nv_plane_state->itp_to_lms_ctm);
}
nv_plane_state->blend_ctm = nv_old_plane_state->blend_ctm;
if (nv_plane_state->blend_ctm) {
nv_drm_property_blob_get(nv_plane_state->blend_ctm);
drm_property_blob_get(nv_plane_state->blend_ctm);
}
nv_plane_state->degamma_tf = nv_old_plane_state->degamma_tf;
nv_plane_state->degamma_lut = nv_old_plane_state->degamma_lut;
if (nv_plane_state->degamma_lut) {
nv_drm_property_blob_get(nv_plane_state->degamma_lut);
drm_property_blob_get(nv_plane_state->degamma_lut);
}
nv_plane_state->degamma_multiplier = nv_old_plane_state->degamma_multiplier;
nv_plane_state->degamma_changed = false;
@@ -2000,7 +1992,7 @@ nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
nv_plane_state->tmo_lut = nv_old_plane_state->tmo_lut;
if (nv_plane_state->tmo_lut) {
nv_drm_property_blob_get(nv_plane_state->tmo_lut);
drm_property_blob_get(nv_plane_state->tmo_lut);
}
nv_plane_state->tmo_changed = false;
nv_plane_state->tmo_drm_lut_surface =
@@ -2013,32 +2005,27 @@ nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
}
static inline void __nv_drm_plane_atomic_destroy_state(
struct drm_plane *plane,
struct drm_plane_state *state)
{
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(state);
#if defined(NV_DRM_ATOMIC_HELPER_PLANE_DESTROY_STATE_HAS_PLANE_ARG)
__drm_atomic_helper_plane_destroy_state(plane, state);
#else
__drm_atomic_helper_plane_destroy_state(state);
#endif
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
nv_drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
#endif
nv_drm_property_blob_put(nv_drm_plane_state->lms_ctm);
nv_drm_property_blob_put(nv_drm_plane_state->lms_to_itp_ctm);
nv_drm_property_blob_put(nv_drm_plane_state->itp_to_lms_ctm);
nv_drm_property_blob_put(nv_drm_plane_state->blend_ctm);
drm_property_blob_put(nv_drm_plane_state->lms_ctm);
drm_property_blob_put(nv_drm_plane_state->lms_to_itp_ctm);
drm_property_blob_put(nv_drm_plane_state->itp_to_lms_ctm);
drm_property_blob_put(nv_drm_plane_state->blend_ctm);
nv_drm_property_blob_put(nv_drm_plane_state->degamma_lut);
drm_property_blob_put(nv_drm_plane_state->degamma_lut);
if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
}
nv_drm_property_blob_put(nv_drm_plane_state->tmo_lut);
drm_property_blob_put(nv_drm_plane_state->tmo_lut);
if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->base.refcount,
free_drm_lut_surface);
@@ -2049,7 +2036,7 @@ static void nv_drm_plane_atomic_destroy_state(
struct drm_plane *plane,
struct drm_plane_state *state)
{
__nv_drm_plane_atomic_destroy_state(plane, state);
__nv_drm_plane_atomic_destroy_state(state);
nv_drm_free(to_nv_drm_plane_state(state));
}
@@ -2063,9 +2050,7 @@ static const struct drm_plane_funcs nv_plane_funcs = {
.atomic_set_property = nv_drm_plane_atomic_set_property,
.atomic_duplicate_state = nv_drm_plane_atomic_duplicate_state,
.atomic_destroy_state = nv_drm_plane_atomic_destroy_state,
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
.format_mod_supported = nv_drm_plane_format_mod_supported,
#endif
};
static const struct drm_plane_helper_funcs nv_plane_helper_funcs = {
@@ -2081,17 +2066,6 @@ static void nv_drm_crtc_destroy(struct drm_crtc *crtc)
nv_drm_free(nv_crtc);
}
static inline void
__nv_drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
#if defined(NV_DRM_ATOMIC_HELPER_CRTC_DESTROY_STATE_HAS_CRTC_ARG)
__drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
#else
__drm_atomic_helper_crtc_destroy_state(crtc_state);
#endif
}
static inline bool nv_drm_crtc_duplicate_req_head_modeset_config(
const struct NvKmsKapiHeadRequestedConfig *old,
struct NvKmsKapiHeadRequestedConfig *new)
@@ -2234,7 +2208,7 @@ nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
nv_state->regamma_tf = nv_old_state->regamma_tf;
nv_state->regamma_lut = nv_old_state->regamma_lut;
if (nv_state->regamma_lut) {
nv_drm_property_blob_get(nv_state->regamma_lut);
drm_property_blob_get(nv_state->regamma_lut);
}
nv_state->regamma_divisor = nv_old_state->regamma_divisor;
if (nv_state->regamma_drm_lut_surface) {
@@ -2263,9 +2237,9 @@ static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
nv_state->nv_flip = NULL;
}
__nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base);
__drm_atomic_helper_crtc_destroy_state(&nv_state->base);
nv_drm_property_blob_put(nv_state->regamma_lut);
drm_property_blob_put(nv_state->regamma_lut);
if (nv_state->regamma_drm_lut_surface != NULL) {
kref_put(&nv_state->regamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
@@ -2291,21 +2265,6 @@ static struct drm_crtc_funcs nv_crtc_funcs = {
#endif
};
/*
* In kernel versions before the addition of
* drm_crtc_state::connectors_changed, connector changes were
* reflected in drm_crtc_state::mode_changed.
*/
static inline bool
nv_drm_crtc_state_connectors_changed(struct drm_crtc_state *crtc_state)
{
#if defined(NV_DRM_CRTC_STATE_HAS_CONNECTORS_CHANGED)
return crtc_state->connectors_changed;
#else
return crtc_state->mode_changed;
#endif
}
static int head_modeset_config_attach_connector(
struct nv_drm_connector *nv_connector,
struct NvKmsKapiHeadModeSetConfig *head_modeset_config)
@@ -2322,7 +2281,6 @@ static int head_modeset_config_attach_connector(
return 0;
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
static int color_mgmt_config_copy_lut(struct NvKmsLutRamps *nvkms_lut,
struct drm_color_lut *drm_lut,
uint64_t lut_len)
@@ -2434,7 +2392,6 @@ static int color_mgmt_config_set_luts(struct nv_drm_crtc_state *nv_crtc_state,
return 0;
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
/**
* nv_drm_crtc_atomic_check() can fail after it has modified
@@ -2466,7 +2423,7 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
req_config->flags.modeChanged = NV_TRUE;
}
if (nv_drm_crtc_state_connectors_changed(crtc_state)) {
if (crtc_state->connectors_changed) {
struct NvKmsKapiHeadModeSetConfig *config = &req_config->modeSetConfig;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -2501,13 +2458,11 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
req_config->modeSetConfig.vrrEnabled = crtc_state->vrr_enabled;
#endif
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (crtc_state->color_mgmt_changed) {
if ((ret = color_mgmt_config_set_luts(nv_crtc_state, req_config)) != 0) {
return ret;
}
}
#endif
if (nv_crtc_state->regamma_changed) {
if (nv_crtc_state->regamma_drm_lut_surface != NULL) {
@@ -2754,7 +2709,6 @@ static void
__nv_drm_plane_create_rotation_property(struct drm_plane *plane,
NvU16 validLayerRRTransforms)
{
#if defined(NV_DRM_ROTATION_AVAILABLE)
enum NvKmsRotation curRotation;
NvU32 supported_rotations = 0;
struct NvKmsRRParams rrParams = {
@@ -2803,7 +2757,6 @@ __nv_drm_plane_create_rotation_property(struct drm_plane *plane,
drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
supported_rotations);
}
#endif
}
static struct drm_plane*
@@ -2813,13 +2766,11 @@ nv_drm_plane_create(struct drm_device *dev,
NvU32 head,
const struct NvKmsKapiDeviceResourcesInfo *pResInfo)
{
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
struct nv_drm_device *nv_dev = to_nv_device(dev);
const NvU64 linear_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
#endif
enum NvKmsCompositionBlendingMode defaultCompositionMode;
struct nv_drm_plane *nv_plane = NULL;
struct nv_drm_plane_state *nv_plane_state = NULL;
@@ -2884,16 +2835,10 @@ nv_drm_plane_create(struct drm_device *dev,
(1 << head) : 0,
&nv_plane_funcs,
formats, formats_count,
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
(plane_type == DRM_PLANE_TYPE_CURSOR) ?
linear_modifiers : nv_dev->modifiers,
#endif
plane_type
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG)
, NULL
#endif
);
linear_modifiers : nv_dev->modifiers,
plane_type,
NULL);
if (ret != 0) {
goto failed_plane_init;
}
@@ -2990,12 +2935,8 @@ static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,
ret = drm_crtc_init_with_planes(nv_dev->dev,
&nv_crtc->base,
primary_plane, cursor_plane,
&nv_crtc_funcs
#if defined(NV_DRM_CRTC_INIT_WITH_PLANES_HAS_NAME_ARG)
, NULL
#endif
);
&nv_crtc_funcs,
NULL);
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
@@ -3011,21 +2952,14 @@ static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,
nv_drm_crtc_install_properties(&nv_crtc->base);
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
#if defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
drm_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE, true,
NVKMS_LUT_ARRAY_SIZE);
#else
drm_helper_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE,
NVKMS_LUT_ARRAY_SIZE);
#endif
ret = drm_mode_crtc_set_gamma_size(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE);
if (ret != 0) {
NV_DRM_DEV_LOG_WARN(
nv_dev,
"Failed to initialize legacy gamma support for head %u", head);
}
#endif
return &nv_crtc->base;
@@ -3160,7 +3094,7 @@ int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
}
crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);
crtc = drm_crtc_find(dev, filep, params->crtc_id);
if (!crtc) {
return -ENOENT;
}
@@ -3188,7 +3122,7 @@ int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
}
crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);
crtc = drm_crtc_find(dev, filep, params->crtc_id);
if (!crtc) {
return -ENOENT;
}

View File

@@ -25,7 +25,7 @@
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-helper.h"
@@ -349,6 +349,6 @@ int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_CRTC_H__ */

View File

@@ -20,7 +20,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE and NV_DRM_DRM_GEM_H_PRESENT */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#include "nvidia-drm-priv.h"
#include "nvidia-drm-drv.h"
@@ -50,21 +50,10 @@
#include <drm/drm_atomic_uapi.h>
#endif
#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif
#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#endif
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRM_IOCTL_H_PRESENT)
#include <drm/drm_ioctl.h>
#endif
#if defined(NV_LINUX_APERTURE_H_PRESENT)
#include <linux/aperture.h>
@@ -103,18 +92,9 @@
#include <drm/drm_probe_helper.h>
#endif
#include <drm/drm_crtc_helper.h>
#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#if defined(NV_DRM_DRM_AUTH_H_PRESENT)
#include <drm/drm_auth.h>
#endif
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include <drm/drm_atomic_helper.h>
#endif
static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
struct drm_file *filep,
@@ -161,34 +141,25 @@ static char* nv_get_transfer_function_name(
}
};
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_OUTPUT_POLL_CHANGED_PRESENT)
static void nv_drm_output_poll_changed(struct drm_device *dev)
{
struct drm_connector *connector = NULL;
struct drm_mode_config *config = &dev->mode_config;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
drm_connector_list_iter_begin(dev, &conn_iter);
/*
* Here drm_mode_config::mutex has been acquired unconditionally:
*
* - In the non-NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must
* be held for the duration of walking over the connectors.
*
* - In the NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must be
* held for the duration of a fill_modes() call chain:
* Here drm_mode_config::mutex has been acquired unconditionally. The
* mutex must be held for the duration of a fill_modes() call chain:
* connector->funcs->fill_modes()
* |-> drm_helper_probe_single_connector_modes()
*
* It is easiest to always acquire the mutext for the entire connector
* It is easiest to always acquire the mutex for the entire connector
* loop.
*/
mutex_lock(&config->mutex);
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_for_each_connector_iter(connector, &conn_iter) {
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
@@ -203,39 +174,10 @@ static void nv_drm_output_poll_changed(struct drm_device *dev)
}
mutex_unlock(&config->mutex);
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#endif
drm_connector_list_iter_end(&conn_iter);
}
#endif /* NV_DRM_OUTPUT_POLL_CHANGED_PRESENT */
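
The iterator calls above are the upstream pattern this commit standardizes on throughout. As a standalone sketch (assumes a valid struct drm_device; the function name is illustrative):

    #include <drm/drm_connector.h>

    static void example_walk_connectors(struct drm_device *dev)
    {
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* The iterator takes a reference on each connector it hands
         * out, so the connector list may change during the walk. Any
         * fill_modes() call chain additionally needs
         * drm_mode_config::mutex, as the comment above explains. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
            /* inspect or probe each connector here */
        }
        drm_connector_list_iter_end(&conn_iter);
    }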
static struct drm_framebuffer *nv_drm_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
#if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG)
const struct drm_mode_fb_cmd2 *cmd
#else
struct drm_mode_fb_cmd2 *cmd
#endif
)
{
struct drm_mode_fb_cmd2 local_cmd;
struct drm_framebuffer *fb;
local_cmd = *cmd;
fb = nv_drm_internal_framebuffer_create(
dev,
file,
&local_cmd);
#if !defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG)
*cmd = local_cmd;
#endif
return fb;
}
static const struct drm_mode_config_funcs nv_mode_config_funcs = {
.fb_create = nv_drm_framebuffer_create,
@@ -443,8 +385,7 @@ nv_drm_init_mode_config(struct nv_drm_device *nv_dev,
dev->mode_config.async_page_flip = false;
#endif
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) && \
defined(NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS)
#if defined(NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS)
/* Allow clients to define framebuffer layouts using DRM format modifiers */
dev->mode_config.allow_fb_modifiers = true;
#endif
@@ -539,8 +480,6 @@ static void nv_drm_enumerate_encoders_and_connectors
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
/*!
* 'NV_DRM_OUT_FENCE_PTR' is an atomic per-plane property that clients can use
* to request an out-fence fd for a particular plane that's being flipped.
@@ -701,7 +640,6 @@ static int nv_drm_create_properties(struct nv_drm_device *nv_dev)
return 0;
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
* We can't just call drm_kms_helper_hotplug_event directly because
* fbdev_generic may attempt to set a mode from inside the hotplug event
@@ -720,29 +658,22 @@ static void nv_drm_handle_hotplug_event(struct work_struct *work)
drm_kms_helper_hotplug_event(nv_dev->dev);
}
#endif
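
The comment above describes a deferral pattern: the event source only schedules work, and drm_kms_helper_hotplug_event() runs later from a workqueue, outside the original callback. A minimal standalone sketch (the example_* names are illustrative):

    #include <linux/workqueue.h>
    #include <drm/drm_probe_helper.h>

    struct example_dev {
        struct drm_device *dev;
        struct delayed_work hotplug_event_work;
    };

    static void example_hotplug_worker(struct work_struct *work)
    {
        struct example_dev *edev =
            container_of(to_delayed_work(work), struct example_dev,
                         hotplug_event_work);

        /* Runs in process context, outside the event callback, so a
         * modeset triggered by the hotplug event cannot re-enter it. */
        drm_kms_helper_hotplug_event(edev->dev);
    }

    static void example_init(struct example_dev *edev)
    {
        INIT_DELAYED_WORK(&edev->hotplug_event_work,
                          example_hotplug_worker);
    }

    /* From the event source: defer rather than call directly. */
    static void example_on_hotplug(struct example_dev *edev)
    {
        schedule_delayed_work(&edev->hotplug_event_work, 0);
    }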
static int nv_drm_load(struct drm_device *dev, unsigned long flags)
static int nv_drm_dev_load(struct drm_device *dev)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
struct NvKmsKapiDevice *pDevice;
struct NvKmsKapiAllocateDeviceParams allocateDeviceParams;
struct NvKmsKapiDeviceResourcesInfo resInfo;
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
NvU64 kind;
NvU64 gen;
int i;
#endif
int ret;
struct nv_drm_device *nv_dev = to_nv_device(dev);
NV_DRM_DEV_LOG_INFO(nv_dev, "Loading driver");
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return 0;
}
@@ -826,7 +757,6 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_dev->vtFbBaseAddress = resInfo.vtFbBaseAddress;
nv_dev->vtFbSize = resInfo.vtFbSize;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
gen = nv_dev->pageKindGeneration;
kind = nv_dev->genericPageKind;
@@ -843,7 +773,6 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_dev->modifiers[i++] = DRM_FORMAT_MOD_LINEAR;
nv_dev->modifiers[i++] = DRM_FORMAT_MOD_INVALID;
#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */
/* Initialize drm_device::mode_config */
@@ -897,23 +826,17 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
mutex_unlock(&nv_dev->lock);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
return 0;
}
static void __nv_drm_unload(struct drm_device *dev)
static void nv_drm_dev_unload(struct drm_device *dev)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
struct NvKmsKapiDevice *pDevice = NULL;
#endif
struct nv_drm_device *nv_dev = to_nv_device(dev);
NV_DRM_DEV_LOG_INFO(nv_dev, "Unloading driver");
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return;
}
@@ -956,26 +879,8 @@ static void __nv_drm_unload(struct drm_device *dev)
mutex_unlock(&nv_dev->lock);
nvKms->freeDevice(pDevice);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
}
#if defined(NV_DRM_DRIVER_UNLOAD_HAS_INT_RETURN_TYPE)
static int nv_drm_unload(struct drm_device *dev)
{
__nv_drm_unload(dev);
return 0;
}
#else
static void nv_drm_unload(struct drm_device *dev)
{
__nv_drm_unload(dev);
}
#endif
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
static int __nv_drm_master_set(struct drm_device *dev,
struct drm_file *file_priv, bool from_open)
{
@@ -1052,28 +957,15 @@ int nv_drm_reset_input_colorspace(struct drm_device *dev)
}
out:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
// In case of success, drm_atomic_commit() takes care to cleanup and free state.
if (ret != 0) {
drm_atomic_state_free(state);
}
#endif
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
#if defined(NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG)
static
void nv_drm_master_drop(struct drm_device *dev,
struct drm_file *file_priv, bool from_release)
#else
static
void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
#endif
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
@@ -1116,29 +1008,6 @@ void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
}
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#if defined(NV_DRM_BUS_PRESENT) || defined(NV_DRM_DRIVER_HAS_SET_BUSID)
static int nv_drm_pci_set_busid(struct drm_device *dev,
struct drm_master *master)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
master->unique = nv_drm_asprintf("pci:%04x:%02x:%02x.%d",
nv_dev->gpu_info.pci_info.domain,
nv_dev->gpu_info.pci_info.bus,
nv_dev->gpu_info.pci_info.slot,
nv_dev->gpu_info.pci_info.function);
if (master->unique == NULL) {
return -ENOMEM;
}
master->unique_len = strlen(master->unique);
return 0;
}
#endif
static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
@@ -1160,7 +1029,6 @@ static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
params->supports_sync_fd = false;
params->supports_semsurf = false;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/* Memory allocation and semaphore surfaces are only supported
* if the modeset = 1 parameter is set */
if (nv_dev->pDevice != NULL) {
@@ -1171,12 +1039,9 @@ static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
if (nv_dev->semsurf_stride != 0) {
params->supports_semsurf = true;
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
params->supports_sync_fd = true;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
}
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
return 0;
}
@@ -1228,7 +1093,6 @@ int nv_drm_get_client_capability_ioctl(struct drm_device *dev,
return 0;
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
static bool nv_drm_connector_is_dpy_id(struct drm_connector *connector,
NvU32 dpyId)
{
@@ -1250,10 +1114,10 @@ static int nv_drm_get_dpy_id_for_connector_id_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
}
// Importantly, drm_connector_lookup (with filep) will only return the
// Importantly, drm_connector_lookup will only return the
// connector if we are master, a lessee with the connector, or not master at
// all. It will return NULL if we are a lessee with other connectors.
connector = nv_drm_connector_lookup(dev, filep, params->connectorId);
connector = drm_connector_lookup(dev, filep, params->connectorId);
if (!connector) {
return -EINVAL;
@@ -1273,7 +1137,7 @@ static int nv_drm_get_dpy_id_for_connector_id_ioctl(struct drm_device *dev,
params->dpyId = nv_connector->nv_detected_encoder->hDisplay;
done:
nv_drm_connector_put(connector);
drm_connector_put(connector);
return ret;
}
@@ -1284,27 +1148,21 @@ static int nv_drm_get_connector_id_for_dpy_id_ioctl(struct drm_device *dev,
struct drm_nvidia_get_connector_id_for_dpy_id_params *params = data;
struct drm_connector *connector;
int ret = -EINVAL;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
#endif
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EOPNOTSUPP;
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
/* Lookup for existing connector with same dpyId */
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (nv_drm_connector_is_dpy_id(connector, params->dpyId)) {
params->connectorId = connector->base.id;
ret = 0;
break;
}
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#endif
drm_connector_list_iter_end(&conn_iter);
return ret;
}
@@ -1337,9 +1195,7 @@ static int nv_drm_grant_modeset_permission(struct drm_device *dev,
struct drm_crtc *crtc;
NvU32 head = 0, freeHeadBits, targetHeadBit, possible_crtcs;
int ret = 0;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
@@ -1349,19 +1205,15 @@ static int nv_drm_grant_modeset_permission(struct drm_device *dev,
#endif
/* Get the connector for the dpyId. */
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (nv_drm_connector_is_dpy_id(connector, params->dpyId)) {
target_connector =
nv_drm_connector_lookup(dev, filep, connector->base.id);
drm_connector_lookup(dev, filep, connector->base.id);
break;
}
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#endif
drm_connector_list_iter_end(&conn_iter);
// Importantly, drm_connector_lookup/drm_crtc_find (with filep) will only
// return the object if we are master, a lessee with the object, or not
@@ -1384,7 +1236,7 @@ static int nv_drm_grant_modeset_permission(struct drm_device *dev,
freeHeadBits = 0;
nv_drm_for_each_crtc(crtc, dev) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
if (nv_drm_crtc_find(dev, filep, crtc->base.id) &&
if (drm_crtc_find(dev, filep, crtc->base.id) &&
!nv_crtc->modeset_permission_filep &&
(drm_crtc_mask(crtc) & possible_crtcs)) {
freeHeadBits |= NVBIT(nv_crtc->head);
@@ -1397,15 +1249,11 @@ static int nv_drm_grant_modeset_permission(struct drm_device *dev,
freeHeadBits = targetHeadBit;
} else {
/* Otherwise, remove heads that are in use by other connectors. */
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
freeHeadBits &= ~nv_drm_get_head_bit_from_connector(connector);
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#endif
drm_connector_list_iter_end(&conn_iter);
}
/* Fail if no heads are available. */
@@ -1439,7 +1287,7 @@ static int nv_drm_grant_modeset_permission(struct drm_device *dev,
done:
if (target_connector) {
nv_drm_connector_put(target_connector);
drm_connector_put(target_connector);
}
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
@@ -1554,9 +1402,7 @@ static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
struct drm_connector *connector;
struct drm_crtc *crtc;
int ret = 0;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
@@ -1578,10 +1424,8 @@ static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
* If dpyId is set, only revoke those specific resources. Otherwise,
* it is from closing the file so revoke all resources for that filep.
*/
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->modeset_permission_filep == filep &&
(!dpyId || nv_drm_connector_is_dpy_id(connector, dpyId))) {
@@ -1594,9 +1438,7 @@ static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
nv_drm_connector_revoke_permissions(dev, nv_connector);
}
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#endif
drm_connector_list_iter_end(&conn_iter);
nv_drm_for_each_crtc(crtc, dev) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
@@ -1607,22 +1449,7 @@ static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
ret = drm_atomic_commit(state);
done:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
if (ret != 0) {
drm_atomic_state_free(state);
} else {
/*
* In case of success, drm_atomic_commit() takes care to cleanup and
* free @state.
*
* Comment placed above drm_atomic_commit() says: The caller must not
* free or in any other way access @state. If the function fails then
* the caller must clean up @state itself.
*/
}
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
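
The fallback removed above served kernels where drm_atomic_state was not yet reference counted. On the kernels that remain supported the ownership rule is uniform, as in this sketch (illustrative wrapper, not from the driver):

    #include <drm/drm_atomic.h>

    static int example_commit_and_put(struct drm_atomic_state *state)
    {
        int ret = drm_atomic_commit(state);

        /* With refcounted state the caller drops its reference
         * unconditionally; the commit machinery holds its own
         * references for as long as it needs the state. */
        drm_atomic_state_put(state);
        return ret;
    }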
@@ -1703,7 +1530,6 @@ static void nv_drm_postclose(struct drm_device *dev, struct drm_file *filep)
nv_drm_revoke_modeset_permission(dev, filep, 0);
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
static int nv_drm_open(struct drm_device *dev, struct drm_file *filep)
{
@@ -1716,7 +1542,6 @@ static int nv_drm_open(struct drm_device *dev, struct drm_file *filep)
return 0;
}
#if defined(NV_DRM_MASTER_HAS_LEASES)
static struct drm_master *nv_drm_find_lessee(struct drm_master *master,
int lessee_id)
{
@@ -1812,9 +1637,7 @@ static void nv_drm_finish_revoking_objects(struct drm_device *dev,
{
struct drm_connector *connector;
struct drm_crtc *crtc;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
int ret = 0;
struct drm_modeset_acquire_ctx ctx;
@@ -1824,19 +1647,15 @@ static void nv_drm_finish_revoking_objects(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
#endif
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->modeset_permission_filep &&
nv_drm_is_in_objects(connector->base.id, objects, objects_count)) {
nv_drm_connector_revoke_permissions(dev, nv_connector);
}
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#endif
drm_connector_list_iter_end(&conn_iter);
nv_drm_for_each_crtc(crtc, dev) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
@@ -1852,38 +1671,6 @@ static void nv_drm_finish_revoking_objects(struct drm_device *dev,
mutex_unlock(&dev->mode_config.mutex);
#endif
}
#endif /* NV_DRM_MASTER_HAS_LEASES */
#if defined(NV_DRM_BUS_PRESENT)
#if defined(NV_DRM_BUS_HAS_GET_IRQ)
static int nv_drm_bus_get_irq(struct drm_device *dev)
{
return 0;
}
#endif
#if defined(NV_DRM_BUS_HAS_GET_NAME)
static const char *nv_drm_bus_get_name(struct drm_device *dev)
{
return "nvidia-drm";
}
#endif
static struct drm_bus nv_drm_bus = {
#if defined(NV_DRM_BUS_HAS_BUS_TYPE)
.bus_type = DRIVER_BUS_PCI,
#endif
#if defined(NV_DRM_BUS_HAS_GET_IRQ)
.get_irq = nv_drm_bus_get_irq,
#endif
#if defined(NV_DRM_BUS_HAS_GET_NAME)
.get_name = nv_drm_bus_get_name,
#endif
.set_busid = nv_drm_pci_set_busid,
};
#endif /* NV_DRM_BUS_PRESENT */
/*
* Wrapper around drm_ioctl to hook in to upstream ioctl.
@@ -1894,7 +1681,6 @@ static long nv_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
long retcode;
#if defined(NV_DRM_MASTER_HAS_LEASES)
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
int *objects = NULL;
@@ -1905,11 +1691,9 @@ static long nv_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
nv_drm_get_revoked_objects(dev, file_priv, cmd, arg, &objects,
&objects_count);
}
#endif
retcode = drm_ioctl(filp, cmd, arg);
#if defined(NV_DRM_MASTER_HAS_LEASES)
if (cmd == DRM_IOCTL_MODE_REVOKE_LEASE && objects) {
if (retcode == 0) {
// If revoking was successful, finish revoking the objects.
@@ -1918,7 +1702,6 @@ static long nv_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
nv_drm_free(objects);
}
#endif
return retcode;
}
@@ -1933,9 +1716,7 @@ static const struct file_operations nv_drm_fops = {
.compat_ioctl = drm_compat_ioctl,
#endif
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
.mmap = nv_drm_mmap,
#endif
.poll = drm_poll,
.read = drm_read,
@@ -1948,12 +1729,9 @@ static const struct file_operations nv_drm_fops = {
};
static const struct drm_ioctl_desc nv_drm_ioctls[] = {
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_NVKMS_MEMORY,
nv_drm_gem_import_nvkms_memory_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_USERSPACE_MEMORY,
nv_drm_gem_import_userspace_memory_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
@@ -1967,7 +1745,6 @@ static const struct drm_ioctl_desc nv_drm_ioctls[] = {
nv_drm_get_drm_file_unique_id_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
#if defined(NV_DRM_FENCE_AVAILABLE)
DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED,
nv_drm_fence_supported_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
@@ -1989,7 +1766,6 @@ static const struct drm_ioctl_desc nv_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_ATTACH,
nv_drm_semsurf_fence_attach_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif
/*
* DRM_UNLOCKED is implicit for all non-legacy DRM driver IOCTLs since Linux
@@ -2006,7 +1782,6 @@ static const struct drm_ioctl_desc nv_drm_ioctls[] = {
nv_drm_get_client_capability_ioctl,
0),
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32,
nv_drm_get_crtc_crc32_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
@@ -2040,7 +1815,6 @@ static const struct drm_ioctl_desc nv_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(NVIDIA_REVOKE_PERMISSIONS,
nv_drm_revoke_permission_ioctl,
DRM_UNLOCKED|DRM_MASTER),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
};
static struct drm_driver nv_drm_driver = {
@@ -2114,23 +1888,11 @@ static struct drm_driver nv_drm_driver = {
.gem_prime_res_obj = nv_drm_gem_prime_res_obj,
#endif
#if defined(NV_DRM_DRIVER_HAS_SET_BUSID)
.set_busid = nv_drm_pci_set_busid,
#endif
.load = nv_drm_load,
.unload = nv_drm_unload,
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
.postclose = nv_drm_postclose,
#endif
.open = nv_drm_open,
.fops = &nv_drm_fops,
#if defined(NV_DRM_BUS_PRESENT)
.bus = &nv_drm_bus,
#endif
.name = "nvidia-drm",
.desc = "NVIDIA DRM driver",
@@ -2139,9 +1901,7 @@ static struct drm_driver nv_drm_driver = {
.date = "20160202",
#endif
#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
.device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
#if defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
.legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
#endif
// XXX implement nvidia-drm's own .fbdev_probe callback that uses NVKMS kapi directly
@@ -2160,8 +1920,6 @@ static struct drm_driver nv_drm_driver = {
*/
void nv_drm_update_drm_driver_features(void)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (!nv_drm_modeset_module_param) {
return;
}
@@ -2176,7 +1934,6 @@ void nv_drm_update_drm_driver_features(void)
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
}
@@ -2208,9 +1965,7 @@ void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *gpu_info)
nv_dev->gpu_info = gpu_info->gpuInfo;
nv_dev->gpu_mig_device = gpu_info->migDevice;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
mutex_init(&nv_dev->lock);
#endif
/* Allocate DRM device */
@@ -2237,6 +1992,12 @@ void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *gpu_info)
}
#endif
/* Load DRM device before registering it */
if (nv_drm_dev_load(dev) != 0) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to load device");
goto failed_drm_load;
}
/* Register DRM device to DRM sub-system */
if (drm_dev_register(dev, 0) != 0) {
@@ -2304,7 +2065,11 @@ void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *gpu_info)
failed_drm_register:
nv_drm_dev_free(dev);
nv_drm_dev_unload(dev);
failed_drm_load:
drm_dev_put(dev);
failed_drm_alloc:
@@ -2317,45 +2082,18 @@ failed_drm_alloc:
#if defined(NV_LINUX)
int nv_drm_probe_devices(void)
{
struct NvKmsKapiGpuInfo *gpu_info = NULL;
NvU32 gpu_count = 0;
NvU32 i;
int ret = 0;
NvU32 gpu_count;
nv_drm_update_drm_driver_features();
/* Enumerate NVIDIA GPUs */
gpu_info = nv_drm_calloc(NV_MAX_GPUS, sizeof(*gpu_info));
if (gpu_info == NULL) {
ret = -ENOMEM;
NV_DRM_LOG_ERR("Failed to allocate gpu ids arrays");
goto done;
}
gpu_count = nvKms->enumerateGpus(gpu_info);
/* Register DRM device for each NVIDIA GPU available via NVKMS. */
gpu_count = nvKms->enumerateGpus(nv_drm_register_drm_device);
if (gpu_count == 0) {
NV_DRM_LOG_INFO("Not found NVIDIA GPUs");
goto done;
NV_DRM_LOG_INFO("No NVIDIA GPUs found");
}
WARN_ON(gpu_count > NV_MAX_GPUS);
/* Register DRM device for each NVIDIA GPU */
for (i = 0; i < gpu_count; i++) {
nv_drm_register_drm_device(&gpu_info[i]);
}
done:
nv_drm_free(gpu_info);
return ret;
return 0;
}
#endif
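
nv_drm_probe_devices() now hands nvKms->enumerateGpus() a callback instead of a preallocated array, inverting control over GPU discovery. A sketch of the new contract from the caller's side (the callback shape is inferred from this call site, and the example_* names are illustrative):

    /* Inferred shape: enumerateGpus() invokes the callback once per
     * probed GPU and returns the number of GPUs it reported. */
    static void example_on_gpu(const struct NvKmsKapiGpuInfo *gpu_info)
    {
        /* per-GPU work, e.g. registering a DRM device */
    }

    static int example_probe(void)
    {
        NvU32 gpu_count = nvKms->enumerateGpus(example_on_gpu);

        if (gpu_count == 0)
            NV_DRM_LOG_INFO("No NVIDIA GPUs found");
        return 0;
    }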
@@ -2369,7 +2107,8 @@ void nv_drm_remove_devices(void)
struct drm_device *dev = dev_list->dev;
drm_dev_unregister(dev);
nv_drm_dev_free(dev);
nv_drm_dev_unload(dev);
drm_dev_put(dev);
nv_drm_free(dev_list);
@@ -2420,7 +2159,6 @@ void nv_drm_suspend_resume(NvBool suspend)
}
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
nv_dev = dev_list;
/*
@@ -2446,7 +2184,6 @@ void nv_drm_suspend_resume(NvBool suspend)
drm_kms_helper_poll_enable(dev);
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
done:
mutex_unlock(&nv_drm_suspend_mutex);

View File

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-encoder.h"
@@ -139,12 +139,8 @@ nv_drm_encoder_new(struct drm_device *dev,
ret = drm_encoder_init(dev,
&nv_encoder->base, &nv_encoder_funcs,
nvkms_connector_signal_to_drm_encoder_signal(format)
#if defined(NV_DRM_ENCODER_INIT_HAS_NAME_ARG)
, NULL
#endif
);
nvkms_connector_signal_to_drm_encoder_signal(format),
NULL);
if (ret != 0) {
nv_drm_free(nv_encoder);
@@ -336,17 +332,6 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
return;
}
/*
* On some kernels, DRM has the notion of a "primary group" that
* tracks the global mode setting state for the device.
*
* On kernels where DRM has a primary group, we need to reinitialize
* after adding encoders and connectors.
*/
#if defined(NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT)
drm_reinit_primary_mode_group(dev);
#endif
schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
}
#endif

View File

@@ -25,15 +25,11 @@
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#if defined(NV_DRM_DRM_ENCODER_H_PRESENT)
#include <drm/drm_encoder.h>
#else
#include <drm/drmP.h>
#endif
#include "nvkms-kapi.h"
@@ -63,6 +59,6 @@ void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_ENCODER_H__ */

View File

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-ioctl.h"
@@ -41,7 +41,7 @@ static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
/* Unreference gem object */
for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
struct drm_gem_object *gem = nv_fb_get_gem_obj(fb, i);
struct drm_gem_object *gem = fb->obj[i];
if (gem != NULL) {
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
nv_drm_gem_object_unreference_unlocked(nv_gem);
@@ -73,7 +73,7 @@ nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file, unsigned int *handle)
{
return nv_drm_gem_handle_create(file,
to_nv_gem_object(nv_fb_get_gem_obj(fb, 0)),
to_nv_gem_object(fb->obj[0]),
handle);
}
@@ -83,11 +83,10 @@ static struct drm_framebuffer_funcs nv_framebuffer_funcs = {
};
static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
struct drm_device *dev,
struct nv_drm_device *nv_dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd)
const struct drm_mode_fb_cmd2 *cmd)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_framebuffer *nv_fb;
struct nv_drm_gem_object *nv_gem;
const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
@@ -109,7 +108,7 @@ static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
}
for (i = 0; i < num_planes; i++) {
nv_gem = nv_drm_gem_object_lookup(dev, file, cmd->handles[i]);
nv_gem = nv_drm_gem_object_lookup(file, cmd->handles[i]);
if (nv_gem == NULL) {
NV_DRM_DEV_DEBUG_DRIVER(
@@ -118,7 +117,7 @@ static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
goto failed;
}
nv_fb_set_gem_obj(&nv_fb->base, i, &nv_gem->base);
nv_fb->base.obj[i] = &nv_gem->base;
}
return nv_fb;
@@ -154,27 +153,13 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
}
for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
struct drm_gem_object *gem = nv_fb_get_gem_obj(fb, i);
struct drm_gem_object *gem = fb->obj[i];
if (gem != NULL) {
nv_gem = to_nv_gem_object(gem);
params.planes[i].memory = nv_gem->pMemory;
params.planes[i].offset = fb->offsets[i];
params.planes[i].pitch = fb->pitches[i];
/*
* XXX Use drm_framebuffer_funcs.dirty and
* drm_fb_helper_funcs.fb_dirty instead
*
* Currently using noDisplayCaching when registering surfaces with
* NVKMS that are using memory allocated through the DRM
* Dumb-Buffers API. This prevents Display Idle Frame Rate from
* kicking in and keeping CPU updates to the surface memory from
* being reflected on the display. Ideally, DIFR would be
* dynamically disabled whenever a user of the memory blits to the
* frontbuffer. DRM provides the needed callbacks to achieve this.
*/
params.noDisplayCaching |= !!nv_gem->is_drm_dumb;
}
}
params.height = fb->height;
@@ -254,19 +239,17 @@ fail:
return -EINVAL;
}
struct drm_framebuffer *nv_drm_internal_framebuffer_create(
struct drm_framebuffer *nv_drm_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd)
const struct drm_mode_fb_cmd2 *cmd)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_framebuffer *nv_fb;
uint64_t modifier = 0;
int ret;
enum NvKmsSurfaceMemoryFormat format;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
int i;
#endif
bool have_modifier = false;
/* Check whether NvKms supports the given pixel format */
@@ -277,7 +260,6 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
return ERR_PTR(-EINVAL);
}
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
if (cmd->flags & DRM_MODE_FB_MODIFIERS) {
have_modifier = true;
modifier = cmd->modifier[0];
@@ -296,9 +278,8 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
return ERR_PTR(-EINVAL);
}
}
#endif
nv_fb = nv_drm_framebuffer_alloc(dev, file, cmd);
nv_fb = nv_drm_framebuffer_alloc(nv_dev, file, cmd);
if (IS_ERR(nv_fb)) {
return (struct drm_framebuffer *)nv_fb;
}
@@ -306,9 +287,7 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
/* Fill out framebuffer metadata from the userspace fb creation request */
drm_helper_mode_fill_fb_struct(
#if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG)
dev,
#endif
&nv_fb->base,
cmd);

View File

@@ -25,27 +25,19 @@
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT)
#include <drm/drm_framebuffer.h>
#endif
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvkms-kapi.h"
struct nv_drm_framebuffer {
struct NvKmsKapiSurface *pSurface;
#if !defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
struct drm_gem_object*
obj[NVKMS_MAX_PLANES_PER_SURFACE];
#endif
struct drm_framebuffer base;
};
@@ -58,34 +50,11 @@ static inline struct nv_drm_framebuffer *to_nv_framebuffer(
return container_of(fb, struct nv_drm_framebuffer, base);
}
-static inline struct drm_gem_object *nv_fb_get_gem_obj(
-    struct drm_framebuffer *fb,
-    uint32_t plane)
-{
-#if defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
-    return fb->obj[plane];
-#else
-    return to_nv_framebuffer(fb)->obj[plane];
-#endif
-}
-static inline void nv_fb_set_gem_obj(
-    struct drm_framebuffer *fb,
-    uint32_t plane,
-    struct drm_gem_object *obj)
-{
-#if defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
-    fb->obj[plane] = obj;
-#else
-    to_nv_framebuffer(fb)->obj[plane] = obj;
-#endif
-}
-struct drm_framebuffer *nv_drm_internal_framebuffer_create(
+struct drm_framebuffer *nv_drm_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
-    struct drm_mode_fb_cmd2 *cmd);
+    const struct drm_mode_fb_cmd2 *cmd);
-#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_FB_H__ */

View File

@@ -34,9 +34,7 @@
#include "nvidia-drm-fence.h"
#include "nvidia-dma-resv-helper.h"
-#if defined(NV_DRM_FENCE_AVAILABLE)
-#include "nvidia-dma-fence-helper.h"
+#include <linux/dma-fence.h>
#define NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS 5000
@@ -83,42 +81,42 @@ struct nv_drm_prime_fence_context {
struct nv_drm_prime_fence {
struct list_head list_entry;
-    nv_dma_fence_t base;
+    struct dma_fence base;
spinlock_t lock;
};
static inline
-struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence)
+struct nv_drm_prime_fence *to_nv_drm_prime_fence(struct dma_fence *fence)
{
return container_of(fence, struct nv_drm_prime_fence, base);
}
static const char*
-nv_drm_gem_fence_op_get_driver_name(nv_dma_fence_t *fence)
+nv_drm_gem_fence_op_get_driver_name(struct dma_fence *fence)
{
return "NVIDIA";
}
static const char*
-nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence)
+nv_drm_gem_prime_fence_op_get_timeline_name(struct dma_fence *fence)
{
return "nvidia.prime";
}
-static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence)
+static bool nv_drm_gem_prime_fence_op_enable_signaling(struct dma_fence *fence)
{
// DO NOTHING
return true;
}
-static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence)
+static void nv_drm_gem_prime_fence_op_release(struct dma_fence *fence)
{
struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence);
nv_drm_free(nv_fence);
}
static signed long
-nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
+nv_drm_gem_prime_fence_op_wait(struct dma_fence *fence,
bool intr, signed long timeout)
{
/*
@@ -131,12 +129,12 @@ nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
* that it should never get hit during normal operation, but not so long
* that the system becomes unresponsive.
*/
-    return nv_dma_fence_default_wait(fence, intr,
+    return dma_fence_default_wait(fence, intr,
(timeout == MAX_SCHEDULE_TIMEOUT) ?
msecs_to_jiffies(96) : timeout);
}
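For context, a minimal caller-side sketch (not from this driver) of how the capped wait above is observed; dma_fence_wait_timeout() dispatches to the fence's .wait op when one is provided:

static int example_wait(struct dma_fence *fence)
{
    signed long remain = dma_fence_wait_timeout(fence, true /* intr */,
                                                msecs_to_jiffies(5000));

    if (remain == 0)
        return -ETIMEDOUT;  /* .wait gave up before the fence signaled */
    if (remain < 0)
        return (int)remain; /* e.g. -ERESTARTSYS if interrupted */
    return 0;               /* signaled; 'remain' jiffies were left over */
}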
-static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = {
+static const struct dma_fence_ops nv_drm_gem_prime_fence_ops = {
.get_driver_name = nv_drm_gem_fence_op_get_driver_name,
.get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
.enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
@@ -148,8 +146,8 @@ static inline void
__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
{
list_del(&nv_fence->list_entry);
-    nv_dma_fence_signal(&nv_fence->base);
-    nv_dma_fence_put(&nv_fence->base);
+    dma_fence_signal(&nv_fence->base);
+    dma_fence_put(&nv_fence->base);
}
static void nv_drm_gem_prime_force_fence_signal(
@@ -289,13 +287,13 @@ __nv_drm_prime_fence_context_new(
}
/*
- * nv_dma_fence_context_alloc() cannot fail, so we do not need
+ * dma_fence_context_alloc() cannot fail, so we do not need
* to check a return value.
*/
nv_prime_fence_context->base.ops = &nv_drm_prime_fence_context_ops;
nv_prime_fence_context->base.nv_dev = nv_dev;
-    nv_prime_fence_context->base.context = nv_dma_fence_context_alloc(1);
+    nv_prime_fence_context->base.context = dma_fence_context_alloc(1);
nv_prime_fence_context->base.fenceSemIndex = p->index;
nv_prime_fence_context->pSemSurface = pSemSurface;
nv_prime_fence_context->pLinearAddress = pLinearAddress;
@@ -343,7 +341,7 @@ failed:
return NULL;
}
-static nv_dma_fence_t *__nv_drm_prime_fence_context_create_fence(
+static struct dma_fence *__nv_drm_prime_fence_context_create_fence(
struct nv_drm_prime_fence_context *nv_prime_fence_context,
unsigned int seqno)
{
@@ -369,12 +367,12 @@ static nv_dma_fence_t *__nv_drm_prime_fence_context_create_fence(
spin_lock_init(&nv_fence->lock);
-    nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
-                      &nv_fence->lock, nv_prime_fence_context->base.context,
-                      seqno);
+    dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
+                   &nv_fence->lock, nv_prime_fence_context->base.context,
+                   seqno);
/* The context maintains a reference to any pending fences. */
-    nv_dma_fence_get(&nv_fence->base);
+    dma_fence_get(&nv_fence->base);
list_add_tail(&nv_fence->list_entry, &nv_prime_fence_context->pending);
@@ -424,12 +422,11 @@ const struct nv_drm_gem_object_funcs nv_fence_context_gem_ops = {
static inline
struct nv_drm_fence_context *
__nv_drm_fence_context_lookup(
-    struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
-        nv_drm_gem_object_lookup(dev, filp, handle);
+        nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &nv_fence_context_gem_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);
@@ -491,7 +488,7 @@ done:
}
static int __nv_drm_gem_attach_fence(struct nv_drm_gem_object *nv_gem,
-                                     nv_dma_fence_t *fence,
+                                     struct dma_fence *fence,
bool shared)
{
nv_dma_resv_t *resv = nv_drm_gem_res_obj(nv_gem);
@@ -524,7 +521,7 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
struct nv_drm_gem_object *nv_gem;
struct nv_drm_fence_context *nv_fence_context;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
if (nv_dev->pDevice == NULL) {
ret = -EOPNOTSUPP;
@@ -536,7 +533,7 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
goto done;
}
-    nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
+    nv_gem = nv_drm_gem_object_lookup(filep, p->handle);
if (!nv_gem) {
NV_DRM_DEV_LOG_ERR(
@@ -548,7 +545,6 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
}
if((nv_fence_context = __nv_drm_fence_context_lookup(
-        nv_dev->dev,
filep,
p->fence_context_handle)) == NULL) {
@@ -587,7 +583,7 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
ret = __nv_drm_gem_attach_fence(nv_gem, fence, true /* exclusive */);
-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);
fence_context_create_fence_failed:
nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
@@ -600,7 +596,7 @@ done:
}
struct nv_drm_semsurf_fence {
-    nv_dma_fence_t base;
+    struct dma_fence base;
spinlock_t lock;
/*
@@ -628,7 +624,7 @@ struct nv_drm_semsurf_fence_callback {
};
struct nv_drm_sync_fd_wait_data {
-    nv_dma_fence_cb_t dma_fence_cb;
+    struct dma_fence_cb dma_fence_cb;
struct nv_drm_semsurf_fence_ctx *ctx;
nv_drm_work work; /* Deferred second half of fence wait callback */
@@ -759,15 +755,15 @@ __nv_drm_semsurf_force_complete_pending(struct nv_drm_semsurf_fence_ctx *ctx)
&ctx->pending_fences,
typeof(*nv_fence),
pending_node);
-        nv_dma_fence_t *fence = &nv_fence->base;
+        struct dma_fence *fence = &nv_fence->base;
list_del(&nv_fence->pending_node);
-        nv_dma_fence_set_error(fence, -ETIMEDOUT);
-        nv_dma_fence_signal(fence);
+        dma_fence_set_error(fence, -ETIMEDOUT);
+        dma_fence_signal(fence);
/* Remove the pending list's reference */
-        nv_dma_fence_put(fence);
+        dma_fence_put(fence);
}
/*
@@ -824,7 +820,7 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
struct list_head finished;
struct list_head timed_out;
struct nv_drm_semsurf_fence *nv_fence;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
NvU64 currentSeqno = __nv_drm_get_semsurf_ctx_seqno(ctx);
NvU64 fenceSeqno = 0;
unsigned long flags;
@@ -888,8 +884,8 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
nv_fence = list_first_entry(&finished, typeof(*nv_fence), pending_node);
list_del_init(&nv_fence->pending_node);
fence = &nv_fence->base;
-        nv_dma_fence_signal(fence);
-        nv_dma_fence_put(fence); /* Drops the pending list's reference */
+        dma_fence_signal(fence);
+        dma_fence_put(fence); /* Drops the pending list's reference */
}
while (!list_empty(&timed_out)) {
@@ -897,9 +893,9 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
pending_node);
list_del_init(&nv_fence->pending_node);
fence = &nv_fence->base;
-        nv_dma_fence_set_error(fence, -ETIMEDOUT);
-        nv_dma_fence_signal(fence);
-        nv_dma_fence_put(fence); /* Drops the pending list's reference */
+        dma_fence_set_error(fence, -ETIMEDOUT);
+        dma_fence_signal(fence);
+        dma_fence_put(fence); /* Drops the pending list's reference */
}
}
@@ -1265,13 +1261,13 @@ __nv_drm_semsurf_fence_ctx_new(
}
/*
- * nv_dma_fence_context_alloc() cannot fail, so we do not need
+ * dma_fence_context_alloc() cannot fail, so we do not need
* to check a return value.
*/
ctx->base.ops = &nv_drm_semsurf_fence_ctx_ops;
ctx->base.nv_dev = nv_dev;
-    ctx->base.context = nv_dma_fence_context_alloc(1);
+    ctx->base.context = dma_fence_context_alloc(1);
ctx->base.fenceSemIndex = p->index;
ctx->pSemSurface = pSemSurface;
ctx->pSemMapping.pVoid = semMapping;
@@ -1343,26 +1339,26 @@ int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
}
static inline struct nv_drm_semsurf_fence*
-to_nv_drm_semsurf_fence(nv_dma_fence_t *fence)
+to_nv_drm_semsurf_fence(struct dma_fence *fence)
{
return container_of(fence, struct nv_drm_semsurf_fence, base);
}
static const char*
-__nv_drm_semsurf_fence_op_get_timeline_name(nv_dma_fence_t *fence)
+__nv_drm_semsurf_fence_op_get_timeline_name(struct dma_fence *fence)
{
return "nvidia.semaphore_surface";
}
static bool
-__nv_drm_semsurf_fence_op_enable_signaling(nv_dma_fence_t *fence)
+__nv_drm_semsurf_fence_op_enable_signaling(struct dma_fence *fence)
{
// DO NOTHING - Could defer RM callback registration until this point
return true;
}
static void
-__nv_drm_semsurf_fence_op_release(nv_dma_fence_t *fence)
+__nv_drm_semsurf_fence_op_release(struct dma_fence *fence)
{
struct nv_drm_semsurf_fence *nv_fence =
to_nv_drm_semsurf_fence(fence);
@@ -1370,12 +1366,12 @@ __nv_drm_semsurf_fence_op_release(nv_dma_fence_t *fence)
nv_drm_free(nv_fence);
}
-static const nv_dma_fence_ops_t nv_drm_semsurf_fence_ops = {
+static const struct dma_fence_ops nv_drm_semsurf_fence_ops = {
.get_driver_name = nv_drm_gem_fence_op_get_driver_name,
.get_timeline_name = __nv_drm_semsurf_fence_op_get_timeline_name,
.enable_signaling = __nv_drm_semsurf_fence_op_enable_signaling,
.release = __nv_drm_semsurf_fence_op_release,
-    .wait = nv_dma_fence_default_wait,
+    .wait = dma_fence_default_wait,
#if defined(NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO)
.use_64bit_seqno = true,
#endif
@@ -1401,7 +1397,7 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
}
/* Add a reference to the fence for the list */
-    nv_dma_fence_get(&nv_fence->base);
+    dma_fence_get(&nv_fence->base);
INIT_LIST_HEAD(&nv_fence->pending_node);
nv_fence->timeout = nv_drm_timeout_from_ms(timeoutMS);
@@ -1434,14 +1430,14 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
__nv_drm_semsurf_ctx_reg_callbacks(ctx);
}
-static nv_dma_fence_t *__nv_drm_semsurf_fence_ctx_create_fence(
+static struct dma_fence *__nv_drm_semsurf_fence_ctx_create_fence(
struct nv_drm_device *nv_dev,
struct nv_drm_semsurf_fence_ctx *ctx,
NvU64 wait_value,
NvU64 timeout_value_ms)
{
struct nv_drm_semsurf_fence *nv_fence;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
int ret = 0;
if (timeout_value_ms == 0 ||
@@ -1461,9 +1457,9 @@ static nv_dma_fence_t *__nv_drm_semsurf_fence_ctx_create_fence(
#endif
/* Initializes the fence with one reference (for the caller) */
-    nv_dma_fence_init(fence, &nv_drm_semsurf_fence_ops,
-                      &nv_fence->lock,
-                      ctx->base.context, wait_value);
+    dma_fence_init(fence, &nv_drm_semsurf_fence_ops,
+                   &nv_fence->lock,
+                   ctx->base.context, wait_value);
__nv_drm_semsurf_ctx_add_pending(ctx, nv_fence, timeout_value_ms);
@@ -1479,7 +1475,7 @@ int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_semsurf_fence_create_params *p = data;
struct nv_drm_fence_context *nv_fence_context;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
int ret = -EINVAL;
int fd;
@@ -1494,7 +1490,6 @@ int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
}
if ((nv_fence_context = __nv_drm_fence_context_lookup(
-        nv_dev->dev,
filep,
p->fence_context_handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
@@ -1550,7 +1545,7 @@ fence_context_create_sync_failed:
* FD will still hold a reference, and the pending list (if the fence hasn't
* already been signaled) will also retain a reference.
*/
-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);
fence_context_create_fence_failed:
nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
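The sync FD returned by this ioctl behaves like any other sync file. A hedged userspace-side sketch (not part of the driver) of waiting on it; sync files report POLLIN once the underlying fence signals:

#include <poll.h>

/* Returns 1 if the fence signaled, 0 on timeout, -1 on error. */
static int wait_sync_fd(int sync_fd, int timeout_ms)
{
    struct pollfd pfd = { .fd = sync_fd, .events = POLLIN };
    int ret = poll(&pfd, 1, timeout_ms);

    return ret < 0 ? -1 : (ret == 0 ? 0 : 1);
}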
@@ -1608,8 +1603,8 @@ __nv_drm_semsurf_wait_fence_work_cb
static void
__nv_drm_semsurf_wait_fence_cb
(
-    nv_dma_fence_t *fence,
-    nv_dma_fence_cb_t *cb
+    struct dma_fence *fence,
+    struct dma_fence_cb *cb
)
{
struct nv_drm_sync_fd_wait_data *wait_data =
@@ -1634,7 +1629,7 @@ __nv_drm_semsurf_wait_fence_cb
}
/* Don't need to reference the fence anymore, just the fence context. */
-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);
}
int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
@@ -1646,7 +1641,7 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
struct nv_drm_fence_context *nv_fence_context;
struct nv_drm_semsurf_fence_ctx *ctx;
struct nv_drm_sync_fd_wait_data *wait_data = NULL;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
unsigned long flags;
int ret = -EINVAL;
@@ -1663,7 +1658,6 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
}
if ((nv_fence_context = __nv_drm_fence_context_lookup(
-        nv_dev->dev,
filep,
p->fence_context_handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
@@ -1716,9 +1710,9 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
list_add(&wait_data->pending_node, &ctx->pending_waits);
spin_unlock_irqrestore(&ctx->lock, flags);
-    ret = nv_dma_fence_add_callback(fence,
-                                    &wait_data->dma_fence_cb,
-                                    __nv_drm_semsurf_wait_fence_cb);
+    ret = dma_fence_add_callback(fence,
+                                 &wait_data->dma_fence_cb,
+                                 __nv_drm_semsurf_wait_fence_cb);
if (ret) {
if (ret == -ENOENT) {
@@ -1730,7 +1724,7 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
}
/* Execute second half of wait immediately, avoiding the worker thread */
-        nv_dma_fence_put(fence);
+        dma_fence_put(fence);
__nv_drm_semsurf_wait_fence_work_cb(wait_data);
}
@@ -1759,7 +1753,7 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
struct drm_nvidia_semsurf_fence_attach_params *p = data;
struct nv_drm_gem_object *nv_gem = NULL;
struct nv_drm_fence_context *nv_fence_context = NULL;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
int ret = -EINVAL;
if (nv_dev->pDevice == NULL) {
@@ -1767,7 +1761,7 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
goto done;
}
-    nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
+    nv_gem = nv_drm_gem_object_lookup(filep, p->handle);
if (!nv_gem) {
NV_DRM_DEV_LOG_ERR(
@@ -1779,7 +1773,6 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
}
nv_fence_context = __nv_drm_fence_context_lookup(
-        nv_dev->dev,
filep,
p->fence_context_handle);
@@ -1819,7 +1812,7 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
ret = __nv_drm_gem_attach_fence(nv_gem, fence, p->shared);
-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);
done:
if (nv_fence_context) {
@@ -1833,6 +1826,4 @@ done:
return ret;
}
-#endif /* NV_DRM_FENCE_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */

View File

@@ -30,8 +30,6 @@
struct drm_file;
struct drm_device;
-#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_fence_supported_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
@@ -57,8 +55,6 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
-#endif /* NV_DRM_FENCE_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_PRIME_FENCE_H__ */

View File

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>

View File

@@ -25,7 +25,7 @@
#include "nvidia-drm-conftest.h"
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#include <drm/drm_fourcc.h>
@@ -40,6 +40,6 @@ uint32_t *nv_drm_format_array_alloc(
bool nv_drm_format_is_yuv(u32 format);
-#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_FORMAT_H__ */

View File

@@ -24,17 +24,13 @@
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-ioctl.h"
@@ -47,12 +43,10 @@ void __nv_drm_gem_dma_buf_free(struct nv_drm_gem_object *nv_gem)
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
struct nv_drm_gem_dma_buf *nv_dma_buf = to_nv_dma_buf(nv_gem);
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (nv_dma_buf->base.pMemory) {
/* Free NvKmsKapiMemory handle associated with this gem object */
nvKms->freeMemory(nv_dev->pDevice, nv_dma_buf->base.pMemory);
}
-#endif
drm_prime_gem_destroy(&nv_gem->base, nv_dma_buf->sgt);
@@ -157,13 +151,11 @@ nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
BUG_ON(dma_buf->size % PAGE_SIZE);
pMemory = NULL;
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice,
(NvP64)(NvUPtr)dma_buf,
dma_buf->size - 1);
}
-#endif
nv_drm_gem_object_init(nv_dev, &nv_dma_buf->base,
&__nv_gem_dma_buf_ops, dma_buf->size, pMemory);
@@ -194,7 +186,7 @@ int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
}
if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(
-            dev, filep, p->handle)) == NULL) {
+            filep, p->handle)) == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(
nv_dev,
@@ -203,7 +195,6 @@ int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
goto done;
}
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (!nv_dma_buf->base.pMemory) {
/*
@@ -218,7 +209,6 @@ int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
nv_dma_buf->base.base.size - 1);
}
}
-#endif
if (!nv_dma_buf->base.pMemory && !pTmpMemory) {
ret = -ENOMEM;

View File

@@ -48,12 +48,11 @@ static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf(
static inline
struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup(
-    struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
-        nv_drm_gem_object_lookup(dev, filp, handle);
+        nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_dma_buf_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);

View File

@@ -22,27 +22,20 @@
#include "nvidia-drm-conftest.h"
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"
-#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
-#endif
-#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
-#endif
#include <linux/io.h>
#if defined(NV_BSD)
#include <vm/vm_pageout.h>
#endif
#include "nv-mm.h"
static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
{
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
@@ -94,10 +87,9 @@ static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
-    unsigned long address = nv_page_fault_va(vmf);
+    unsigned long address = vmf->address;
struct drm_gem_object *gem = vma->vm_private_data;
unsigned long page_offset, pfn;
vm_fault_t ret;
@@ -146,8 +138,6 @@ static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
}
#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */
return ret;
-#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
-    return VM_FAULT_SIGBUS;
}
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
@@ -340,6 +330,7 @@ int nv_drm_dumb_create(
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
uint8_t compressible = 0;
struct NvKmsKapiMemory *pMemory;
+    struct NvKmsKapiAllocateMemoryParams allocParams = { };
int ret = 0;
args->pitch = roundup(args->width * ((args->bpp + 7) >> 3),
@@ -357,20 +348,14 @@ int nv_drm_dumb_create(
goto fail;
}
-    if (nv_dev->hasVideoMemory) {
-        pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
-                                             NvKmsSurfaceMemoryLayoutPitch,
-                                             NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
-                                             args->size,
-                                             &compressible);
-    } else {
-        pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
-                                              NvKmsSurfaceMemoryLayoutPitch,
-                                              NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
-                                              args->size,
-                                              &compressible);
-    }
+    allocParams.layout = NvKmsSurfaceMemoryLayoutPitch;
+    allocParams.type = NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;
+    allocParams.size = args->size;
+    allocParams.noDisplayCaching = true;
+    allocParams.useVideoMemory = nv_dev->hasVideoMemory;
+    allocParams.compressible = &compressible;
+    pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
if (pMemory == NULL) {
ret = -ENOMEM;
NV_DRM_DEV_LOG_ERR(
@@ -385,8 +370,6 @@ int nv_drm_dumb_create(
goto nvkms_gem_obj_init_failed;
}
nv_nvkms_memory->base.is_drm_dumb = true;
/* Always map dumb buffer memory up front. Clients are only expected
* to use dumb buffers for software rendering, so they're not much use
* without a CPU mapping.
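For reference, the userspace side of the dumb-buffer flow this comment assumes, as a hedged sketch (sizes arbitrary; error handling trimmed):

#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/drm.h>

static void *map_dumb_fb(int drm_fd)
{
    struct drm_mode_create_dumb create = { .height = 768, .width = 1024, .bpp = 32 };
    struct drm_mode_map_dumb map = { 0 };

    if (drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) != 0)
        return NULL;

    map.handle = create.handle;
    if (drmIoctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map) != 0)
        return NULL;

    /* The kernel side above pre-mapped this memory; userspace just mmaps. */
    return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                drm_fd, map.offset);
}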
@@ -482,7 +465,6 @@ int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
}
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
-            dev,
filep,
p->handle)) == NULL) {
ret = -EINVAL;
@@ -519,8 +501,7 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
struct NvKmsKapiMemory *pMemory;
-    enum NvKmsSurfaceMemoryLayout layout;
-    enum NvKmsKapiAllocationType type;
+    struct NvKmsKapiAllocateMemoryParams allocParams = { };
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -540,25 +521,15 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
goto failed;
}
-    layout = p->block_linear ?
+    allocParams.layout = p->block_linear ?
NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch;
-    type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ?
+    allocParams.type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ?
        NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN : NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;
+    allocParams.size = p->memory_size;
+    allocParams.useVideoMemory = nv_dev->hasVideoMemory;
+    allocParams.compressible = &p->compressible;
-    if (nv_dev->hasVideoMemory) {
-        pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
-                                             layout,
-                                             type,
-                                             p->memory_size,
-                                             &p->compressible);
-    } else {
-        pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
-                                              layout,
-                                              type,
-                                              p->memory_size,
-                                              &p->compressible);
-    }
+    pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
if (pMemory == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev,
@@ -640,7 +611,6 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
int ret = -EINVAL;
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
-            dev,
file,
handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(

View File

@@ -25,7 +25,7 @@
#include "nvidia-drm-conftest.h"
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-gem.h"
@@ -72,12 +72,11 @@ static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const(
static inline
struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup(
-    struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
-        nv_drm_gem_object_lookup(dev, filp, handle);
+        nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &nv_gem_nvkms_memory_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);

View File

@@ -24,9 +24,7 @@
#if defined(NV_DRM_AVAILABLE)
-#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
-#endif
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-helper.h"
@@ -116,11 +114,7 @@ static vm_fault_t __nv_vm_insert_mixed_helper(
{
int ret;
-#if defined(NV_PFN_TO_PFN_T_PRESENT)
    ret = vm_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
-#else
-    ret = vm_insert_mixed(vma, address, pfn);
-#endif
switch (ret) {
case 0:
@@ -145,7 +139,7 @@ static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
struct vm_fault *vmf)
{
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
-    unsigned long address = nv_page_fault_va(vmf);
+    unsigned long address = vmf->address;
struct drm_gem_object *gem = vma->vm_private_data;
unsigned long page_offset;
unsigned long pfn;
@@ -157,7 +151,13 @@ static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
#if !defined(NV_LINUX)
return vmf_insert_pfn(vma, address, pfn);
#elif defined(NV_VMF_INSERT_MIXED_PRESENT)
#if defined(NV_LINUX_PFN_T_H_PRESENT)
return vmf_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
#else
return vmf_insert_mixed(vma, address, pfn);
#endif
#else
return __nv_vm_insert_mixed_helper(vma, address, pfn);
#endif

View File

@@ -52,12 +52,11 @@ int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
static inline
struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup(
-    struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
-        nv_drm_gem_object_lookup(dev, filp, handle);
+        nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_user_memory_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);

View File

@@ -35,17 +35,10 @@
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-gem-nvkms-memory.h"
-#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
-#endif
-#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
-#endif
-#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
-#endif
#include <drm/drm_vma_manager.h>
#include "linux/dma-buf.h"
@@ -58,7 +51,7 @@ void nv_drm_gem_free(struct drm_gem_object *gem)
/* Cleanup core gem object */
drm_gem_object_release(&nv_gem->base);
-#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
+#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_fini(&nv_gem->resv);
#endif
@@ -135,7 +128,7 @@ void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
/* Initialize the gem object */
-#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
+#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_init(&nv_gem->resv);
#endif
@@ -155,7 +148,6 @@ void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
-#if defined(NV_DMA_BUF_OWNER_PRESENT)
struct drm_gem_object *gem_dst;
struct nv_drm_gem_object *nv_gem_src;
@@ -179,7 +171,6 @@ struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
return gem_dst;
}
}
-#endif /* NV_DMA_BUF_OWNER_PRESENT */
return drm_gem_prime_import(dev, dma_buf);
}
@@ -231,8 +222,7 @@ int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
struct nv_drm_gem_object *nv_gem;
int ret;
-    if ((nv_gem = nv_drm_gem_object_lookup(dev,
-                                           filep,
+    if ((nv_gem = nv_drm_gem_object_lookup(filep,
params->handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
@@ -257,7 +247,6 @@ int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
return ret;
}
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
struct drm_file *priv = file->private_data;
@@ -268,8 +257,8 @@ int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
struct nv_drm_gem_object *nv_gem;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-    node = nv_drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
-                                                 vma->vm_pgoff, vma_pages(vma));
+    node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+                                              vma->vm_pgoff, vma_pages(vma));
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
@@ -295,7 +284,7 @@ int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
goto done;
}
-    if (!nv_drm_vma_node_is_allowed(node, file)) {
+    if (!drm_vma_node_is_allowed(node, file->private_data)) {
ret = -EACCES;
goto done;
}
@@ -317,7 +306,6 @@ done:
return ret;
}
-#endif
int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
@@ -332,23 +320,21 @@ int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
}
-    nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle);
+    nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(filep, p->handle);
if (nv_dma_buf) {
p->object_type = NV_GEM_OBJECT_DMABUF;
nv_gem = &nv_dma_buf->base;
goto done;
}
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
-    nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(dev, filep, p->handle);
+    nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(filep, p->handle);
if (nv_nvkms_memory) {
p->object_type = NV_GEM_OBJECT_NVKMS;
nv_gem = &nv_nvkms_memory->base;
goto done;
}
-#endif
-    nv_user_memory = nv_drm_gem_object_user_memory_lookup(dev, filep, p->handle);
+    nv_user_memory = nv_drm_gem_object_user_memory_lookup(filep, p->handle);
if (nv_user_memory) {
p->object_type = NV_GEM_OBJECT_USERMEMORY;
nv_gem = &nv_user_memory->base;

View File

@@ -33,17 +33,12 @@
#include <drm/drmP.h>
#endif
-#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
-#endif
#include "nvkms-kapi.h"
#include "nv-mm.h"
-#if defined(NV_DRM_FENCE_AVAILABLE)
-#include "nvidia-dma-fence-helper.h"
#include "nvidia-dma-resv-helper.h"
-#endif
#include "linux/dma-buf.h"
@@ -73,9 +68,7 @@ struct nv_drm_gem_object {
struct NvKmsKapiMemory *pMemory;
bool is_drm_dumb;
-#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
+#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_t resv;
#endif
};
@@ -90,47 +83,14 @@ static inline struct nv_drm_gem_object *to_nv_gem_object(
return NULL;
}
-/*
- * drm_gem_object_{get/put}() added by commit
- * e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and
- * drm_gem_object_{reference/unreference}() removed by commit
- * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
- */
static inline void
nv_drm_gem_object_reference(struct nv_drm_gem_object *nv_gem)
{
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
    drm_gem_object_get(&nv_gem->base);
-#else
-    drm_gem_object_reference(&nv_gem->base);
-#endif
}
static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
-#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT)
-    drm_gem_object_put_unlocked(&nv_gem->base);
-#else
    drm_gem_object_put(&nv_gem->base);
-#endif
-#else
-    drm_gem_object_unreference_unlocked(&nv_gem->base);
-#endif
}
static inline void
nv_drm_gem_object_unreference(struct nv_drm_gem_object *nv_gem)
{
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
    drm_gem_object_put(&nv_gem->base);
-#else
-    drm_gem_object_unreference(&nv_gem->base);
-#endif
}
static inline int nv_drm_gem_handle_create_drop_reference(
@@ -171,17 +131,10 @@ done:
void nv_drm_gem_free(struct drm_gem_object *gem);
static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup(
-    struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
-#if (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 3)
-    return to_nv_gem_object(drm_gem_object_lookup(dev, filp, handle));
-#elif (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 2)
    return to_nv_gem_object(drm_gem_object_lookup(filp, handle));
-#else
-#error "Unknown argument count of drm_gem_object_lookup()"
-#endif
}
static inline int nv_drm_gem_handle_create(struct drm_file *filp,
@@ -191,7 +144,6 @@ static inline int nv_drm_gem_handle_create(struct drm_file *filp,
return drm_gem_handle_create(filp, &nv_gem->base, handle);
}
-#if defined(NV_DRM_FENCE_AVAILABLE)
static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
@@ -200,7 +152,6 @@ static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem
return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv;
#endif
}
-#endif
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,

View File

@@ -33,7 +33,7 @@
#include "nvmisc.h"
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
@@ -43,27 +43,7 @@
#include <drm/drm_atomic_uapi.h>
#endif
-/*
- * The inclusion of drm_framebuffer.h was removed from drm_crtc.h by commit
- * 720cf96d8fec ("drm: Drop drm_framebuffer.h from drm_crtc.h") in v6.0.
- *
- * We only need drm_framebuffer.h for drm_framebuffer_put(), and it is always
- * present (v4.9+) when drm_framebuffer_{put,get}() is present (v4.12+), so it
- * is safe to unconditionally include it when drm_framebuffer_get() is present.
- */
-#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
#include <drm/drm_framebuffer.h>
-#endif
-static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb)
-{
-#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
-    drm_framebuffer_put(fb);
-#else
-    drm_framebuffer_unreference(fb);
-#endif
-}
/*
* drm_atomic_helper_disable_all() has been added by commit
@@ -149,7 +129,6 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
}
-#if defined(NV_DRM_ROTATION_AVAILABLE)
nv_drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -159,7 +138,6 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
plane_state->rotation = DRM_MODE_ROTATE_0;
}
-#endif
nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
@@ -189,29 +167,15 @@ free:
WARN_ON(plane->state->crtc);
if (plane->old_fb)
-            __nv_drm_framebuffer_put(plane->old_fb);
+            drm_framebuffer_put(plane->old_fb);
}
plane->old_fb = NULL;
}
}
-#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
    drm_atomic_state_put(state);
-#else
-    if (ret != 0) {
-        drm_atomic_state_free(state);
-    } else {
-        /*
-         * In case of success, drm_atomic_commit() takes care to cleanup and
-         * free @state.
-         *
-         * Comment placed above drm_atomic_commit() says: The caller must not
-         * free or in any other way access @state. If the function fails then
-         * the caller must clean up @state itself.
-         */
-    }
-#endif
return ret;
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */

View File

@@ -31,43 +31,17 @@
#include <drm/drmP.h>
#endif
-#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
-#endif
-#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE)
-/* For DRM_ROTATE_* , DRM_REFLECT_* */
+#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE)
#include <drm/drm_blend.h>
#endif
#if defined(NV_DRM_ROTATION_AVAILABLE) || \
defined(NV_DRM_COLOR_CTM_3X4_PRESENT) || \
defined(NV_DRM_COLOR_LUT_PRESENT)
/*
* For DRM_MODE_ROTATE_*, DRM_MODE_REFLECT_*, struct drm_color_ctm_3x4, and
* struct drm_color_lut.
*/
#include <uapi/drm/drm_mode.h>
#endif
#if defined(NV_DRM_ROTATION_AVAILABLE)
/*
 * Commit c2c446ad29437bb92b157423c632286608ebd3ec (2017-05-19) added
 * DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to the UAPI and removed
 * DRM_ROTATE_* and DRM_REFLECT_*.
*/
#if !defined(DRM_MODE_ROTATE_0)
#define DRM_MODE_ROTATE_0 DRM_ROTATE_0
#define DRM_MODE_ROTATE_90 DRM_ROTATE_90
#define DRM_MODE_ROTATE_180 DRM_ROTATE_180
#define DRM_MODE_ROTATE_270 DRM_ROTATE_270
#define DRM_MODE_REFLECT_X DRM_REFLECT_X
#define DRM_MODE_REFLECT_Y DRM_REFLECT_Y
#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK
#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK
#endif
#endif //NV_DRM_ROTATION_AVAILABLE
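With the aliases above in place, rotation support can be advertised uniformly across kernels. A hedged sketch (illustration only, not code from this driver):

static void example_expose_rotation(struct drm_plane *plane)
{
#if defined(NV_DRM_ROTATION_AVAILABLE)
    /* Uses the DRM_MODE_* names, which the aliases above guarantee. */
    drm_plane_create_rotation_property(plane,
                                       DRM_MODE_ROTATE_0,
                                       DRM_MODE_ROTATE_0 |
                                       DRM_MODE_ROTATE_180 |
                                       DRM_MODE_REFLECT_X |
                                       DRM_MODE_REFLECT_Y);
#endif
}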
/*
* Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to
@@ -76,29 +50,7 @@
*/
#define NV_DRM_USE_EXTENDED_PROPERTIES (DRM_OBJECT_MAX_PROPERTY >= 64)
-/*
- * drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
- * (2017-09-26) and drm_dev_unref() is removed by
- * ba1d345401476a5f7fbad622607c5a1f95e59b31 (2018-11-15).
- *
- * drm_dev_unref() has been added and drm_dev_free() removed by commit -
- *
- * 2014-01-29: 099d1c290e2ebc3b798961a6c177c3aef5f0b789
- */
-static inline void nv_drm_dev_free(struct drm_device *dev)
-{
-#if defined(NV_DRM_DEV_PUT_PRESENT)
-    drm_dev_put(dev);
-#elif defined(NV_DRM_DEV_UNREF_PRESENT)
-    drm_dev_unref(dev);
-#else
-    drm_dev_free(dev);
-#endif
-}
-#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
-#endif
static inline struct sg_table*
nv_drm_prime_pages_to_sg(struct drm_device *dev,
@@ -111,8 +63,6 @@ nv_drm_prime_pages_to_sg(struct drm_device *dev,
#endif
}
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
* drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
* drm_for_each_encoder and drm_for_each_plane() were added by kernel
@@ -166,18 +116,6 @@ nv_drm_prime_pages_to_sg(struct drm_device *dev,
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
#endif
-#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
    drm_for_each_connector_iter(connector, conn_iter)
-#elif defined(drm_for_each_connector)
-#define nv_drm_for_each_connector(connector, conn_iter, dev) \
-    drm_for_each_connector(connector, dev)
-#else
-#define nv_drm_for_each_connector(connector, conn_iter, dev) \
-    WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); \
-    list_for_each_entry(connector, &(dev)->mode_config.connector_list, head)
-#endif
#if defined(drm_for_each_encoder)
#define nv_drm_for_each_encoder(encoder, dev) \
drm_for_each_encoder(encoder, dev)
@@ -348,72 +286,8 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
#endif
-static inline struct drm_connector *
-nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
-                        uint32_t id)
-{
-#if !defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
-    return drm_connector_find(dev, id);
-#elif defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
-    return drm_connector_lookup(dev, filep, id);
-#else
-    return drm_connector_lookup(dev, id);
-#endif
-}
-static inline void nv_drm_connector_put(struct drm_connector *connector)
-{
-#if defined(NV_DRM_CONNECTOR_PUT_PRESENT)
-    drm_connector_put(connector);
-#elif defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
-    drm_connector_unreference(connector);
-#endif
-}
-static inline void nv_drm_property_blob_put(struct drm_property_blob *blob)
-{
-#if defined(NV_DRM_PROPERTY_BLOB_PUT_PRESENT)
-    drm_property_blob_put(blob);
-#else
-    drm_property_unreference_blob(blob);
-#endif
-}
-static inline void nv_drm_property_blob_get(struct drm_property_blob *blob)
-{
-#if defined(NV_DRM_PROPERTY_BLOB_PUT_PRESENT)
-    drm_property_blob_get(blob);
-#else
-    drm_property_reference_blob(blob);
-#endif
-}
-static inline struct drm_crtc *
-nv_drm_crtc_find(struct drm_device *dev, struct drm_file *filep, uint32_t id)
-{
-#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
-    return drm_crtc_find(dev, filep, id);
-#else
-    return drm_crtc_find(dev, id);
-#endif
-}
-static inline struct drm_encoder *nv_drm_encoder_find(struct drm_device *dev,
-                                                      uint32_t id)
-{
-#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
-    return drm_encoder_find(dev, NULL /* file_priv */, id);
-#else
-    return drm_encoder_find(dev, id);
-#endif
-}
-#if defined(NV_DRM_DRM_AUTH_H_PRESENT)
#include <drm/drm_auth.h>
-#endif
-#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
-#endif
/*
* drm_file_get_master() added by commit 56f0729a510f ("drm: protect drm_master
@@ -438,10 +312,6 @@ static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
* Ville Syrjälä <ville.syrjala@linux.intel.com>
*
- * drm_connector_for_each_possible_encoder() is copied from
- * include/drm/drm_connector.h and modified to use nv_drm_encoder_find()
- * instead of drm_encoder_find().
- *
* drm_connector_for_each_possible_encoder() is copied from
* include/drm/drm_connector.h @
* 83aefbb887b59df0b3520965c3701e01deacfc52
* which has the following copyright and license information:
@@ -467,9 +337,7 @@ static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
* OF THIS SOFTWARE.
*/
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
/**
* nv_drm_connector_for_each_possible_encoder - iterate connector's possible
@@ -488,8 +356,9 @@ static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
(connector)->encoder_ids[(__i)] != 0; (__i)++) \
for_each_if((encoder) = \
-                nv_drm_encoder_find((connector)->dev, \
-                                    (connector)->encoder_ids[(__i)]))
+                drm_encoder_find((connector)->dev, NULL, \
+                                 (connector)->encoder_ids[(__i)]))
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
{ \
@@ -544,80 +413,14 @@ nv_drm_connector_update_edid_property(struct drm_connector *connector,
#endif
}
-#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
-#include <drm/drm_connector.h>
-static inline
-void nv_drm_connector_list_iter_begin(struct drm_device *dev,
-                                      struct drm_connector_list_iter *iter)
-{
-#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
-    drm_connector_list_iter_begin(dev, iter);
-#else
-    drm_connector_list_iter_get(dev, iter);
-#endif
-}
-static inline
-void nv_drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
-{
-#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
-    drm_connector_list_iter_end(iter);
-#else
-    drm_connector_list_iter_put(iter);
-#endif
-}
-#endif
/*
* The drm_format_num_planes() function was added by commit d0d110e09629 drm:
* Add drm_format_num_planes() utility function in v3.3 (2011-12-20). Prototype
* was moved from drm_crtc.h to drm_fourcc.h by commit ae4df11a0f53 (drm: Move
* format-related helpers to drm_fourcc.c) in v4.8 (2016-06-09).
* drm_format_num_planes() has been removed by commit 05c452c115bf (drm: Remove
* users of drm_format_num_planes) in v5.3 (2019-05-16).
*
* drm_format_info() is available only from v4.10 (2016-10-18), added by commit
* 84770cc24f3a (drm: Centralize format information).
*/
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
static inline int nv_drm_format_num_planes(uint32_t format)
{
-#if defined(NV_DRM_FORMAT_NUM_PLANES_PRESENT)
-    return drm_format_num_planes(format);
-#else
    const struct drm_format_info *info = drm_format_info(format);
    return info != NULL ? info->num_planes : 1;
-#endif
}
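A short usage sketch of the fallback above (illustration only; assumes a kernel new enough that drm_framebuffer carries the ->format pointer):

static void example_walk_planes(const struct drm_framebuffer *fb)
{
    int i;
    int num = nv_drm_format_num_planes(fb->format->format);

    for (i = 0; i < num; i++) {
        /* Per-plane layout as filled in by the fb creation path. */
        pr_info("plane %d: pitch %u, offset %u\n",
                i, fb->pitches[i], fb->offsets[i]);
    }
}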
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
/*
* DRM_FORMAT_MOD_LINEAR was also defined after the original modifier support
* was added to the kernel, as a more explicit alias of DRM_FORMAT_MOD_NONE
*/
#if !defined(DRM_FORMAT_MOD_VENDOR_NONE)
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#endif
#if !defined(DRM_FORMAT_MOD_LINEAR)
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
#endif
/*
* DRM_FORMAT_MOD_INVALID was defined after the original modifier support was
* added to the kernel, for use as a sentinel value.
*/
#if !defined(DRM_FORMAT_RESERVED)
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#endif
#if !defined(DRM_FORMAT_MOD_INVALID)
#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
#endif
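These backfilled macros are typically consumed as a plane's format-modifier list, which DRM expects to be terminated with the DRM_FORMAT_MOD_INVALID sentinel. A minimal sketch:

static const uint64_t example_plane_modifiers[] = {
    DRM_FORMAT_MOD_LINEAR,      /* explicit alias of DRM_FORMAT_MOD_NONE */
    DRM_FORMAT_MOD_INVALID,     /* sentinel terminating the list */
};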
/*
* DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called
 * DRM_FORMAT_MOD_VENDOR_NV.
@@ -640,8 +443,6 @@ static inline int nv_drm_format_num_planes(uint32_t format)
(((c) & 0x7) << 23)))
#endif
#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */
/*
* DRM_UNLOCKED was removed with commit 2798ffcc1d6a ("drm: Remove locking for
* legacy ioctls and DRM_UNLOCKED") in v6.8, but it was previously made
@@ -666,92 +467,6 @@ struct drm_color_ctm_3x4 {
};
#endif
/*
* struct drm_color_lut was added by commit 5488dc16fde7 ("drm: introduce pipe
* color correction properties") in v4.6. For backwards compatibility, define it
* when not present.
*/
#if !defined(NV_DRM_COLOR_LUT_PRESENT)
struct drm_color_lut {
__u16 red;
__u16 green;
__u16 blue;
__u16 reserved;
};
#endif
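For reference, a hedged sketch of how such a LUT is typically walked when programming hardware gamma (illustration only; the blob layout is an array of struct drm_color_lut):

static void example_load_gamma(const struct drm_crtc_state *state)
{
    if (state->gamma_lut != NULL) {
        const struct drm_color_lut *lut = state->gamma_lut->data;
        int i, n = state->gamma_lut->length / sizeof(*lut);

        for (i = 0; i < n; i++) {
            /* Each component is a 16-bit value; narrow as the HW needs. */
            u16 r = lut[i].red, g = lut[i].green, b = lut[i].blue;

            (void)r; (void)g; (void)b; /* program HW LUT entry i here */
        }
    }
}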
-/*
- * drm_vma_offset_exact_lookup_locked() were added
- * by kernel commit 2225cfe46bcc which was Signed-off-by:
- *     Daniel Vetter <daniel.vetter@intel.com>
- *
- * drm_vma_offset_exact_lookup_locked() were copied from
- * include/drm/drm_vma_manager.h @ 2225cfe46bcc
- * which has the following copyright and license information:
- *
- * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
#include <drm/drm_vma_manager.h>
-/**
- * nv_drm_vma_offset_exact_lookup_locked() - Look up node by exact address
- * @mgr: Manager object
- * @start: Start address (page-based, not byte-based)
- * @pages: Size of object (page-based)
- *
- * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
- * It only returns the exact object with the given start address.
- *
- * RETURNS:
- * Node at exact start address @start.
- */
-static inline struct drm_vma_offset_node *
-nv_drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
-                                      unsigned long start,
-                                      unsigned long pages)
-{
-#if defined(NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT)
-    return drm_vma_offset_exact_lookup_locked(mgr, start, pages);
-#else
-    struct drm_vma_offset_node *node;
-    node = drm_vma_offset_lookup_locked(mgr, start, pages);
-    return (node && node->vm_node.start == start) ? node : NULL;
-#endif
-}
-static inline bool
-nv_drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-                           struct file *filp)
-{
-#if defined(NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG)
-    return drm_vma_node_is_allowed(node, filp->private_data);
-#else
-    return drm_vma_node_is_allowed(node, filp);
-#endif
-}
-#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_HELPER_H__ */

View File

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#include "nvidia-drm-priv.h"
#include "nvidia-drm-modeset.h"
@@ -34,10 +34,7 @@
#include <drm/drmP.h>
#endif
-#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
-#endif
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -48,9 +45,7 @@
#include <linux/host1x-next.h>
#endif
-#if defined(NV_DRM_FENCE_AVAILABLE)
-#include "nvidia-dma-fence-helper.h"
-#endif
+#include <linux/dma-fence.h>
struct nv_drm_atomic_state {
struct NvKmsKapiRequestedModeSetConfig config;
@@ -156,24 +151,23 @@ static int __nv_drm_put_back_post_fence_fd(
return ret;
}
-#if defined(NV_DRM_FENCE_AVAILABLE)
struct nv_drm_plane_fence_cb_data {
-    nv_dma_fence_cb_t dma_fence_cb;
+    struct dma_fence_cb dma_fence_cb;
struct nv_drm_device *nv_dev;
NvU32 semaphore_index;
};
static void
__nv_drm_plane_fence_cb(
-    nv_dma_fence_t *fence,
-    nv_dma_fence_cb_t *cb_data
+    struct dma_fence *fence,
+    struct dma_fence_cb *cb_data
)
{
struct nv_drm_plane_fence_cb_data *fence_data =
container_of(cb_data, typeof(*fence_data), dma_fence_cb);
struct nv_drm_device *nv_dev = fence_data->nv_dev;
-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);
nvKms->signalDisplaySemaphore(nv_dev->pDevice, fence_data->semaphore_index);
nv_drm_free(fence_data);
}
@@ -279,9 +273,9 @@ static int __nv_drm_convert_in_fences(
fence_data->nv_dev = nv_dev;
fence_data->semaphore_index = semaphore_index;
-    ret = nv_dma_fence_add_callback(plane_state->fence,
-                                    &fence_data->dma_fence_cb,
-                                    __nv_drm_plane_fence_cb);
+    ret = dma_fence_add_callback(plane_state->fence,
+                                 &fence_data->dma_fence_cb,
+                                 __nv_drm_plane_fence_cb);
switch (ret) {
case -ENOENT:
@@ -313,7 +307,6 @@ static int __nv_drm_convert_in_fences(
return 0;
}
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
static int __nv_drm_get_syncpt_data(
struct nv_drm_device *nv_dev,
@@ -414,7 +407,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
return -EINVAL;
}
-#if defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
if (commit) {
/*
* This function does what is necessary to prepare the framebuffers
@@ -426,10 +418,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
 * in the new state, preferring explicit sync fences when appropriate.
* This must be done prior to converting the per-plane fences to
* semaphore waits below.
- *
- * Note this only works when the drm_framebuffer:obj[] field is present
- * and populated, so skip calling this function on kernels where that
- * field is not present.
*/
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -437,7 +425,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
return ret;
}
}
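As a hedged aside (not this driver's code): a prepare_planes call like the one above is normally balanced by drm_atomic_helper_cleanup_planes() once the commit finishes or fails, roughly:

static int example_commit_tail(struct drm_device *dev,
                               struct drm_atomic_state *state)
{
    int ret = drm_atomic_helper_prepare_planes(dev, state);

    if (ret != 0)
        return ret;

    /* ... program hardware from 'state' ... */

    drm_atomic_helper_cleanup_planes(dev, state);
    return 0;
}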
-#endif /* defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT) */
memset(requested_config, 0, sizeof(*requested_config));
@@ -472,7 +459,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
nv_new_crtc_state->nv_flip = NULL;
}
-#if defined(NV_DRM_FENCE_AVAILABLE)
ret = __nv_drm_convert_in_fences(nv_dev,
state,
crtc,
@@ -481,7 +467,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
if (ret != 0) {
return ret;
}
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
}
/*
@@ -534,7 +519,6 @@ int nv_drm_atomic_check(struct drm_device *dev,
{
int ret = 0;
-#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
@@ -550,7 +534,6 @@ int nv_drm_atomic_check(struct drm_device *dev,
}
}
}
-#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
if ((ret = drm_atomic_helper_check(dev, state)) != 0) {
goto done;
@@ -678,7 +661,6 @@ int nv_drm_atomic_commit(struct drm_device *dev,
}
}
-#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
/*
* If the legacy LUT needs to be updated, ensure that the previous LUT
* update is complete first.
@@ -703,11 +685,8 @@ int nv_drm_atomic_commit(struct drm_device *dev,
}
}
}
-#endif
}
-#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG)
/*
* nv_drm_atomic_commit_internal()
* implements blocking/non-blocking atomic commit using
@@ -718,18 +697,10 @@ int nv_drm_atomic_commit(struct drm_device *dev,
* expected.
*/
-#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT)
ret = drm_atomic_helper_swap_state(state, false /* stall */);
if (WARN_ON(ret != 0)) {
return ret;
}
-#else
-    drm_atomic_helper_swap_state(state, false /* stall */);
-#endif
-#else
-    drm_atomic_helper_swap_state(dev, state);
-#endif
/*
* nv_drm_atomic_commit_internal() must not return failure after
@@ -831,7 +802,6 @@ int nv_drm_atomic_commit(struct drm_device *dev,
}
}
-#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (crtc_state->color_mgmt_changed) {
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
nv_crtc->head,
@@ -842,20 +812,14 @@ int nv_drm_atomic_commit(struct drm_device *dev,
"LUT notifier timeout on head %u", nv_crtc->head);
}
}
-#endif
}
}
done:
-#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
    /*
-     * If ref counting is present, state will be freed when the caller
-     * drops its reference after we return.
+     * State will be freed when the caller drops its reference after we return.
     */
-#else
-    drm_atomic_state_free(state);
-#endif
return 0;
}

View File

@@ -25,7 +25,7 @@
#include "nvidia-drm-conftest.h"
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#include "nvkms-kapi.h"
@@ -48,6 +48,6 @@ void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
int nv_drm_shut_down_all_crtcs(struct drm_device *dev);
-#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_MODESET_H__ */

View File

@@ -26,11 +26,8 @@
#if defined(NV_DRM_AVAILABLE)
-#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
#include <linux/file.h>
#include <linux/sync_file.h>
-#endif
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/device.h>
@@ -66,18 +63,6 @@ void nv_drm_free(void *ptr)
kfree(ptr);
}
-char *nv_drm_asprintf(const char *fmt, ...)
-{
-    va_list ap;
-    char *p;
-    va_start(ap, fmt);
-    p = kvasprintf(GFP_KERNEL, fmt, ap);
-    va_end(ap);
-    return p;
-}
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_PPC64LE)
@@ -236,10 +221,8 @@ unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
return jiffies + msecs_to_jiffies(relative_timeout_ms);
}
-#if defined(NV_DRM_FENCE_AVAILABLE)
-int nv_drm_create_sync_file(nv_dma_fence_t *fence)
+int nv_drm_create_sync_file(struct dma_fence *fence)
{
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
struct sync_file *sync;
int fd = get_unused_fd_flags(O_CLOEXEC);
@@ -258,20 +241,12 @@ int nv_drm_create_sync_file(nv_dma_fence_t *fence)
fd_install(fd, sync->file);
return fd;
-#else /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
-    return -EINVAL;
-#endif /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
}
-nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd)
+struct dma_fence *nv_drm_sync_file_get_fence(int fd)
{
-#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
return sync_file_get_fence(fd);
-#else /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
-    return NULL;
-#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
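Taken together, the two helpers above let an ioctl export a fence as a sync FD and later resolve one handed back by userspace. A hedged sketch; the params struct is hypothetical:

struct example_params { int fd; }; /* hypothetical ioctl payload */

static int example_export_fence(struct example_params *p,
                                struct dma_fence *fence)
{
    int fd = nv_drm_create_sync_file(fence);

    if (fd < 0)
        return fd; /* e.g. -EMFILE from get_unused_fd_flags() */

    p->fd = fd;    /* handed back to userspace via the ioctl */
    return 0;
}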
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void)
{

View File

@@ -29,9 +29,7 @@
#if defined(NV_DRM_AVAILABLE)
-#if defined(NV_DRM_FENCE_AVAILABLE)
-#include "nvidia-dma-fence-helper.h"
-#endif
+#include "linux/dma-fence.h"
#if defined(NV_LINUX) || defined(NV_BSD)
#include "nv-kthread-q.h"
@@ -71,8 +69,6 @@ void *nv_drm_calloc(size_t nmemb, size_t size);
void nv_drm_free(void *ptr);
-char *nv_drm_asprintf(const char *fmt, ...);
void nv_drm_write_combine_flush(void);
int nv_drm_lock_user_pages(unsigned long address,
@@ -105,11 +101,9 @@ unsigned long nv_drm_timer_now(void);
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
-#if defined(NV_DRM_FENCE_AVAILABLE)
-int nv_drm_create_sync_file(nv_dma_fence_t *fence);
+int nv_drm_create_sync_file(struct dma_fence *fence);
-nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd);
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
+struct dma_fence *nv_drm_sync_file_get_fence(int fd);
void nv_drm_yield(void);

View File

@@ -31,13 +31,8 @@
#include <drm/drmP.h>
#endif
-#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
#include <drm/drm_device.h>
-#endif
-#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
-#endif
#include "nvidia-drm-os-interface.h"
@@ -99,7 +94,6 @@ struct nv_drm_device {
struct NvKmsKapiDevice *pDevice;
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
* Lock to protect drm-subsystem and fields of this structure
* from concurrent access.
@@ -129,9 +123,7 @@ struct nv_drm_device {
NvU8 genericPageKind;
NvU8 pageKindGeneration;
NvU8 sectorLayout;
-#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
-#endif
struct delayed_work hotplug_event_work;
atomic_t enable_event_handling;
@@ -144,12 +136,8 @@ struct nv_drm_device {
*/
wait_queue_head_t flip_event_wq;
-#endif
-#if defined(NV_DRM_FENCE_AVAILABLE)
    NvU64 semsurf_stride;
    NvU64 semsurf_max_submitted_offset;
-#endif
NvBool hasVideoMemory;

View File

@@ -30,8 +30,6 @@ NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-os-interface.c
# Register the conftests needed by nvidia-drm.ko
#
-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
@@ -40,33 +38,18 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_hand
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_devices
@@ -75,43 +58,20 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_ttm_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_client_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_helper_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_plane_create_color_properties
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_mixed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pfn_to_pfn_t
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
@@ -129,11 +89,8 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_has_leases
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
@@ -143,11 +100,8 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffe
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_syncobj_features_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_framebuffer_obj_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_ctm_3x4_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_lut
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_info_has_is_yuv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_property_blob_put
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
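Context for the lists above: each name in an NV_CONFTEST_*_COMPILE_TESTS variable is compiled as a small probe against the target kernel's headers at build time, and the outcome becomes a feature macro that the sources test with #if defined(...). The hunk headers show these lists shrinking sharply, meaning the driver now assumes a fixed answer for the removed probes. Roughly the shape of a FUNCTION probe, paraphrased rather than copied from conftest.sh:

#include <linux/sync_file.h>

void conftest_sync_file_get_fence(void)
{
    /* Referencing the symbol makes a missing declaration a build error. */
    (void)sync_file_get_fence;
}

/*
 * If this translation unit compiles, the generated conftest header gains
 * a define along the lines of NV_SYNC_FILE_GET_FENCE_PRESENT, which is
 * exactly the guard consumed by nv_drm_sync_file_get_fence() earlier.
 */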

View File

@@ -20,18 +20,15 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
-#if defined(NV_DRM_DRM_PLANE_H_PRESENT)
#include <drm/drm_plane.h>
-#endif
#include <drm/drm_modes.h>
#include <uapi/drm/drm_fourcc.h>
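Widening this guard from NV_DRM_ATOMIC_MODESET_AVAILABLE to NV_DRM_AVAILABLE means the helpers in this file now build whenever the DRM subsystem is usable at all, consistent with atomic modeset no longer being conditional elsewhere in the change. Since the file pulls in drm_fourcc.h, an illustrative shape of the format mapping such a utils file typically hosts follows; the NvKms-side case values are placeholders, because the real enumerators are not part of this diff.

#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>

static u32 example_nvkms_to_drm_format(int nvkms_format)
{
    switch (nvkms_format) {
    case 0: /* stand-in for an ARGB8888 NvKms surface format */
        return DRM_FORMAT_ARGB8888;
    case 1: /* stand-in for an XRGB8888 NvKms surface format */
        return DRM_FORMAT_XRGB8888;
    default:
        return DRM_FORMAT_INVALID;
    }
}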

View File

@@ -25,7 +25,7 @@
#include "nvidia-drm-conftest.h"
-#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#if defined(NV_DRM_AVAILABLE)
#include "nvkms-kapi.h"
@@ -49,6 +49,6 @@ void nvkms_display_mode_to_drm_mode(
void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src,
struct NvKmsKapiDisplayMode *dst);
-#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+#endif /* NV_DRM_AVAILABLE */
#endif /* __NVIDIA_DRM_UTILS_H__ */
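The two prototypes above convert between DRM and NVKMS mode descriptions. struct NvKmsKapiDisplayMode's layout is not visible in this diff, so the sketch below substitutes a plain struct to show the timing fields any such conversion has to carry across; every example_* name is hypothetical.

#include <drm/drm_modes.h>

struct example_mode {
    unsigned int clock_khz;
    unsigned int hdisplay, hsync_start, hsync_end, htotal;
    unsigned int vdisplay, vsync_start, vsync_end, vtotal;
};

static void example_from_drm_mode(const struct drm_display_mode *src,
                                  struct example_mode *dst)
{
    dst->clock_khz   = src->clock;      /* DRM stores the dot clock in kHz */
    dst->hdisplay    = src->hdisplay;
    dst->hsync_start = src->hsync_start;
    dst->hsync_end   = src->hsync_end;
    dst->htotal      = src->htotal;
    dst->vdisplay    = src->vdisplay;
    dst->vsync_start = src->vsync_start;
    dst->vsync_end   = src->vsync_end;
    dst->vtotal      = src->vtotal;
}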