575.51.02

Bernhard Stoeckner
2025-04-17 19:35:38 +02:00
parent e8113f665d
commit 4159579888
1142 changed files with 309085 additions and 272273 deletions

View File

@@ -62,6 +62,20 @@
#undef NV_DRM_FENCE_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif
#endif
#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
(defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))

View File

@@ -314,7 +314,11 @@ static int nv_drm_connector_get_modes(struct drm_connector *connector)
}
static int nv_drm_connector_mode_valid(struct drm_connector *connector,
#if defined(NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG)
const struct drm_display_mode *mode)
#else
struct drm_display_mode *mode)
#endif
{
struct drm_device *dev = connector->dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);

View File

@@ -372,23 +372,88 @@ cursor_plane_req_config_update(struct drm_plane *plane,
old_config.dstY != req_config->dstY;
}
static void free_drm_lut_surface(struct kref *ref)
static void release_drm_nvkms_surface(struct nv_drm_nvkms_surface *drm_nvkms_surface)
{
struct nv_drm_lut_surface *drm_lut_surface =
container_of(ref, struct nv_drm_lut_surface, refcount);
struct NvKmsKapiDevice *pDevice = drm_lut_surface->pDevice;
struct NvKmsKapiDevice *pDevice = drm_nvkms_surface->pDevice;
BUG_ON(drm_lut_surface->nvkms_surface == NULL);
BUG_ON(drm_lut_surface->nvkms_memory == NULL);
BUG_ON(drm_lut_surface->buffer == NULL);
BUG_ON(drm_nvkms_surface->nvkms_surface == NULL);
BUG_ON(drm_nvkms_surface->nvkms_memory == NULL);
BUG_ON(drm_nvkms_surface->buffer == NULL);
nvKms->destroySurface(pDevice, drm_lut_surface->nvkms_surface);
nvKms->unmapMemory(pDevice, drm_lut_surface->nvkms_memory,
nvKms->destroySurface(pDevice, drm_nvkms_surface->nvkms_surface);
nvKms->unmapMemory(pDevice, drm_nvkms_surface->nvkms_memory,
NVKMS_KAPI_MAPPING_TYPE_KERNEL,
drm_lut_surface->buffer);
nvKms->freeMemory(pDevice, drm_lut_surface->nvkms_memory);
drm_nvkms_surface->buffer);
nvKms->freeMemory(pDevice, drm_nvkms_surface->nvkms_memory);
}
nv_drm_free(drm_lut_surface);
static int init_drm_nvkms_surface(struct nv_drm_device *nv_dev,
struct nv_drm_nvkms_surface *drm_nvkms_surface,
struct nv_drm_nvkms_surface_params *surface_params)
{
struct NvKmsKapiDevice *pDevice = nv_dev->pDevice;
NvU8 compressible = 0; // No compression
struct NvKmsKapiCreateSurfaceParams params = {};
struct NvKmsKapiMemory *surface_mem;
struct NvKmsKapiSurface *surface;
void *buffer;
params.format = surface_params->format;
params.width = surface_params->width;
params.height = surface_params->height;
/* Allocate displayable memory. */
if (nv_dev->hasVideoMemory) {
surface_mem =
nvKms->allocateVideoMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
surface_params->surface_size,
&compressible);
} else {
surface_mem =
nvKms->allocateSystemMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
surface_params->surface_size,
&compressible);
}
if (surface_mem == NULL) {
return -ENOMEM;
}
/* Map memory in order to populate it. */
if (!nvKms->mapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL,
&buffer)) {
nvKms->freeMemory(pDevice, surface_mem);
return -ENOMEM;
}
params.planes[0].memory = surface_mem;
params.planes[0].offset = 0;
params.planes[0].pitch = surface_params->surface_size;
/* Create surface. */
surface = nvKms->createSurface(pDevice, &params);
if (surface == NULL) {
nvKms->unmapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL, buffer);
nvKms->freeMemory(pDevice, surface_mem);
return -ENOMEM;
}
/* Pack into struct nv_drm_nvkms_surface. */
drm_nvkms_surface->pDevice = pDevice;
drm_nvkms_surface->nvkms_memory = surface_mem;
drm_nvkms_surface->nvkms_surface = surface;
drm_nvkms_surface->buffer = buffer;
/* Init refcount. */
kref_init(&drm_nvkms_surface->refcount);
return 0;
}
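For context, a minimal sketch of how the refactored base-surface lifecycle is meant to be used, assuming this file's own nv_drm_nvkms_surface, release_drm_nvkms_surface, and nv_drm_free; the wrapper type and callback names below are illustrative, mirroring the LUT-surface pattern rather than quoting it:

/* Illustrative wrapper embedding the shared base surface. */
struct example_surface {
    struct nv_drm_nvkms_surface base;
    /* ... type-specific fields ... */
};

static void example_surface_free(struct kref *ref)
{
    struct nv_drm_nvkms_surface *base =
        container_of(ref, struct nv_drm_nvkms_surface, refcount);
    struct example_surface *surf =
        container_of(base, struct example_surface, base);

    release_drm_nvkms_surface(base); /* destroys the surface, unmaps and frees memory */
    nv_drm_free(surf);
}

/*
 * Users share the object with kref_get(&surf->base.refcount) and drop it with
 * kref_put(&surf->base.refcount, example_surface_free); init_drm_nvkms_surface()
 * leaves the refcount at 1 via kref_init().
 */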
static struct nv_drm_lut_surface *alloc_drm_lut_surface(
@@ -399,86 +464,49 @@ static struct nv_drm_lut_surface *alloc_drm_lut_surface(
NvU32 num_vss_header_entries,
NvU32 num_entries)
{
struct NvKmsKapiDevice *pDevice = nv_dev->pDevice;
struct nv_drm_lut_surface *drm_lut_surface;
NvU8 compressible = 0; // No compression
size_t size =
const size_t surface_size =
(((num_vss_header_entries + num_entries) *
NVKMS_LUT_CAPS_LUT_ENTRY_SIZE) + 255) & ~255; // 256-byte aligned
struct NvKmsKapiMemory *surface_mem;
struct NvKmsKapiSurface *surface;
struct NvKmsKapiCreateSurfaceParams params = {};
NvU16 *lut_data;
struct nv_drm_nvkms_surface_params params = {};
/* Allocate displayable memory. */
if (nv_dev->hasVideoMemory) {
surface_mem =
nvKms->allocateVideoMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
size,
&compressible);
} else {
surface_mem =
nvKms->allocateSystemMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
size,
&compressible);
}
if (surface_mem == NULL) {
return NULL;
}
/* Map memory in order to populate it. */
if (!nvKms->mapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL,
(void **) &lut_data)) {
nvKms->freeMemory(pDevice, surface_mem);
return NULL;
}
/* Create surface. */
params.format = NvKmsSurfaceMemoryFormatR16G16B16A16;
params.width = num_vss_header_entries + num_entries;
params.height = 1;
params.planes[0].memory = surface_mem;
params.planes[0].offset = 0;
params.planes[0].pitch = size;
params.surface_size = surface_size;
surface = nvKms->createSurface(pDevice, &params);
if (surface == NULL) {
nvKms->unmapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL, (void *) lut_data);
nvKms->freeMemory(pDevice, surface_mem);
return NULL;
}
/* Pack into struct nv_drm_lut_surface. */
drm_lut_surface = nv_drm_calloc(1, sizeof(struct nv_drm_lut_surface));
if (drm_lut_surface == NULL) {
nvKms->destroySurface(pDevice, surface);
nvKms->unmapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL, (void *) lut_data);
nvKms->freeMemory(pDevice, surface_mem);
return NULL;
}
drm_lut_surface->pDevice = pDevice;
drm_lut_surface->nvkms_memory = surface_mem;
drm_lut_surface->nvkms_surface = surface;
drm_lut_surface->buffer = lut_data;
if (init_drm_nvkms_surface(nv_dev, &drm_lut_surface->base, &params) != 0) {
nv_drm_free(drm_lut_surface);
return NULL;
}
drm_lut_surface->properties.vssSegments = num_vss_header_segments;
drm_lut_surface->properties.vssType = vss_type;
drm_lut_surface->properties.lutEntries = num_entries;
drm_lut_surface->properties.entryFormat = entry_format;
/* Init refcount. */
kref_init(&drm_lut_surface->refcount);
return drm_lut_surface;
}
static void free_drm_lut_surface(struct kref *ref)
{
struct nv_drm_nvkms_surface *drm_nvkms_surface =
container_of(ref, struct nv_drm_nvkms_surface, refcount);
struct nv_drm_lut_surface *drm_lut_surface =
container_of(drm_nvkms_surface, struct nv_drm_lut_surface, base);
// Clean up base
release_drm_nvkms_surface(drm_nvkms_surface);
nv_drm_free(drm_lut_surface);
}
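The surface_size computed in alloc_drm_lut_surface above uses the usual power-of-two align-up idiom, (x + 255) & ~255. A standalone sketch of the same arithmetic; the entry counts below are illustrative, and an 8-byte entry size is assumed from the R16G16B16A16 format rather than taken from NVKMS_LUT_CAPS_LUT_ENTRY_SIZE:

#include <stddef.h>
#include <stdio.h>

/* Round an allocation size up to the 256-byte alignment the display hardware wants. */
static size_t align_up_256(size_t size)
{
    return (size + 255) & ~(size_t)255; /* valid because 256 is a power of two */
}

int main(void)
{
    size_t entries = 33 + 1025;             /* hypothetical VSS header + LUT entries */
    size_t raw = entries * 8;                /* 8 bytes per R16G16B16A16 entry = 8464 */
    printf("%zu -> %zu\n", raw, align_up_256(raw)); /* prints "8464 -> 8704" */
    return 0;
}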
static NvU32 fp32_lut_interp(
NvU16 entry0,
NvU16 entry1,
@@ -582,7 +610,7 @@ static struct nv_drm_lut_surface *create_drm_ilut_surface_vss(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Calculate VSS header. */
if (vss_header_seg_sizes != NULL) {
@@ -733,7 +761,7 @@ static struct nv_drm_lut_surface *create_drm_ilut_surface_legacy(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Fill LUT surface. */
for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) {
@@ -799,7 +827,7 @@ static struct nv_drm_lut_surface *create_drm_tmo_surface(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Calculate linear VSS header. */
for (entry_idx = 0; entry_idx < NUM_VSS_HEADER_ENTRIES; entry_idx++) {
@@ -901,7 +929,7 @@ static struct nv_drm_lut_surface *create_drm_olut_surface_vss(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Calculate VSS header. */
if (vss_header_seg_sizes != NULL) {
@@ -1021,7 +1049,7 @@ static struct nv_drm_lut_surface *create_drm_olut_surface_legacy(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Fill LUT surface. */
for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) {
@@ -1057,6 +1085,74 @@ update_matrix_override(struct drm_property_blob *blob,
return enabled;
}
static enum NvKmsInputColorSpace nv_get_nvkms_input_colorspace(
enum nv_drm_input_color_space colorSpace)
{
switch (colorSpace) {
case NV_DRM_INPUT_COLOR_SPACE_NONE:
return NVKMS_INPUT_COLOR_SPACE_NONE;
case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR:
return NVKMS_INPUT_COLOR_SPACE_BT709;
case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ:
return NVKMS_INPUT_COLOR_SPACE_BT2100;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported input colorspace");
return NVKMS_INPUT_COLOR_SPACE_NONE;
}
}
static enum NvKmsInputTf nv_get_nvkms_input_tf(
enum nv_drm_input_color_space colorSpace)
{
switch (colorSpace) {
case NV_DRM_INPUT_COLOR_SPACE_NONE:
return NVKMS_INPUT_TF_LINEAR;
case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR:
return NVKMS_INPUT_TF_LINEAR;
case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ:
return NVKMS_INPUT_TF_PQ;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported input colorspace");
return NVKMS_INPUT_TF_LINEAR;
}
}
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
static enum NvKmsInputColorSpace nv_drm_color_encoding_to_nvkms_colorspace(
enum drm_color_encoding color_encoding)
{
switch(color_encoding) {
case DRM_COLOR_YCBCR_BT601:
return NVKMS_INPUT_COLOR_SPACE_BT601;
case DRM_COLOR_YCBCR_BT709:
return NVKMS_INPUT_COLOR_SPACE_BT709;
case DRM_COLOR_YCBCR_BT2020:
return NVKMS_INPUT_COLOR_SPACE_BT2020;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported DRM color_encoding");
return NVKMS_INPUT_COLOR_SPACE_NONE;
}
}
static enum NvKmsInputColorRange nv_drm_color_range_to_nvkms_color_range(
enum drm_color_range color_range)
{
switch(color_range) {
case DRM_COLOR_YCBCR_FULL_RANGE:
return NVKMS_INPUT_COLOR_RANGE_FULL;
case DRM_COLOR_YCBCR_LIMITED_RANGE:
return NVKMS_INPUT_COLOR_RANGE_LIMITED;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported DRM color_range");
return NVKMS_INPUT_COLOR_RANGE_DEFAULT;
}
}
#endif
static int
plane_req_config_update(struct drm_plane *plane,
struct drm_plane_state *plane_state,
@@ -1190,8 +1286,37 @@ plane_req_config_update(struct drm_plane *plane,
nv_plane->defaultCompositionMode;
#endif
req_config->config.inputColorSpace =
nv_drm_plane_state->input_colorspace;
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
if ((nv_drm_plane_state->input_colorspace == NV_DRM_INPUT_COLOR_SPACE_NONE) &&
nv_drm_format_is_yuv(plane_state->fb->format->format)) {
if (nv_plane->supportsColorProperties) {
req_config->config.inputColorSpace =
nv_drm_color_encoding_to_nvkms_colorspace(plane_state->color_encoding);
req_config->config.inputColorRange =
nv_drm_color_range_to_nvkms_color_range(plane_state->color_range);
} else {
req_config->config.inputColorSpace = NVKMS_INPUT_COLOR_SPACE_NONE;
req_config->config.inputColorRange = NVKMS_INPUT_COLOR_RANGE_DEFAULT;
}
req_config->config.inputTf = NVKMS_INPUT_TF_LINEAR;
} else {
#endif
req_config->config.inputColorSpace =
nv_get_nvkms_input_colorspace(nv_drm_plane_state->input_colorspace);
req_config->config.inputColorRange = NVKMS_INPUT_COLOR_RANGE_DEFAULT;
req_config->config.inputTf =
nv_get_nvkms_input_tf(nv_drm_plane_state->input_colorspace);
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
}
#endif
req_config->flags.inputTfChanged =
(old_config.inputTf != req_config->config.inputTf);
req_config->flags.inputColorSpaceChanged =
(old_config.inputColorSpace != req_config->config.inputColorSpace);
req_config->flags.inputColorRangeChanged =
(old_config.inputColorRange != req_config->config.inputColorRange);
req_config->config.syncParams.preSyncptSpecified = false;
req_config->config.syncParams.postSyncptRequested = false;
@@ -1240,10 +1365,10 @@ plane_req_config_update(struct drm_plane *plane,
switch (info_frame->eotf) {
case HDMI_EOTF_SMPTE_ST2084:
req_config->config.tf = NVKMS_OUTPUT_TF_PQ;
req_config->config.outputTf = NVKMS_OUTPUT_TF_PQ;
break;
case HDMI_EOTF_TRADITIONAL_GAMMA_SDR:
req_config->config.tf =
req_config->config.outputTf =
NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR;
break;
default:
@@ -1254,7 +1379,7 @@ plane_req_config_update(struct drm_plane *plane,
req_config->config.hdrMetadata.enabled = true;
} else {
req_config->config.hdrMetadata.enabled = false;
req_config->config.tf = NVKMS_OUTPUT_TF_NONE;
req_config->config.outputTf = NVKMS_OUTPUT_TF_NONE;
}
req_config->flags.hdrMetadataChanged =
@@ -1264,7 +1389,7 @@ plane_req_config_update(struct drm_plane *plane,
&req_config->config.hdrMetadata.val,
sizeof(struct NvKmsHDRStaticMetadata)));
req_config->flags.tfChanged = (old_config.tf != req_config->config.tf);
req_config->flags.outputTfChanged = (old_config.outputTf != req_config->config.outputTf);
#endif
req_config->config.matrixOverrides.enabled.lmsCtm =
@@ -1295,7 +1420,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->degamma_changed) {
if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
nv_drm_plane_state->degamma_drm_lut_surface = NULL;
}
@@ -1327,7 +1452,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) {
req_config->config.ilut.enabled = NV_TRUE;
req_config->config.ilut.lutSurface =
nv_drm_plane_state->degamma_drm_lut_surface->nvkms_surface;
nv_drm_plane_state->degamma_drm_lut_surface->base.nvkms_surface;
req_config->config.ilut.offset = 0;
req_config->config.ilut.vssSegments =
nv_drm_plane_state->degamma_drm_lut_surface->properties.vssSegments;
@@ -1346,7 +1471,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->tmo_changed) {
if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->base.refcount,
free_drm_lut_surface);
nv_drm_plane_state->tmo_drm_lut_surface = NULL;
}
@@ -1363,7 +1488,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) {
req_config->config.tmo.enabled = NV_TRUE;
req_config->config.tmo.lutSurface =
nv_drm_plane_state->tmo_drm_lut_surface->nvkms_surface;
nv_drm_plane_state->tmo_drm_lut_surface->base.nvkms_surface;
req_config->config.tmo.offset = 0;
req_config->config.tmo.vssSegments =
nv_drm_plane_state->tmo_drm_lut_surface->properties.vssSegments;
@@ -1870,7 +1995,7 @@ nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
nv_plane_state->degamma_drm_lut_surface =
nv_old_plane_state->degamma_drm_lut_surface;
if (nv_plane_state->degamma_drm_lut_surface) {
kref_get(&nv_plane_state->degamma_drm_lut_surface->refcount);
kref_get(&nv_plane_state->degamma_drm_lut_surface->base.refcount);
}
nv_plane_state->tmo_lut = nv_old_plane_state->tmo_lut;
@@ -1881,7 +2006,7 @@ nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
nv_plane_state->tmo_drm_lut_surface =
nv_old_plane_state->tmo_drm_lut_surface;
if (nv_plane_state->tmo_drm_lut_surface) {
kref_get(&nv_plane_state->tmo_drm_lut_surface->refcount);
kref_get(&nv_plane_state->tmo_drm_lut_surface->base.refcount);
}
return &nv_plane_state->base;
@@ -1909,13 +2034,13 @@ static inline void __nv_drm_plane_atomic_destroy_state(
nv_drm_property_blob_put(nv_drm_plane_state->degamma_lut);
if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
}
nv_drm_property_blob_put(nv_drm_plane_state->tmo_lut);
if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->base.refcount,
free_drm_lut_surface);
}
}
@@ -2113,7 +2238,7 @@ nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
}
nv_state->regamma_divisor = nv_old_state->regamma_divisor;
if (nv_state->regamma_drm_lut_surface) {
kref_get(&nv_state->regamma_drm_lut_surface->refcount);
kref_get(&nv_state->regamma_drm_lut_surface->base.refcount);
}
nv_state->regamma_changed = false;
@@ -2142,7 +2267,7 @@ static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
nv_drm_property_blob_put(nv_state->regamma_lut);
if (nv_state->regamma_drm_lut_surface != NULL) {
kref_put(&nv_state->regamma_drm_lut_surface->refcount,
kref_put(&nv_state->regamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
}
@@ -2386,7 +2511,7 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (nv_crtc_state->regamma_changed) {
if (nv_crtc_state->regamma_drm_lut_surface != NULL) {
kref_put(&nv_crtc_state->regamma_drm_lut_surface->refcount,
kref_put(&nv_crtc_state->regamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
nv_crtc_state->regamma_drm_lut_surface = NULL;
}
@@ -2417,7 +2542,7 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (nv_crtc_state->regamma_drm_lut_surface != NULL) {
req_config->modeSetConfig.olut.enabled = NV_TRUE;
req_config->modeSetConfig.olut.lutSurface =
nv_crtc_state->regamma_drm_lut_surface->nvkms_surface;
nv_crtc_state->regamma_drm_lut_surface->base.nvkms_surface;
req_config->modeSetConfig.olut.offset = 0;
req_config->modeSetConfig.olut.vssSegments =
nv_crtc_state->regamma_drm_lut_surface->properties.vssSegments;
@@ -2521,7 +2646,7 @@ static void nv_drm_plane_install_properties(
if (nv_dev->nv_input_colorspace_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_input_colorspace_property,
NVKMS_INPUT_COLORSPACE_NONE);
NV_DRM_INPUT_COLOR_SPACE_NONE);
}
if (supportsICtCp) {
@@ -2531,17 +2656,14 @@ static void nv_drm_plane_install_properties(
&plane->base, nv_dev->nv_hdr_output_metadata_property, 0);
}
#endif
}
/*
* The old DRM_OBJECT_MAX_PROPERTY limit of 24 is too small to
* accommodate all of the properties for the ICtCp pipeline.
*
* Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to
* accommodate new color props") in Linux v6.8 increased the limit to
* 64. To be safe, require this before attaching any properties for the
* ICtCp pipeline.
*/
if (DRM_OBJECT_MAX_PROPERTY >= 64) {
/*
* Per-plane HDR properties get us dangerously close to the 24 property
* limit on kernels that don't support NV_DRM_USE_EXTENDED_PROPERTIES.
*/
if (NV_DRM_USE_EXTENDED_PROPERTIES) {
if (supportsICtCp) {
if (nv_dev->nv_plane_lms_ctm_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_lms_ctm_property, 0);
@@ -2568,36 +2690,36 @@ static void nv_drm_plane_install_properties(
NVKMS_LUT_ARRAY_SIZE);
}
}
}
if (nv_dev->nv_plane_blend_ctm_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_blend_ctm_property, 0);
}
if (nv_dev->nv_plane_blend_ctm_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_blend_ctm_property, 0);
}
if (nv_plane->ilut_caps.supported) {
if (nv_plane->ilut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) {
if (nv_dev->nv_plane_degamma_tf_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_tf_property,
NV_DRM_TRANSFER_FUNCTION_DEFAULT);
if (nv_plane->ilut_caps.supported) {
if (nv_plane->ilut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) {
if (nv_dev->nv_plane_degamma_tf_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_tf_property,
NV_DRM_TRANSFER_FUNCTION_DEFAULT);
}
if (nv_dev->nv_plane_degamma_multiplier_property) {
/* Default to 1 in S31.32 Sign-Magnitude Format */
nv_plane_state->degamma_multiplier = ((uint64_t) 1) << 32;
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_multiplier_property,
nv_plane_state->degamma_multiplier);
}
}
if (nv_dev->nv_plane_degamma_multiplier_property) {
/* Default to 1 in S31.32 Sign-Magnitude Format */
nv_plane_state->degamma_multiplier = ((uint64_t) 1) << 32;
if (nv_dev->nv_plane_degamma_lut_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_multiplier_property,
nv_plane_state->degamma_multiplier);
&plane->base, nv_dev->nv_plane_degamma_lut_property, 0);
}
if (nv_dev->nv_plane_degamma_lut_size_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_lut_size_property,
NVKMS_LUT_ARRAY_SIZE);
}
}
if (nv_dev->nv_plane_degamma_lut_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_lut_property, 0);
}
if (nv_dev->nv_plane_degamma_lut_size_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_lut_size_property,
NVKMS_LUT_ARRAY_SIZE);
}
}
}
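The degamma multiplier default installed above encodes 1.0 in S31.32 sign-magnitude fixed point as ((uint64_t) 1) << 32. A small hedged sketch of that encoding; the helper name is illustrative and not part of the driver:

#include <stdint.h>

/* Encode a non-negative integer value in S31.32 sign-magnitude fixed point (sketch). */
static uint64_t s31_32_from_uint(uint32_t integer_part)
{
    return (uint64_t)integer_part << 32; /* low 32 bits are the (zero) fraction */
}

/*
 * s31_32_from_uint(1) == 0x0000000100000000, the "multiply by 1.0" default above;
 * 0.5 would be 0x0000000080000000 (fraction only), and bit 63, if set, marks the
 * magnitude as negative in sign-magnitude form.
 */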
@@ -2776,6 +2898,29 @@ nv_drm_plane_create(struct drm_device *dev,
goto failed_plane_init;
}
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
if (pResInfo->caps.supportsInputColorSpace &&
pResInfo->caps.supportsInputColorRange) {
nv_plane->supportsColorProperties = true;
drm_plane_create_color_properties(
plane,
NVBIT(DRM_COLOR_YCBCR_BT601) |
NVBIT(DRM_COLOR_YCBCR_BT709) |
NVBIT(DRM_COLOR_YCBCR_BT2020),
NVBIT(DRM_COLOR_YCBCR_FULL_RANGE) |
NVBIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_FULL_RANGE
);
} else {
nv_plane->supportsColorProperties = false;
}
#else
nv_plane->supportsColorProperties = false;
#endif
drm_plane_helper_add(plane, &nv_plane_helper_funcs);
if (plane_type != DRM_PLANE_TYPE_CURSOR) {

View File

@@ -191,6 +191,13 @@ struct nv_drm_plane {
*/
uint32_t layer_idx;
/**
* @supportsColorProperties
*
* If true, supports the COLOR_ENCODING and COLOR_RANGE properties.
*/
bool supportsColorProperties;
struct NvKmsLUTCaps ilut_caps;
struct NvKmsLUTCaps tmo_caps;
};
@@ -203,10 +210,23 @@ static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
return container_of(plane, struct nv_drm_plane, base);
}
struct nv_drm_lut_surface {
struct nv_drm_nvkms_surface {
struct NvKmsKapiDevice *pDevice;
struct NvKmsKapiMemory *nvkms_memory;
struct NvKmsKapiSurface *nvkms_surface;
void *buffer;
struct kref refcount;
};
struct nv_drm_nvkms_surface_params {
NvU32 width;
NvU32 height;
size_t surface_size;
enum NvKmsSurfaceMemoryFormat format;
};
struct nv_drm_lut_surface {
struct nv_drm_nvkms_surface base;
struct {
NvU32 vssSegments;
enum NvKmsLUTVssType vssType;
@@ -215,14 +235,12 @@ struct nv_drm_lut_surface {
enum NvKmsLUTFormat entryFormat;
} properties;
void *buffer;
struct kref refcount;
};
struct nv_drm_plane_state {
struct drm_plane_state base;
s32 __user *fd_user_ptr;
enum NvKmsInputColorSpace input_colorspace;
enum nv_drm_input_color_space input_colorspace;
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct drm_property_blob *hdr_output_metadata;
#endif

View File

@@ -35,6 +35,8 @@
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-utils.h"
#include "nv_dpy_id.h"
#if defined(NV_DRM_AVAILABLE)
@@ -90,6 +92,7 @@
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/sort.h>
/*
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
@@ -120,15 +123,15 @@ static int nv_drm_revoke_sub_ownership(struct drm_device *dev);
static struct nv_drm_device *dev_list = NULL;
static char* nv_get_input_colorspace_name(
enum NvKmsInputColorSpace colorSpace)
static const char* nv_get_input_colorspace_name(
enum nv_drm_input_color_space colorSpace)
{
switch (colorSpace) {
case NVKMS_INPUT_COLORSPACE_NONE:
case NV_DRM_INPUT_COLOR_SPACE_NONE:
return "None";
case NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR:
case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR:
return "scRGB Linear FP16";
case NVKMS_INPUT_COLORSPACE_BT2100_PQ:
case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ:
return "BT.2100 PQ";
default:
/* We shouldn't hit this */
@@ -284,6 +287,123 @@ done:
mutex_unlock(&nv_dev->lock);
}
struct nv_drm_mst_display_info {
NvKmsKapiDisplay handle;
NvBool isDpMST;
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH];
};
/*
* Helper function to get DpMST display info.
* dpMSTDisplayInfos is allocated dynamically,
* so it needs to be freed after finishing the query.
*/
static int nv_drm_get_mst_display_infos
(
struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay,
struct nv_drm_mst_display_info **dpMSTDisplayInfos,
NvU32 *nDynamicDisplays
)
{
struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
struct NvKmsKapiStaticDisplayInfo *dynamicDisplayInfo = NULL;
struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
struct nv_drm_mst_display_info *displayInfos = NULL;
NvU32 i = 0;
int ret = 0;
NVDpyId dpyId;
*nDynamicDisplays = 0;
/* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */
if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
ret = -ENOMEM;
goto done;
}
if ((dynamicDisplayInfo = nv_drm_calloc(1, sizeof(*dynamicDisplayInfo))) == NULL) {
ret = -ENOMEM;
goto done;
}
if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
ret = -EINVAL;
goto done;
}
connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
displayInfo->connectorHandle);
if (IS_ERR(connectorInfo)) {
ret = PTR_ERR(connectorInfo);
goto done;
}
*nDynamicDisplays = nvCountDpyIdsInDpyIdList(connectorInfo->dynamicDpyIdList);
if (*nDynamicDisplays == 0) {
goto done;
}
if ((displayInfos = nv_drm_calloc(*nDynamicDisplays, sizeof(*displayInfos))) == NULL) {
ret = -ENOMEM;
goto done;
}
FOR_ALL_DPY_IDS(dpyId, connectorInfo->dynamicDpyIdList) {
if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice,
nvDpyIdToNvU32(dpyId),
dynamicDisplayInfo)) {
ret = -EINVAL;
nv_drm_free(displayInfos);
goto done;
}
displayInfos[i].handle = dynamicDisplayInfo->handle;
displayInfos[i].isDpMST = dynamicDisplayInfo->isDpMST;
memcpy(displayInfos[i].dpAddress, dynamicDisplayInfo->dpAddress, sizeof(dynamicDisplayInfo->dpAddress));
i++;
}
*dpMSTDisplayInfos = displayInfos;
done:
nv_drm_free(displayInfo);
nv_drm_free(dynamicDisplayInfo);
nv_drm_free(connectorInfo);
return ret;
}
static int nv_drm_disp_cmp (const void *l, const void *r)
{
struct nv_drm_mst_display_info *l_info = (struct nv_drm_mst_display_info *)l;
struct nv_drm_mst_display_info *r_info = (struct nv_drm_mst_display_info *)r;
return strcmp(l_info->dpAddress, r_info->dpAddress);
}
/*
* Helper function to sort displays by dpAddress, compared as strings (not
* numerically), so that DRM connector IDs are created in a deterministic order.
*/
static void nv_drm_sort_dynamic_displays_by_dp_addr
(
struct nv_drm_mst_display_info *infos,
int nDynamicDisplays
)
{
sort(infos, nDynamicDisplays, sizeof(*infos), nv_drm_disp_cmp, NULL);
}
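Because nv_drm_disp_cmp uses strcmp, the resulting order is lexicographic on the DP address string rather than numeric per branch, which is still deterministic; a hedged sketch with made-up MST addresses:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Lexicographic ordering: "1.10" sorts before "1.2" because '1' < '2'. */
    printf("%d %d\n",
           strcmp("1.1", "1.10") < 0,   /* 1: "1.1"  precedes "1.10" */
           strcmp("1.10", "1.2") < 0);  /* 1: "1.10" precedes "1.2"  */
    return 0;
}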
/*
* Helper function to initialize drm_device::mode_config from
* NvKmsKapiDevice's resource information.
@@ -365,9 +485,11 @@ static void nv_drm_enumerate_encoders_and_connectors
nv_dev,
"Failed to enumurate NvKmsKapiDisplay handles");
} else {
NvU32 i;
NvU32 i, j;
NvU32 nDynamicDisplays = 0;
for (i = 0; i < nDisplays; i++) {
struct nv_drm_mst_display_info *displayInfos = NULL;
struct drm_encoder *encoder =
nv_drm_add_encoder(dev, hDisplays[i]);
@@ -377,6 +499,34 @@ static void nv_drm_enumerate_encoders_and_connectors
"Failed to add connector for NvKmsKapiDisplay 0x%08x",
hDisplays[i]);
}
if (nv_drm_get_mst_display_infos(nv_dev, hDisplays[i],
&displayInfos, &nDynamicDisplays)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to get dynamic displays");
} else if (nDynamicDisplays) {
nv_drm_sort_dynamic_displays_by_dp_addr(displayInfos, nDynamicDisplays);
for (j = 0; j < nDynamicDisplays; j++) {
if (displayInfos[j].isDpMST) {
struct drm_encoder *mst_encoder =
nv_drm_add_encoder(dev, displayInfos[j].handle);
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "found DP MST port display handle %u",
displayInfos[j].handle);
if (IS_ERR(mst_encoder)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to add connector for NvKmsKapiDisplay 0x%08x",
displayInfos[j].handle);
}
}
}
nv_drm_free(displayInfos);
}
}
}
@@ -602,6 +752,7 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
memset(&allocateDeviceParams, 0, sizeof(allocateDeviceParams));
allocateDeviceParams.gpuId = nv_dev->gpu_info.gpu_id;
allocateDeviceParams.migDevice = nv_dev->gpu_mig_device;
allocateDeviceParams.privateData = nv_dev;
allocateDeviceParams.eventCallback = nv_drm_event_callback;
@@ -672,6 +823,9 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_dev->requiresVrrSemaphores = resInfo.caps.requiresVrrSemaphores;
nv_dev->vtFbBaseAddress = resInfo.vtFbBaseAddress;
nv_dev->vtFbSize = resInfo.vtFbSize;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
gen = nv_dev->pageKindGeneration;
kind = nv_dev->genericPageKind;
@@ -855,6 +1009,62 @@ static void nv_drm_master_set(struct drm_device *dev,
}
#endif
static
int nv_drm_reset_input_colorspace(struct drm_device *dev)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
struct drm_plane *plane;
struct nv_drm_plane_state *nv_drm_plane_state;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
bool do_reset = false;
NvU32 flags = 0;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
#if defined(DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
#endif
drm_modeset_acquire_init(&ctx, flags);
state->acquire_ctx = &ctx;
nv_drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto out;
}
nv_drm_plane_state = to_nv_drm_plane_state(plane_state);
if (nv_drm_plane_state) {
if (nv_drm_plane_state->input_colorspace != NV_DRM_INPUT_COLOR_SPACE_NONE) {
nv_drm_plane_state->input_colorspace = NV_DRM_INPUT_COLOR_SPACE_NONE;
do_reset = true;
}
}
}
if (do_reset) {
ret = drm_atomic_commit(state);
}
out:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
// On success, drm_atomic_commit() takes care of cleaning up and freeing the state.
if (ret != 0) {
drm_atomic_state_free(state);
}
#endif
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
#if defined(NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG)
static
@@ -898,6 +1108,12 @@ void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
drm_modeset_unlock_all(dev);
nvKms->releaseOwnership(nv_dev->pDevice);
} else {
int err = nv_drm_reset_input_colorspace(dev);
if (err != 0) {
NV_DRM_DEV_LOG_WARN(nv_dev,
"nv_drm_reset_input_colorspace failed with error code: %d !", err);
}
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
@@ -935,6 +1151,7 @@ static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
}
params->gpu_id = nv_dev->gpu_info.gpu_id;
params->mig_device = nv_dev->gpu_mig_device;
params->primary_index = dev->primary->index;
params->supports_alloc = false;
params->generic_page_kind = 0;
@@ -1725,7 +1942,7 @@ static const struct file_operations nv_drm_fops = {
.llseek = noop_llseek,
#if defined(NV_FILE_OPERATIONS_FOP_UNSIGNED_OFFSET_PRESENT)
#if defined(FOP_UNSIGNED_OFFSET)
.fop_flags = FOP_UNSIGNED_OFFSET,
#endif
};
@@ -1967,16 +2184,16 @@ void nv_drm_update_drm_driver_features(void)
/*
* Helper function for allocate/register DRM device for given NVIDIA GPU ID.
*/
void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *gpu_info)
{
struct nv_drm_device *nv_dev = NULL;
struct drm_device *dev = NULL;
struct device *device = gpu_info->os_device_ptr;
struct device *device = gpu_info->gpuInfo.os_device_ptr;
bool bus_is_pci;
DRM_DEBUG(
"Registering device for NVIDIA GPU ID 0x08%x",
gpu_info->gpu_id);
gpu_info->gpuInfo.gpu_id);
/* Allocate NVIDIA-DRM device */
@@ -1988,7 +2205,8 @@ void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
return;
}
nv_dev->gpu_info = *gpu_info;
nv_dev->gpu_info = gpu_info->gpuInfo;
nv_dev->gpu_mig_device = gpu_info->migDevice;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
mutex_init(&nv_dev->lock);
@@ -2045,9 +2263,30 @@ void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
aperture_remove_conflicting_pci_devices(pdev, nv_drm_driver.name);
#endif
nvKms->framebufferConsoleDisabled(nv_dev->pDevice);
} else {
resource_size_t base = (resource_size_t) nv_dev->vtFbBaseAddress;
resource_size_t size = (resource_size_t) nv_dev->vtFbSize;
if (base > 0 && size > 0) {
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_PRESENT)
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_DRIVER_ARG)
drm_aperture_remove_conflicting_framebuffers(base, size, false, &nv_drm_driver);
#elif defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_NO_PRIMARY_ARG)
drm_aperture_remove_conflicting_framebuffers(base, size, &nv_drm_driver);
#else
drm_aperture_remove_conflicting_framebuffers(base, size, false, nv_drm_driver.name);
#endif
#elif defined(NV_APERTURE_REMOVE_CONFLICTING_DEVICES_PRESENT)
aperture_remove_conflicting_devices(base, size, nv_drm_driver.name);
#endif
} else {
NV_DRM_DEV_LOG_INFO(nv_dev, "Invalid framebuffer console info");
}
}
#if defined(NV_DRM_CLIENT_AVAILABLE)
drm_client_setup(dev, NULL);
drm_client_setup(dev, NULL);
#elif defined(NV_DRM_FBDEV_TTM_AVAILABLE)
drm_fbdev_ttm_setup(dev, 32);
#elif defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
@@ -2078,7 +2317,7 @@ failed_drm_alloc:
#if defined(NV_LINUX)
int nv_drm_probe_devices(void)
{
nv_gpu_info_t *gpu_info = NULL;
struct NvKmsKapiGpuInfo *gpu_info = NULL;
NvU32 gpu_count = 0;
NvU32 i;

View File

@@ -27,13 +27,15 @@
#if defined(NV_DRM_AVAILABLE)
struct NvKmsKapiGpuInfo;
int nv_drm_probe_devices(void);
void nv_drm_remove_devices(void);
void nv_drm_suspend_resume(NvBool suspend);
void nv_drm_register_drm_device(const nv_gpu_info_t *);
void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *);
void nv_drm_update_drm_driver_features(void);

View File

@@ -319,7 +319,7 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
if (nv_encoder != NULL) {
NV_DRM_DEV_LOG_ERR(
NV_DRM_DEV_LOG_INFO(
nv_dev,
"Encoder with NvKmsKapiDisplay 0x%08x already exists.",
hDisplay);

View File

@@ -202,6 +202,43 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
params.explicit_layout = false;
}
/*
* XXX work around an invalid pitch assumption in DRM.
*
* The smallest pitch the display hardware allows is 256.
*
* If a DRM client allocates a 32x32 cursor surface through
* DRM_IOCTL_MODE_CREATE_DUMB, we'll correctly round the pitch to 256:
*
* pitch = round(32 (width) * 4 (Bpp), 256) = 256
*
* and then allocate an 8k surface:
*
* size = pitch * 32 (height) = 8192
*
* and report the rounded pitch and size back to the client through the
* struct drm_mode_create_dumb ioctl params.
*
* But when the DRM client passes that buffer object handle to
* DRM_IOCTL_MODE_CURSOR, the client has no way to specify the pitch. This
* path in drm:
*
* DRM_IOCTL_MODE_CURSOR
* drm_mode_cursor_ioctl()
* drm_mode_cursor_common()
* drm_mode_cursor_universal()
*
* will implicitly create a framebuffer from the buffer object, and compute
* the pitch as width x 4 Bpp = 128 (without aligning to our minimum pitch).
*
* Intercept this case and force the pitch back to 256.
*/
if ((params.width == 32) &&
(params.height == 32) &&
(params.planes[0].pitch == 128)) {
params.planes[0].pitch = 256;
}
/* Create NvKmsKapiSurface */
nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
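The numbers in the workaround above follow directly from the 256-byte minimum pitch; a standalone sketch of the same arithmetic for the 32x32 ARGB cursor case, where round_up_pitch is an illustrative helper rather than a driver or DRM function:

#include <stdint.h>
#include <stdio.h>

#define NV_MIN_PITCH 256u /* smallest pitch the display hardware allows */

static uint32_t round_up_pitch(uint32_t bytes)
{
    return (bytes + NV_MIN_PITCH - 1) & ~(NV_MIN_PITCH - 1);
}

int main(void)
{
    uint32_t dumb_pitch = round_up_pitch(32 * 4);  /* CREATE_DUMB path: 128 -> 256 */
    uint32_t cursor_pitch = 32 * 4;                /* MODE_CURSOR path: stays 128  */

    printf("dumb: pitch=%u size=%u, cursor: pitch=%u\n",
           dumb_pitch, dumb_pitch * 32, cursor_pitch); /* 256, 8192, 128 */
    return 0;
}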

View File

@@ -166,4 +166,37 @@ uint32_t *nv_drm_format_array_alloc(
return array;
}
bool nv_drm_format_is_yuv(u32 format)
{
#if defined(NV_DRM_FORMAT_INFO_HAS_IS_YUV)
const struct drm_format_info *format_info = drm_format_info(format);
return (format_info != NULL) && format_info->is_yuv;
#else
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
#if defined(DRM_FORMAT_P210)
case DRM_FORMAT_P210:
#endif
#if defined(DRM_FORMAT_P010)
case DRM_FORMAT_P010:
#endif
#if defined(DRM_FORMAT_P012)
case DRM_FORMAT_P012:
#endif
return true;
default:
return false;
}
#endif
}
#endif

View File

@@ -38,6 +38,8 @@ uint32_t *nv_drm_format_array_alloc(
unsigned int *count,
const long unsigned int nvkms_format_mask);
bool nv_drm_format_is_yuv(u32 format);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_FORMAT_H__ */

View File

@@ -308,12 +308,12 @@ static int __nv_drm_nvkms_gem_obj_init(
nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
nv_nvkms_memory->physically_mapped = false;
if (!nvKms->getMemoryPages(nv_dev->pDevice,
if (!nvKms->isVidmem(pMemory) &&
!nvKms->getMemoryPages(nv_dev->pDevice,
pMemory,
&pages,
&numPages) &&
!nvKms->isVidmem(pMemory)) {
/* GetMemoryPages may fail for vidmem allocations,
&numPages)) {
/* GetMemoryPages will fail for vidmem allocations,
* but it should not fail for sysmem allocations. */
NV_DRM_DEV_LOG_ERR(nv_dev,
"Failed to get memory pages for NvKmsKapiMemory 0x%p",

View File

@@ -69,6 +69,13 @@
#endif //NV_DRM_ROTATION_AVAILABLE
/*
* Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to
* accommodate new color props") in Linux v6.8 increased the per-object
* property limit from 24 to 64.
*/
#define NV_DRM_USE_EXTENDED_PROPERTIES (DRM_OBJECT_MAX_PROPERTY >= 64)
/*
* drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
* (2017-09-26) and drm_dev_unref() is removed by

View File

@@ -182,6 +182,7 @@ struct drm_nvidia_gem_import_userspace_memory_params {
struct drm_nvidia_get_dev_info_params {
uint32_t gpu_id; /* OUT */
uint32_t mig_device; /* OUT */
uint32_t primary_index; /* OUT; the "card%d" value */
uint32_t supports_alloc; /* OUT */

View File

@@ -677,6 +677,33 @@ int nv_drm_atomic_commit(struct drm_device *dev,
"Flip event timeout on head %u", nv_crtc->head);
}
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
/*
* If the legacy LUT needs to be updated, ensure that the previous LUT
* update is complete first.
*/
if (crtc_state->color_mgmt_changed) {
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
nv_crtc->head,
!nonblock /* waitForCompletion */);
/* If checking the LUT notifier failed, assume no LUT notifier is set. */
if (!complete) {
if (nonblock) {
return -EBUSY;
} else {
/*
* checkLutNotifier should wait on the notifier in this
* case, so we should only get here if the wait timed out.
*/
NV_DRM_DEV_LOG_ERR(
nv_dev,
"LUT notifier timeout on head %u", nv_crtc->head);
}
}
}
#endif
}
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG)
@@ -803,6 +830,19 @@ int nv_drm_atomic_commit(struct drm_device *dev,
__nv_drm_handle_flip_event(nv_crtc);
}
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (crtc_state->color_mgmt_changed) {
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
nv_crtc->head,
true /* waitForCompletion */);
if (!complete) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"LUT notifier timeout on head %u", nv_crtc->head);
}
}
#endif
}
}

View File

@@ -58,16 +58,6 @@ typedef struct nv_timer nv_drm_timer;
#error "Need to define kernel timer callback primitives for this OS"
#endif
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif
struct page;
/* Set to true when the atomic modeset feature is enabled. */

View File

@@ -85,8 +85,15 @@
DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
__dev->gpu_info.gpu_id, ##__VA_ARGS__)
enum nv_drm_input_color_space {
NV_DRM_INPUT_COLOR_SPACE_NONE,
NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR,
NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ
};
struct nv_drm_device {
nv_gpu_info_t gpu_info;
MIGDeviceId gpu_mig_device;
struct drm_device *dev;
@@ -182,6 +189,9 @@ struct nv_drm_device {
struct drm_property *nv_crtc_regamma_divisor_property;
struct nv_drm_device *next;
NvU64 vtFbBaseAddress;
NvU64 vtFbSize;
};
static inline NvU32 nv_drm_next_display_semaphore(

View File

@@ -65,6 +65,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_devices
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_pci_devices
@@ -74,6 +75,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_client_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_helper_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_plane_create_color_properties
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_mixed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pfn_to_pfn_t
@@ -133,6 +135,8 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_no_primary_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_syncobj_features_present
@@ -140,8 +144,9 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_framebuffer_obj_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_ctm_3x4_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_lut
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_info_has_is_yuv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_property_blob_put
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations_fop_unsigned_offset_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_helper_funcs_mode_valid_has_const_mode_arg