commit eb5c7665a1 (parent 6dd092ddb7)
Author: Andy Ritger
Date:   2023-05-30 10:11:36 -07:00

    535.43.02

1403 changed files with 295367 additions and 86235 deletions

View File

@@ -44,6 +44,8 @@
 #if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
 #include <linux/nvhost.h>
+#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
+#include <linux/host1x-next.h>
 #endif
 
 #if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
@@ -361,6 +363,21 @@ plane_req_config_update(struct drm_plane *plane,
     if (nv_drm_plane_state->fd_user_ptr) {
         req_config->config.syncptParams.postSyncptRequested = true;
     }
+#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
+    if (plane_state->fence != NULL) {
+        int ret = host1x_fence_extract(
+            plane_state->fence,
+            &req_config->config.syncptParams.preSyncptId,
+            &req_config->config.syncptParams.preSyncptValue);
+        if (ret != 0) {
+            return ret;
+        }
+        req_config->config.syncptParams.preSyncptSpecified = true;
+    }
+
+    if (nv_drm_plane_state->fd_user_ptr) {
+        req_config->config.syncptParams.postSyncptRequested = true;
+    }
 #else
     return -1;
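The new #elif branch above wires Tegra's host1x-next fencing into the pre-syncpt path: the plane's in-fence is translated into a syncpoint (id, value) pair that the display hardware can wait on. A minimal sketch of that extraction step, assuming the host1x-next API exactly as called in the diff; the u32 parameter types and the wrapper's name are illustrative:

/*
 * Hedged sketch: host1x_fence_extract() maps a dma_fence backed by a
 * host1x syncpoint to its (id, threshold) pair, as used in the hunk above.
 */
#include <linux/dma-fence.h>
#include <linux/host1x-next.h>
#include <linux/types.h>

static int example_extract_pre_syncpt(struct dma_fence *fence,
                                      u32 *id, u32 *value)
{
    if (fence == NULL) {
        return 0; /* No in-fence attached to this plane update. */
    }
    /* Fails if the fence is not backed by a host1x syncpoint. */
    return host1x_fence_extract(fence, id, value);
}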

View File

@@ -1436,7 +1436,9 @@ static void nv_drm_update_drm_driver_features(void)
     nv_drm_driver.dumb_create = nv_drm_dumb_create;
     nv_drm_driver.dumb_map_offset = nv_drm_dumb_map_offset;
+#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
     nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
+#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
 #endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
 }

View File

@@ -205,7 +205,7 @@ nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay)
     encoder = nv_drm_encoder_new(dev,
                                  displayInfo->handle,
                                  connectorInfo->signalFormat,
-                                 get_crtc_mask(dev, connectorInfo->headMask));
+                                 get_crtc_mask(dev, displayInfo->headMask));
 
     if (IS_ERR(encoder)) {
         ret = PTR_ERR(encoder);

View File

@@ -150,6 +150,14 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
     for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
         if (nv_fb->nv_gem[i] != NULL) {
+            if (!nvKms->isMemoryValidForDisplay(nv_dev->pDevice,
+                                                nv_fb->nv_gem[i]->pMemory)) {
+                NV_DRM_DEV_LOG_INFO(
+                    nv_dev,
+                    "Framebuffer memory not appropriate for scanout");
+                goto fail;
+            }
+
             params.planes[i].memory = nv_fb->nv_gem[i]->pMemory;
             params.planes[i].offset = nv_fb->base.offsets[i];
             params.planes[i].pitch = nv_fb->base.pitches[i];
@@ -164,6 +172,17 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
         params.layout = (modifier & 0x10) ?
             NvKmsSurfaceMemoryLayoutBlockLinear :
             NvKmsSurfaceMemoryLayoutPitch;
+
+        // See definition of DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D, we are testing
+        // 'c', the lossless compression field of the modifier
+        if (params.layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
+            (modifier >> 23) & 0x7) {
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Cannot create FB from compressible surface allocation");
+            goto fail;
+        }
+
         params.log2GobsPerBlockY = modifier & 0xf;
     } else {
         params.explicit_layout = false;
@@ -174,11 +193,14 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
     nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
     if (nv_fb->pSurface == NULL) {
         NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface");
-        drm_framebuffer_cleanup(&nv_fb->base);
-        return -EINVAL;
+        goto fail;
     }
 
     return 0;
+
+fail:
+    drm_framebuffer_cleanup(&nv_fb->base);
+    return -EINVAL;
 }
 
 struct drm_framebuffer *nv_drm_internal_framebuffer_create(
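Both framebuffer checks above key off fields of the format modifier. Per the DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definition in drm_fourcc.h, bit 4 marks the block-linear modifier family, bits [3:0] carry 'h' (log2 of the block height in GOBs), and bits [25:23] carry 'c' (the lossless compression kind). A small decoding sketch; the helper names are illustrative:

/* Bit 4: the driver above treats this as block-linear vs. pitch layout. */
#include <stdbool.h>
#include <stdint.h>

static bool modifier_is_block_linear(uint64_t modifier)
{
    return (modifier & 0x10) != 0;
}

/* Bits [3:0]: 'h', log2 of the block height in GOBs. */
static unsigned int modifier_log2_gobs_per_block_y(uint64_t modifier)
{
    return (unsigned int)(modifier & 0xf);
}

/* Bits [25:23]: 'c'; nonzero means a compressible surface allocation. */
static bool modifier_is_compressible(uint64_t modifier)
{
    return ((modifier >> 23) & 0x7) != 0;
}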

View File

@@ -516,6 +516,7 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
     struct nv_drm_gem_fence_context *nv_gem_fence_context;
     nv_dma_fence_t *fence;
+    nv_dma_resv_t *resv;
 
     nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
@@ -566,18 +567,20 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
         goto fence_context_create_fence_failed;
     }
 
-    nv_dma_resv_lock(&nv_gem->resv, NULL);
+    resv = nv_drm_gem_res_obj(nv_gem);
 
-    ret = nv_dma_resv_reserve_fences(&nv_gem->resv, 1, false);
+    nv_dma_resv_lock(resv, NULL);
+
+    ret = nv_dma_resv_reserve_fences(resv, 1, false);
     if (ret == 0) {
-        nv_dma_resv_add_excl_fence(&nv_gem->resv, fence);
+        nv_dma_resv_add_excl_fence(resv, fence);
     } else {
         NV_DRM_DEV_LOG_ERR(
             nv_dev,
             "Failed to reserve fence. Error code: %d", ret);
     }
-    nv_dma_resv_unlock(&nv_gem->resv);
+    nv_dma_resv_unlock(resv);
 
     /* dma_resv_add_excl_fence takes its own reference to the fence. */
     nv_dma_fence_put(fence);
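With resv now obtained through nv_drm_gem_res_obj(), the fence lands on whichever reservation object is authoritative for the buffer (the DRM core's, the dma-buf exporter's, or the embedded one) instead of unconditionally on the embedded resv. A sketch of the underlying reserve-then-add pattern against the raw dma_resv API of newer kernels, which the nv_dma_resv_* wrappers abstract across kernel versions; mapping the exclusive fence to DMA_RESV_USAGE_WRITE is this sketch's assumption:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int example_attach_write_fence(struct dma_resv *resv,
                                      struct dma_fence *fence)
{
    int ret;

    dma_resv_lock(resv, NULL);
    /* Make room for one more fence slot before adding. */
    ret = dma_resv_reserve_fences(resv, 1);
    if (ret == 0) {
        /* The reservation object takes its own reference to the fence. */
        dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
    }
    dma_resv_unlock(resv);

    return ret;
}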

View File

@@ -131,11 +131,11 @@ static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
     const struct nv_drm_gem_object *nv_gem_src);
 
 static int __nv_drm_gem_nvkms_map(
-    struct nv_drm_device *nv_dev,
-    struct NvKmsKapiMemory *pMemory,
-    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory,
-    uint64_t size)
+    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory)
 {
+    struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev;
+    struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory;
+
     if (!nv_dev->hasVideoMemory) {
         return 0;
     }
@@ -153,7 +153,7 @@ static int __nv_drm_gem_nvkms_map(
     nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
         (uintptr_t)nv_nvkms_memory->pPhysicalAddress,
-        size);
+        nv_nvkms_memory->base.base.size);
 
     if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) {
         NV_DRM_DEV_LOG_INFO(
@@ -167,6 +167,22 @@ static int __nv_drm_gem_nvkms_map(
     return 0;
 }
 
+static void *__nv_drm_gem_nvkms_prime_vmap(
+    struct nv_drm_gem_object *nv_gem)
+{
+    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
+        to_nv_nvkms_memory(nv_gem);
+
+    if (!nv_nvkms_memory->physically_mapped) {
+        int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
+        if (ret) {
+            return ERR_PTR(ret);
+        }
+    }
+
+    return nv_nvkms_memory->pWriteCombinedIORemapAddress;
+}
+
 static int __nv_drm_gem_map_nvkms_memory_offset(
     struct nv_drm_device *nv_dev,
     struct nv_drm_gem_object *nv_gem,
@@ -176,10 +192,7 @@ static int __nv_drm_gem_map_nvkms_memory_offset(
         to_nv_nvkms_memory(nv_gem);
 
     if (!nv_nvkms_memory->physically_mapped) {
-        int ret = __nv_drm_gem_nvkms_map(nv_dev,
-                                         nv_nvkms_memory->base.pMemory,
-                                         nv_nvkms_memory,
-                                         nv_nvkms_memory->base.base.size);
+        int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
         if (ret) {
             return ret;
         }
@@ -214,6 +227,7 @@ static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
 const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
     .free = __nv_drm_gem_nvkms_memory_free,
     .prime_dup = __nv_drm_gem_nvkms_prime_dup,
+    .prime_vmap = __nv_drm_gem_nvkms_prime_vmap,
     .mmap = __nv_drm_gem_nvkms_mmap,
     .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
     .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,
@@ -314,7 +328,7 @@ int nv_drm_dumb_create(
      * to use dumb buffers for software rendering, so they're not much use
      * without a CPU mapping.
      */
-    ret = __nv_drm_gem_nvkms_map(nv_dev, pMemory, nv_nvkms_memory, args->size);
+    ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
     if (ret) {
         nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
         goto fail;
@@ -583,11 +597,13 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
     return ret;
 }
 
+#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
 int nv_drm_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
 {
     return drm_gem_handle_delete(file, handle);
 }
+#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
 
 #endif
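Taken together, these hunks make __nv_drm_gem_nvkms_map() self-contained (device, memory, and size now come from the object itself), which lets the new prime_vmap hook create the CPU mapping lazily on first use instead of at allocation. A sketch of that lazy write-combined-mapping idiom; the types and fields are illustrative stand-ins for the NVKMS-specific state in the real driver:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_vram_object {
    bool mapped;
    phys_addr_t phys;   /* Physical address of the allocation. */
    size_t size;        /* Size of the GEM object in bytes. */
    void *wc_vaddr;     /* Write-combined CPU mapping, once created. */
};

static void *example_vmap(struct example_vram_object *obj)
{
    if (!obj->mapped) {
        /* Video memory wants a write-combined mapping, not a cached one. */
        obj->wc_vaddr = ioremap_wc(obj->phys, obj->size);
        if (obj->wc_vaddr == NULL) {
            return ERR_PTR(-ENOMEM);
        }
        obj->mapped = true;
    }
    return obj->wc_vaddr;
}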

View File

@@ -97,9 +97,11 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
                            struct drm_device *dev, uint32_t handle,
                            uint64_t *offset);
 
+#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
 int nv_drm_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle);
+#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
 
 struct drm_gem_object *nv_drm_gem_nvkms_prime_import(
     struct drm_device *dev,

View File

@@ -81,10 +81,13 @@ typedef struct dma_buf_map nv_sysio_map_t;
 static int nv_drm_gem_vmap(struct drm_gem_object *gem,
                            nv_sysio_map_t *map)
 {
-    map->vaddr = nv_drm_gem_prime_vmap(gem);
-    if (map->vaddr == NULL) {
+    void *vaddr = nv_drm_gem_prime_vmap(gem);
+    if (vaddr == NULL) {
         return -ENOMEM;
+    } else if (IS_ERR(vaddr)) {
+        return PTR_ERR(vaddr);
     }
+    map->vaddr = vaddr;
     map->is_iomem = true;
     return 0;
 }
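Because prime_vmap implementations can now return ERR_PTR(ret), nv_drm_gem_vmap() has to separate three outcomes: a valid pointer, NULL, and an errno encoded in the pointer. A minimal sketch of that <linux/err.h> convention; the consumer function here is illustrative:

#include <linux/err.h>

static int example_consume(void *(*vmap)(void), void **out)
{
    void *vaddr = vmap();

    if (vaddr == NULL) {
        return -ENOMEM;         /* NULL still means plain allocation failure. */
    }
    if (IS_ERR(vaddr)) {
        return PTR_ERR(vaddr);  /* Decode an ERR_PTR()-wrapped errno. */
    }
    *out = vaddr;
    return 0;
}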
@@ -132,13 +135,8 @@ void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
     /* Initialize the gem object */
-#if defined(NV_DRM_FENCE_AVAILABLE)
+#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
     nv_dma_resv_init(&nv_gem->resv);
-#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
-    nv_gem->base.resv = &nv_gem->resv;
-#endif
 #endif
 
 #if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
@@ -212,8 +210,7 @@ void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address)
 nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj)
 {
     struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj);
-
-    return &nv_gem->resv;
+    return nv_drm_gem_res_obj(nv_gem);
 }
 #endif

View File

@@ -45,6 +45,8 @@
 #include "nvidia-dma-resv-helper.h"
 #endif
 
+#include "linux/dma-buf.h"
+
 struct nv_drm_gem_object;
 
 struct nv_drm_gem_object_funcs {
@@ -71,7 +73,7 @@ struct nv_drm_gem_object {
     struct NvKmsKapiMemory *pMemory;
 
-#if defined(NV_DRM_FENCE_AVAILABLE)
+#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
     nv_dma_resv_t resv;
 #endif
 };
@@ -177,6 +179,15 @@ static inline int nv_drm_gem_handle_create(struct drm_file *filp,
     return drm_gem_handle_create(filp, &nv_gem->base, handle);
 }
 
+static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem)
+{
+#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
+    return nv_gem->base.resv;
+#else
+    return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv;
+#endif
+}
+
 void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
                             struct nv_drm_gem_object *nv_gem,
                             const struct nv_drm_gem_object_funcs * const ops,

View File

@@ -130,3 +130,4 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
 NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
 NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
 NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
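The new conftest entry presumably probes whether struct drm_driver still has a dumb_destroy member; on kernels where the field was removed (the DRM core deletes dumb-buffer handles itself), the probe fails to build, NV_DRM_DRIVER_HAS_DUMB_DESTROY stays undefined, and the guarded declaration, definition, and assignment in the hunks above all drop out. A guess at the C body such a compile test would build; the actual conftest.sh hook is not part of this excerpt:

#include <drm/drm_drv.h>
#include <linux/stddef.h>

/* Compiles only if struct drm_driver still declares a dumb_destroy member. */
int conftest_drm_driver_has_dumb_destroy(void)
{
    return offsetof(struct drm_driver, dumb_destroy);
}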