580.65.06

Maneet Singh
2025-08-04 11:15:02 -07:00
parent d890313300
commit 307159f262
1315 changed files with 477791 additions and 279973 deletions


@@ -34,9 +34,7 @@
 #include "nvidia-drm-fence.h"
 #include "nvidia-dma-resv-helper.h"

 #if defined(NV_DRM_FENCE_AVAILABLE)

-#include "nvidia-dma-fence-helper.h"
-
-
+#include <linux/dma-fence.h>
 #define NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS 5000
@@ -83,42 +81,42 @@ struct nv_drm_prime_fence_context {
 struct nv_drm_prime_fence {
     struct list_head list_entry;
-    nv_dma_fence_t base;
+    struct dma_fence base;
     spinlock_t lock;
 };

 static inline
-struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence)
+struct nv_drm_prime_fence *to_nv_drm_prime_fence(struct dma_fence *fence)
 {
     return container_of(fence, struct nv_drm_prime_fence, base);
 }

 static const char*
-nv_drm_gem_fence_op_get_driver_name(nv_dma_fence_t *fence)
+nv_drm_gem_fence_op_get_driver_name(struct dma_fence *fence)
 {
     return "NVIDIA";
 }

 static const char*
-nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence)
+nv_drm_gem_prime_fence_op_get_timeline_name(struct dma_fence *fence)
 {
     return "nvidia.prime";
 }

-static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence)
+static bool nv_drm_gem_prime_fence_op_enable_signaling(struct dma_fence *fence)
 {
     // DO NOTHING
     return true;
 }

-static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence)
+static void nv_drm_gem_prime_fence_op_release(struct dma_fence *fence)
 {
     struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence);
     nv_drm_free(nv_fence);
 }

 static signed long
-nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
+nv_drm_gem_prime_fence_op_wait(struct dma_fence *fence,
                                bool intr, signed long timeout)
 {
     /*
@@ -131,12 +129,12 @@ nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
      * that it should never get hit during normal operation, but not so long
      * that the system becomes unresponsive.
      */
-    return nv_dma_fence_default_wait(fence, intr,
-                                     (timeout == MAX_SCHEDULE_TIMEOUT) ?
-                                     msecs_to_jiffies(96) : timeout);
+    return dma_fence_default_wait(fence, intr,
+                                  (timeout == MAX_SCHEDULE_TIMEOUT) ?
+                                  msecs_to_jiffies(96) : timeout);
 }

-static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = {
+static const struct dma_fence_ops nv_drm_gem_prime_fence_ops = {
     .get_driver_name = nv_drm_gem_fence_op_get_driver_name,
     .get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
     .enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
@@ -148,8 +146,8 @@ static inline void
 __nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
 {
     list_del(&nv_fence->list_entry);
-    nv_dma_fence_signal(&nv_fence->base);
-    nv_dma_fence_put(&nv_fence->base);
+    dma_fence_signal(&nv_fence->base);
+    dma_fence_put(&nv_fence->base);
 }

 static void nv_drm_gem_prime_force_fence_signal(
@@ -289,13 +287,13 @@ __nv_drm_prime_fence_context_new(
     }

     /*
-     * nv_dma_fence_context_alloc() cannot fail, so we do not need
+     * dma_fence_context_alloc() cannot fail, so we do not need
      * to check a return value.
      */

     nv_prime_fence_context->base.ops = &nv_drm_prime_fence_context_ops;
     nv_prime_fence_context->base.nv_dev = nv_dev;
-    nv_prime_fence_context->base.context = nv_dma_fence_context_alloc(1);
+    nv_prime_fence_context->base.context = dma_fence_context_alloc(1);
     nv_prime_fence_context->base.fenceSemIndex = p->index;
     nv_prime_fence_context->pSemSurface = pSemSurface;
     nv_prime_fence_context->pLinearAddress = pLinearAddress;
@@ -343,7 +341,7 @@ failed:
     return NULL;
 }

-static nv_dma_fence_t *__nv_drm_prime_fence_context_create_fence(
+static struct dma_fence *__nv_drm_prime_fence_context_create_fence(
     struct nv_drm_prime_fence_context *nv_prime_fence_context,
     unsigned int seqno)
 {
@@ -369,12 +367,12 @@ static nv_dma_fence_t *__nv_drm_prime_fence_context_create_fence(
     spin_lock_init(&nv_fence->lock);

-    nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
-                      &nv_fence->lock, nv_prime_fence_context->base.context,
-                      seqno);
+    dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
+                   &nv_fence->lock, nv_prime_fence_context->base.context,
+                   seqno);

     /* The context maintains a reference to any pending fences. */
-    nv_dma_fence_get(&nv_fence->base);
+    dma_fence_get(&nv_fence->base);

     list_add_tail(&nv_fence->list_entry, &nv_prime_fence_context->pending);
@@ -424,12 +422,11 @@ const struct nv_drm_gem_object_funcs nv_fence_context_gem_ops = {
 static inline
 struct nv_drm_fence_context *
 __nv_drm_fence_context_lookup(
-    struct drm_device *dev,
     struct drm_file *filp,
     u32 handle)
 {
     struct nv_drm_gem_object *nv_gem =
-        nv_drm_gem_object_lookup(dev, filp, handle);
+        nv_drm_gem_object_lookup(filp, handle);

     if (nv_gem != NULL && nv_gem->ops != &nv_fence_context_gem_ops) {
         nv_drm_gem_object_unreference_unlocked(nv_gem);
@@ -491,7 +488,7 @@ done:
 }

 static int __nv_drm_gem_attach_fence(struct nv_drm_gem_object *nv_gem,
-                                     nv_dma_fence_t *fence,
+                                     struct dma_fence *fence,
                                      bool shared)
 {
     nv_dma_resv_t *resv = nv_drm_gem_res_obj(nv_gem);
@@ -524,7 +521,7 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
     struct nv_drm_gem_object *nv_gem;
     struct nv_drm_fence_context *nv_fence_context;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;

     if (nv_dev->pDevice == NULL) {
         ret = -EOPNOTSUPP;
@@ -536,7 +533,7 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
         goto done;
     }

-    nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
+    nv_gem = nv_drm_gem_object_lookup(filep, p->handle);

     if (!nv_gem) {
         NV_DRM_DEV_LOG_ERR(
@@ -548,7 +545,6 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
     }

     if((nv_fence_context = __nv_drm_fence_context_lookup(
-            nv_dev->dev,
             filep,
             p->fence_context_handle)) == NULL) {
@@ -587,7 +583,7 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
     ret = __nv_drm_gem_attach_fence(nv_gem, fence, true /* exclusive */);

-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);

 fence_context_create_fence_failed:
     nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
@@ -600,7 +596,7 @@ done:
 }

 struct nv_drm_semsurf_fence {
-    nv_dma_fence_t base;
+    struct dma_fence base;
     spinlock_t lock;

     /*
@@ -628,7 +624,7 @@ struct nv_drm_semsurf_fence_callback {
 };

 struct nv_drm_sync_fd_wait_data {
-    nv_dma_fence_cb_t dma_fence_cb;
+    struct dma_fence_cb dma_fence_cb;
     struct nv_drm_semsurf_fence_ctx *ctx;
     nv_drm_work work; /* Deferred second half of fence wait callback */
@@ -759,15 +755,15 @@ __nv_drm_semsurf_force_complete_pending(struct nv_drm_semsurf_fence_ctx *ctx)
             &ctx->pending_fences,
             typeof(*nv_fence),
             pending_node);
-        nv_dma_fence_t *fence = &nv_fence->base;
+        struct dma_fence *fence = &nv_fence->base;

         list_del(&nv_fence->pending_node);

-        nv_dma_fence_set_error(fence, -ETIMEDOUT);
-        nv_dma_fence_signal(fence);
+        dma_fence_set_error(fence, -ETIMEDOUT);
+        dma_fence_signal(fence);

         /* Remove the pending list's reference */
-        nv_dma_fence_put(fence);
+        dma_fence_put(fence);
     }

     /*
@@ -824,7 +820,7 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
     struct list_head finished;
     struct list_head timed_out;
     struct nv_drm_semsurf_fence *nv_fence;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
     NvU64 currentSeqno = __nv_drm_get_semsurf_ctx_seqno(ctx);
     NvU64 fenceSeqno = 0;
     unsigned long flags;
@@ -888,8 +884,8 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
         nv_fence = list_first_entry(&finished, typeof(*nv_fence), pending_node);
         list_del_init(&nv_fence->pending_node);
         fence = &nv_fence->base;
-        nv_dma_fence_signal(fence);
-        nv_dma_fence_put(fence); /* Drops the pending list's reference */
+        dma_fence_signal(fence);
+        dma_fence_put(fence); /* Drops the pending list's reference */
     }

     while (!list_empty(&timed_out)) {
@@ -897,9 +893,9 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
                                     pending_node);
         list_del_init(&nv_fence->pending_node);
         fence = &nv_fence->base;
-        nv_dma_fence_set_error(fence, -ETIMEDOUT);
-        nv_dma_fence_signal(fence);
-        nv_dma_fence_put(fence); /* Drops the pending list's reference */
+        dma_fence_set_error(fence, -ETIMEDOUT);
+        dma_fence_signal(fence);
+        dma_fence_put(fence); /* Drops the pending list's reference */
     }
 }
@@ -1265,13 +1261,13 @@ __nv_drm_semsurf_fence_ctx_new(
     }

     /*
-     * nv_dma_fence_context_alloc() cannot fail, so we do not need
+     * dma_fence_context_alloc() cannot fail, so we do not need
      * to check a return value.
      */

     ctx->base.ops = &nv_drm_semsurf_fence_ctx_ops;
     ctx->base.nv_dev = nv_dev;
-    ctx->base.context = nv_dma_fence_context_alloc(1);
+    ctx->base.context = dma_fence_context_alloc(1);
     ctx->base.fenceSemIndex = p->index;
     ctx->pSemSurface = pSemSurface;
     ctx->pSemMapping.pVoid = semMapping;
@@ -1343,26 +1339,26 @@ int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
 }

 static inline struct nv_drm_semsurf_fence*
-to_nv_drm_semsurf_fence(nv_dma_fence_t *fence)
+to_nv_drm_semsurf_fence(struct dma_fence *fence)
 {
     return container_of(fence, struct nv_drm_semsurf_fence, base);
 }

 static const char*
-__nv_drm_semsurf_fence_op_get_timeline_name(nv_dma_fence_t *fence)
+__nv_drm_semsurf_fence_op_get_timeline_name(struct dma_fence *fence)
 {
     return "nvidia.semaphore_surface";
 }

 static bool
-__nv_drm_semsurf_fence_op_enable_signaling(nv_dma_fence_t *fence)
+__nv_drm_semsurf_fence_op_enable_signaling(struct dma_fence *fence)
 {
     // DO NOTHING - Could defer RM callback registration until this point
     return true;
 }

 static void
-__nv_drm_semsurf_fence_op_release(nv_dma_fence_t *fence)
+__nv_drm_semsurf_fence_op_release(struct dma_fence *fence)
 {
     struct nv_drm_semsurf_fence *nv_fence =
         to_nv_drm_semsurf_fence(fence);
@@ -1370,12 +1366,12 @@ __nv_drm_semsurf_fence_op_release(nv_dma_fence_t *fence)
     nv_drm_free(nv_fence);
 }

-static const nv_dma_fence_ops_t nv_drm_semsurf_fence_ops = {
+static const struct dma_fence_ops nv_drm_semsurf_fence_ops = {
     .get_driver_name = nv_drm_gem_fence_op_get_driver_name,
     .get_timeline_name = __nv_drm_semsurf_fence_op_get_timeline_name,
     .enable_signaling = __nv_drm_semsurf_fence_op_enable_signaling,
     .release = __nv_drm_semsurf_fence_op_release,
-    .wait = nv_dma_fence_default_wait,
+    .wait = dma_fence_default_wait,
 #if defined(NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO)
     .use_64bit_seqno = true,
 #endif
@@ -1401,7 +1397,7 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
     }

     /* Add a reference to the fence for the list */
-    nv_dma_fence_get(&nv_fence->base);
+    dma_fence_get(&nv_fence->base);

     INIT_LIST_HEAD(&nv_fence->pending_node);
     nv_fence->timeout = nv_drm_timeout_from_ms(timeoutMS);
@@ -1434,14 +1430,14 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
     __nv_drm_semsurf_ctx_reg_callbacks(ctx);
 }

-static nv_dma_fence_t *__nv_drm_semsurf_fence_ctx_create_fence(
+static struct dma_fence *__nv_drm_semsurf_fence_ctx_create_fence(
     struct nv_drm_device *nv_dev,
     struct nv_drm_semsurf_fence_ctx *ctx,
     NvU64 wait_value,
     NvU64 timeout_value_ms)
 {
     struct nv_drm_semsurf_fence *nv_fence;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
     int ret = 0;

     if (timeout_value_ms == 0 ||
@@ -1461,9 +1457,9 @@ static nv_dma_fence_t *__nv_drm_semsurf_fence_ctx_create_fence(
 #endif

     /* Initializes the fence with one reference (for the caller) */
-    nv_dma_fence_init(fence, &nv_drm_semsurf_fence_ops,
-                      &nv_fence->lock,
-                      ctx->base.context, wait_value);
+    dma_fence_init(fence, &nv_drm_semsurf_fence_ops,
+                   &nv_fence->lock,
+                   ctx->base.context, wait_value);

     __nv_drm_semsurf_ctx_add_pending(ctx, nv_fence, timeout_value_ms);
@@ -1479,7 +1475,7 @@ int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
     struct nv_drm_device *nv_dev = to_nv_device(dev);
     struct drm_nvidia_semsurf_fence_create_params *p = data;
     struct nv_drm_fence_context *nv_fence_context;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
     int ret = -EINVAL;
     int fd;
@@ -1494,7 +1490,6 @@ int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
     }

     if ((nv_fence_context = __nv_drm_fence_context_lookup(
-            nv_dev->dev,
             filep,
             p->fence_context_handle)) == NULL) {
         NV_DRM_DEV_LOG_ERR(
@@ -1550,7 +1545,7 @@ fence_context_create_sync_failed:
      * FD will still hold a reference, and the pending list (if the fence hasn't
      * already been signaled) will also retain a reference.
      */
-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);

 fence_context_create_fence_failed:
     nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
@@ -1608,8 +1603,8 @@ __nv_drm_semsurf_wait_fence_work_cb
 static void
 __nv_drm_semsurf_wait_fence_cb
 (
-    nv_dma_fence_t *fence,
-    nv_dma_fence_cb_t *cb
+    struct dma_fence *fence,
+    struct dma_fence_cb *cb
 )
 {
     struct nv_drm_sync_fd_wait_data *wait_data =
@@ -1634,7 +1629,7 @@ __nv_drm_semsurf_wait_fence_cb
     }

     /* Don't need to reference the fence anymore, just the fence context. */
-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);
 }

 int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
@@ -1646,7 +1641,7 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
     struct nv_drm_fence_context *nv_fence_context;
     struct nv_drm_semsurf_fence_ctx *ctx;
     struct nv_drm_sync_fd_wait_data *wait_data = NULL;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
     unsigned long flags;
     int ret = -EINVAL;
@@ -1663,7 +1658,6 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
     }

     if ((nv_fence_context = __nv_drm_fence_context_lookup(
-            nv_dev->dev,
             filep,
             p->fence_context_handle)) == NULL) {
         NV_DRM_DEV_LOG_ERR(
@@ -1716,9 +1710,9 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
     list_add(&wait_data->pending_node, &ctx->pending_waits);
     spin_unlock_irqrestore(&ctx->lock, flags);

-    ret = nv_dma_fence_add_callback(fence,
-                                    &wait_data->dma_fence_cb,
-                                    __nv_drm_semsurf_wait_fence_cb);
+    ret = dma_fence_add_callback(fence,
+                                 &wait_data->dma_fence_cb,
+                                 __nv_drm_semsurf_wait_fence_cb);

     if (ret) {
         if (ret == -ENOENT) {
@@ -1730,7 +1724,7 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
         }

         /* Execute second half of wait immediately, avoiding the worker thread */
-        nv_dma_fence_put(fence);
+        dma_fence_put(fence);
         __nv_drm_semsurf_wait_fence_work_cb(wait_data);
     }
@@ -1759,7 +1753,7 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
     struct drm_nvidia_semsurf_fence_attach_params *p = data;
     struct nv_drm_gem_object *nv_gem = NULL;
     struct nv_drm_fence_context *nv_fence_context = NULL;
-    nv_dma_fence_t *fence;
+    struct dma_fence *fence;
     int ret = -EINVAL;

     if (nv_dev->pDevice == NULL) {
@@ -1767,7 +1761,7 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
         goto done;
     }

-    nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
+    nv_gem = nv_drm_gem_object_lookup(filep, p->handle);

     if (!nv_gem) {
         NV_DRM_DEV_LOG_ERR(
@@ -1779,7 +1773,6 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
     }

     nv_fence_context = __nv_drm_fence_context_lookup(
-        nv_dev->dev,
         filep,
         p->fence_context_handle);
@@ -1819,7 +1812,7 @@ int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
     ret = __nv_drm_gem_attach_fence(nv_gem, fence, p->shared);

-    nv_dma_fence_put(fence);
+    dma_fence_put(fence);

 done:
     if (nv_fence_context) {
@@ -1833,6 +1826,4 @@ done:
     return ret;
 }
-
 #endif /* NV_DRM_FENCE_AVAILABLE */
-
 #endif /* NV_DRM_AVAILABLE */
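
The substance of this diff is the retirement of the driver's dma-fence compatibility shim: every nv_dma_fence_* type and helper becomes the native struct dma_fence API from <linux/dma-fence.h>, and nv_drm_gem_object_lookup() drops its struct drm_device * argument to match the upstream drm_gem_object_lookup() signature. The removed nvidia-dma-fence-helper.h is not shown in this excerpt, so the sketch below is a plausible reconstruction of that kind of shim rather than the actual header; the NV_LINUX_FENCE_H_PRESENT guard and the exact alias list are assumptions. Kernels before 4.10 spelled these types struct fence, which is what such a wrapper papers over:

/*
 * Hypothetical sketch of the retired compatibility shim. On pre-4.10
 * kernels the fence API lived in <linux/fence.h> as struct fence /
 * fence_ops / fence_cb; the nv_ aliases let the same source compile
 * against either spelling. (Abbreviated; set_error and friends omitted.)
 */
#if defined(NV_LINUX_FENCE_H_PRESENT)
#include <linux/fence.h>

typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
typedef struct fence_cb nv_dma_fence_cb_t;

#define nv_dma_fence_init           fence_init
#define nv_dma_fence_get            fence_get
#define nv_dma_fence_put            fence_put
#define nv_dma_fence_signal         fence_signal
#define nv_dma_fence_default_wait   fence_default_wait
#define nv_dma_fence_add_callback   fence_add_callback
#define nv_dma_fence_context_alloc  fence_context_alloc
#else
#include <linux/dma-fence.h>

typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
typedef struct dma_fence_cb nv_dma_fence_cb_t;

#define nv_dma_fence_init           dma_fence_init
#define nv_dma_fence_get            dma_fence_get
#define nv_dma_fence_put            dma_fence_put
#define nv_dma_fence_signal         dma_fence_signal
#define nv_dma_fence_default_wait   dma_fence_default_wait
#define nv_dma_fence_add_callback   dma_fence_add_callback
#define nv_dma_fence_context_alloc  dma_fence_context_alloc
#endif

With every kernel the driver now supports providing struct dma_fence directly, only the #else branch could ever be taken, so the aliases add nothing and this commit spells out the native names instead.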