550.40.07

Bernhard Stoeckner
2024-01-24 17:51:53 +01:00
parent bb2dac1f20
commit 91676d6628
1411 changed files with 261367 additions and 145959 deletions


@@ -448,6 +448,12 @@
 //
 // CE semaphore payloads are encrypted, and require taking the CSL lock
 // (UVM_LOCK_ORDER_LEAF) to decrypt the payload.
+// - CSL Context
+//      Order: UVM_LOCK_ORDER_CSL_CTX
+//      When the Confidential Computing feature is enabled, encrypt/decrypt
+//      operations to communicate with the GPU are handled by the CSL context.
+//      This lock protects RM calls that use this context.
 //
 // - Leaf locks
 //      Order: UVM_LOCK_ORDER_LEAF
@@ -492,6 +498,11 @@ typedef enum
     UVM_LOCK_ORDER_VA_SPACE_TOOLS,
     UVM_LOCK_ORDER_SEMA_POOL_TRACKER,
     UVM_LOCK_ORDER_SECURE_SEMAPHORE,
+    // TODO: Bug 4184836: [uvm][hcc] Remove UVM_LOCK_ORDER_CSL_CTX
+    // This lock order can be removed after RM no longer relies on RPC event
+    // notifications.
+    UVM_LOCK_ORDER_CSL_CTX,
     UVM_LOCK_ORDER_LEAF,
     UVM_LOCK_ORDER_COUNT,
 } uvm_lock_order_t;
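
The position of UVM_LOCK_ORDER_CSL_CTX in this enum is what the debug-build order checks enforce: a thread may only acquire a lock whose order is strictly above the highest order it already holds. A minimal sketch of the intended acquisition pattern, using hypothetical lock variables (g_csl_ctx_mutex at order UVM_LOCK_ORDER_CSL_CTX, g_payload_lock at order UVM_LOCK_ORDER_LEAF; neither name is from this diff):

    // Legal: lock orders strictly increase (CSL_CTX comes before LEAF).
    uvm_mutex_lock(&g_csl_ctx_mutex);
    uvm_spin_lock(&g_payload_lock);
    // ... decrypt the CE semaphore payload via the CSL context ...
    uvm_spin_unlock(&g_payload_lock);
    uvm_mutex_unlock(&g_csl_ctx_mutex);

Acquiring them in the opposite order would trip the lock-tracking asserts in debug builds.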
@@ -648,6 +659,15 @@ bool __uvm_locking_initialized(void);
 #define uvm_assert_lockable_order(order) UVM_ASSERT(__uvm_check_lockable_order(order, UVM_LOCK_FLAGS_MODE_ANY))
 #define uvm_assert_unlocked_order(order) UVM_ASSERT(__uvm_check_unlocked_order(order))
+#if UVM_IS_DEBUG()
+#define uvm_lock_debug_init(lock, order) ({ \
+        uvm_locking_assert_initialized(); \
+        (lock)->lock_order = (order); \
+    })
+#else
+#define uvm_lock_debug_init(lock, order) ((void) order)
+#endif
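
uvm_lock_debug_init() factors out the debug-only bookkeeping that each init routine below previously open-coded behind its own UVM_IS_DEBUG() guard. A sketch of what a call site reduces to under the two configurations (illustrative only):

    // Debug build:
    //     uvm_locking_assert_initialized();  // tracking must be set up first
    //     (lock)->lock_order = (order);      // recorded for later order checks
    //
    // Release build:
    //     ((void) order);                    // no-op; keeps 'order' referenced
    //                                        // so it cannot warn as unused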
 // Helpers for locking mmap_lock (mmap_sem in kernels < 5.8)
 // and recording its usage
 #define uvm_assert_mmap_lock_locked_mode(mm, flags) ({ \
@@ -738,15 +758,12 @@ typedef struct
 #define uvm_assert_rwsem_unlocked(uvm_sem) UVM_ASSERT(!rwsem_is_locked(&(uvm_sem)->sem))
-static void uvm_init_rwsem(uvm_rw_semaphore_t *uvm_sem, uvm_lock_order_t lock_order)
-{
-    init_rwsem(&uvm_sem->sem);
-#if UVM_IS_DEBUG()
-    uvm_locking_assert_initialized();
-    uvm_sem->lock_order = lock_order;
-#endif
-    uvm_assert_rwsem_unlocked(uvm_sem);
-}
+#define uvm_init_rwsem(uvm_sem, order) ({ \
+        uvm_rw_semaphore_t *uvm_sem_ ## order = (uvm_sem); \
+        init_rwsem(&uvm_sem_ ## order->sem); \
+        uvm_lock_debug_init(uvm_sem, order); \
+        uvm_assert_rwsem_unlocked(uvm_sem); \
+    })
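
Pasting the order token into the temporary's name (uvm_sem_ ## order) gives each expansion a distinct local, so several init calls in one scope don't shadow each other, and it forces order to be a plain identifier rather than an arbitrary expression. A hedged usage sketch (the variable is hypothetical; UVM_LOCK_ORDER_VA_SPACE is assumed to be one of the orders in the enum above):

    uvm_rw_semaphore_t lock;
    uvm_init_rwsem(&lock, UVM_LOCK_ORDER_VA_SPACE);
    uvm_down_read(&lock);
    // ... read-side critical section ...
    uvm_up_read(&lock);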
 #define uvm_down_read(uvm_sem) ({ \
         typeof(uvm_sem) _sem = (uvm_sem); \
@@ -874,15 +891,12 @@ typedef struct
         UVM_ASSERT_MSG(!irqs_disabled() && !in_interrupt(), "Mutexes cannot be used with interrupts disabled"); \
     })
-static void uvm_mutex_init(uvm_mutex_t *mutex, uvm_lock_order_t lock_order)
-{
-    mutex_init(&mutex->m);
-#if UVM_IS_DEBUG()
-    uvm_locking_assert_initialized();
-    mutex->lock_order = lock_order;
-#endif
-    uvm_assert_mutex_unlocked(mutex);
-}
+#define uvm_mutex_init(mutex, order) ({ \
+        uvm_mutex_t *mutex_ ## order = (mutex); \
+        mutex_init(&mutex_ ## order->m); \
+        uvm_lock_debug_init(mutex, order); \
+        uvm_assert_mutex_unlocked(mutex); \
+    })
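
Usage is unchanged from the old static function; only the expansion differs. A short sketch with a hypothetical mutex:

    uvm_mutex_t m;
    uvm_mutex_init(&m, UVM_LOCK_ORDER_LEAF);  // records the order in debug builds
    uvm_mutex_lock(&m);
    uvm_assert_mutex_locked(&m);              // passes while m is held
    uvm_mutex_unlock(&m);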
 #define uvm_mutex_lock(mutex) ({ \
         typeof(mutex) _mutex = (mutex); \
@@ -892,11 +906,14 @@ static void uvm_mutex_init(uvm_mutex_t *mutex, uvm_lock_order_t lock_order)
         uvm_assert_mutex_locked(_mutex); \
     })
-// Lock w/o any tracking. This should be extremely rare and *_no_tracking
-// helpers will be added only as needed.
-#define uvm_mutex_lock_no_tracking(mutex) ({ \
+// Lock while already holding a lock of the same order taken with the
+// uvm_mutex_lock() variant. Note that this shouldn't be used if the held lock
+// was taken with uvm_mutex_lock_nested(), because only a single level of
+// nesting is supported. This should be extremely rare, and *_nested helpers
+// will only be added as needed.
+#define uvm_mutex_lock_nested(mutex) ({ \
         uvm_assert_mutex_interrupts(); \
-        mutex_lock(&(mutex)->m); \
+        mutex_lock_nested(&(mutex)->m, 1); \
     })
 #define uvm_mutex_trylock(mutex) ({ \
@@ -926,9 +943,8 @@ static void uvm_mutex_init(uvm_mutex_t *mutex, uvm_lock_order_t lock_order)
         uvm_record_unlock_out_of_order(_mutex, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \
     })
-// Unlock w/o any tracking. This should be extremely rare and *_no_tracking
-// helpers will be added only as needed.
-#define uvm_mutex_unlock_no_tracking(mutex) ({ \
+// Unlock w/o any tracking.
+#define uvm_mutex_unlock_nested(mutex) ({ \
         uvm_assert_mutex_interrupts(); \
         mutex_unlock(&(mutex)->m); \
     })
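
The two *_nested helpers replace the *_no_tracking ones and are meant to be used as a pair: the second same-order acquisition goes through mutex_lock_nested(..., 1), which files it under lockdep subclass 1 instead of reporting a false self-deadlock, and neither helper records anything in the UVM lock tracking, which would otherwise flag a second lock of the same order. A hedged sketch with two hypothetical mutexes initialized at the same lock order:

    uvm_mutex_lock(&first);            // normal, tracked acquisition
    uvm_mutex_lock_nested(&second);    // same order, subclass 1, untracked
    // ... work that needs both objects locked ...
    uvm_mutex_unlock_nested(&second);  // untracked unlock, pairs with the above
    uvm_mutex_unlock(&first);

Since only one level of nesting is supported, taking a third same-order mutex this way is not allowed.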
@@ -941,14 +957,11 @@ typedef struct
 #endif
 } uvm_semaphore_t;
-static void uvm_sema_init(uvm_semaphore_t *semaphore, int val, uvm_lock_order_t lock_order)
-{
-    sema_init(&semaphore->sem, val);
-#if UVM_IS_DEBUG()
-    uvm_locking_assert_initialized();
-    semaphore->lock_order = lock_order;
-#endif
-}
+#define uvm_sema_init(semaphore, val, order) ({ \
+        uvm_semaphore_t *sem_ ## order = (semaphore); \
+        sema_init(&sem_ ## order->sem, (val)); \
+        uvm_lock_debug_init(semaphore, order); \
+    })
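
The semaphore variant additionally takes the initial count, and has no unlocked assert since a semaphore may legitimately start at zero. A usage sketch (the variable is hypothetical, and uvm_down()/uvm_up() are assumed to be the acquire/release helpers defined elsewhere in this file):

    uvm_semaphore_t sem;
    uvm_sema_init(&sem, 1, UVM_LOCK_ORDER_LEAF);  // count 1: binary semaphore
    uvm_down(&sem);                               // acquire
    // ... critical section ...
    uvm_up(&sem);                                 // release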
 #define uvm_sem_is_locked(uvm_sem) uvm_check_locked(uvm_sem, UVM_LOCK_FLAGS_MODE_SHARED)
@@ -1012,15 +1025,12 @@ typedef struct
 #define uvm_assert_spinlock_unlocked(spinlock) UVM_ASSERT(!spin_is_locked(&(spinlock)->lock))
-static void uvm_spin_lock_init(uvm_spinlock_t *spinlock, uvm_lock_order_t lock_order)
-{
-    spin_lock_init(&spinlock->lock);
-#if UVM_IS_DEBUG()
-    uvm_locking_assert_initialized();
-    spinlock->lock_order = lock_order;
-#endif
-    uvm_assert_spinlock_unlocked(spinlock);
-}
+#define uvm_spin_lock_init(spinlock, order) ({ \
+        uvm_spinlock_t *spinlock_ ## order = (spinlock); \
+        spin_lock_init(&spinlock_ ## order->lock); \
+        uvm_lock_debug_init(spinlock, order); \
+        uvm_assert_spinlock_unlocked(spinlock); \
+    })
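
The same conversion applies to plain spinlocks. A sketch with a hypothetical lock variable:

    uvm_spinlock_t sl;
    uvm_spin_lock_init(&sl, UVM_LOCK_ORDER_LEAF);
    uvm_spin_lock(&sl);
    // ... short, non-sleeping critical section ...
    uvm_spin_unlock(&sl);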
 #define uvm_spin_lock(uvm_lock) ({ \
         typeof(uvm_lock) _lock = (uvm_lock); \
@@ -1036,15 +1046,12 @@ static void uvm_spin_lock_init(uvm_spinlock_t *spinlock, uvm_lock_order_t lock_o
         uvm_record_unlock(_lock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \
     })
-static void uvm_spin_lock_irqsave_init(uvm_spinlock_irqsave_t *spinlock, uvm_lock_order_t lock_order)
-{
-    spin_lock_init(&spinlock->lock);
-#if UVM_IS_DEBUG()
-    uvm_locking_assert_initialized();
-    spinlock->lock_order = lock_order;
-#endif
-    uvm_assert_spinlock_unlocked(spinlock);
-}
+#define uvm_spin_lock_irqsave_init(spinlock, order) ({ \
+        uvm_spinlock_irqsave_t *spinlock_ ## order = (spinlock); \
+        spin_lock_init(&spinlock_ ## order->lock); \
+        uvm_lock_debug_init(spinlock, order); \
+        uvm_assert_spinlock_unlocked(spinlock); \
+    })
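
The irqsave flavor stores the saved IRQ flags in the lock itself, which is why uvm_spin_lock_irqsave() just below takes no flags argument. A sketch (the variable is hypothetical, and uvm_spin_unlock_irqrestore() is assumed to be the matching release helper from this file):

    uvm_spinlock_irqsave_t sl;
    uvm_spin_lock_irqsave_init(&sl, UVM_LOCK_ORDER_LEAF);
    uvm_spin_lock_irqsave(&sl);         // disables local IRQs, saves flags in sl
    // ... critical section safe against interrupt context ...
    uvm_spin_unlock_irqrestore(&sl);    // restores the saved flags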
 // Use a temp to not rely on flags being written after acquiring the lock.
 #define uvm_spin_lock_irqsave(uvm_lock) ({ \
@@ -1119,16 +1126,12 @@ static void uvm_rwlock_irqsave_dec(uvm_rwlock_irqsave_t *rwlock)
 #define uvm_assert_rwlock_unlocked(uvm_rwlock)
 #endif
-static void uvm_rwlock_irqsave_init(uvm_rwlock_irqsave_t *rwlock, uvm_lock_order_t lock_order)
-{
-    rwlock_init(&rwlock->lock);
-#if UVM_IS_DEBUG()
-    uvm_locking_assert_initialized();
-    rwlock->lock_order = lock_order;
-    atomic_set(&rwlock->lock_count, 0);
-#endif
-    uvm_assert_rwlock_unlocked(rwlock);
-}
+#define uvm_rwlock_irqsave_init(rwlock, order) ({ \
+        uvm_rwlock_irqsave_t *rwlock_ ## order = (rwlock); \
+        rwlock_init(&rwlock_ ## order->lock); \
+        uvm_lock_debug_init(rwlock, order); \
+        uvm_assert_rwlock_unlocked(rwlock); \
+    })
 // We can't store the irq_flags within the lock itself for readers, so they must
 // pass in their flags.
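
For the reader/writer IRQ-save lock, readers keep their own flags copy, as the comment above notes. A hedged sketch (the variable is hypothetical, and uvm_read_lock_irqsave()/uvm_read_unlock_irqrestore() are assumed to be the reader-side helpers this file pairs with this type):

    uvm_rwlock_irqsave_t rwl;
    unsigned long flags;
    uvm_rwlock_irqsave_init(&rwl, UVM_LOCK_ORDER_LEAF);
    uvm_read_lock_irqsave(&rwl, flags);       // each reader passes its own flags
    // ... read-side critical section ...
    uvm_read_unlock_irqrestore(&rwl, flags);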