535.43.02

This commit is contained in:
Andy Ritger
2023-05-30 10:11:36 -07:00
parent 6dd092ddb7
commit eb5c7665a1
1403 changed files with 295367 additions and 86235 deletions

View File

@@ -32,27 +32,50 @@
#include <linux/mmu_notifier.h>
#if defined(NV_LINUX_SCHED_MM_H_PRESENT)
#include <linux/sched/mm.h>
#elif defined(NV_LINUX_SCHED_H_PRESENT)
#include <linux/sched.h>
#endif
// Lifecycle of the mm association tracked by uvm_va_space_mm_struct.
// Per the comments below, the only transitions are one-way:
// UNINITIALIZED -> ALIVE (uvm_api_mm_initialize) -> RELEASED.
typedef enum
{
// The va_space_mm has not yet been initialized by
// uvm_api_mm_initialize(). Only current->mm can be retained for
// !uvm_va_space_mm_enabled() VA spaces.
UVM_VA_SPACE_MM_STATE_UNINITIALIZED,
// The va_space_mm has been initialized and if this is a
// uvm_va_space_mm_enabled() space CPU user space page tables are
// valid due to holding a mm_struct reference. However the GPU VA
// space may be in the process of being torn down.
UVM_VA_SPACE_MM_STATE_ALIVE,
// The va_space_mm has been released. There is no longer any
// mm_struct associated with this va_space_mm and CPU page tables
// may have been released. GPU VA state has been destroyed.
UVM_VA_SPACE_MM_STATE_RELEASED,
} uvm_va_space_mm_state_t;
// Per-VA-space record of the associated mm_struct and its refcounting state.
// Protected by the spinlock below; see the field comments for details.
//
// NOTE(review): this span appears to be a diff rendering that retains both
// the pre- and post-change text: the `mm` member and its comment appear
// twice, both the old `bool alive` and the new `state` field are present,
// and a hunk header line is embedded mid-struct. As written this is not
// valid C — reconcile against the actual header before relying on it.
struct uvm_va_space_mm_struct
{
// NOTE(review): first (pre-change) copy of the mm member comment/field;
// duplicated again below with updated wording.
// The mm currently associated with this VA space. Most callers shouldn't
// access this directly, but should instead use uvm_va_space_mm_retain()/
// uvm_va_space_mm_release().
//
// The pointer itself is valid between uvm_va_space_mm_register() and
// uvm_va_space_mm_unregister(), but should only be considered usable when
// retained or current.
struct mm_struct *mm;
// NOTE(review): second (post-change) copy of the same member follows.
// The mm currently associated with this VA space. Most callers shouldn't
// access this directly, but should instead use uvm_va_space_mm_retain()/
// uvm_va_space_mm_release().
//
// The pointer itself is valid when the va_space_mm state is
// UVM_VA_SPACE_MM_STATE_ALIVE, but should only be considered usable
// when retained or current.
struct mm_struct *mm;
#if UVM_CAN_USE_MMU_NOTIFIERS()
struct mmu_notifier mmu_notifier;
#endif
// NOTE(review): old and new versions of this lock comment both present.
// Lock protecting the alive and retained_count fields.
// Lock protecting the state and retained_count fields.
uvm_spinlock_t lock;
// NOTE(review): `alive` is the pre-change field, replaced by `state`.
// Whether the mm is usable. uvm_va_space_mm_register() marks the mm as
// alive and uvm_va_space_mm_shutdown() marks it as dead.
bool alive;
uvm_va_space_mm_state_t state;
// Refcount for uvm_va_space_mm_retain()/uvm_va_space_mm_release()
NvU32 retained_count;
@@ -60,22 +83,13 @@ struct uvm_va_space_mm_struct
// Wait queue for threads waiting for retainers to finish (retained_count
// going to 0 when not alive).
wait_queue_head_t last_retainer_wait_queue;
// NOTE(review): the `test` sub-struct below is the removed (pre-change)
// side of the diff — it was deleted by this commit.
// State which is only injected by test ioctls
struct
{
// Whether uvm_va_space_mm_shutdown() should do a timed wait for other
// threads to arrive.
bool delay_shutdown;
bool verbose;
// Number of threads which have called uvm_va_space_mm_shutdown(). Only
// used when delay_shutdown is true.
atomic_t num_mm_shutdown_threads;
} test;
};
static bool uvm_va_space_mm_alive(struct uvm_va_space_mm_struct *va_space_mm)
{
return va_space_mm->state == UVM_VA_SPACE_MM_STATE_ALIVE;
}
// Whether the system can support creating an association between a VA space and
// an mm.
bool uvm_va_space_mm_enabled_system(void);
@@ -94,18 +108,24 @@ bool uvm_va_space_mm_enabled(uvm_va_space_t *va_space);
// Locking: mmap_lock and the VA space lock must both be held for write.
NV_STATUS uvm_va_space_mm_register(uvm_va_space_t *va_space);
// De-associate the mm from the va_space. This function won't return until all
// in-flight retainers have called uvm_va_space_mm_release().
// uvm_va_space_mm_retain() and friends must not be called after this returns.
// Handles the va_space's mm being torn down while the VA space still exists.
// This function won't return until all in-flight retainers have called
// uvm_va_space_mm_release(). Subsequent calls to uvm_va_space_mm_retain() will
// return NULL.
//
// This function may invoke uvm_va_space_mm_shutdown() so the caller must not
// hold either mmap_lock or the VA space lock. Since this API must provide the
// same guarantees as uvm_va_space_mm_shutdown(), the caller must also guarantee
// prior to calling this function that all GPUs in this VA space have stopped
// making accesses under this mm and will not be able to start again under that
// VA space.
// After this call returns the VA space is essentially dead. GPUs cannot make
// any new memory accesses in registered GPU VA spaces, and no more GPU faults
// which are attributed to this VA space will arrive. Additionally, no more
// registration within the VA space is allowed (GPU, GPU VA space, or channel).
//
// Locking: This function may take both mmap_lock and the VA space lock.
// The requirements for this call are that, once we return, the GPU and
// driver are completely done using the associated mm_struct. This includes:
//
// 1) GPUs will not issue any more memory accesses under this mm
// 2) [ATS only] GPUs will not issue any more ATRs under this mm
// 3) The driver will not ask the kernel to service faults on this mm
//
// Locking: This function will take both mmap_lock and the VA space lock.
void uvm_va_space_mm_unregister(uvm_va_space_t *va_space);
// Retains the current mm registered with this VA space. If no mm is currently
@@ -114,26 +134,23 @@ void uvm_va_space_mm_unregister(uvm_va_space_t *va_space);
// (locking mmap_lock, find_vma, get_user_pages, cgroup-accounted allocations,
// etc) until uvm_va_space_mm_release() is called.
//
// Please, note that a retained mm could have mm->users == 0.
// A retained mm is guaranteed to have mm->users >= 1.
//
// It is NOT necessary to hold the VA space lock when calling this function.
struct mm_struct *uvm_va_space_mm_retain(uvm_va_space_t *va_space);
// Similar to uvm_va_space_mm_retain(), but falls back to returning current->mm
// when there is no mm registered with the VA space (that is,
// uvm_va_space_mm_enabled() would return false). This is both a convenience and
// an optimization of the common case in which current->mm == va_space_mm.
// uvm_va_space_mm_or_current_release() must be called to release the mm, and it
// must be called from the same thread which called
// Similar to uvm_va_space_mm_retain(), but falls back to returning
// current->mm when there is no mm registered with the VA space (that
// is, uvm_va_space_mm_enabled() would return false).
// uvm_va_space_mm_or_current_release() must be called to release the
// mm, and it must be called from the same thread which called
// uvm_va_space_mm_or_current_retain().
//
// If a non-NULL mm is returned, the guarantees described by
// uvm_va_space_mm_retain() apply. Unlike uvm_va_space_mm_retain() however,
// mm_users is guaranteed to be greater than 0 until
// uvm_va_space_mm_or_current_release().
// If uvm_va_space_mm_enabled() is false, the caller is responsible for
// validating that the returned mm matches the desired mm before performing an
// operation such as vm_insert_page(). See uvm_va_range_vma_check().
// uvm_va_space_mm_retain() apply. If uvm_va_space_mm_enabled() is
// false, the caller is responsible for validating that the returned
// mm matches the desired mm before performing an operation such as
// vm_insert_page(). See uvm_va_range_vma_check().
//
// This should not be called from a kernel thread.
struct mm_struct *uvm_va_space_mm_or_current_retain(uvm_va_space_t *va_space);
@@ -185,8 +202,50 @@ static void uvm_va_space_mm_or_current_release_unlock(uvm_va_space_t *va_space,
}
}
#if !defined(NV_MMGET_NOT_ZERO_PRESENT)
// Compatibility fallback for kernels that lack mmget_not_zero(): take an
// mm_users reference only if the count has not already dropped to zero.
static bool mmget_not_zero(struct mm_struct *mm)
{
    return atomic_inc_not_zero(&mm->mm_users) != 0;
}
#endif
#if UVM_CAN_USE_MMU_NOTIFIERS()

// Thin wrappers over the kernel mm reference helpers, available when MMU
// notifier support is compiled in.

static void uvm_mmgrab(struct mm_struct *mm)
{
    // Some kernels do not provide mmgrab(); fall back to bumping mm_count
    // directly, which is what mmgrab() does.
#if defined(NV_MMGRAB_PRESENT)
    mmgrab(mm);
#else
    atomic_inc(&mm->mm_count);
#endif
}

static void uvm_mmdrop(struct mm_struct *mm)
{
    mmdrop(mm);
}

static void uvm_mmput(struct mm_struct *mm)
{
    mmput(mm);
}

#else

// Without MMU notifier support no mm is ever associated with a VA space,
// so none of these wrappers should be reachable.

static void uvm_mmgrab(struct mm_struct *mm)
{
    UVM_ASSERT(0);
}

static void uvm_mmdrop(struct mm_struct *mm)
{
    UVM_ASSERT(0);
}

static void uvm_mmput(struct mm_struct *mm)
{
    UVM_ASSERT(0);
}

#endif
NV_STATUS uvm_test_va_space_mm_retain(UVM_TEST_VA_SPACE_MM_RETAIN_PARAMS *params, struct file *filp);
NV_STATUS uvm_test_va_space_mm_delay_shutdown(UVM_TEST_VA_SPACE_MM_DELAY_SHUTDOWN_PARAMS *params, struct file *filp);
NV_STATUS uvm_test_va_space_mm_or_current_retain(UVM_TEST_VA_SPACE_MM_OR_CURRENT_RETAIN_PARAMS *params,
struct file *filp);