565.57.01

Bernhard Stoeckner
2024-10-22 17:38:58 +02:00
parent ed4be64962
commit d5a0858f90
1049 changed files with 209491 additions and 167508 deletions

@@ -60,11 +60,11 @@
// location preferences) must take the lock in write mode.
//
// VA ranges with type == UVM_VA_RANGE_TYPE_MANAGED:
// Each va_range is contained completely within a parent vma. There can be
// multiple va_ranges under the same vma, but not vice versa. All VAs within
// the va_range share the same policy state.
// Each managed range is contained completely within a parent vma. There can
// be multiple managed ranges under the same vma, but not vice versa. All
// VAs within the managed range share the same policy state.
//
// Each va_range is a collection of VA blocks. The VA blocks each have
// Each managed range is a collection of VA blocks. The VA blocks each have
// individual locks, and they hold the current mapping and location state
// for their block across all processors (CPU and all GPUs).
//
@@ -110,6 +110,7 @@ typedef enum
UVM_VA_RANGE_TYPE_CHANNEL,
UVM_VA_RANGE_TYPE_SKED_REFLECTED,
UVM_VA_RANGE_TYPE_SEMAPHORE_POOL,
UVM_VA_RANGE_TYPE_DEVICE_P2P,
UVM_VA_RANGE_TYPE_MAX
} uvm_va_range_type_t;
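// Illustrative sketch (not part of this change): branching on the type tag
// before any downcast. The helper name uvm_va_range_type_name() is
// hypothetical and exists only to show the dispatch pattern.
static const char *uvm_va_range_type_name(uvm_va_range_type_t type)
{
    switch (type) {
        case UVM_VA_RANGE_TYPE_MANAGED:        return "managed";
        case UVM_VA_RANGE_TYPE_EXTERNAL:       return "external";
        case UVM_VA_RANGE_TYPE_CHANNEL:        return "channel";
        case UVM_VA_RANGE_TYPE_SKED_REFLECTED: return "sked_reflected";
        case UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: return "semaphore_pool";
        case UVM_VA_RANGE_TYPE_DEVICE_P2P:     return "device_p2p";
        default:                               return "unknown";
    }
}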
@@ -123,30 +124,10 @@ typedef struct
uvm_rw_semaphore_t lock;
} uvm_vma_wrapper_t;
// TODO: Bug 1733295. VA range types should really be inverted. Instead of
// maintaining common node state with a union of structs, we should have
// separate C types for each VA range type. Each type would embed a common
// VA range node.
// TODO: Bug 1733295.
//
// There's a lot of state in the top-level uvm_va_range_t struct below
// which really belongs in the per-type structs (for example, blocks).
// We're deferring that cleanup to the full refactor.
// va_range state when va_range.type == UVM_VA_RANGE_TYPE_MANAGED
typedef struct
{
// This is null in the case of a zombie allocation. Zombie allocations are
// created from unfreed allocations at termination of a process which used
// UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE, when at least one other
// process is sharing the UVM file descriptor.
uvm_vma_wrapper_t *vma_wrapper;
// Managed allocations only use this policy and never use the policy
// stored in the va_block for HMM allocations.
uvm_va_policy_t policy;
uvm_perf_module_data_desc_t perf_modules_data[UVM_PERF_MODULE_TYPE_COUNT];
} uvm_va_range_managed_t;
typedef struct
{
@@ -233,103 +214,6 @@ typedef struct
uvm_range_tree_t tree;
} uvm_ext_gpu_range_tree_t;
typedef struct
{
// Mask of GPUs which have mappings to this VA range. If a bit in this mask
// is set, the corresponding pointer in gpu_ranges is valid.
// The bitmap can be safely accessed by following the locking rules:
// * If the VA space lock is held for write, the mask can be read or written
// normally.
// * If the VA space lock is held for read, and one of the range tree locks is
// held, only the bit corresponding to that GPU range tree can be accessed.
// Writes must use uvm_processor_mask_set_atomic and
// uvm_processor_mask_clear_atomic to avoid clobbering other bits in the
// mask. If no range tree lock is held, the mask cannot be accessed.
// * If the VA space lock is not held, the mask cannot be accessed.
uvm_processor_mask_t mapped_gpus;
// Per-GPU tree of mapped external allocations. This has to be per-GPU in the VA
// range because each GPU is able to map a completely different set of
// allocations to the same VA range.
uvm_ext_gpu_range_tree_t gpu_ranges[UVM_ID_MAX_GPUS];
// Dynamically allocated processor mask, allocated in
// uvm_va_range_create_external() and used and freed in uvm_free().
uvm_processor_mask_t *retained_mask;
} uvm_va_range_external_t;
// va_range state when va_range.type == UVM_VA_RANGE_TYPE_CHANNEL. This
// represents a channel buffer resource and mapping.
typedef struct
{
// Only a single GPU can map a channel resource, so we only need one GPU
// VA space parent.
uvm_gpu_va_space_t *gpu_va_space;
// Page tables mapped by this range
uvm_page_table_range_vec_t pt_range_vec;
// Physical location of this channel resource. All pages have the same
// aperture.
uvm_aperture_t aperture;
// Note that this is not a normal RM object handle. It is a non-zero opaque
// identifier underneath the GPU VA space which represents this channel
// resource. Each channel using this VA range has retained this descriptor
// and is responsible for releasing it. That's safe because channels outlive
// their VA ranges.
NvP64 rm_descriptor;
// This is an ID assigned by RM to each resource descriptor.
NvU32 rm_id;
// The TSG which owns this mapping. Sharing of VA ranges is only allowed
// within the same TSG. If valid == false, no sharing is allowed because the
// channel is not in a TSG.
struct
{
bool valid;
NvU32 id;
} tsg;
NvU64 ref_count;
// Storage in the corresponding uvm_gpu_va_space's channel_va_ranges list
struct list_head list_node;
} uvm_va_range_channel_t;
// va_range state when va_range.type == UVM_VA_RANGE_TYPE_SKED_REFLECTED. This
// represents a SKED reflected mapping.
typedef struct
{
// Each SKED reflected range is unique to a single GPU so only a single GPU
// VA space needs to be tracked.
uvm_gpu_va_space_t *gpu_va_space;
// Page tables mapped by this range
uvm_page_table_range_vec_t pt_range_vec;
} uvm_va_range_sked_reflected_t;
typedef struct
{
uvm_mem_t *mem;
// The optional owner is a GPU (at most one) that has the allocation cached;
// in that case, all writes must be done from this GPU. Protected by the
// va_space lock.
uvm_gpu_t *owner;
// Per-GPU attributes
uvm_mem_gpu_mapping_attrs_t gpu_attrs[UVM_ID_MAX_GPUS];
// Default attributes to assign when a new GPU is registered
uvm_mem_gpu_mapping_attrs_t default_gpu_attrs;
// Tracks all outstanding GPU work using this allocation.
uvm_tracker_t tracker;
uvm_mutex_t tracker_lock;
} uvm_va_range_semaphore_pool_t;
struct uvm_va_range_struct
{
// Parent uvm_va_space.
@@ -380,20 +264,250 @@ struct uvm_va_range_struct
atomic_long_t *blocks;
uvm_va_range_type_t type;
union
{
uvm_va_range_managed_t managed;
uvm_va_range_external_t external;
uvm_va_range_channel_t channel;
uvm_va_range_sked_reflected_t sked_reflected;
uvm_va_range_semaphore_pool_t semaphore_pool;
};
};
// Subclass of va_range state for va_range.type == UVM_VA_RANGE_TYPE_MANAGED
struct uvm_va_range_managed_struct
{
// Base class
uvm_va_range_t va_range;
// This is null in the case of a zombie allocation. Zombie allocations are
// created from unfreed allocations at termination of a process which used
// UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE, when at least one other
// process is sharing the UVM file descriptor.
uvm_vma_wrapper_t *vma_wrapper;
// Managed allocations only use this policy and never use the policy
// stored in the va_block for HMM allocations.
uvm_va_policy_t policy;
uvm_perf_module_data_desc_t perf_modules_data[UVM_PERF_MODULE_TYPE_COUNT];
};
// Subclass of va_range state for va_range.type == UVM_VA_RANGE_TYPE_EXTERNAL
struct uvm_va_range_external_struct
{
// Base class
uvm_va_range_t va_range;
// Mask of GPUs which have mappings to this VA range. If a bit in this mask
// is set, the corresponding pointer in gpu_ranges is valid.
// The bitmap can be safely accessed by following the locking rules:
// * If the VA space lock is held for write, the mask can be read or written
// normally.
// * If the VA space lock is held for read, and one of the range tree locks is
// held, only the bit corresponding to that GPU range tree can be accessed.
// Writes must use uvm_processor_mask_set_atomic and
// uvm_processor_mask_clear_atomic to avoid clobbering other bits in the
// mask. If no range tree lock is held, the mask cannot be accessed.
// * If the VA space lock is not held, the mask cannot be accessed.
uvm_processor_mask_t mapped_gpus;
// Per-GPU tree of mapped external allocations. This has to be per-GPU in the VA
// range because each GPU is able to map a completely different set of
// allocations to the same VA range.
uvm_ext_gpu_range_tree_t gpu_ranges[UVM_ID_MAX_GPUS];
// Dynamically allocated processor mask, allocated in
// uvm_va_range_create_external() and used and freed in uvm_free().
uvm_processor_mask_t *retained_mask;
};
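// Illustrative sketch (not part of this change) of the read-lock rule
// documented above: with the VA space lock held for read and one GPU's range
// tree lock held, only that GPU's bit may be touched, and only atomically.
// The per-tree lock field and uvm_id_gpu_index() are assumptions here, not
// taken from this header.
static void example_mark_gpu_mapped(uvm_va_range_external_t *external_range, uvm_gpu_t *gpu)
{
    // Caller holds external_range->va_range.va_space->lock for read.
    uvm_ext_gpu_range_tree_t *range_tree = &external_range->gpu_ranges[uvm_id_gpu_index(gpu->id)];

    uvm_mutex_lock(&range_tree->lock);

    // Atomic set so concurrent updates to other GPUs' bits aren't clobbered.
    uvm_processor_mask_set_atomic(&external_range->mapped_gpus, gpu->id);

    uvm_mutex_unlock(&range_tree->lock);
}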
// Subclass of va_range state for va_range.type == UVM_VA_RANGE_TYPE_CHANNEL.
// This represents a channel buffer resource and mapping.
struct uvm_va_range_channel_struct
{
// Base class
uvm_va_range_t va_range;
// Only a single GPU can map a channel resource, so we only need one GPU
// VA space parent.
uvm_gpu_va_space_t *gpu_va_space;
// Page tables mapped by this range
uvm_page_table_range_vec_t pt_range_vec;
// Physical location of this channel resource. All pages have the same
// aperture.
uvm_aperture_t aperture;
// Note that this is not a normal RM object handle. It is a non-zero opaque
// identifier underneath the GPU VA space which represents this channel
// resource. Each channel using this VA range has retained this descriptor
// and is responsible for releasing it. That's safe because channels outlive
// their VA ranges.
NvP64 rm_descriptor;
// This is an ID assigned by RM to each resource descriptor.
NvU32 rm_id;
// The TSG which owns this mapping. Sharing of VA ranges is only allowed
// within the same TSG. If valid == false, no sharing is allowed because the
// channel is not in a TSG.
struct
{
bool valid;
NvU32 id;
} tsg;
NvU64 ref_count;
// Storage in the corresponding uvm_gpu_va_space's channel_va_ranges list
struct list_head list_node;
};
// Subclass of va_range for va_range.type == UVM_VA_RANGE_TYPE_SKED_REFLECTED.
// This represents a SKED reflected mapping.
struct uvm_va_range_sked_reflected_struct
{
// Base class
uvm_va_range_t va_range;
// Each SKED reflected range is unique to a single GPU so only a single GPU
// VA space needs to be tracked.
uvm_gpu_va_space_t *gpu_va_space;
// Page tables mapped by this range
uvm_page_table_range_vec_t pt_range_vec;
};
// Subclass of va_range for va_range.type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL.
struct uvm_va_range_semaphore_pool_struct
{
// Base class
uvm_va_range_t va_range;
uvm_mem_t *mem;
// The optional owner is a GPU (at most one) that has the allocation cached;
// in that case, all writes must be done from this GPU. Protected by the
// va_space lock.
uvm_gpu_t *owner;
// Per-GPU attributes
uvm_mem_gpu_mapping_attrs_t gpu_attrs[UVM_ID_MAX_GPUS];
// Default attributes to assign when a new GPU is registered
uvm_mem_gpu_mapping_attrs_t default_gpu_attrs;
// Tracks all outstanding GPU work using this allocation.
uvm_tracker_t tracker;
uvm_mutex_t tracker_lock;
};
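// Illustrative sketch (not part of this change) of the owner rule above:
// when a caching owner is set, writes to the semaphore pool are only legal
// from that GPU. Hypothetical helper; the caller is assumed to hold the
// va_space lock.
static bool example_can_write_semaphore_pool(uvm_va_range_semaphore_pool_t *semaphore_pool, uvm_gpu_t *gpu)
{
    return semaphore_pool->owner == NULL || semaphore_pool->owner == gpu;
}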
// This represents a physical RM allocation used for device peer-to-peer access.
// This is distinct from the va_range type because the physical allocations can be
// shared by multiple va_ranges.
typedef struct
{
// The physical GPU memory backing device P2P ranges can be referenced by
// two entities - kernel device drivers and the va_range(s) that UVM uses
// to create CPU mappings. The physical memory cannot be freed until after
// both entities have finished accessing it. This refcount tracks total
// kernel users of the memory.
nv_kref_t refcount;
// The number of va ranges referencing this physical range.
nv_kref_t va_range_count;
uvm_gpu_t *gpu;
NvHandle rm_memory_handle;
NvU64 *pfns;
NvU64 pfn_count;
NvLength page_size;
uvm_deferred_free_object_t deferred_free;
struct list_head *deferred_free_list;
wait_queue_head_t waitq;
} uvm_device_p2p_mem_t;
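// Illustrative sketch (not part of this change) of the refcount lifetime
// described above: refcount tracks all kernel users of the physical memory.
// nv_kref_get()/nv_kref_put() are assumed to follow the usual kref pattern,
// and the release flow is simplified; the real driver also coordinates with
// the deferred-free list and waitq.
static void example_p2p_mem_release(nv_kref_t *kref)
{
    uvm_device_p2p_mem_t *p2p_mem = container_of(kref, uvm_device_p2p_mem_t, refcount);

    // Last kernel user dropped its reference; the memory can now be freed.
    uvm_va_range_free_device_p2p_mem(p2p_mem);
}

static void example_p2p_mem_unref(uvm_device_p2p_mem_t *p2p_mem)
{
    nv_kref_put(&p2p_mem->refcount, example_p2p_mem_release);
}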
struct uvm_va_range_device_p2p_struct
{
uvm_va_range_t va_range;
uvm_gpu_t *gpu;
NvU64 offset;
uvm_device_p2p_mem_t *p2p_mem;
};
// Managed Range dynamic cast
static inline uvm_va_range_managed_t *uvm_va_range_to_managed(uvm_va_range_t *va_range)
{
UVM_ASSERT(va_range);
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED);
return container_of(va_range, uvm_va_range_managed_t, va_range);
}
static inline uvm_va_range_managed_t *uvm_va_range_to_managed_or_null(uvm_va_range_t *va_range)
{
if (!va_range)
return NULL;
if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED)
return NULL;
return container_of(va_range, uvm_va_range_managed_t, va_range);
}
// External Range dynamic cast
static inline uvm_va_range_external_t *uvm_va_range_to_external(uvm_va_range_t *va_range)
{
UVM_ASSERT(va_range);
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL);
return container_of(va_range, uvm_va_range_external_t, va_range);
}
// Channel Range dynamic cast
static inline uvm_va_range_channel_t *uvm_va_range_to_channel(uvm_va_range_t *va_range)
{
UVM_ASSERT(va_range);
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_CHANNEL);
return container_of(va_range, uvm_va_range_channel_t, va_range);
}
// SKED Reflected Range dynamic cast
static inline uvm_va_range_sked_reflected_t *uvm_va_range_to_sked_reflected(uvm_va_range_t *va_range)
{
UVM_ASSERT(va_range);
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_SKED_REFLECTED);
return container_of(va_range, uvm_va_range_sked_reflected_t, va_range);
}
// Semaphore Pool Range dynamic cast
static inline uvm_va_range_semaphore_pool_t *uvm_va_range_to_semaphore_pool(uvm_va_range_t *va_range)
{
UVM_ASSERT(va_range);
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL);
return container_of(va_range, uvm_va_range_semaphore_pool_t, va_range);
}
// Device P2P Range dynamic cast
static inline uvm_va_range_device_p2p_t *uvm_va_range_to_device_p2p(uvm_va_range_t *va_range)
{
UVM_ASSERT(va_range);
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_DEVICE_P2P);
return container_of(va_range, uvm_va_range_device_p2p_t, va_range);
}
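// Illustrative sketch (not part of this change): the casts above are
// container_of-based downcasts from the embedded uvm_va_range_t base to its
// enclosing subclass. A typical (hypothetical) caller checks the type tag
// first, or uses an _or_null variant when the type is unknown:
static uvm_gpu_va_space_t *example_channel_gpu_va_space(uvm_va_range_t *va_range)
{
    if (va_range->type != UVM_VA_RANGE_TYPE_CHANNEL)
        return NULL;

    return uvm_va_range_to_channel(va_range)->gpu_va_space;
}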
// Module load/exit
NV_STATUS uvm_va_range_init(void);
void uvm_va_range_exit(void);
NV_STATUS uvm_va_range_initialize_reclaim(uvm_va_range_t *va_range,
struct mm_struct *mm,
uvm_va_range_type_t type,
uvm_va_space_t *va_space,
NvU64 start,
NvU64 end);
NV_STATUS uvm_va_range_device_p2p_init(void);
void uvm_va_range_device_p2p_exit(void);
NV_STATUS uvm_va_range_alloc_reclaim(uvm_va_space_t *va_space,
struct mm_struct *mm,
uvm_va_range_type_t type,
NvU64 start,
NvU64 end,
uvm_va_range_t **out_va_range);
static NvU64 uvm_va_range_size(uvm_va_range_t *va_range)
{
return uvm_range_tree_node_size(&va_range->node);
@@ -406,21 +520,20 @@ static bool uvm_va_range_is_aligned(uvm_va_range_t *va_range, NvU64 alignment)
static bool uvm_va_range_is_managed_zombie(uvm_va_range_t *va_range)
{
return va_range->type == UVM_VA_RANGE_TYPE_MANAGED && va_range->managed.vma_wrapper == NULL;
return va_range->type == UVM_VA_RANGE_TYPE_MANAGED &&
uvm_va_range_to_managed(va_range)->vma_wrapper == NULL;
}
// Create a va_range with type UVM_VA_RANGE_TYPE_MANAGED. The out va_range pointer
// is optional.
// Create a managed range. out_managed_range is optional.
//
// Returns NV_ERR_UVM_ADDRESS_IN_USE if the vma overlaps with an existing range
// in the va_space tree.
NV_STATUS uvm_va_range_create_mmap(uvm_va_space_t *va_space,
struct mm_struct *mm,
uvm_vma_wrapper_t *vma_wrapper,
uvm_va_range_t **out_va_range);
uvm_va_range_managed_t **out_managed_range);
// Create a va_range with type UVM_VA_RANGE_TYPE_EXTERNAL. The out va_range
// pointer is optional.
// Create an external range. out_external_range is optional.
//
// Returns NV_ERR_UVM_ADDRESS_IN_USE if the range overlaps with an existing
// range in the va_space tree.
@@ -428,10 +541,9 @@ NV_STATUS uvm_va_range_create_external(uvm_va_space_t *va_space,
struct mm_struct *mm,
NvU64 start,
NvU64 length,
uvm_va_range_t **out_va_range);
uvm_va_range_external_t **out_external_range);
// Create a va_range with type UVM_VA_RANGE_TYPE_CHANNEL. The out va_range
// pointer is optional.
// Create a channel range. out_channel_range is optional.
//
// Returns NV_ERR_UVM_ADDRESS_IN_USE if the range overlaps with an existing
// range in the va_space tree.
@@ -439,13 +551,13 @@ NV_STATUS uvm_va_range_create_channel(uvm_va_space_t *va_space,
struct mm_struct *mm,
NvU64 start,
NvU64 end,
uvm_va_range_t **out_va_range);
uvm_va_range_channel_t **out_channel_range);
NV_STATUS uvm_va_range_create_sked_reflected(uvm_va_space_t *va_space,
struct mm_struct *mm,
NvU64 start,
NvU64 length,
uvm_va_range_t **out_va_range);
uvm_va_range_sked_reflected_t **out_sked_reflected_range);
NV_STATUS uvm_va_range_create_semaphore_pool(uvm_va_space_t *va_space,
struct mm_struct *mm,
@@ -453,7 +565,7 @@ NV_STATUS uvm_va_range_create_semaphore_pool(uvm_va_space_t *va_space,
NvU64 length,
const UvmGpuMappingAttributes *per_gpu_attrs,
NvU32 per_gpu_attrs_count,
uvm_va_range_t **out_va_range);
uvm_va_range_semaphore_pool_t **out_semaphore_pool_range);
// Destroys any state associated with this VA range, removes the VA range from
// the VA space, and frees the VA range.
@@ -463,7 +575,7 @@ NV_STATUS uvm_va_range_create_semaphore_pool(uvm_va_space_t *va_space,
// processing by uvm_deferred_free_object_list.
void uvm_va_range_destroy(uvm_va_range_t *va_range, struct list_head *deferred_free_list);
void uvm_va_range_zombify(uvm_va_range_t *va_range);
void uvm_va_range_zombify(uvm_va_range_managed_t *managed_range);
NV_STATUS uvm_api_clean_up_zombie_resources(UVM_CLEAN_UP_ZOMBIE_RESOURCES_PARAMS *params, struct file *filp);
NV_STATUS uvm_api_validate_va_range(UVM_VALIDATE_VA_RANGE_PARAMS *params, struct file *filp);
@@ -527,21 +639,19 @@ void uvm_va_range_unregister_gpu(uvm_va_range_t *va_range,
struct mm_struct *mm,
struct list_head *deferred_free_list);
// Splits existing_va_range into two pieces, with new_va_range always after
// existing. existing is updated to have new_end. new_end+1 must be page-
// Splits existing_managed_range into two pieces, with new_managed_range always
// after existing. existing is updated to have new_end. new_end+1 must be page-
// aligned.
//
// Before: [----------- existing ------------]
// After: [---- existing ----][---- new ----]
// ^new_end
//
// On error, existing_va_range is still accessible and is left in its original
// functional state.
//
// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED.
NV_STATUS uvm_va_range_split(uvm_va_range_t *existing_va_range,
// On error, existing_managed_range is still accessible and is left in its
// original functional state.
NV_STATUS uvm_va_range_split(uvm_va_range_managed_t *existing_managed_range,
NvU64 new_end,
uvm_va_range_t **new_va_range);
uvm_va_range_managed_t **new_managed_range);
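// Illustrative sketch (not part of this change) of a split, following the
// diagram above. example_split_at() is hypothetical; new_end + 1 must be
// page-aligned and fall inside the existing range.
static NV_STATUS example_split_at(uvm_va_range_managed_t *existing, NvU64 new_end)
{
    uvm_va_range_managed_t *new_managed_range;
    NV_STATUS status;

    status = uvm_va_range_split(existing, new_end, &new_managed_range);
    if (status != NV_OK)
        return status; // existing is left in its original functional state

    // On success, existing ends at new_end and new_managed_range starts at
    // new_end + 1.
    return NV_OK;
}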
// TODO: Bug 1707562: Merge va ranges
@@ -555,6 +665,49 @@ static uvm_va_range_t *uvm_va_range_container(uvm_range_tree_node_t *node)
// Returns the va_range containing addr, if any
uvm_va_range_t *uvm_va_range_find(uvm_va_space_t *va_space, NvU64 addr);
static uvm_va_range_managed_t *uvm_va_range_managed_find(uvm_va_space_t *va_space, NvU64 addr)
{
return uvm_va_range_to_managed_or_null(uvm_va_range_find(va_space, addr));
}
static uvm_va_range_external_t *uvm_va_range_external_find(uvm_va_space_t *va_space, NvU64 addr)
{
uvm_va_range_t *va_range;
va_range = uvm_va_range_find(va_space, addr);
if (!va_range)
return NULL;
if (va_range->type != UVM_VA_RANGE_TYPE_EXTERNAL)
return NULL;
return uvm_va_range_to_external(va_range);
}
static uvm_va_range_semaphore_pool_t *uvm_va_range_semaphore_pool_find(uvm_va_space_t *va_space,
NvU64 addr)
{
uvm_va_range_t *va_range;
va_range = uvm_va_range_find(va_space, addr);
if (!va_range)
return NULL;
if (va_range->type != UVM_VA_RANGE_TYPE_SEMAPHORE_POOL)
return NULL;
return uvm_va_range_to_semaphore_pool(va_range);
}
static uvm_va_range_device_p2p_t *uvm_va_range_device_p2p_find(uvm_va_space_t *va_space,
NvU64 addr)
{
uvm_va_range_t *va_range;
va_range = uvm_va_range_find(va_space, addr);
if (!va_range)
return NULL;
if (va_range->type != UVM_VA_RANGE_TYPE_DEVICE_P2P)
return NULL;
return uvm_va_range_to_device_p2p(va_range);
}
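// Illustrative sketch (not part of this change): the typed find helpers
// above pair uvm_va_range_find() with a type check, so a (hypothetical)
// caller gets either a correctly-typed pointer or NULL. The VA space lock is
// assumed held.
static bool example_addr_is_semaphore_pool(uvm_va_space_t *va_space, NvU64 addr)
{
    return uvm_va_range_semaphore_pool_find(va_space, addr) != NULL;
}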
static uvm_ext_gpu_map_t *uvm_ext_gpu_map_container(uvm_range_tree_node_t *node)
{
if (!node)
@@ -596,48 +749,86 @@ static bool uvm_va_space_range_empty(uvm_va_space_t *va_space, NvU64 start, NvU6
return uvm_va_space_iter_first(va_space, start, end) == NULL;
}
#define uvm_for_each_va_range_in(va_range, va_space, start, end) \
for ((va_range) = uvm_va_space_iter_first((va_space), (start), (end)); \
(va_range); \
(va_range) = uvm_va_space_iter_next((va_range), (end)))
// Iterators for managed ranges
#define uvm_for_each_va_range_in_safe(va_range, va_range_next, va_space, start, end) \
for ((va_range) = uvm_va_space_iter_first((va_space), (start), (end)), \
(va_range_next) = uvm_va_space_iter_next((va_range), (end)); \
(va_range); \
(va_range) = (va_range_next), (va_range_next) = uvm_va_space_iter_next((va_range), (end)))
static uvm_va_range_managed_t *uvm_va_space_iter_managed_first(uvm_va_space_t *va_space,
NvU64 start,
NvU64 end)
{
return uvm_va_range_to_managed_or_null(uvm_va_space_iter_first(va_space, start, end));
}
// Iterator for all contiguous VA ranges between [start, end]. If any part of
// [start, end] is not covered by a VA range, iteration stops.
#define uvm_for_each_va_range_in_contig(va_range, va_space, start, end) \
for ((va_range) = uvm_va_space_iter_first((va_space), (start), (start)); \
(va_range); \
(va_range) = uvm_va_space_iter_next_contig((va_range), (end)))
static uvm_va_range_managed_t *uvm_va_space_iter_managed_next(uvm_va_range_managed_t *managed,
NvU64 end)
{
uvm_va_range_managed_t *next_managed_range;
uvm_va_range_t *va_range;
#define uvm_for_each_va_range_in_contig_from(va_range, va_space, first_va_range, end) \
for ((va_range) = (first_va_range); \
(va_range); \
(va_range) = uvm_va_space_iter_next_contig((va_range), (end)))
if (!managed)
return NULL;
// Like uvm_for_each_va_range_in_contig but also stops iteration if any VA range
// has a type other than UVM_VA_RANGE_TYPE_MANAGED.
#define uvm_for_each_managed_va_range_in_contig(va_range, va_space, start, end) \
for ((va_range) = uvm_va_space_iter_first((va_space), (start), (start)); \
(va_range) && (va_range)->type == UVM_VA_RANGE_TYPE_MANAGED; \
(va_range) = uvm_va_space_iter_next_contig((va_range), (end)))
next_managed_range = NULL;
for (va_range = uvm_va_space_iter_next(&managed->va_range, end);
va_range;
va_range = uvm_va_space_iter_next(va_range, end)) {
next_managed_range = uvm_va_range_to_managed_or_null(va_range);
if (next_managed_range)
break;
}
#define uvm_for_each_va_range_in_vma(va_range, vma) \
uvm_for_each_va_range_in(va_range, \
uvm_va_space_get(vma->vm_file), \
vma->vm_start, \
vma->vm_end - 1)
return next_managed_range;
}
#define uvm_for_each_va_range_in_vma_safe(va_range, va_range_next, vma) \
uvm_for_each_va_range_in_safe(va_range, \
va_range_next, \
uvm_va_space_get(vma->vm_file), \
vma->vm_start, \
vma->vm_end - 1)
static uvm_va_range_managed_t *uvm_va_space_iter_managed_next_contig(uvm_va_range_managed_t *managed_range,
NvU64 end)
{
uvm_va_range_managed_t *next = uvm_va_space_iter_managed_next(managed_range, end);
if (next && next->va_range.node.start != managed_range->va_range.node.end + 1)
return NULL;
return next;
}
#define uvm_for_each_va_range_managed(managed_range, va_space) \
for ((managed_range) = uvm_va_space_iter_managed_first((va_space), 0, ~0ULL); \
(managed_range); \
(managed_range) = uvm_va_space_iter_managed_next((managed_range), ~0ULL))
#define uvm_for_each_va_range_managed_in(managed_range, va_space, start, end) \
for ((managed_range) = uvm_va_space_iter_managed_first((va_space), (start), (end)); \
(managed_range); \
(managed_range) = uvm_va_space_iter_managed_next((managed_range), (end)))
// Iterator for all contiguous managed ranges between [start, end]. If any part
// of [start, end] is not covered by a managed range, iteration stops.
#define uvm_for_each_va_range_managed_in_contig(managed_range, va_space, start, end) \
for ((managed_range) = uvm_va_space_iter_managed_first((va_space), (start), (start)); \
(managed_range); \
(managed_range) = uvm_va_space_iter_managed_next_contig((managed_range), (end)))
#define uvm_for_each_va_range_managed_in_contig_from(managed_range, va_space, first_managed_range, end) \
for ((managed_range) = (first_managed_range); \
(managed_range); \
(managed_range) = uvm_va_space_iter_managed_next_contig((managed_range), (end)))
#define uvm_for_each_va_range_managed_in_vma(managed_range, vma) \
uvm_for_each_va_range_managed_in(managed_range, \
uvm_va_space_get(vma->vm_file), \
vma->vm_start, \
vma->vm_end - 1)
#define uvm_for_each_va_range_managed_in_safe(managed_range, managed_range_next, va_space, start, end) \
for ((managed_range) = uvm_va_space_iter_managed_first((va_space), (start), (end)), \
(managed_range_next) = uvm_va_space_iter_managed_next((managed_range), (end)); \
(managed_range); \
(managed_range) = (managed_range_next), \
(managed_range_next) = uvm_va_space_iter_managed_next((managed_range), (end)))
#define uvm_for_each_va_range_managed_in_vma_safe(managed_range, managed_range_next, vma) \
uvm_for_each_va_range_managed_in_safe(managed_range, \
managed_range_next, \
uvm_va_space_get(vma->vm_file), \
vma->vm_start, \
vma->vm_end - 1)
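// Illustrative sketch (not part of this change): counting managed ranges in
// a region with the iterators above. Note that iteration only starts if the
// first range in [start, end] is managed; after that,
// uvm_va_space_iter_managed_next() skips non-managed ranges. Hypothetical
// helper; VA space lock assumed held.
static size_t example_count_managed_ranges(uvm_va_space_t *va_space, NvU64 start, NvU64 end)
{
    uvm_va_range_managed_t *managed_range;
    size_t count = 0;

    uvm_for_each_va_range_managed_in(managed_range, va_space, start, end)
        count++;

    return count;
}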
// Only call this if you're sure that either:
// 1) You have a reference on the vma's vm_mm and that vma->vm_mm's mmap_lock is
@@ -648,60 +839,57 @@ static bool uvm_va_space_range_empty(uvm_va_space_t *va_space, NvU64 start, NvU6
//
// Otherwise, use uvm_va_range_vma_current or uvm_va_range_vma_check and be
// prepared to handle a NULL return value.
static struct vm_area_struct *uvm_va_range_vma(uvm_va_range_t *va_range)
static struct vm_area_struct *uvm_va_range_vma(uvm_va_range_managed_t *managed_range)
{
struct vm_area_struct *vma;
UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_MANAGED, "type: %d", va_range->type);
UVM_ASSERT(va_range->managed.vma_wrapper);
UVM_ASSERT(managed_range->vma_wrapper);
uvm_assert_rwsem_locked(&va_range->va_space->lock);
uvm_assert_rwsem_locked(&managed_range->va_range.va_space->lock);
// vm_file, vm_private_data, vm_start, and vm_end are all safe to access
// here because they can't change without the kernel calling vm_ops->open
// or vm_ops->close, which both take va_space->lock.
vma = va_range->managed.vma_wrapper->vma;
vma = managed_range->vma_wrapper->vma;
UVM_ASSERT(vma);
UVM_ASSERT_MSG(vma->vm_private_data == va_range->managed.vma_wrapper,
UVM_ASSERT_MSG(vma->vm_private_data == managed_range->vma_wrapper,
"vma: 0x%llx [0x%lx, 0x%lx] has vm_private_data 0x%llx\n",
(NvU64)vma,
vma->vm_start,
vma->vm_end - 1,
(NvU64)vma->vm_private_data);
UVM_ASSERT_MSG(va_range->va_space == uvm_va_space_get(vma->vm_file),
"va_range va_space: 0x%llx vm_file: 0x%llx vm_file va_space: 0x%llx",
(NvU64)va_range->va_space,
UVM_ASSERT_MSG(managed_range->va_range.va_space == uvm_va_space_get(vma->vm_file),
"managed_range va_space: 0x%llx vm_file: 0x%llx vm_file va_space: 0x%llx",
(NvU64)managed_range->va_range.va_space,
(NvU64)vma->vm_file,
(NvU64)uvm_va_space_get(vma->vm_file));
UVM_ASSERT_MSG(va_range->node.start >= vma->vm_start,
"Range mismatch: va_range: [0x%llx, 0x%llx] vma: [0x%lx, 0x%lx]\n",
va_range->node.start,
va_range->node.end,
UVM_ASSERT_MSG(managed_range->va_range.node.start >= vma->vm_start,
"Range mismatch: managed_range: [0x%llx, 0x%llx] vma: [0x%lx, 0x%lx]\n",
managed_range->va_range.node.start,
managed_range->va_range.node.end,
vma->vm_start,
vma->vm_end - 1);
UVM_ASSERT_MSG(va_range->node.end <= vma->vm_end - 1,
"Range mismatch: va_range: [0x%llx, 0x%llx] vma: [0x%lx, 0x%lx]\n",
va_range->node.start,
va_range->node.end,
UVM_ASSERT_MSG(managed_range->va_range.node.end <= vma->vm_end - 1,
"Range mismatch: managed_range: [0x%llx, 0x%llx] vma: [0x%lx, 0x%lx]\n",
managed_range->va_range.node.start,
managed_range->va_range.node.end,
vma->vm_start,
vma->vm_end - 1);
return vma;
}
// Check that the VA range's vma is safe to use under mm. If not, NULL is
// Check that the managed range's vma is safe to use under mm. If not, NULL is
// returned. If the vma is returned, there must be a reference on mm and
// mm->mmap_lock must be held.
static struct vm_area_struct *uvm_va_range_vma_check(uvm_va_range_t *va_range, struct mm_struct *mm)
static struct vm_area_struct *uvm_va_range_vma_check(uvm_va_range_managed_t *managed_range, struct mm_struct *mm)
{
struct vm_area_struct *vma;
UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_MANAGED, "type: %d\n", va_range->type);
// Zombies don't have a vma_wrapper.
if (!va_range->managed.vma_wrapper)
if (!managed_range->vma_wrapper)
return NULL;
vma = uvm_va_range_vma(va_range);
vma = uvm_va_range_vma(managed_range);
// Examples of mm on various paths:
// - CPU fault: vma->vm_mm
@@ -713,7 +901,7 @@ static struct vm_area_struct *uvm_va_range_vma_check(uvm_va_range_t *va_range, s
// on the vma's owning mm_struct. We won't know that until we look at the
// vma. By then it's too late to take mmap_lock since mmap_lock is above the
// va_space lock in our lock ordering, and we must be holding the va_space
// lock to query the va_range. Hence the need to detect the cases in which
// lock to query the managed_range. Hence the need to detect the cases in which
// it's safe to operate on the vma.
//
// When we can't detect for certain that mm is safe to use, we shouldn't
@@ -736,49 +924,40 @@ static struct vm_area_struct *uvm_va_range_vma_check(uvm_va_range_t *va_range, s
}
// Helper for use when the only mm which is known is current->mm
static struct vm_area_struct *uvm_va_range_vma_current(uvm_va_range_t *va_range)
static struct vm_area_struct *uvm_va_range_vma_current(uvm_va_range_managed_t *managed_range)
{
return uvm_va_range_vma_check(va_range, current->mm);
return uvm_va_range_vma_check(managed_range, current->mm);
}
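// Illustrative sketch (not part of this change): a (hypothetical) caller on
// an ioctl path where only current->mm is known. A NULL return means the vma
// cannot be used safely under this mm and the caller must cope.
static NvU64 example_vma_start_or_zero(uvm_va_range_managed_t *managed_range)
{
    struct vm_area_struct *vma = uvm_va_range_vma_current(managed_range);

    if (!vma)
        return 0; // zombie range, or mm not safe to use

    return (NvU64)vma->vm_start;
}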
// Returns the maximum number of VA blocks which could be contained within the
// given va_range (number of elements in the va_range->blocks array).
// va_range->node.start and .end must be set.
//
// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED.
size_t uvm_va_range_num_blocks(uvm_va_range_t *va_range);
// given managed_range (number of elements in the managed_range->va_range.blocks
// array). managed_range->va_range.node.start and .end must be set.
size_t uvm_va_range_num_blocks(uvm_va_range_managed_t *managed_range);
// Get the index within the va_range->blocks array of the VA block
// Get the index within the managed_range->va_range.blocks array of the VA block
// corresponding to addr. The block pointer is not guaranteed to be valid. Use
// either uvm_va_range_block or uvm_va_range_block_create to look up the block.
//
// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED.
size_t uvm_va_range_block_index(uvm_va_range_t *va_range, NvU64 addr);
size_t uvm_va_range_block_index(uvm_va_range_managed_t *managed_range, NvU64 addr);
// Looks up the VA block at va_range->blocks[index]. If no block is present at
// that index, NULL is returned.
//
// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED.
static uvm_va_block_t *uvm_va_range_block(uvm_va_range_t *va_range, size_t index)
// Looks up the VA block at managed_range->va_range.blocks[index]. If no block
// is present at that index, NULL is returned.
static uvm_va_block_t *uvm_va_range_block(uvm_va_range_managed_t *managed_range, size_t index)
{
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED);
UVM_ASSERT(index < uvm_va_range_num_blocks(va_range));
uvm_assert_rwsem_locked(&va_range->va_space->lock);
UVM_ASSERT(index < uvm_va_range_num_blocks(managed_range));
uvm_assert_rwsem_locked(&managed_range->va_range.va_space->lock);
return (uvm_va_block_t *)atomic_long_read(&va_range->blocks[index]);
return (uvm_va_block_t *)atomic_long_read(&managed_range->va_range.blocks[index]);
}
// Same as uvm_va_range_block except that the block is created if not already
// present in the array. If NV_OK is returned, the block has been allocated
// successfully.
//
// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED.
NV_STATUS uvm_va_range_block_create(uvm_va_range_t *va_range, size_t index, uvm_va_block_t **out_block);
NV_STATUS uvm_va_range_block_create(uvm_va_range_managed_t *managed_range, size_t index, uvm_va_block_t **out_block);
// Returns the first populated VA block in the VA range after the input
// Returns the first populated VA block in the managed range after the input
// va_block, or NULL if none. If the input va_block is NULL, this returns the
// first VA block in the VA range, if any exists.
uvm_va_block_t *uvm_va_range_block_next(uvm_va_range_t *va_range, uvm_va_block_t *va_block);
// first VA block in the managed range, if any exists.
uvm_va_block_t *uvm_va_range_block_next(uvm_va_range_managed_t *managed_range, uvm_va_block_t *va_block);
// Iterate over populated VA blocks in the range. Does not create new VA blocks.
#define for_each_va_block_in_va_range(__va_range, __va_block) \
@@ -794,18 +973,16 @@ uvm_va_block_t *uvm_va_range_block_next(uvm_va_range_t *va_range, uvm_va_block_t
__va_block = __va_block_next, \
__va_block_next = __va_block? uvm_va_range_block_next(__va_range, __va_block) : NULL)
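// Illustrative sketch (not part of this change): walking the populated
// blocks of a managed range with the iterator above. Only blocks that
// already exist are visited; none are created. Hypothetical helper, assuming
// the iterator takes the managed range after this change; VA space lock
// assumed held.
static size_t example_count_populated_blocks(uvm_va_range_managed_t *managed_range)
{
    uvm_va_block_t *va_block;
    size_t count = 0;

    for_each_va_block_in_va_range(managed_range, va_block)
        count++;

    return count;
}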
// Set the VA range preferred location (or unset it if preferred location is
// UVM_ID_INVALID).
// Set the managed range preferred location (or unset it if preferred location
// is UVM_ID_INVALID).
//
// Unsetting the preferred location potentially changes the range group
// association to UVM_RANGE_GROUP_ID_NONE if the VA range was previously
// association to UVM_RANGE_GROUP_ID_NONE if the managed range was previously
// associated with a non-migratable range group.
//
// Changing the preferred location also updates the mask and mappings of GPUs
// in UVM-Lite mode.
//
// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED.
//
// If mm != NULL, that mm is used for any CPU mappings which may be created as
// a result of this call. See uvm_va_block_context_t::mm for details.
//
@@ -813,7 +990,7 @@ uvm_va_block_t *uvm_va_range_block_next(uvm_va_range_t *va_range, uvm_va_block_t
//
// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read
// mode.
NV_STATUS uvm_va_range_set_preferred_location(uvm_va_range_t *va_range,
NV_STATUS uvm_va_range_set_preferred_location(uvm_va_range_managed_t *managed_range,
uvm_processor_id_t preferred_location,
int preferred_cpu_nid,
struct mm_struct *mm,
@@ -831,7 +1008,7 @@ NV_STATUS uvm_va_range_set_preferred_location(uvm_va_range_t *va_range,
//
// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read
// mode.
NV_STATUS uvm_va_range_set_accessed_by(uvm_va_range_t *va_range,
NV_STATUS uvm_va_range_set_accessed_by(uvm_va_range_managed_t *managed_range,
uvm_processor_id_t processor_id,
struct mm_struct *mm,
uvm_tracker_t *out_tracker);
@@ -841,7 +1018,7 @@ NV_STATUS uvm_va_range_set_accessed_by(uvm_va_range_t *va_range,
// If out_tracker != NULL any block work will be added to that tracker.
//
// This also updates the mask and mappings of the UVM-Lite GPUs if required.
void uvm_va_range_unset_accessed_by(uvm_va_range_t *va_range,
void uvm_va_range_unset_accessed_by(uvm_va_range_managed_t *managed_range,
uvm_processor_id_t processor_id,
uvm_tracker_t *out_tracker);
@@ -852,7 +1029,7 @@ void uvm_va_range_unset_accessed_by(uvm_va_range_t *va_range,
//
// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read
// mode.
NV_STATUS uvm_va_range_set_read_duplication(uvm_va_range_t *va_range, struct mm_struct *mm);
NV_STATUS uvm_va_range_set_read_duplication(uvm_va_range_managed_t *managed_range, struct mm_struct *mm);
// Unset read-duplication and establish accessed_by mappings
//
@@ -861,17 +1038,18 @@ NV_STATUS uvm_va_range_set_read_duplication(uvm_va_range_t *va_range, struct mm_
//
// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read
// mode.
NV_STATUS uvm_va_range_unset_read_duplication(uvm_va_range_t *va_range, struct mm_struct *mm);
NV_STATUS uvm_va_range_unset_read_duplication(uvm_va_range_managed_t *managed_range, struct mm_struct *mm);
// Create and destroy vma wrappers
uvm_vma_wrapper_t *uvm_vma_wrapper_alloc(struct vm_area_struct *vma);
void uvm_vma_wrapper_destroy(uvm_vma_wrapper_t *vma_wrapper);
static uvm_va_policy_t *uvm_va_range_get_policy(uvm_va_range_t *va_range)
{
UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED);
return &va_range->managed.policy;
}
NV_STATUS uvm_va_range_device_p2p_map_cpu(uvm_va_space_t *va_space,
struct vm_area_struct *vma,
uvm_va_range_device_p2p_t *device_p2p_range);
void uvm_va_range_deinit_device_p2p(uvm_va_range_device_p2p_t *device_p2p_range, struct list_head *deferred_free_list);
void uvm_va_range_destroy_device_p2p(uvm_va_range_device_p2p_t *device_p2p_range, struct list_head *deferred_free_list);
void uvm_va_range_free_device_p2p_mem(uvm_device_p2p_mem_t *p2p_mem);
NV_STATUS uvm_test_va_range_info(UVM_TEST_VA_RANGE_INFO_PARAMS *params, struct file *filp);
NV_STATUS uvm_test_va_range_split(UVM_TEST_VA_RANGE_SPLIT_PARAMS *params, struct file *filp);