This commit is contained in:
Andy Ritger
2022-06-28 08:00:06 -07:00
parent 965db98552
commit 94eaea9726
37 changed files with 556 additions and 353 deletions

View File

@@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"515.48.07\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"515.57\"
EXTRA_CFLAGS += -Wno-unused-function

View File

@@ -1942,45 +1942,32 @@ static NV_STATUS cancel_faults_precise_va(uvm_gpu_t *gpu,
// Function called when the system has found a global error and needs to
// trigger RC in RM.
// We cancel one entry per uTLB
// Function called when the system has found a global error and needs to
// trigger RC in RM.
//
// NOTE(review): the original span was a unified diff with its +/- markers
// stripped, interleaving the pre- and post-515.57 implementations (duplicate
// `status` declarations, two loop headers, two cancellation paths). This body
// resolves the block to the coherent post-change version visible in the
// new-side lines: iterate the ordered/coalesced fault cache and cancel each
// instance pointer with the global cancellation method.
static void cancel_fault_batch_tlb(uvm_gpu_t *gpu,
                                   uvm_fault_service_batch_context_t *batch_context,
                                   UvmEventFatalReason reason)
{
    NvU32 i;

    for (i = 0; i < batch_context->num_coalesced_faults; ++i) {
        NV_STATUS status = NV_OK;
        uvm_fault_buffer_entry_t *current_entry;

        current_entry = batch_context->ordered_fault_cache[i];

        // Record the fatal fault for the head entry and all of its coalesced
        // instances (the helper takes the va_space read lock and walks
        // merged_instances_list).
        record_fatal_fault_helper(gpu, current_entry, reason);

        // We need to cancel each instance pointer to correctly handle faults
        // from multiple contexts.
        status = push_cancel_on_gpu_global(gpu, current_entry->instance_ptr, &batch_context->tracker);
        if (status != NV_OK)
            break;
    }
}

View File

@@ -899,9 +899,14 @@ static NV_STATUS migrate_pageable(migrate_vma_state_t *state)
// VMAs are validated and migrated one at a time, since migrate_vma works
// on one vma at a time
for (; vma->vm_start <= prev_outer; vma = vma->vm_next) {
for (; vma->vm_start <= prev_outer; vma = find_vma_intersection(mm, prev_outer, outer)) {
unsigned long next_addr = 0;
NV_STATUS status = migrate_pageable_vma(vma, start, outer, state, &next_addr);
NV_STATUS status;
// Callers have already validated the range so the vma should be valid.
UVM_ASSERT(vma);
status = migrate_pageable_vma(vma, start, outer, state, &next_addr);
if (status == NV_WARN_NOTHING_TO_DO) {
NV_STATUS populate_status = NV_OK;
bool touch = uvm_migrate_args->touch;

View File

@@ -44,7 +44,7 @@ bool uvm_is_valid_vma_range(struct mm_struct *mm, NvU64 start, NvU64 length)
if (vma->vm_end >= end)
return true;
start = vma->vm_end;
vma = vma->vm_next;
vma = find_vma_intersection(mm, start, end);
}
return false;

View File

@@ -158,7 +158,7 @@ NV_STATUS uvm_populate_pageable(struct mm_struct *mm,
// VMAs are validated and populated one at a time, since they may have
// different protection flags
// Validation of VM_SPECIAL flags is delegated to get_user_pages
for (; vma->vm_start <= prev_end; vma = vma->vm_next) {
for (; vma && vma->vm_start <= prev_end; vma = find_vma_intersection(mm, prev_end, end)) {
NV_STATUS status = uvm_populate_pageable_vma(vma, start, end - start, min_prot, touch, populate_permissions);
if (status != NV_OK)