580.65.06

Maneet Singh
2025-08-04 11:15:02 -07:00
parent d890313300
commit 307159f262
1315 changed files with 477791 additions and 279973 deletions


@@ -1,5 +1,5 @@
/*******************************************************************************
-Copyright (c) 2016-2021 NVIDIA Corporation
+Copyright (c) 2016-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -69,7 +69,9 @@ typedef enum
// Locking: uvm_va_space: at least in read mode, uvm_va_block: exclusive (if uvm_va_block is not NULL)
UVM_PERF_EVENT_FAULT,
-// Locking: uvm_va_block: exclusive. Notably the uvm_va_space lock may not be held on eviction.
+// Locking: None for pageable migrations. Otherwise, exclusive lock on
+// uvm_va_block is needed. Notably the uvm_va_space lock may not be held on
+// eviction.
UVM_PERF_EVENT_MIGRATION,
// Locking: uvm_va_space: at least in read mode, uvm_va_block: exclusive
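This hunk relaxes the locking contract because migrations of pageable memory (the ATS notification path added further down in this diff) have no va_block at all. A hedged sketch of the invariant as it now stands; uvm_assert_mutex_locked() and the va_block lock member appear in the UVM driver sources, but this exact assertion is illustrative and not part of the change:

// Managed migrations carry a va_block and must hold its lock exclusively;
// pageable (ATS) migrations pass block == NULL and require no lock.
if (event_data->migration.block != NULL)
    uvm_assert_mutex_locked(&event_data->migration.block->lock);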
@@ -119,11 +121,6 @@ typedef union
struct
{
-// This field contains the VA space where this fault was reported.
-// If block is not NULL, this field must match
-// uvm_va_block_get_va_space(block).
-uvm_va_space_t *space;
// VA block for the page where the fault was triggered if it exists,
// NULL otherwise (this can happen if the fault is fatal or the
// VA block could not be created).
@@ -195,6 +192,10 @@ typedef union
// Whether the page has been copied or moved
uvm_va_block_transfer_mode_t transfer_mode;
+// Access counters notification buffer index. Only valid when cause is
+// UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER.
+NvU32 access_counters_buffer_index;
// Event that performed the call to make_resident
uvm_make_resident_cause_t cause;
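Because the new index is only defined for access-counter migrations, consumers have to gate on the cause before reading it. A minimal sketch of that guard; the callback name is hypothetical and the signature matches the updated typedef later in this header:

static void example_migration_cb(uvm_va_space_t *va_space,
                                 uvm_perf_event_t event_id,
                                 uvm_perf_event_data_t *event_data)
{
    // Per the comment above, the index is undefined unless the migration
    // was triggered by an access-counter notification.
    if (event_data->migration.cause == UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER) {
        NvU32 index = event_data->migration.access_counters_buffer_index;
        // ... correlate the migration with its notification buffer entry ...
        (void)index;
    }
}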
@@ -226,11 +227,12 @@ typedef union
// Type of the function that can be registered as a callback
//
+// va_space: is the va_space under which the event was generated.
// event_id: is the event being notified. Passing it to the callback enables using the same function to handle
// different events.
// event_data: extra event data that is passed to the callback function. The format of data passed for each event type
// is declared in the uvm_perf_event_data_t union
-typedef void (*uvm_perf_event_callback_t)(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data);
+typedef void (*uvm_perf_event_callback_t)(uvm_va_space_t *va_space, uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data);
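The removed event_data.fault.space field (earlier hunk) is compensated by the new leading va_space parameter, so porting a registered callback is mechanical. A hypothetical before/after:

static void example_fault_cb(uvm_va_space_t *va_space,
                             uvm_perf_event_t event_id,
                             uvm_perf_event_data_t *event_data)
{
    // Before this change: uvm_va_space_t *va_space = event_data->fault.space;
    // Now the VA space arrives as an argument for every event type, not
    // just faults.
    UVM_ASSERT(event_id == UVM_PERF_EVENT_FAULT);
}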
typedef struct
{
@@ -257,25 +259,30 @@ void uvm_perf_destroy_va_space_events(uvm_perf_va_space_events_t *va_space_event
// Register a callback to be executed under the given event. The given callback cannot have been already registered for
// the same event, although the same callback can be registered for different events.
NV_STATUS uvm_perf_register_event_callback(uvm_perf_va_space_events_t *va_space_events,
-uvm_perf_event_t event_id, uvm_perf_event_callback_t callback);
+uvm_perf_event_t event_id,
+uvm_perf_event_callback_t callback);
// Same as uvm_perf_register_event_callback(), but the caller must hold
// va_space_events lock in write mode.
NV_STATUS uvm_perf_register_event_callback_locked(uvm_perf_va_space_events_t *va_space_events,
-uvm_perf_event_t event_id, uvm_perf_event_callback_t callback);
+uvm_perf_event_t event_id,
+uvm_perf_event_callback_t callback);
// Removes a callback for the given event. It's safe to call with a callback that hasn't been registered.
-void uvm_perf_unregister_event_callback(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id,
+void uvm_perf_unregister_event_callback(uvm_perf_va_space_events_t *va_space_events,
+uvm_perf_event_t event_id,
uvm_perf_event_callback_t callback);
// Same as uvm_perf_unregister_event_callback(), but the caller must hold
// va_space_events lock in write mode.
-void uvm_perf_unregister_event_callback_locked(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id,
+void uvm_perf_unregister_event_callback_locked(uvm_perf_va_space_events_t *va_space_events,
+uvm_perf_event_t event_id,
uvm_perf_event_callback_t callback);
// Invoke the callbacks registered for the given event. Callbacks cannot fail.
// Acquires the va_space_events lock internally
-void uvm_perf_event_notify(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id,
+void uvm_perf_event_notify(uvm_perf_va_space_events_t *va_space_events,
+uvm_perf_event_t event_id,
uvm_perf_event_data_t *event_data);
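These hunks only reflow the declarations to one parameter per line; the contract is unchanged. A hedged usage sketch of the register/unregister pair, where my_callback and the error handling are placeholders:

NV_STATUS status = uvm_perf_register_event_callback(va_space_events,
                                                    UVM_PERF_EVENT_MIGRATION,
                                                    my_callback);
if (status != NV_OK)
    return status;
// ... later, at teardown; per the comment above this is safe even if the
// callback was never registered:
uvm_perf_unregister_event_callback(va_space_events,
                                   UVM_PERF_EVENT_MIGRATION,
                                   my_callback);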
// Checks if the given callback is already registered for the event.
@@ -297,6 +304,7 @@ static inline void uvm_perf_event_notify_migration(uvm_perf_va_space_events_t *v
NvU64 address,
NvU64 bytes,
uvm_va_block_transfer_mode_t transfer_mode,
+NvU32 access_counters_buffer_index,
uvm_make_resident_cause_t cause,
uvm_make_resident_context_t *make_resident_context)
{
@@ -304,15 +312,16 @@ static inline void uvm_perf_event_notify_migration(uvm_perf_va_space_events_t *v
{
.migration =
{
-.push                  = push,
-.block                 = va_block,
-.dst                   = dst,
-.src                   = src,
-.address               = address,
-.bytes                 = bytes,
-.transfer_mode         = transfer_mode,
-.cause                 = cause,
-.make_resident_context = make_resident_context,
+.push                         = push,
+.block                        = va_block,
+.dst                          = dst,
+.src                          = src,
+.address                      = address,
+.bytes                        = bytes,
+.transfer_mode                = transfer_mode,
+.access_counters_buffer_index = access_counters_buffer_index,
+.cause                        = cause,
+.make_resident_context        = make_resident_context,
}
};
@@ -327,6 +336,7 @@ static inline void uvm_perf_event_notify_migration_cpu(uvm_perf_va_space_events_
NvU64 bytes,
NvU64 begin_timestamp,
uvm_va_block_transfer_mode_t transfer_mode,
+NvU32 access_counters_buffer_index,
uvm_make_resident_cause_t cause,
uvm_make_resident_context_t *make_resident_context)
{
@@ -334,21 +344,57 @@ static inline void uvm_perf_event_notify_migration_cpu(uvm_perf_va_space_events_
{
.migration =
{
-.cpu_start_timestamp   = begin_timestamp,
-.block                 = va_block,
-.dst                   = UVM_ID_CPU,
-.src                   = UVM_ID_CPU,
-.src_nid               = (NvS16)src_nid,
-.dst_nid               = (NvS16)dst_nid,
-.address               = address,
-.bytes                 = bytes,
-.transfer_mode         = transfer_mode,
-.cause                 = cause,
-.make_resident_context = make_resident_context,
+.cpu_start_timestamp          = begin_timestamp,
+.block                        = va_block,
+.dst                          = UVM_ID_CPU,
+.src                          = UVM_ID_CPU,
+.src_nid                      = (NvS16)src_nid,
+.dst_nid                      = (NvS16)dst_nid,
+.address                      = address,
+.bytes                        = bytes,
+.transfer_mode                = transfer_mode,
+.access_counters_buffer_index = access_counters_buffer_index,
+.cause                        = cause,
+.make_resident_context        = make_resident_context,
}
};
-BUILD_BUG_ON(MAX_NUMNODES > (NvU16)-1);
+// Ensure src_nid can fit within its type in event data.migration
+BUILD_BUG_ON(MAX_NUMNODES >= (1 << (8 * sizeof(event_data.migration.src_nid) - 1)));
uvm_perf_event_notify(va_space_events, UVM_PERF_EVENT_MIGRATION, &event_data);
}
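The replaced compile-time check now matches the signed NvS16 type of src_nid/dst_nid: the old bound, (NvU16)-1 == 65535, was the unsigned maximum and would have accepted node counts that overflow a signed 16-bit field. A standalone sketch of the arithmetic, assuming NvS16 is a 16-bit signed type as in the NVIDIA SDK headers:

#include <assert.h>
#include <stdint.h>

typedef int16_t NvS16; /* stand-in for the SDK typedef */

int main(void)
{
    /* 8 * sizeof(NvS16) - 1 == 15, so the new bound rejects
     * MAX_NUMNODES >= (1 << 15) == 32768: node ids must fit in the
     * non-negative range of NvS16. */
    assert((1 << (8 * sizeof(NvS16) - 1)) == 32768);
    /* The old bound was the unsigned maximum, twice as large. */
    assert((uint16_t)-1 == 65535);
    return 0;
}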
+static inline void uvm_perf_event_notify_migration_ats(uvm_perf_va_space_events_t *va_space_events,
+uvm_push_t *push,
+uvm_processor_id_t dst_id,
+uvm_processor_id_t src_id,
+int dst_nid,
+int src_nid,
+NvU64 address,
+NvU64 bytes,
+NvU32 access_counters_buffer_index,
+uvm_make_resident_cause_t cause)
+{
+uvm_perf_event_data_t event_data =
+{
+.migration =
+{
+.push                         = push,
+.block                        = NULL,
+.dst                          = dst_id,
+.src                          = src_id,
+.src_nid                      = (NvS16)src_nid,
+.dst_nid                      = (NvS16)dst_nid,
+.address                      = address,
+.bytes                        = bytes,
+.access_counters_buffer_index = access_counters_buffer_index,
+.cause                        = cause,
+}
+};
+// Ensure src_nid can fit within its type in event data.migration
+BUILD_BUG_ON(MAX_NUMNODES >= (1 << (8 * sizeof(event_data.migration.src_nid) - 1)));
+uvm_perf_event_notify(va_space_events, UVM_PERF_EVENT_MIGRATION, &event_data);
+}
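This new helper covers ATS (pageable) migrations, which have no va_block; it hard-codes block = NULL, consistent with the relaxed locking comment at the top of this diff. A hedged sketch of a call site, where every value is a placeholder for what the ATS fault-servicing path actually has in hand:

uvm_perf_event_notify_migration_ats(va_space_events,
                                    &push,
                                    gpu_id,         /* hypothetical destination */
                                    UVM_ID_CPU,     /* hypothetical source */
                                    dst_nid,
                                    src_nid,
                                    fault_address,
                                    migrated_bytes,
                                    notif_index,    /* access counters buffer index */
                                    UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER);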
@@ -366,7 +412,6 @@ static inline void uvm_perf_event_notify_gpu_fault(uvm_perf_va_space_events_t *v
{
.fault =
{
-.space = va_space_events->va_space,
.block = va_block,
.proc_id = gpu_id,
.preferred_location = preferred_location,
@@ -393,7 +438,6 @@ static inline void uvm_perf_event_notify_cpu_fault(uvm_perf_va_space_events_t *v
{
.fault =
{
-.space = va_space_events->va_space,
.block = va_block,
.proc_id = UVM_ID_CPU,
.preferred_location = preferred_location,