Mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git (synced 2026-02-07 08:39:58 +00:00)
520.61.05
@@ -281,6 +281,7 @@ typedef struct nv_usermap_access_params_s
    NvU64 access_size;
    NvU64 remap_prot_extra;
    NvBool contig;
    NvU32 caching;
} nv_usermap_access_params_t;

/*
@@ -298,6 +299,7 @@ typedef struct nv_alloc_mapping_context_s {
    NvU64 remap_prot_extra;
    NvU32 prot;
    NvBool valid;
    NvU32 caching;
} nv_alloc_mapping_context_t;

typedef enum
@@ -326,6 +328,9 @@ typedef struct nv_soc_irq_info_s {
#define NV_MAX_DPAUX_NUM_DEVICES 4
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING

#define NV_IGPU_LEGACY_STALL_IRQ 70
#define NV_IGPU_MAX_STALL_IRQS 3
#define NV_IGPU_MAX_NONSTALL_IRQS 1
/*
 * per device state
 */
@@ -362,6 +367,7 @@ typedef struct nv_state_t
    nv_aperture_t *hdacodec_regs;
    nv_aperture_t *mipical_regs;
    nv_aperture_t *fb, ud;
    nv_aperture_t *simregs;

    NvU32 num_dpaux_instance;
    NvU32 interrupt_line;
@@ -374,6 +380,11 @@ typedef struct nv_state_t
    NvU32 soc_dcb_size;
    NvU32 disp_sw_soc_chip_id;

    NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
    NvU32 igpu_nonstall_irq;
    NvU32 num_stall_irqs;
    NvU64 dma_mask;

    NvBool primary_vga;

    NvU32 sim_env;
@@ -451,6 +462,9 @@ typedef struct nv_state_t

    NvBool printed_openrm_enable_unsupported_gpus_error;

    /* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
    NvBool nvpcf_dsm_in_gpu_scope;

} nv_state_t;

// These define need to be in sync with defines in system.h
@@ -515,7 +529,7 @@ typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64);
#define NV_FLAG_USES_MSIX 0x0040
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
// Unused 0x0200
#define NV_FLAG_SOC_IGPU 0x0200
// Unused 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
@@ -564,6 +578,9 @@ typedef enum
#define NV_IS_SOC_DISPLAY_DEVICE(nv) \
    ((nv)->flags & NV_FLAG_SOC_DISPLAY)

#define NV_IS_SOC_IGPU_DEVICE(nv) \
    ((nv)->flags & NV_FLAG_SOC_IGPU)

#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
    (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)

@@ -782,7 +799,7 @@ void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void NV_API_CALL nv_put_file_private(void *);

NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);

NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
@@ -961,7 +978,7 @@ void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *);

/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 **, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
@@ -125,6 +125,7 @@ NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NvU64 NV_API_CALL os_get_max_user_va (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
@@ -193,6 +194,13 @@ void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);

enum os_pci_req_atomics_type {
    OS_INTF_PCIE_REQ_ATOMICS_32BIT,
    OS_INTF_PCIE_REQ_ATOMICS_64BIT,
    OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);

extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
@@ -144,7 +144,7 @@ NV_STATUS rm_free_os_event (NvHandle, NvU32);
NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *);
void rm_client_free_os_events (NvHandle);

NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32);
NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32);
NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *);

NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *);
@@ -285,6 +285,7 @@ NV_STATUS RmIoctl(
        if (rm_create_mmap_context(nv, pParms->hRoot,
                pParms->hObjectParent, pParms->hObjectNew,
                pParms->pMemory, pParms->limit + 1, 0,
                NV_MEMORY_DEFAULT,
                pApi->fd) != NV_OK)
        {
            NV_PRINTF(LEVEL_WARNING,
@@ -457,6 +458,8 @@ NV_STATUS RmIoctl(
            goto done;
        }

        // Don't allow userspace to override the caching type
        pParms->flags = FLD_SET_DRF(OS33, _FLAGS, _CACHING_TYPE, _DEFAULT, pParms->flags);
        Nv04MapMemoryWithSecInfo(pParms, secInfo);

        if (pParms->status == NV_OK)
@@ -464,7 +467,9 @@ NV_STATUS RmIoctl(
            pParms->status = rm_create_mmap_context(nv, pParms->hClient,
                                pParms->hDevice, pParms->hMemory,
                                pParms->pLinearAddress, pParms->length,
                                pParms->offset, pApi->fd);
                                pParms->offset,
                                DRF_VAL(OS33, _FLAGS, _CACHING_TYPE, pParms->flags),
                                pApi->fd);
            if (pParms->status != NV_OK)
            {
                NVOS34_PARAMETERS params;
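The hunks above plumb the OS33 caching-type flag field into rm_create_mmap_context: DRF_VAL pulls the requested field value out of pParms->flags, while FLD_SET_DRF forces the field back to _DEFAULT before the flags are handed to Nv04MapMemoryWithSecInfo, so userspace cannot override the caching type there. A rough sketch of that extract/insert pattern follows, using a made-up field layout (bits 5:4) rather than the real OS33 defines:

/*
 * Illustrative only: the real DRF_VAL()/FLD_SET_DRF() macros operate on
 * NV_<dev>_<reg>_<field> range defines from the API headers. The field
 * position and values below are invented for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define HYP_FLAGS_CACHING_TYPE_SHIFT   4u
#define HYP_FLAGS_CACHING_TYPE_MASK    (0x3u << HYP_FLAGS_CACHING_TYPE_SHIFT)
#define HYP_FLAGS_CACHING_TYPE_DEFAULT 0u

/* Roughly what DRF_VAL(OS33, _FLAGS, _CACHING_TYPE, flags) does: extract the field. */
static uint32_t hyp_get_caching_type(uint32_t flags)
{
    return (flags & HYP_FLAGS_CACHING_TYPE_MASK) >> HYP_FLAGS_CACHING_TYPE_SHIFT;
}

/* Roughly what FLD_SET_DRF(OS33, _FLAGS, _CACHING_TYPE, _DEFAULT, flags) does:
 * clear the field and write the named value back into it. */
static uint32_t hyp_force_caching_default(uint32_t flags)
{
    return (flags & ~HYP_FLAGS_CACHING_TYPE_MASK) |
           (HYP_FLAGS_CACHING_TYPE_DEFAULT << HYP_FLAGS_CACHING_TYPE_SHIFT);
}

int main(void)
{
    uint32_t flags = 0x2u << HYP_FLAGS_CACHING_TYPE_SHIFT;  /* caller requested a caching type */
    uint32_t requested = hyp_get_caching_type(flags);       /* value passed on to the mmap context */
    flags = hyp_force_caching_default(flags);               /* value the mapping call actually sees */
    printf("requested=%u, field after reset=%u\n", requested, hyp_get_caching_type(flags));
    return 0;
}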
@@ -75,7 +75,8 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
    const NvU8 *pMdevUuid,
    NvU32 vgpuTypeId,
    NvU16 *vgpuId,
    NvU32 gpuPciBdf
    NvU32 gpuPciBdf,
    NvBool *is_driver_vm
)
{
    return NV_ERR_NOT_SUPPORTED;
@@ -718,6 +718,11 @@ void osSpinLoop(void)
{
}

NvU64 osGetMaxUserVa(void)
{
    return os_get_max_user_va();
}

NV_STATUS osSchedule(void)
{
    return os_schedule();
@@ -3497,17 +3502,14 @@ osRemoveGpuSupported
 * - All address values are in the System Physical Address (SPA) space
 * - Targets can either be "Local" (bIsPeer=False) or for a specified "Peer"
 *   (bIsPeer=True, peerIndex=#) GPU
 * - Granularity of the target address space is returned as a bit shift value
 *   (e.g. granularity=37 implies a granularity of 128GiB)
 * - Target address and mask values have a specified bit width, and represent
 *   the higher order bits above the target address granularity
 *
 * @param[in]  pGpu          GPU object pointer
 * @param[out] pAddrSysPhys  Pointer to hold SPA aligned at 128GB boundary
 * @param[out] pAddrSysPhys  Pointer to hold SPA
 * @param[out] pAddrWidth    Address range width value pointer
 * @param[out] pMask         Mask value pointer
 * @param[out] pMaskWidth    Mask width value pointer
 * @param[out] pGranularity  Granularity value pointer
 * @param[in]  bIsPeer       NV_TRUE if this is a peer, local GPU otherwise
 * @param[in]  peerIndex     Peer index
 *
@@ -3520,11 +3522,10 @@ NV_STATUS
osGetAtsTargetAddressRange
(
    OBJGPU *pGpu,
    NvU32  *pAddrSysPhys,
    NvU64  *pAddrSysPhys,
    NvU32  *pAddrWidth,
    NvU32  *pMask,
    NvU32  *pMaskWidth,
    NvU32  *pGranularity,
    NvBool  bIsPeer,
    NvU32   peerIndex
)
@@ -3548,27 +3549,21 @@ osGetAtsTargetAddressRange
    if (bIsPeer)
    {
        const int addrWidth = 0x10;
        const NvU32 guestAddrGranularity = 37;

        *pAddrSysPhys = 0;
        *pAddrWidth = addrWidth;
        *pMask = 0;
        *pMaskWidth = addrMaskWidth;
        *pGranularity = guestAddrGranularity;
        return NV_OK;
    }
    else
    {
        NvU64 addrSysPhys;

        NV_STATUS status = nv_get_device_memory_config(nv, &addrSysPhys, NULL,
                                                       pAddrWidth, pGranularity, NULL);
        NV_STATUS status = nv_get_device_memory_config(nv, pAddrSysPhys, NULL,
                                                       pAddrWidth, NULL);
        if (status == NV_OK)
        {
            *pMask = NVBIT(*pAddrWidth) - 1U;
            *pMaskWidth = addrMaskWidth;

            *pAddrSysPhys = addrSysPhys >> *pGranularity;
        }
        return status;
    }
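The comment block above describes the granularity as a bit-shift value (granularity=37 means one unit is 2^37 bytes = 128 GiB), and the removed local path used to return the SPA shifted right by that granularity, i.e. only the high-order bits. A small piece of illustrative arithmetic, not driver code, showing what those numbers mean:

/* Illustrative arithmetic only: what a bit-shift "granularity" of 37 implies. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t granularity = 37;                   /* bit shift, as documented above */
    const uint64_t regionSize  = 1ULL << granularity;  /* 0x2000000000 bytes = 128 GiB */

    uint64_t spa = 0x2000000000ULL;                    /* example system physical address */
    uint64_t highBits = spa >> granularity;            /* bits above the granularity: 1 */

    printf("region size = %llu GiB, high-order bits = %llu\n",
           (unsigned long long)(regionSize >> 30),
           (unsigned long long)highBits);
    return 0;
}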
@@ -3615,7 +3610,7 @@ osGetFbNumaInfo

    nv = NV_GET_NV_STATE(pGpu);

    NV_STATUS status = nv_get_device_memory_config(nv, NULL, pAddrPhys, NULL, NULL, pNodeId);
    NV_STATUS status = nv_get_device_memory_config(nv, NULL, pAddrPhys, NULL, pNodeId);

    return status;
#endif
@@ -3901,7 +3896,7 @@ osNumaOnliningEnabled
    // Note that this numaNodeId value fetched from Linux layer might not be
    // accurate since it is possible to overwrite it with regkey on some configs
    //
    if (nv_get_device_memory_config(pOsGpuInfo, NULL, NULL, NULL, NULL,
    if (nv_get_device_memory_config(pOsGpuInfo, NULL, NULL, NULL,
                                    &numaNodeId) != NV_OK)
    {
        return NV_FALSE;
@@ -4959,6 +4954,51 @@ osGetSyncpointAperture
    return NV_ERR_NOT_SUPPORTED;
}

/*!
 * @brief Enable PCIe AtomicOp Requester Enable and return
 * the completer side capabilities that the requester can send.
 *
 * @param[in]  pOsGpuInfo  OS_GPU_INFO OS specific GPU information pointer
 * @param[out] pMask       mask of supported atomic size, including one or more of:
 *                         OS_PCIE_CAP_MASK_REQ_ATOMICS_32
 *                         OS_PCIE_CAP_MASK_REQ_ATOMICS_64
 *                         OS_PCIE_CAP_MASK_REQ_ATOMICS_128
 *
 * @returns NV_STATUS, NV_OK if success
 *                     NV_ERR_NOT_SUPPORTED if platform doesn't support this
 *                     feature.
 *                     NV_ERR_GENERIC for any other error
 */

NV_STATUS
osConfigurePcieReqAtomics
(
    OS_GPU_INFO *pOsGpuInfo,
    NvU32       *pMask
)
{
    if (pMask)
    {
        *pMask = 0U;
        if (pOsGpuInfo)
        {
            if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
                                          OS_INTF_PCIE_REQ_ATOMICS_32BIT) == NV_OK)
                *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_32;
            if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
                                          OS_INTF_PCIE_REQ_ATOMICS_64BIT) == NV_OK)
                *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_64;
            if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
                                          OS_INTF_PCIE_REQ_ATOMICS_128BIT) == NV_OK)
                *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_128;

            if (*pMask != 0)
                return NV_OK;
        }
    }
    return NV_ERR_NOT_SUPPORTED;
}

/*!
 * @brief Check GPU is accessible or not
 *
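The new osConfigurePcieReqAtomics() tries each requester-atomic width in turn and ORs a capability bit into *pMask for every width the platform accepts; callers then test individual bits. A minimal caller sketch follows; the stand-in function and the CAP_MASK_* bit values are placeholders for the example, not the RM's real definitions:

/* Hypothetical caller sketch; bit values are placeholders, not from the headers. */
#include <stdint.h>
#include <stdio.h>

#define CAP_MASK_REQ_ATOMICS_32   (1u << 0)   /* placeholder */
#define CAP_MASK_REQ_ATOMICS_64   (1u << 1)   /* placeholder */
#define CAP_MASK_REQ_ATOMICS_128  (1u << 2)   /* placeholder */

/* Stand-in for osConfigurePcieReqAtomics(): pretend 32- and 64-bit requests work. */
static int configure_pcie_req_atomics(uint32_t *pMask)
{
    *pMask = CAP_MASK_REQ_ATOMICS_32 | CAP_MASK_REQ_ATOMICS_64;
    return 0;  /* NV_OK */
}

int main(void)
{
    uint32_t mask = 0;
    if (configure_pcie_req_atomics(&mask) == 0)
    {
        if (mask & CAP_MASK_REQ_ATOMICS_64)
            printf("64-bit PCIe AtomicOp requests can be issued\n");
        if (!(mask & CAP_MASK_REQ_ATOMICS_128))
            printf("128-bit (CAS-style) requests are not available\n");
    }
    return 0;
}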
@@ -1843,6 +1843,7 @@ static NV_STATUS RmCreateMmapContextLocked(
    NvP64 address,
    NvU64 size,
    NvU64 offset,
    NvU32 cachingType,
    NvU32 fd
)
{
@@ -1884,6 +1885,7 @@ static NV_STATUS RmCreateMmapContextLocked(
    nvuap->addr = addr;
    nvuap->size = size;
    nvuap->offset = offset;
    nvuap->caching = cachingType;

    //
    // Assume the allocation is contiguous until RmGetMmapPteArray
@@ -1975,6 +1977,7 @@ NV_STATUS rm_create_mmap_context(
    NvP64 address,
    NvU64 size,
    NvU64 offset,
    NvU32 cachingType,
    NvU32 fd
)
{
@@ -1995,7 +1998,7 @@ NV_STATUS rm_create_mmap_context(
    else if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) == NV_OK)
    {
        rmStatus = RmCreateMmapContextLocked(hClient, hDevice, hMemory,
                                             address, size, offset, fd);
                                             address, size, offset, cachingType, fd);
        // UNLOCK: release GPUs lock
        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
    }
@@ -3364,13 +3367,29 @@ static NV_STATUS RmNonDPAuxI2CTransfer
            break;

        case NV_I2C_CMD_SMBUS_WRITE:
            params->transData.smbusByteData.bWrite = NV_TRUE;
            if (len == 2)
            {
                params->transData.smbusWordData.bWrite = NV_TRUE;
            }
            else
            {
                params->transData.smbusByteData.bWrite = NV_TRUE;
            }
            /* fall through*/

        case NV_I2C_CMD_SMBUS_READ:
            params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW;
            params->transData.smbusByteData.message = pData[0];
            params->transData.smbusByteData.registerAddress = command;
            if (len == 2)
            {
                params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW;
                params->transData.smbusWordData.message = pData[0] | ((NvU16)pData[1] << 8);
                params->transData.smbusWordData.registerAddress = command;
            }
            else
            {
                params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW;
                params->transData.smbusByteData.message = pData[0];
                params->transData.smbusByteData.registerAddress = command;
            }
            break;

        case NV_I2C_CMD_SMBUS_BLOCK_WRITE:
@@ -3408,7 +3427,15 @@ static NV_STATUS RmNonDPAuxI2CTransfer
    //
    if (rmStatus == NV_OK && type == NV_I2C_CMD_SMBUS_READ)
    {
        pData[0] = params->transData.smbusByteData.message;
        if (len == 2)
        {
            pData[0] = (params->transData.smbusWordData.message & 0xff);
            pData[1] = params->transData.smbusWordData.message >> 8;
        }
        else
        {
            pData[0] = params->transData.smbusByteData.message;
        }
    }

    portMemFree(params);
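The new branches above switch to the SMBus word protocol whenever the payload is two bytes, packing and unpacking the 16-bit message little-endian (low byte first), while single-byte transfers keep the byte protocol. A small self-contained sketch of that packing, independent of the NV402C transaction structures:

/* Minimal sketch of the little-endian word packing used for 2-byte SMBus
 * transfers above; this is not the driver's NV402C control structure code. */
#include <stdint.h>
#include <assert.h>

static uint16_t pack_smbus_word(const uint8_t *pData)
{
    return (uint16_t)(pData[0] | ((uint16_t)pData[1] << 8));  /* low byte first */
}

static void unpack_smbus_word(uint16_t message, uint8_t *pData)
{
    pData[0] = (uint8_t)(message & 0xff);   /* low byte */
    pData[1] = (uint8_t)(message >> 8);     /* high byte */
}

int main(void)
{
    uint8_t buf[2] = { 0x34, 0x12 };
    uint16_t msg = pack_smbus_word(buf);    /* 0x1234 */
    uint8_t out[2];
    unpack_smbus_word(msg, out);
    assert(msg == 0x1234 && out[0] == 0x34 && out[1] == 0x12);
    return 0;
}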
@@ -58,6 +58,7 @@
#include <nvSha256.h>
#include <gpu/gsp/kernel_gsp.h>
#include <logdecode.h>
#include <gpu/fsp/kern_fsp.h>

#include <mem_mgr/virt_mem_mgr.h>

@@ -637,6 +638,15 @@ osInitNvMapping(
    sysApplyLockingPolicy(pSys);

    pGpu->busInfo.IntLine = nv->interrupt_line;

    //
    // Set the DMA address size as soon as we have the HAL to call to
    // determine the precise number of physical address bits supported
    // by the architecture. DMA allocations should not be made before
    // this point.
    //
    nv_set_dma_address_size(nv, gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM));

    pGpu->dmaStartAddress = (RmPhysAddr)nv_get_dma_start_address(nv);
    if (nv->fb != NULL)
    {
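The added comment explains why nv_set_dma_address_size() is now called from osInitNvMapping() as soon as the HAL can report the GPU's physical address width, and the later hunks drop the RmSetDeviceDmaAddressSize() wrapper in favor of calling it directly. As a hedged illustration of what deriving a DMA mask from an address-bit count typically looks like (names here are invented, not the driver's implementation):

/* Illustrative only: deriving a DMA mask from a physical address width,
 * in the spirit of a DMA-address-size setter. Not the driver's code. */
#include <stdint.h>
#include <stdio.h>

static uint64_t dma_mask_from_bits(unsigned int numDmaAddressBits)
{
    if (numDmaAddressBits >= 64)
        return ~0ULL;                              /* full 64-bit addressing */
    return (1ULL << numDmaAddressBits) - 1ULL;     /* e.g. 47 bits -> 0x7fffffffffff */
}

int main(void)
{
    printf("47-bit DMA mask: 0x%llx\n",
           (unsigned long long)dma_mask_from_bits(47));
    return 0;
}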
@@ -725,15 +735,6 @@ osTeardownScalability(
    return clTeardownPcie(pGpu, pCl);
}

static inline void
RmSetDeviceDmaAddressSize(
    nv_state_t *nv,
    NvU8        numDmaAddressBits
)
{
    nv_set_dma_address_size(nv, numDmaAddressBits);
}

static void
populateDeviceAttributes(
    OBJGPU *pGpu,
@@ -883,8 +884,6 @@ RmInitNvDevice(
        return;
    }

    RmSetDeviceDmaAddressSize(nv, gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM));

    os_disable_console_access();

    status->rmStatus = gpumgrStateInitGpu(pGpu);
@@ -1188,7 +1187,7 @@ NvBool RmInitPrivateState(
    // Set up a reasonable default DMA address size, based on the minimum
    // possible on currently supported GPUs.
    //
    RmSetDeviceDmaAddressSize(pNv, NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);
    nv_set_dma_address_size(pNv, NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);

    os_mem_set(nvp, 0, sizeof(*nvp));
    nvp->status = NV_ERR_INVALID_STATE;
@@ -1582,7 +1581,7 @@ NvBool RmInitAdapter(
    //
    if (nv->request_firmware)
    {
        RmSetDeviceDmaAddressSize(nv, NV_GSP_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);
        nv_set_dma_address_size(nv, NV_GSP_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);

        gspFwHandle = nv_get_firmware(nv, NV_FIRMWARE_GSP,
                                      &gspFw.pBuf,
@@ -1655,6 +1654,17 @@ NvBool RmInitAdapter(
        goto shutdown;
    }

    KernelFsp *pKernelFsp = GPU_GET_KERNEL_FSP(pGpu);
    if ((pKernelFsp != NULL) && !IS_GSP_CLIENT(pGpu) && !IS_VIRTUAL(pGpu))
    {
        status.rmStatus = kfspSendBootCommands_HAL(pGpu, pKernelFsp);
        if (status.rmStatus != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR, "FSP boot command failed.\n");
            goto shutdown;
        }
    }

    RmSetConsolePreservationParams(pGpu);

    //
@@ -1830,7 +1840,7 @@ NvBool RmInitAdapter(
    RmInitS0ixPowerManagement(nv);
    RmInitDeferredDynamicPowerManagement(nv);

    if (!NV_IS_SOC_DISPLAY_DEVICE(nv))
    if (!NV_IS_SOC_DISPLAY_DEVICE(nv) && !NV_IS_SOC_IGPU_DEVICE(nv))
    {
        status.rmStatus = RmRegisterGpudb(pGpu);
        if (status.rmStatus != NV_OK)
@@ -534,6 +534,27 @@ static void NV_API_CALL rm_nvlink_ops_training_complete
    NV_EXIT_RM_RUNTIME(sp, fp);
}

static NvlStatus NV_API_CALL rm_nvlink_ops_ali_training
(
    struct nvlink_link *link
)
{
    void *fp;
    NvlStatus status;
    THREAD_STATE_NODE threadState = {0};
    KNVLINK_RM_LINK *pLink = link->link_info;
    nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo;

    NV_ENTER_RM_RUNTIME(sp, fp);

    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
    status = knvlinkCoreAliTrainingCallback(link);
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);

    NV_EXIT_RM_RUNTIME(sp, fp);
    return status;
}

#endif /* defined(INCLUDE_NVLINK_LIB) */

const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void)
@@ -560,6 +581,7 @@ const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void)
        .read_discovery_token = rm_nvlink_ops_read_link_discovery_token,
        .training_complete = rm_nvlink_ops_training_complete,
        .get_uphy_load = rm_nvlink_get_uphy_load,
        .ali_training = rm_nvlink_ops_ali_training,
    };

    return &rm_nvlink_link_ops;
@@ -647,7 +669,7 @@ osGetPlatformNvlinkLinerate
)
{
#if defined(NVCPU_PPC64LE)
    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
    KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);

    if (!pKernelNvlink)
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -396,12 +396,13 @@ NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *sp,
    gpuAddressSpaceHandle srcVaSpace,
    NvU64 srcAddress,
    gpuAddressSpaceHandle dstVaSpace,
    NvU64 dstVaAlignment,
    NvU64 *dstAddress)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDupAllocation(srcVaSpace, srcAddress, dstVaSpace, dstAddress);
    rmStatus = nvGpuOpsDupAllocation(srcVaSpace, srcAddress, dstVaSpace, dstVaAlignment, dstAddress);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

@@ -339,6 +339,7 @@ void osEnableInterrupts(OBJGPU *pGpu)
            intrRestoreNonStall_HAL(pGpu, pIntr, intrGetIntrEn(pIntr), NULL);
        }
    }

    }
}