530.30.02

This commit is contained in:
Andy Ritger
2023-02-28 11:12:44 -08:00
parent e598191e8e
commit 4397463e73
928 changed files with 124728 additions and 88525 deletions

View File

@@ -116,6 +116,7 @@ struct DACP2060EXTERNALDEVICE
NvU32 tSwapRdyHiLsrMinTime; /* Value of LSR_MIN_TIME in accordance to the time (in us)
* swap ready line will remain high.(Provided via a regkey)
*/
NV30F1_CTRL_GSYNC_MULTIPLY_DIVIDE_SETTINGS mulDivSettings; // Cached multiplier-divider settings
NvU32 syncSkewResolutionInNs; // resolution in ns
NvU32 syncSkewMax; // max syncSkew setting in raw units
@@ -248,6 +249,8 @@ NV_STATUS gsyncGetStereoLockMode_P2060 (OBJGPU *, PDACEXTERNALDEVICE, NvU3
NV_STATUS gsyncSetStereoLockMode_P2060 (OBJGPU *, PDACEXTERNALDEVICE, NvU32);
NV_STATUS gsyncSetMosaic_P2060 (OBJGPU *, PDACEXTERNALDEVICE, NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS *);
NV_STATUS gsyncConfigFlashGsync_P2060 (OBJGPU *, PDACEXTERNALDEVICE, NvU32);
NV_STATUS gsyncGetMulDiv_P2060 (OBJGPU *, DACEXTERNALDEVICE *, NV30F1_CTRL_GSYNC_MULTIPLY_DIVIDE_SETTINGS *);
NV_STATUS gsyncSetMulDiv_P2060 (OBJGPU *, DACEXTERNALDEVICE *, NV30F1_CTRL_GSYNC_MULTIPLY_DIVIDE_SETTINGS *);
NvBool gsyncSupportsLargeSyncSkew_P2060 (DACEXTERNALDEVICE *);
#endif

View File

@@ -1,381 +1,3 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _GPU_ACCESS_H_
#define _GPU_ACCESS_H_
#include "ioaccess/ioaccess.h"
#include "gpu/gpu_device_mapping.h"
#include "g_gpu_access_nvoc.h"
// Go straight at the memory or hardware.
// Raw CPU accesses through volatile-qualified pointers: these bypass the
// RegisterAccess layer entirely (no register filters, no read/write
// profiling). Write macros are wrapped in do { ... } while (0) so they
// expand safely as a single statement (e.g. in an unbraced if/else).
// NOTE(review): a MEM_WR64 is provided but no MEM_RD64 counterpart —
// confirm 64-bit reads are intentionally unsupported at this level.
#define MEM_RD08(a) (*(const volatile NvU8 *)(a))
#define MEM_RD16(a) (*(const volatile NvU16 *)(a))
#define MEM_RD32(a) (*(const volatile NvU32 *)(a))
#define MEM_WR08(a, d) do { *(volatile NvU8 *)(a) = (d); } while (0)
#define MEM_WR16(a, d) do { *(volatile NvU16 *)(a) = (d); } while (0)
#define MEM_WR32(a, d) do { *(volatile NvU32 *)(a) = (d); } while (0)
#define MEM_WR64(a, d) do { *(volatile NvU64 *)(a) = (d); } while (0)
//
// Define the signature of the register filter callback function
//
// flags can be optionally used for filters to decide whether to actually
// touch HW or not. flags should be OR'ed every time a new filter is found. (see objgpu.c)
//
typedef void (*GpuWriteRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 flags);
typedef NvU32 (*GpuReadRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 accessSize, NvU32 flags);
// Overlay view of a mapped register aperture: the same hardware region
// addressable as 8-, 16-, or 32-bit volatile cells. The arrays are
// declared with a single element; the real extent is the length of the
// mapped aperture (see DEVICE_MAPPING::gpuNvLength), so access is via
// computed indices rather than the declared bound.
union GPUHWREG
{
volatile NvV8 Reg008[1];
volatile NvV16 Reg016[1];
volatile NvV32 Reg032[1];
};
typedef union GPUHWREG GPUHWREG;
//
// Register filter record
//
// If REGISTER_FILTER_FLAGS_READ is set, then that means that the base RegRead
// function will not read the register, so the provided read callback function
// is expected to read the register and return the value.
//
// If REGISTER_FILTER_FLAGS_WRITE is set, then that means that the base RegWrite
// function will not write the register, so the provided callback write function
// is expected to write the given value to the register.
//
// It is an error to specify REGISTER_FILTER_FLAGS_READ and not provide a
// read callback function.
//
// It is an error to specify REGISTER_FILTER_FLAGS_WRITE and not provide a
// write callback function.
//
#define REGISTER_FILTER_FLAGS_READ (NVBIT(0))
#define REGISTER_FILTER_FLAGS_WRITE (NVBIT(1))
// filter is in the list but it is invalid and should be removed
#define REGISTER_FILTER_FLAGS_INVALID (NVBIT(2))
#define REGISTER_FILTER_FLAGS_VIRTUAL (0)
#define REGISTER_FILTER_FLAGS_READ_WRITE (REGISTER_FILTER_FLAGS_READ | REGISTER_FILTER_FLAGS_WRITE)
typedef struct REGISTER_FILTER REGISTER_FILTER;
// A single node in a device's register-filter list (DEVICE_REGFILTER_INFO).
// Flag semantics (READ/WRITE/INVALID) are documented in the block comment
// above; filters may cover overlapping [rangeStart, rangeEnd] ranges.
struct REGISTER_FILTER
{
REGISTER_FILTER *pNext; //!< pointer to next filter in the singly linked list
NvU32 flags; //!< REGISTER_FILTER_FLAGS_* attributes of this filter
DEVICE_INDEX devIndex; //!< filter device
NvU32 devInstance; //!< filter device instance
NvU32 rangeStart; //!< filter range start (can overlap)
NvU32 rangeEnd; //!< filter range end (can overlap)
GpuWriteRegCallback pWriteCallback; //!< callback for write; required if FLAGS_WRITE is set
GpuReadRegCallback pReadCallback; //!< callback for read; required if FLAGS_READ is set
void *pParam; //!< pointer to param which gets passed to callbacks
};
// Per-device register-filter bookkeeping. List mutations are serialized
// with pRegFilterLock; entries flagged REGISTER_FILTER_FLAGS_INVALID are
// garbage-collected (moved to the recycle list) when bRegFilterNeedRemove
// is set and no readers hold a reference (regFilterRefCnt).
typedef struct {
REGISTER_FILTER *pRegFilterList; // Active filters
REGISTER_FILTER *pRegFilterRecycleList; // Inactive filters
PORT_SPINLOCK * pRegFilterLock; // Thread-safe list management
NvU32 regFilterRefCnt; // Thread-safe list management
NvBool bRegFilterNeedRemove; // Thread-safe list garbage collection
} DEVICE_REGFILTER_INFO;
// Describes one mapped device aperture: the CPU virtual/physical addresses
// of the mapping, its length, the device it belongs to, a reference count
// for shared use, and the register-filter list applied to accesses that go
// through this mapping.
typedef struct DEVICE_MAPPING
{
GPUHWREG *gpuNvAddr; // CPU Virtual Address
RmPhysAddr gpuNvPAddr; // Physical Base Address
NvU32 gpuNvLength; // Length of the Aperture
NvU32 gpuNvSaveLength; // Saved aperture length — presumably for restore paths; TODO confirm
NvU32 gpuDeviceEnum; // Device ID NV_DEVID_*
NvU32 refCount; // refCount for the device map.
DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list
} DEVICE_MAPPING;
// IO_DEVICE specialization binding a GPU device index/instance to the
// generic ioaccess framework (see "ioaccess/ioaccess.h", included above).
typedef struct
{
IO_DEVICE parent; // base class; must be first for IO_DEVICE up-casts
OBJGPU *pGpu;
DEVICE_INDEX deviceIndex;
NvU32 instance;
// The following members are initialized in objgpu.c,
// but are not used anywhere. gpuApertureReg* functions
// fall back to DEVICE_MAPPING instead
GPUHWREG *gpuNvAddr; // CPU Virtual Address
RmPhysAddr gpuNvPAddr; // Physical Base Address
NvU32 gpuNvLength; // Length of Aperture
NvU32 gpuDeviceEnum; // Device ID NV_DEVID_*
NvU32 refCount; // refCount for the device map.
DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list
} GPU_IO_DEVICE;
// Per-GPU register I/O state: the apertures used for access plus simple
// read/write counters. Operated on by the regRead*/regWrite* functions
// declared below and constructed/destroyed via regAccessConstruct/Destruct.
typedef struct
{
// Pointer to GPU linked to this RegisterAccess object
OBJGPU *pGpu;
// HW register access tools
GPUHWREG *gpuFbAddr;
GPUHWREG *gpuInstAddr;
// Register access profiling
NvU32 regReadCount; // number of register reads issued through this object
NvU32 regWriteCount; // number of register writes issued through this object
} RegisterAccess;
/*! Init register IO access path for a GPU; pairs with regAccessDestruct */
NV_STATUS regAccessConstruct(RegisterAccess *, OBJGPU *pGpu);
/*! Shutdown register IO access path */
void regAccessDestruct(RegisterAccess *);
/*! Writes to 8 bit register (device index, instance, address, value) */
void regWrite008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV8);
/*! Writes to 16 bit register (device index, instance, address, value) */
void regWrite016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV16);
/*! Writes to 32 bit register, with thread state on the stack (may be NULL) */
void regWrite032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *);
/*! Unicast register access, with thread state on the stack (may be NULL) */
void regWrite032Unicast(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *);
/*! Reads from 8 bit register (device index, instance, address) */
NvU8 regRead008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32);
/*! Reads from 16 bit register (device index, instance, address) */
NvU16 regRead016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32);
/*! Reads from 32 bit register, with thread state on the stack */
NvU32 regRead032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, THREAD_STATE_NODE *);
/*! Reads from 32 bit register and checks bit mask, with thread state on the stack */
NvU32 regCheckRead032(RegisterAccess *, NvU32, NvU32, THREAD_STATE_NODE *);
/*! Reads 32 bit register and polls bit field for specific value */
NV_STATUS regRead032_AndPoll(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvU32);
/*! Adds a register filter; on success returns the new filter via the out parameter */
NV_STATUS regAddRegisterFilter(RegisterAccess *, NvU32, DEVICE_INDEX, NvU32, NvU32, NvU32, GpuWriteRegCallback, GpuReadRegCallback, void *, REGISTER_FILTER **);
/*! Removes register filter */
void regRemoveRegisterFilter(RegisterAccess *, REGISTER_FILTER *);
/*! Check status of read return value for GPU/bus errors */
void regCheckAndLogReadFailure(RegisterAccess *, NvU32 addr, NvU32 mask, NvU32 value);
//
// GPU register I/O macros.
//
//
// GPU neutral macros typically used for register I/O.
//
#define GPU_DRF_SHIFT(drf) ((0?drf) % 32)
#define GPU_DRF_MASK(drf) (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
#define GPU_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<GPU_DRF_SHIFT(NV ## d ## r ## f))
#define GPU_DRF_NUM(d,r,f,n) (((n)&GPU_DRF_MASK(NV ## d ## r ## f))<<GPU_DRF_SHIFT(NV ## d ## r ## f))
#define GPU_DRF_VAL(d,r,f,v) (((v)>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define GPU_DRF_SHIFTMASK(drf) (GPU_DRF_MASK(drf)<<(GPU_DRF_SHIFT(drf)))
#define GPU_DRF_WIDTH(drf) ((1?drf) - (0?drf) + 1)
// Device independent macros
// Multiple device instance macros
#define REG_INST_RD08(g,dev,inst,a) regRead008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
#define REG_INST_RD16(g,dev,inst,a) regRead016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
#define REG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, NULL)
#define REG_INST_WR08(g,dev,inst,a,v) regWrite008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
#define REG_INST_WR16(g,dev,inst,a,v) regWrite016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
#define REG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)
#define REG_INST_WR32_UC(g,dev,inst,a,v) regWrite032Unicast(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)
#define REG_INST_RD32_EX(g,dev,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, t)
#define REG_INST_WR32_EX(g,dev,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, t)
#define REG_INST_DEVIDX_RD32_EX(g,devidx,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, t)
#define REG_INST_DEVIDX_WR32_EX(g,devidx,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, v, t)
// GPU macros defined in terms of DEV_ macros
#define GPU_REG_RD08(g,a) REG_INST_RD08(g,GPU,0,a)
#define GPU_REG_RD16(g,a) REG_INST_RD16(g,GPU,0,a)
#define GPU_REG_RD32(g,a) REG_INST_RD32(g,GPU,0,a)
#define GPU_CHECK_REG_RD32(g,a,m) regCheckRead032(GPU_GET_REGISTER_ACCESS(g),a,m,NULL)
#define GPU_REG_RD32_AND_POLL(g,r,m,v) regRead032_AndPoll(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_GPU, r, m, v)
#define GPU_REG_WR08(g,a,v) REG_INST_WR08(g,GPU,0,a,v)
#define GPU_REG_WR16(g,a,v) REG_INST_WR16(g,GPU,0,a,v)
#define GPU_REG_WR32(g,a,v) REG_INST_WR32(g,GPU,0,a,v)
#define GPU_REG_WR32_UC(g,a,v) REG_INST_WR32_UC(g,GPU,0,a,v)
// GPU macros for SR-IOV
#define GPU_VREG_RD32(g, a) GPU_REG_RD32(g, g->sriovState.virtualRegPhysOffset + a)
#define GPU_VREG_WR32(g, a, v) GPU_REG_WR32(g, g->sriovState.virtualRegPhysOffset + a, v)
#define GPU_VREG_RD32_EX(g,a,t) REG_INST_RD32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, t)
#define GPU_VREG_WR32_EX(g,a,v,t) REG_INST_WR32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, v, t)
#define GPU_VREG_FLD_WR_DRF_DEF(g,d,r,f,c) GPU_VREG_WR32(g, NV##d##r,(GPU_VREG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_VREG_RD_DRF(g,d,r,f) (((GPU_VREG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define VREG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, NULL)
#define VREG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, v, NULL)
#define GPU_VREG_FLD_WR_DRF_NUM(g,d,r,f,n) VREG_INST_WR32(g,GPU,0,NV##d##r,(VREG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
#define GPU_VREG_FLD_TEST_DRF_DEF(g,d,r,f,c) (GPU_VREG_RD_DRF(g, d, r, f) == NV##d##r##f##c)
#define GPU_GET_VREG_OFFSET(g, a) (g->sriovState.virtualRegPhysOffset + a)
#define GPU_VREG_IDX_RD_DRF(g,d,r,i,f) (((GPU_VREG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define GPU_VREG_FLD_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_VREG_WR32(g, NV##d##r(i),(GPU_VREG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_REG_RD32_EX(g,a,t) REG_INST_RD32_EX(g,GPU,0,a,t)
#define GPU_REG_WR32_EX(g,a,v,t) REG_INST_WR32_EX(g,GPU,0,a,v,t)
// Uncomment this to enable register access dump in gsp client
// #define GPU_REGISTER_ACCESS_DUMP RMCFG_FEATURE_GSP_CLIENT_RM
#ifndef GPU_REGISTER_ACCESS_DUMP
#define GPU_REGISTER_ACCESS_DUMP 0
#endif
#if GPU_REGISTER_ACCESS_DUMP
NvU8 gpuRegRd08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr);
NvU16 gpuRegRd16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr);
NvU32 gpuRegRd32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr);
void gpuRegWr08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV8 val);
void gpuRegWr16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV16 val);
void gpuRegWr32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val);
void gpuRegWr32Uc_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val);
#undef GPU_REG_RD08
#undef GPU_REG_RD16
#undef GPU_REG_RD32
#undef GPU_REG_WR08
#undef GPU_REG_WR16
#undef GPU_REG_WR32
#undef GPU_REG_WR32_UC
#undef GPU_VREG_RD32
#undef GPU_VREG_WR32
#define GPU_REG_RD08(g,a) gpuRegRd08_dumpinfo(__FUNCTION__,#a,"",g,a)
#define GPU_REG_RD16(g,a) gpuRegRd16_dumpinfo(__FUNCTION__,#a,"",g,a)
#define GPU_REG_RD32(g,a) gpuRegRd32_dumpinfo(__FUNCTION__,#a,"",g,a)
#define GPU_REG_WR08(g,a,v) gpuRegWr08_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_REG_WR16(g,a,v) gpuRegWr16_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_REG_WR32(g,a,v) gpuRegWr32_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_REG_WR32_UC(g,a,v) gpuRegWr32Uc_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_VREG_RD32(g, a) gpuRegRd32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a)
#define GPU_VREG_WR32(g, a, v) gpuRegWr32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a, v)
#endif // GPU_REGISTER_ACCESS_DUMP
//
// Macros for register I/O
//
#define GPU_FLD_WR_DRF_NUM(g,d,r,f,n) REG_INST_WR32(g,GPU,0,NV##d##r,(REG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
#define GPU_FLD_WR_DRF_NUM_UC(g,d,r,f,n) GPU_REG_WR32_UC(g, NV##d##r,(GPU_REG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
#define GPU_FLD_WR_DRF_DEF(g,d,r,f,c) GPU_REG_WR32(g, NV##d##r,(GPU_REG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_REG_RD_DRF(g,d,r,f) (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define GPU_FLD_TEST_DRF_DEF(g,d,r,f,c) (GPU_REG_RD_DRF(g, d, r, f) == NV##d##r##f##c)
#define GPU_FLD_TEST_DRF_NUM(g,d,r,f,n) (GPU_REG_RD_DRF(g, d, r, f) == n)
#define GPU_FLD_IDX_TEST_DRF_DEF(g,d,r,f,c,i) (GPU_REG_IDX_RD_DRF(g, d, r, i, f) == NV##d##r##f##c)
#define GPU_FLD_2IDX_TEST_DRF_DEF(g,d,r,f,c,i,j) (GPU_REG_2IDX_RD_DRF(g, d, r, i, j, f) == NV##d##r##f##c)
#define GPU_REG_RD_DRF_EX(g,d,r,f,t) (((GPU_REG_RD32_EX(g, NV ## d ## r, t))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define GPU_FLD_WR_DRF_NUM_EX(g,d,r,f,n,t) REG_INST_WR32_EX(g,GPU,0,NV##d##r,(REG_INST_RD32_EX(g,GPU,0,NV##d##r,t)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n),t)
// Read/write a field or entire register of which there are several copies each accessed via an index
#define GPU_REG_IDX_WR_DRF_NUM(g,d,r,i,f,n) GPU_REG_WR32(g, NV ## d ## r(i), GPU_DRF_NUM(d,r,f,n))
#define GPU_REG_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_REG_WR32(g, NV ## d ## r(i), GPU_DRF_DEF(d,r,f,c))
#define GPU_FLD_IDX_WR_DRF_NUM(g,d,r,i,f,n) GPU_REG_WR32(g, NV##d##r(i),(GPU_REG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
#define GPU_FLD_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_REG_WR32(g, NV##d##r(i),(GPU_REG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_REG_IDX_WR_DRF_NUM_UC(g,d,r,i,f,n) GPU_REG_WR32_UC(g, NV ## d ## r(i), GPU_DRF_NUM(d,r,f,n))
#define GPU_REG_IDX_WR_DRF_DEF_UC(g,d,r,i,f,c) GPU_REG_WR32_UC(g, NV ## d ## r(i), GPU_DRF_DEF(d,r,f,c))
#define GPU_FLD_IDX_WR_DRF_DEF_UC(g,d,r,i,f,c) GPU_REG_WR32_UC(g, NV##d##r(i),(GPU_REG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_REG_IDX_RD_DRF(g,d,r,i,f) (((GPU_REG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define GPU_REG_2IDX_RD_DRF(g,d,r,i,j,f) (((GPU_REG_RD32(g, NV ## d ## r(i, j)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define GPU_REG_RD_DRF_IDX(g,d,r,f,i) (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f(i)))&GPU_DRF_MASK(NV ## d ## r ## f(i)))
#define GPU_REG_IDX_OFFSET_RD_DRF(g,d,r,i,o,f) (((GPU_REG_RD32(g, NV ## d ## r(i,o)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
//
// Macros that abstract the use of bif object to access GPU bus config registers
// This is the preferred set >= NV50
//
#define GPU_BUS_CFG_RD32(g,r,d) gpuReadBusConfigReg_HAL(g, r, d)
#define GPU_BUS_CFG_WR32(g,r,d) gpuWriteBusConfigReg_HAL(g, r, d)
#define GPU_BUS_CFG_FLD_WR_DRF_DEF(g,x,d,r,f,c) GPU_BUS_CFG_WR32(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_BUS_CFG_FLD_WR_DRF_NUM(g,x,d,r,f,n) GPU_BUS_CFG_WR32(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
#define GPU_BUS_CFG_RD32_EX(g,r,d,t) gpuReadBusConfigRegEx_HAL(g, r, d, t)
//
// Macros that provide access to the config space of functions other than the gpu
//
#define PCI_FUNCTION_BUS_CFG_RD32(g,f,r,d) gpuReadFunctionConfigReg_HAL(g, f, r, d)
#define PCI_FUNCTION_BUS_CFG_WR32(g,f,r,d) gpuWriteFunctionConfigReg_HAL(g, f, r, d)
#define PCI_FUNCTION_BUS_CFG_FLD_WR_DRF_NUM(g,fn,x,d,r,f,n) gpuWriteFunctionConfigReg_HAL(g, fn, NV##d##r, (x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
#define PCI_FUNCTION_BUS_CFG_FLD_WR_DRF_DEF(g,fn,x,d,r,f,c) gpuWriteFunctionConfigReg_HAL(g, fn, NV##d##r, (x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define PCI_FUNCTION_BUS_CFG_WR32_EX(g,f,r,d,t) gpuWriteFunctionConfigRegEx_HAL(g, f, r, d, t)
#define GPU_BUS_CFG_CYCLE_RD32(g,r,d) gpuReadBusConfigCycle(g, r, d)
#define GPU_BUS_CFG_CYCLE_WR32(g,r,d) gpuWriteBusConfigCycle(g, r, d)
#define GPU_BUS_CFG_CYCLE_FLD_WR_DRF_DEF(g,x,d,r,f,c) gpuWriteBusConfigCycle(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
//
// Instance memory structure access definitions.
//
// DRF macros (nvmisc.h) should be used when possible instead of these
// definitions.
//
// Key difference is SF variants take structure ## field (2-level nested
// namespace), DRF take device ## register ## field (3-level nested
// namespace).
//
// SF variants are primarily used for GPU host memory structures. DRF
// should be used for manipulation of most registers
//
#define SF_INDEX(sf) ((0?sf)/32)
#define SF_OFFSET(sf) (((0?sf)/32)<<2)
#define SF_SHIFT(sf) ((0?sf)&31)
#undef SF_MASK
#define SF_MASK(sf) (0xFFFFFFFF>>(31-(1?sf)+(0?sf)))
#define SF_SHIFTMASK(sf) (SF_MASK(sf) << SF_SHIFT(sf))
#define SF_DEF(s,f,c) ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f))
#define SF_IDX_DEF(s,f,c,i) ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f(i)))
#define SF_NUM(s,f,n) (((n)&SF_MASK(NV ## s ## f))<<SF_SHIFT(NV ## s ## f))
#define SF_IDX_NUM(s,f,n,i) (((n)&SF_MASK(NV ## s ## f(i)))<<SF_SHIFT(NV ## s ## f(i)))
#define SF_VAL(s,f,v) (((v)>>SF_SHIFT(NV ## s ## f))&SF_MASK(NV ## s ## f))
#define SF_WIDTH(sf) ((1?sf) - (0?sf) + 1)
// This macro parses multi-word/array defines
#define SF_ARR32_VAL(s,f,arr) \
(((arr)[SF_INDEX(NV ## s ## f)] >> SF_SHIFT(NV ## s ## f)) & SF_MASK(NV ## s ## f))
#define FLD_SF_DEF(s,f,d,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_DEF(s,f,d)
#define FLD_SF_NUM(s,f,n,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_NUM(s,f,n)
#define FLD_SF_IDX_DEF(s,f,c,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_DEF(s,f,c,i))
#define FLD_SF_IDX_NUM(s,f,n,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_NUM(s,f,n,i))
#endif // _GPU_ACCESS_H_

View File

@@ -71,8 +71,9 @@ typedef struct DOD_METHOD_DATA
typedef struct JT_METHOD_DATA
{
NV_STATUS status;
NvU16 jtRevId;
NvU32 jtCaps;
NvU16 jtRevId;
NvBool bSBIOSCaps;
} JT_METHOD_DATA;
typedef struct MUX_METHOD_DATA_ELEMENT

View File

@@ -53,252 +53,252 @@
#define GPU_CHILD_MODULE(_rmcfgModule) RMCFG_MODULE_ENABLED(_rmcfgModule)
#endif
/* Class Name Accessor Name Max Instances bConstructEarly bAlwaysCreate OBJGPU Field */
/* Class Name Accessor Name Max Instances bConstructEarly OBJGPU Field */
#if GPU_CHILD_MODULE(FUSE)
GPU_CHILD_SINGLE_INST( OBJFUSE, GPU_GET_FUSE, 1, NV_TRUE, NV_TRUE, pFuse )
GPU_CHILD_SINGLE_INST( OBJFUSE, GPU_GET_FUSE, 1, NV_TRUE, pFuse )
#endif
#if GPU_CHILD_MODULE(BIF)
GPU_CHILD_SINGLE_INST( OBJBIF, GPU_GET_BIF, 1, NV_TRUE, NV_FALSE, pBif )
GPU_CHILD_SINGLE_INST( OBJBIF, GPU_GET_BIF, 1, NV_TRUE, pBif )
#endif
#if GPU_CHILD_MODULE(KERNEL_BIF)
GPU_CHILD_SINGLE_INST( KernelBif, GPU_GET_KERNEL_BIF, 1, NV_TRUE, NV_FALSE, pKernelBif )
GPU_CHILD_SINGLE_INST( KernelBif, GPU_GET_KERNEL_BIF, 1, NV_TRUE, pKernelBif )
#endif
#if GPU_CHILD_MODULE(NNE)
GPU_CHILD_SINGLE_INST( OBJNNE, GPU_GET_NNE, 1, NV_TRUE, NV_FALSE, pNne )
GPU_CHILD_SINGLE_INST( Nne, GPU_GET_NNE, 1, NV_TRUE, pNne )
#endif
#if GPU_CHILD_MODULE(MC)
GPU_CHILD_SINGLE_INST( OBJMC, GPU_GET_MC, 1, NV_FALSE, NV_FALSE, pMc )
GPU_CHILD_SINGLE_INST( OBJMC, GPU_GET_MC, 1, NV_FALSE, pMc )
#endif
#if GPU_CHILD_MODULE(KERNEL_MC)
GPU_CHILD_SINGLE_INST( KernelMc, GPU_GET_KERNEL_MC, 1, NV_FALSE, NV_FALSE, pKernelMc )
GPU_CHILD_SINGLE_INST( KernelMc, GPU_GET_KERNEL_MC, 1, NV_FALSE, pKernelMc )
#endif
#if GPU_CHILD_MODULE(PRIV_RING)
GPU_CHILD_SINGLE_INST( PrivRing, GPU_GET_PRIV_RING, 1, NV_FALSE, NV_FALSE, pPrivRing )
GPU_CHILD_SINGLE_INST( PrivRing, GPU_GET_PRIV_RING, 1, NV_FALSE, pPrivRing )
#endif
#if GPU_CHILD_MODULE(INTR)
GPU_CHILD_SINGLE_INST( SwIntr, GPU_GET_SW_INTR, 1, NV_FALSE, NV_FALSE, pSwIntr )
GPU_CHILD_SINGLE_INST( SwIntr, GPU_GET_SW_INTR, 1, NV_FALSE, pSwIntr )
#endif
#if GPU_CHILD_MODULE(MEMORY_SYSTEM)
GPU_CHILD_SINGLE_INST( MemorySystem, GPU_GET_MEMORY_SYSTEM, 1, NV_FALSE, NV_FALSE, pMemorySystem )
GPU_CHILD_SINGLE_INST( MemorySystem, GPU_GET_MEMORY_SYSTEM, 1, NV_FALSE, pMemorySystem )
#endif
#if GPU_CHILD_MODULE(KERNEL_MEMORY_SYSTEM)
GPU_CHILD_SINGLE_INST( KernelMemorySystem, GPU_GET_KERNEL_MEMORY_SYSTEM, 1, NV_FALSE, NV_FALSE, pKernelMemorySystem )
GPU_CHILD_SINGLE_INST( KernelMemorySystem, GPU_GET_KERNEL_MEMORY_SYSTEM, 1, NV_FALSE, pKernelMemorySystem )
#endif
#if GPU_CHILD_MODULE(MEMORY_MANAGER)
GPU_CHILD_SINGLE_INST( MemoryManager, GPU_GET_MEMORY_MANAGER, 1, NV_FALSE, NV_FALSE, pMemoryManager )
GPU_CHILD_SINGLE_INST( MemoryManager, GPU_GET_MEMORY_MANAGER, 1, NV_FALSE, pMemoryManager )
#endif
#if GPU_CHILD_MODULE(FBFLCN)
GPU_CHILD_SINGLE_INST( OBJFBFLCN, GPU_GET_FBFLCN, 1, NV_FALSE, NV_FALSE, pFbflcn )
GPU_CHILD_SINGLE_INST( OBJFBFLCN, GPU_GET_FBFLCN, 1, NV_FALSE, pFbflcn )
#endif
#if GPU_CHILD_MODULE(HSHUBMANAGER)
GPU_CHILD_SINGLE_INST( OBJHSHUBMANAGER, GPU_GET_HSHUBMANAGER, 1, NV_FALSE, NV_FALSE, pHshMgr )
GPU_CHILD_SINGLE_INST( OBJHSHUBMANAGER, GPU_GET_HSHUBMANAGER, 1, NV_FALSE, pHshMgr )
#endif
#if GPU_CHILD_MODULE(HSHUB)
GPU_CHILD_MULTI_INST ( Hshub, GPU_GET_HSHUB, GPU_MAX_HSHUBS, NV_FALSE, NV_FALSE, pHshub )
GPU_CHILD_MULTI_INST ( Hshub, GPU_GET_HSHUB, GPU_MAX_HSHUBS, NV_FALSE, pHshub )
#endif
#if GPU_CHILD_MODULE(SEQ)
GPU_CHILD_SINGLE_INST( OBJSEQ, GPU_GET_SEQ, 1, NV_FALSE, NV_TRUE, pSeq )
GPU_CHILD_SINGLE_INST( OBJSEQ, GPU_GET_SEQ, 1, NV_FALSE, pSeq )
#endif
#if GPU_CHILD_MODULE(GpuMutexMgr)
GPU_CHILD_SINGLE_INST( GpuMutexMgr, GPU_GET_MUTEX_MGR, 1, NV_FALSE, NV_TRUE, pMutexMgr )
GPU_CHILD_SINGLE_INST( GpuMutexMgr, GPU_GET_MUTEX_MGR, 1, NV_FALSE, pMutexMgr )
#endif
#if GPU_CHILD_MODULE(KERNEL_DISPLAY)
GPU_CHILD_SINGLE_INST( KernelDisplay, GPU_GET_KERNEL_DISPLAY, 1, NV_FALSE, NV_FALSE, pKernelDisplay )
GPU_CHILD_SINGLE_INST( KernelDisplay, GPU_GET_KERNEL_DISPLAY, 1, NV_FALSE, pKernelDisplay )
#endif
#if GPU_CHILD_MODULE(DISP)
GPU_CHILD_SINGLE_INST( OBJDISP, GPU_GET_DISP, 1, NV_FALSE, NV_FALSE, pDisp )
GPU_CHILD_SINGLE_INST( OBJDISP, GPU_GET_DISP, 1, NV_FALSE, pDisp )
#endif
#if GPU_CHILD_MODULE(TMR)
GPU_CHILD_SINGLE_INST( OBJTMR, GPU_GET_TIMER, 1, NV_TRUE, NV_TRUE, pTmr )
GPU_CHILD_SINGLE_INST( OBJTMR, GPU_GET_TIMER, 1, NV_TRUE, pTmr )
#endif
#if GPU_CHILD_MODULE(BUS)
GPU_CHILD_SINGLE_INST( OBJBUS, GPU_GET_BUS, 1, NV_FALSE, NV_FALSE, pBus )
GPU_CHILD_SINGLE_INST( OBJBUS, GPU_GET_BUS, 1, NV_FALSE, pBus )
#endif
#if GPU_CHILD_MODULE(KERNEL_BUS)
GPU_CHILD_SINGLE_INST( KernelBus, GPU_GET_KERNEL_BUS, 1, NV_FALSE, NV_FALSE, pKernelBus )
GPU_CHILD_SINGLE_INST( KernelBus, GPU_GET_KERNEL_BUS, 1, NV_FALSE, pKernelBus )
#endif
#if GPU_CHILD_MODULE(GMMU)
GPU_CHILD_SINGLE_INST( OBJGMMU, GPU_GET_GMMU, 1, NV_FALSE, NV_FALSE, pGmmu )
GPU_CHILD_SINGLE_INST( OBJGMMU, GPU_GET_GMMU, 1, NV_FALSE, pGmmu )
#endif
#if GPU_CHILD_MODULE(KERNEL_GMMU)
GPU_CHILD_SINGLE_INST( KernelGmmu, GPU_GET_KERNEL_GMMU, 1, NV_FALSE, NV_FALSE, pKernelGmmu )
GPU_CHILD_SINGLE_INST( KernelGmmu, GPU_GET_KERNEL_GMMU, 1, NV_FALSE, pKernelGmmu )
#endif
#if GPU_CHILD_MODULE(KERNEL_SEC2)
GPU_CHILD_SINGLE_INST( KernelSec2, GPU_GET_KERNEL_SEC2, 1, NV_FALSE, NV_FALSE, pKernelSec2 )
GPU_CHILD_SINGLE_INST( KernelSec2, GPU_GET_KERNEL_SEC2, 1, NV_FALSE, pKernelSec2 )
#endif
#if GPU_CHILD_MODULE(KERNEL_GSP)
GPU_CHILD_SINGLE_INST( KernelGsp, GPU_GET_KERNEL_GSP, 1, NV_FALSE, NV_FALSE, pKernelGsp )
GPU_CHILD_SINGLE_INST( KernelGsp, GPU_GET_KERNEL_GSP, 1, NV_FALSE, pKernelGsp )
#endif
#if GPU_CHILD_MODULE(DCECLIENTRM)
GPU_CHILD_SINGLE_INST( OBJDCECLIENTRM, GPU_GET_DCECLIENTRM, 1, NV_FALSE, NV_FALSE, pDceclientrm )
GPU_CHILD_SINGLE_INST( OBJDCECLIENTRM, GPU_GET_DCECLIENTRM, 1, NV_FALSE, pDceclientrm )
#endif
#if GPU_CHILD_MODULE(VIRT_MEM_ALLOCATOR)
GPU_CHILD_SINGLE_INST( VirtMemAllocator, GPU_GET_DMA, 1, NV_FALSE, NV_FALSE, pDma )
GPU_CHILD_SINGLE_INST( VirtMemAllocator, GPU_GET_DMA, 1, NV_FALSE, pDma )
#endif
#if GPU_CHILD_MODULE(GRMGR)
GPU_CHILD_SINGLE_INST( GraphicsManager, GPU_GET_GRMGR, 1, NV_FALSE, NV_TRUE, pGrMgr )
GPU_CHILD_SINGLE_INST( GraphicsManager, GPU_GET_GRMGR, 1, NV_FALSE, pGrMgr )
#endif
#if GPU_CHILD_MODULE(MIG_MANAGER)
GPU_CHILD_SINGLE_INST( MIGManager, GPU_GET_MIG_MANAGER, 1, NV_FALSE, NV_TRUE, pMIGManager )
GPU_CHILD_SINGLE_INST( MIGManager, GPU_GET_MIG_MANAGER, 1, NV_FALSE, pMIGManager )
#endif
#if GPU_CHILD_MODULE(KERNEL_MIG_MANAGER)
GPU_CHILD_SINGLE_INST( KernelMIGManager, GPU_GET_KERNEL_MIG_MANAGER, 1, NV_FALSE, NV_TRUE, pKernelMIGManager )
GPU_CHILD_SINGLE_INST( KernelMIGManager, GPU_GET_KERNEL_MIG_MANAGER, 1, NV_FALSE, pKernelMIGManager )
#endif
#if GPU_CHILD_MODULE(KERNEL_GRAPHICS_MANAGER)
GPU_CHILD_SINGLE_INST( KernelGraphicsManager, GPU_GET_KERNEL_GRAPHICS_MANAGER, 1, NV_FALSE, NV_TRUE, pKernelGraphicsManager )
GPU_CHILD_SINGLE_INST( KernelGraphicsManager, GPU_GET_KERNEL_GRAPHICS_MANAGER, 1, NV_FALSE, pKernelGraphicsManager )
#endif
#if GPU_CHILD_MODULE(GR)
GPU_CHILD_MULTI_INST ( Graphics, GPU_GET_GR_UNSAFE, GPU_MAX_GRS, NV_FALSE, NV_FALSE, pGr )
GPU_CHILD_MULTI_INST ( Graphics, GPU_GET_GR_UNSAFE, GPU_MAX_GRS, NV_FALSE, pGr )
#endif
#if GPU_CHILD_MODULE(KERNEL_GRAPHICS)
GPU_CHILD_MULTI_INST ( KernelGraphics, GPU_GET_KERNEL_GRAPHICS, GPU_MAX_GRS, NV_FALSE, NV_FALSE, pKernelGraphics )
GPU_CHILD_MULTI_INST ( KernelGraphics, GPU_GET_KERNEL_GRAPHICS, GPU_MAX_GRS, NV_FALSE, pKernelGraphics )
#endif
#if GPU_CHILD_MODULE(ClockManager)
GPU_CHILD_SINGLE_INST( ClockManager, GPU_GET_CLK_MGR, 1, NV_FALSE, NV_FALSE, pClk )
GPU_CHILD_SINGLE_INST( ClockManager, GPU_GET_CLK_MGR, 1, NV_FALSE, pClk )
#endif
#if GPU_CHILD_MODULE(FAN)
GPU_CHILD_SINGLE_INST( Fan, GPU_GET_FAN, 1, NV_FALSE, NV_FALSE, pFan )
GPU_CHILD_SINGLE_INST( Fan, GPU_GET_FAN, 1, NV_FALSE, pFan )
#endif
#if GPU_CHILD_MODULE(PERF)
GPU_CHILD_SINGLE_INST( Perf, GPU_GET_PERF, 1, NV_FALSE, NV_FALSE, pPerf )
GPU_CHILD_SINGLE_INST( Perf, GPU_GET_PERF, 1, NV_FALSE, pPerf )
#endif
#if GPU_CHILD_MODULE(KERNEL_PERF)
GPU_CHILD_SINGLE_INST( KernelPerf, GPU_GET_KERNEL_PERF, 1, NV_FALSE, NV_FALSE, pKernelPerf )
GPU_CHILD_SINGLE_INST( KernelPerf, GPU_GET_KERNEL_PERF, 1, NV_FALSE, pKernelPerf )
#endif
#if GPU_CHILD_MODULE(THERM)
GPU_CHILD_SINGLE_INST( Therm, GPU_GET_THERM, 1, NV_FALSE, NV_FALSE, pTherm )
GPU_CHILD_SINGLE_INST( Therm, GPU_GET_THERM, 1, NV_FALSE, pTherm )
#endif
#if GPU_CHILD_MODULE(BSP)
GPU_CHILD_MULTI_INST ( OBJBSP, GPU_GET_BSP, GPU_MAX_NVDECS, NV_FALSE, NV_FALSE, pBsp )
GPU_CHILD_MULTI_INST ( OBJBSP, GPU_GET_BSP, GPU_MAX_NVDECS, NV_FALSE, pBsp )
#endif
#if GPU_CHILD_MODULE(CIPHER)
GPU_CHILD_SINGLE_INST( OBJCIPHER, GPU_GET_CIPHER, 1, NV_FALSE, NV_FALSE, pCipher )
GPU_CHILD_SINGLE_INST( OBJCIPHER, GPU_GET_CIPHER, 1, NV_FALSE, pCipher )
#endif
//
// GPU child (engine) list entries. Each entry registers one engine object
// on OBJGPU via the GPU_CHILD_SINGLE_INST / GPU_CHILD_MULTI_INST macros
// defined by the including file: (class, accessor macro, instance count,
// flag, OBJGPU field name).
//
// NOTE(review): this region previously contained each entry twice — a
// stale 6-argument form alongside the current 5-argument form (diff
// flattening artifact). Only the 5-argument form, matching the macro
// arity used consistently by the newest entries, is kept; at most one
// invocation per engine can match the active macro signature.
//
#if GPU_CHILD_MODULE(VBIOS)
GPU_CHILD_SINGLE_INST( OBJVBIOS, GPU_GET_VBIOS, 1, NV_FALSE, pVbios )
#endif
#if GPU_CHILD_MODULE(DCB)
GPU_CHILD_SINGLE_INST( OBJDCB, GPU_GET_DCB, 1, NV_FALSE, pDcb )
#endif
#if GPU_CHILD_MODULE(GPIO)
GPU_CHILD_SINGLE_INST( OBJGPIO, GPU_GET_GPIO, 1, NV_FALSE, pGpio )
#endif
#if GPU_CHILD_MODULE(VOLT)
GPU_CHILD_SINGLE_INST( OBJVOLT, GPU_GET_VOLT, 1, NV_FALSE, pVolt )
#endif
#if GPU_CHILD_MODULE(I2C)
GPU_CHILD_SINGLE_INST( I2c, GPU_GET_I2C, 1, NV_FALSE, pI2c )
#endif
#if GPU_CHILD_MODULE(SPI)
GPU_CHILD_SINGLE_INST( Spi, GPU_GET_SPI, 1, NV_FALSE, pSpi )
#endif
#if GPU_CHILD_MODULE(KERNEL_RC)
GPU_CHILD_SINGLE_INST( KernelRc, GPU_GET_KERNEL_RC, 1, NV_FALSE, pKernelRc )
#endif
#if GPU_CHILD_MODULE(RC)
GPU_CHILD_SINGLE_INST( OBJRC, GPU_GET_RC, 1, NV_FALSE, pRC )
#endif
#if GPU_CHILD_MODULE(STEREO)
GPU_CHILD_SINGLE_INST( OBJSTEREO, GPU_GET_STEREO, 1, NV_FALSE, pStereo )
#endif
#if GPU_CHILD_MODULE(INTR)
GPU_CHILD_SINGLE_INST( Intr, GPU_GET_INTR, 1, NV_FALSE, pIntr )
#endif
#if GPU_CHILD_MODULE(DPAUX)
GPU_CHILD_SINGLE_INST( OBJDPAUX, GPU_GET_DPAUX, 1, NV_FALSE, pDpAux )
#endif
#if GPU_CHILD_MODULE(PMU)
GPU_CHILD_SINGLE_INST( Pmu, GPU_GET_PMU, 1, NV_FALSE, pPmu )
#endif
#if GPU_CHILD_MODULE(KERNEL_PMU)
GPU_CHILD_SINGLE_INST( KernelPmu, GPU_GET_KERNEL_PMU, 1, NV_FALSE, pKernelPmu )
#endif
#if GPU_CHILD_MODULE(CE)
GPU_CHILD_MULTI_INST ( OBJCE, GPU_GET_CE, GPU_MAX_CES, NV_FALSE, pCe )
#endif
#if GPU_CHILD_MODULE(KERNEL_CE)
GPU_CHILD_MULTI_INST ( KernelCE, GPU_GET_KCE, GPU_MAX_CES, NV_FALSE, pKCe )
#endif
#if GPU_CHILD_MODULE(MSENC)
GPU_CHILD_MULTI_INST ( OBJMSENC, GPU_GET_MSENC, GPU_MAX_MSENCS, NV_FALSE, pMsenc )
#endif
#if GPU_CHILD_MODULE(HDA)
GPU_CHILD_SINGLE_INST( OBJHDA, GPU_GET_HDA, 1, NV_FALSE, pHda )
#endif
#if GPU_CHILD_MODULE(HDACODEC)
GPU_CHILD_SINGLE_INST( OBJHDACODEC, GPU_GET_HDACODEC, 1, NV_FALSE, pHdacodec )
#endif
#if GPU_CHILD_MODULE(LPWR)
GPU_CHILD_SINGLE_INST( Lpwr, GPU_GET_LPWR, 1, NV_FALSE, pLpwr )
#endif
#if GPU_CHILD_MODULE(KERNEL_FIFO)
GPU_CHILD_SINGLE_INST( KernelFifo, GPU_GET_KERNEL_FIFO_UC, 1, NV_FALSE, pKernelFifo )
#endif
#if GPU_CHILD_MODULE(FIFO)
GPU_CHILD_SINGLE_INST( OBJFIFO, GPU_GET_FIFO_UC, 1, NV_FALSE, pFifo )
#endif
#if GPU_CHILD_MODULE(INFOROM)
GPU_CHILD_SINGLE_INST( OBJINFOROM, GPU_GET_INFOROM, 1, NV_FALSE, pInforom )
#endif
#if GPU_CHILD_MODULE(PMGR)
GPU_CHILD_SINGLE_INST( Pmgr, GPU_GET_PMGR, 1, NV_FALSE, pPmgr )
#endif
#if GPU_CHILD_MODULE(UVM)
GPU_CHILD_SINGLE_INST( OBJUVM, GPU_GET_UVM, 1, NV_FALSE, pUvm )
#endif
#if GPU_CHILD_MODULE(NV_DEBUG_DUMP)
GPU_CHILD_SINGLE_INST( NvDebugDump, GPU_GET_NVD, 1, NV_FALSE, pNvd )
#endif
#if GPU_CHILD_MODULE(GRDBG)
GPU_CHILD_SINGLE_INST( SMDebugger, GPU_GET_GRDBG, 1, NV_FALSE, pGrdbg )
#endif
#if GPU_CHILD_MODULE(SEC2)
GPU_CHILD_SINGLE_INST( OBJSEC2, GPU_GET_SEC2, 1, NV_FALSE, pSec2 )
#endif
#if GPU_CHILD_MODULE(LSFM)
GPU_CHILD_SINGLE_INST( OBJLSFM, GPU_GET_LSFM, 1, NV_FALSE, pLsfm )
#endif
#if GPU_CHILD_MODULE(ACR)
GPU_CHILD_SINGLE_INST( OBJACR, GPU_GET_ACR, 1, NV_FALSE, pAcr )
#endif
#if GPU_CHILD_MODULE(KERNEL_NVLINK)
GPU_CHILD_SINGLE_INST( KernelNvlink, GPU_GET_KERNEL_NVLINK, 1, NV_FALSE, pKernelNvlink )
#endif
#if GPU_CHILD_MODULE(NVLINK)
GPU_CHILD_SINGLE_INST( Nvlink, GPU_GET_NVLINK, 1, NV_FALSE, pNvLink )
#endif
#if GPU_CHILD_MODULE(GPULOG)
GPU_CHILD_SINGLE_INST( OBJGPULOG, GPU_GET_GPULOG, 1, NV_FALSE, pGpuLog )
#endif
#if GPU_CHILD_MODULE(GPUMON)
GPU_CHILD_SINGLE_INST( OBJGPUMON, GPU_GET_GPUMON, 1, NV_FALSE, pGpuMon )
#endif
#if GPU_CHILD_MODULE(HWPM)
GPU_CHILD_SINGLE_INST( OBJHWPM, GPU_GET_HWPM, 1, NV_FALSE, pHwpm )
#endif
#if GPU_CHILD_MODULE(GRIDDISPLAYLESS)
GPU_CHILD_SINGLE_INST( OBJGRIDDISPLAYLESS, GPU_GET_GRIDDISPLAYLESS, 1, NV_FALSE, pGridDisplayless )
#endif
#if GPU_CHILD_MODULE(SWENG)
GPU_CHILD_SINGLE_INST( OBJSWENG, GPU_GET_SWENG, 1, NV_FALSE, pSwEng )
#endif
#if GPU_CHILD_MODULE(VMMU)
GPU_CHILD_SINGLE_INST( OBJVMMU, GPU_GET_VMMU, 1, NV_FALSE, pVmmu )
#endif
#if GPU_CHILD_MODULE(NVJPG)
GPU_CHILD_MULTI_INST( OBJNVJPG, GPU_GET_NVJPG, GPU_MAX_NVJPGS, NV_FALSE, pNvjpg )
#endif
#if GPU_CHILD_MODULE(GSP)
GPU_CHILD_SINGLE_INST( Gsp, GPU_GET_GSP, 1, NV_FALSE, pGsp )
#endif
#if RMCFG_MODULE_KERNEL_FSP && GPU_CHILD_MODULE(KERNEL_FSP)
GPU_CHILD_SINGLE_INST( KernelFsp, GPU_GET_KERNEL_FSP, 1, NV_FALSE, pKernelFsp )
#endif
#if GPU_CHILD_MODULE(OFA)
GPU_CHILD_SINGLE_INST( OBJOFA, GPU_GET_OFA, 1, NV_FALSE, pOfa )
#endif
#if RMCFG_MODULE_KERNEL_CCU && GPU_CHILD_MODULE(KERNEL_CCU)
GPU_CHILD_SINGLE_INST( KernelCcu, GPU_GET_KERNEL_CCU, 1, NV_FALSE, pKernelCcu )
#endif
// Undefine the entry macros to simplify call sites

View File

@@ -41,7 +41,16 @@ typedef struct GpuSharedDataMap {
NV00DE_SHARED_DATA data;
} GpuSharedDataMap;
// Start data write, returns data struct to write into
// GSP statistics exposed to user clients through the shared data mapping.
typedef struct GspUserSharedData {
NvU32 gspAssertCount; // running count of GSP assertions (NOTE(review): presumably written GSP-side — confirm producer)
} GspUserSharedData;
/**
* Start data write, returns data struct to write into
*
* After updating data in the returned NV00DE_SHARED_DATA struct,
* call gpushareddataWriteFinish to push the new data into the user mapping
*/
NV00DE_SHARED_DATA * gpushareddataWriteStart(OBJGPU *pGpu);
// Finish data write, pushes data cached by above into mapped data
void gpushareddataWriteFinish(OBJGPU *pGpu);

View File

@@ -0,0 +1,32 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef GSP_FW_HEAP_H
#define GSP_FW_HEAP_H
// Static defines for the GSP FW WPR Heap.
// All sizes are in bytes; (N u << 20) encodes N MiB.
#define GSP_FW_HEAP_SIZE_LIBOS2 (64u << 20) // 64 MiB heap for LibOS2-based GSP firmware
#define GSP_FW_HEAP_SIZE_LIBOS3_BAREMETAL (84u << 20) // 84 MiB heap for LibOS3, bare-metal
#define GSP_FW_HEAP_SIZE_LIBOS3_VGPU (549u << 20) // 549 MiB heap for LibOS3, vGPU configurations
#endif // GSP_FW_HEAP_H

View File

@@ -35,6 +35,8 @@ typedef struct {
NvU32 pageTableEntryCount;
NvLength cmdQueueOffset;
NvLength statQueueOffset;
NvLength locklessCmdQueueOffset;
NvLength locklessStatQueueOffset;
} MESSAGE_QUEUE_INIT_ARGUMENTS;
typedef struct {

View File

@@ -98,6 +98,8 @@ typedef struct GspStaticConfigInfo_t
NvBool bIsTitan;
NvBool bIsTesla;
NvBool bIsMobile;
NvBool bIsGc6Rtd3Allowed;
NvBool bIsGcOffRtd3Allowed;
NvU64 bar1PdeBase;
NvU64 bar2PdeBase;

View File

@@ -27,12 +27,18 @@
#ifndef _MESSAGE_QUEUE_H_
#define _MESSAGE_QUEUE_H_
// Used for indexing into the MESSAGE_QUEUE_COLLECTION array.
#define RPC_TASK_RM_QUEUE_IDX 0
#define RPC_TASK_ISR_QUEUE_IDX 1
#define RPC_QUEUE_COUNT 2
// Forward declarations only; full definitions live elsewhere.
typedef struct _message_queue_info MESSAGE_QUEUE_INFO;
typedef struct MESSAGE_QUEUE_COLLECTION MESSAGE_QUEUE_COLLECTION;
// CPU-side calls
// Initialize a single message queue for the given GPU.
NV_STATUS GspMsgQueueInit(OBJGPU *pGpu, MESSAGE_QUEUE_INFO **ppMQI);
// Allocate and initialize the whole queue collection (all RPC_QUEUE_COUNT queues).
NV_STATUS GspMsgQueuesInit(OBJGPU *pGpu, MESSAGE_QUEUE_COLLECTION **ppMQCollection);
// Tear down the collection created by GspMsgQueuesInit; clears *ppMQCollection.
void GspMsgQueuesCleanup(MESSAGE_QUEUE_COLLECTION **ppMQCollection);
// Initialize the status (GSP->CPU) side of a queue pair.
NV_STATUS GspStatusQueueInit(OBJGPU *pGpu, MESSAGE_QUEUE_INFO **ppMQI);
// Tear down a single message queue; clears *ppMQI.
void GspMsgQueueCleanup(MESSAGE_QUEUE_INFO **ppMQI);
// Send the command currently staged in the queue's working element.
NV_STATUS GspMsgQueueSendCommand(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu);
// Poll/receive a status element from the queue.
NV_STATUS GspMsgQueueReceiveStatus(MESSAGE_QUEUE_INFO *pMQI);

View File

@@ -50,21 +50,14 @@ typedef struct GSP_MSG_QUEUE_ELEMENT
typedef struct _message_queue_info
{
// Parameters
NvLength pageTableEntryCount;
NvLength pageTableSize;
NvLength commandQueueSize;
NvLength statusQueueSize;
// Shared memory area.
MEMORY_DESCRIPTOR *pSharedMemDesc;
RmPhysAddr sharedMemPA; // Page table for all of shared mem.
void *pCommandQueue;
void *pStatusQueue;
rpc_message_header_v *pRpcMsgBuf; // RPC message buffer VA.
void *pInitMsgBuf; // RPC message buffer VA.
RmPhysAddr initMsgBufPA; // RPC message buffer PA.
// Other CPU-side fields
void *pWorkArea;
GSP_MSG_QUEUE_ELEMENT *pCmdQueueElement; // Working copy of command queue element.
@@ -72,8 +65,22 @@ typedef struct _message_queue_info
msgqHandle hQueue; // Do not allow requests when hQueue is null.
NvU32 txSeqNum; // Next sequence number for tx.
NvU32 rxSeqNum; // Next sequence number for rx.
NvU32 queueIdx; // QueueIndex used to identify which task the message is supposed to be sent to.
} MESSAGE_QUEUE_INFO;
//
// Per-GPU container for the GSP RPC message queues: one shared-memory
// region (described by a single page table) backing RPC_QUEUE_COUNT
// queues, indexed by RPC_TASK_RM_QUEUE_IDX / RPC_TASK_ISR_QUEUE_IDX.
//
typedef struct MESSAGE_QUEUE_COLLECTION
{
// Parameters
NvLength pageTableEntryCount; // number of entries in the shared-mem page table
NvLength pageTableSize; // size in bytes of the page table
// Shared memory area.
MEMORY_DESCRIPTOR *pSharedMemDesc;
RmPhysAddr sharedMemPA; // Page table for all of shared mem.
MESSAGE_QUEUE_INFO rpcQueues[RPC_QUEUE_COUNT]; // one queue pair per RPC task
} MESSAGE_QUEUE_COLLECTION;
//
// Most of the following defines resolve to compile-time constants.
//

View File

@@ -133,7 +133,10 @@
// Engine interrupt-index assignments. Values must stay dense and unique;
// MC_ENGINE_IDX_MAX tracks the highest index in use.
#define MC_ENGINE_IDX_GR6_FECS_LOG 160
#define MC_ENGINE_IDX_GR7_FECS_LOG 161
#define MC_ENGINE_IDX_TMR_SWRL 162
// Removed stale duplicate "#define MC_ENGINE_IDX_MAX 163": it conflicted
// with the definition below (object-macro redefinition with a different
// value) and 163 is now taken by DISP_GSP.
#define MC_ENGINE_IDX_DISP_GSP 163
#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
#define MC_ENGINE_IDX_MAX 166 // This must be kept as the max bit if
// we need to add more engines
#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF

View File

@@ -47,13 +47,14 @@ typedef struct EVENTNOTIFICATION EVENTNOTIFICATION;
// for address mapping; the system page size can be larger.
//
//---------------------------------------------------------------------------
// RM's minimum page granularity for address mapping (the system page size
// may be larger). Removed a duplicated copy of these defines left over
// from diff flattening; the values were identical, so the duplicates were
// benign redefinitions but pure noise.
#define RM_PAGE_SIZE_INVALID 0 // sentinel: "no/unknown page size"
#define RM_PAGE_SIZE 4096
#define RM_PAGE_SIZE_64K (64 * 1024)
#define RM_PAGE_SIZE_128K (128 * 1024)
#define RM_PAGE_MASK 0x0FFF // RM_PAGE_SIZE - 1
#define RM_PAGE_SHIFT 12 // log2(RM_PAGE_SIZE)
#define RM_PAGE_SHIFT_64K 16
#define RM_PAGE_SHIFT_128K 17
// Huge page size is 2 MB
#define RM_PAGE_SHIFT_HUGE 21

View File

@@ -0,0 +1,3 @@
#include "g_vidmem_access_bit_buffer_nvoc.h"