515.43.04

This commit is contained in:
Andy Ritger
2022-05-09 13:18:59 -07:00
commit 1739a20efc
2519 changed files with 1060036 additions and 0 deletions

View File

@@ -0,0 +1,68 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _BTREE_H_
#define _BTREE_H_
/*********************** Balanced Tree data structure **********************\
* *
* Module: BTREE.H *
* API to BTREE routines. *
* *
\***************************************************************************/
//
// RED BLACK TREE structure.
//
#include "nvtypes.h"
#include "nvstatus.h"
/*!
 * Red-black tree node covering the key range [keyStart, keyEnd].
 * The caller fills in the public fields before btreeInsert(); the private
 * fields are owned and maintained by the btree routines.
 */
typedef struct NODE
{
    // public:
    void  *Data;        // user payload associated with this key range
    NvU64  keyStart;    // first key covered by this node
    NvU64  keyEnd;      // last key covered by this node (presumably inclusive -- confirm in btree.c)
    // private:
    NvBool isRed;       // !IsRed == IsBlack
    struct NODE *parent; // tree links
    struct NODE *left;
    struct NODE *right;
} NODE, *PNODE;
//---------------------------------------------------------------------------
//
// Function prototypes.
//
//---------------------------------------------------------------------------
// NOTE(review): parameter meanings below are inferred from the signatures and
// naming conventions; confirm exact semantics against btree.c.
// Insert a caller-initialized node into the tree rooted at *ppRoot.
NV_STATUS btreeInsert(PNODE, PNODE *);
// Remove a node from the tree rooted at *ppRoot.
NV_STATUS btreeUnlink(PNODE, PNODE *);
// Look up the node whose key range contains the given key; result in *ppNode.
NV_STATUS btreeSearch(NvU64, PNODE *, PNODE);
// Begin an in-order enumeration at the given key; first node in *ppNode.
NV_STATUS btreeEnumStart(NvU64, PNODE *, PNODE);
// Advance an in-order enumeration; successor returned in *ppNode.
NV_STATUS btreeEnumNext(PNODE *, PNODE);
// Destroy the Data payloads reachable from the given root.
NV_STATUS btreeDestroyData(PNODE);
// Destroy the nodes of the tree itself.
NV_STATUS btreeDestroyNodes(PNODE);
#endif // _BTREE_H_

View File

@@ -0,0 +1,116 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _EHEAP_H_
#define _EHEAP_H_
/*!
* @brief
* EHEAP is an extent allocator. It is just an abstract E(xtent)Heap.
*/
#include "nvtypes.h"
#include "nvos.h"
#include "containers/btree.h"
#include "utils/nvrange.h"
typedef struct OBJEHEAP *POBJEHEAP;
typedef struct OBJEHEAP OBJEHEAP;
typedef struct EMEMBLOCK *PEMEMBLOCK;
/*!
 * A single extent (block) tracked by the EHEAP allocator.
 * Blocks are linked into an address-ordered list (prev/next) and, when free,
 * into the free list (prevFree/nextFree); each block is also indexed by an
 * embedded btree NODE (see OBJEHEAP::pBlockTree).
 */
typedef struct EMEMBLOCK
{
    NvU64      begin;     // start offset of the extent
    NvU64      end;       // end offset of the extent (presumably inclusive -- confirm in eheap.c)
    NvU64      align;     // alignment applied to this allocation
    NvU32      growFlags;
    NvU32      refCount;  // outstanding references to this block
    NvU32      owner;     // owner id; free/ownership semantics defined in eheap.c
    NODE       node;      // btree node indexing this block
    PEMEMBLOCK prevFree;  // free-list links (see OBJEHEAP::pFreeBlockList)
    PEMEMBLOCK nextFree;
    PEMEMBLOCK prev;      // address-ordered list links (see OBJEHEAP::pBlockList)
    PEMEMBLOCK next;
    void      *pData;     // user data associated with this block
} EMEMBLOCK;
// Callback used to compare ownership of blocks (function type, not pointer).
typedef NvBool EHeapOwnershipComparator(void*, void*);
// Method pointer types for the OBJEHEAP interface below.
// NOTE(review): the EHeapAlloc argument list (flags, owner, size in/out,
// offset in/out, alignment, etc.) is inferred; confirm against eheap.c.
typedef NV_STATUS (*EHeapDestruct)(POBJEHEAP);
typedef NV_STATUS (*EHeapAlloc)(POBJEHEAP, NvU32, NvU32 *, NvU64 *, NvU64 *, NvU64 , NvU64, PEMEMBLOCK*, void*, EHeapOwnershipComparator*);
typedef NV_STATUS (*EHeapFree)(POBJEHEAP, NvU64);
typedef void (*EHeapInfo)(POBJEHEAP, NvU64 *, NvU64 *,NvU64 *, NvU64 *, NvU32 *, NvU64 *);
typedef void (*EHeapInfoForRange)(POBJEHEAP, NV_RANGE, NvU64 *, NvU64 *, NvU32 *, NvU64 *);
typedef NV_STATUS (*EHeapGetSize)(POBJEHEAP, NvU64 *);
typedef NV_STATUS (*EHeapGetFree)(POBJEHEAP, NvU64 *);
typedef NV_STATUS (*EHeapGetBase)(POBJEHEAP, NvU64 *);
typedef PEMEMBLOCK (*EHeapGetBlock)(POBJEHEAP, NvU64, NvBool bReturnFreeBlock);
typedef NV_STATUS (*EHeapSetAllocRange)(POBJEHEAP, NvU64 rangeLo, NvU64 rangeHi);
// Traversal callback; pContinue/pInvalCursor let the callback stop the walk
// or signal that it invalidated the cursor (e.g. by freeing blocks).
typedef NV_STATUS (*EHeapTraversalFn)(POBJEHEAP, void *pEnv, PEMEMBLOCK, NvU32 *pContinue, NvU32 *pInvalCursor);
typedef NV_STATUS (*EHeapTraverse)(POBJEHEAP, void *pEnv, EHeapTraversalFn, NvS32 direction);
typedef NvU32 (*EHeapGetNumBlocks)(POBJEHEAP);
typedef NV_STATUS (*EHeapGetBlockInfo)(POBJEHEAP, NvU32, NVOS32_HEAP_DUMP_BLOCK *);
typedef NV_STATUS (*EHeapSetOwnerIsolation)(POBJEHEAP, NvBool bEnable, NvU32 granularity);
/*!
 * Extent heap object. Initialized by constructObjEHeap(); all operations are
 * invoked through the method pointers below.
 */
struct OBJEHEAP
{
    // Public heap interface methods
    EHeapDestruct          eheapDestruct;
    EHeapAlloc             eheapAlloc;
    EHeapFree              eheapFree;
    EHeapInfo              eheapInfo;
    EHeapInfoForRange      eheapInfoForRange;
    EHeapGetSize           eheapGetSize;
    EHeapGetFree           eheapGetFree;
    EHeapGetBase           eheapGetBase;
    EHeapGetBlock          eheapGetBlock;
    EHeapSetAllocRange     eheapSetAllocRange;
    EHeapTraverse          eheapTraverse;
    EHeapGetNumBlocks      eheapGetNumBlocks;
    EHeapGetBlockInfo      eheapGetBlockInfo;
    EHeapSetOwnerIsolation eheapSetOwnerIsolation;
    // private data
    NvU64      base;            // base offset of the managed range
    NvU64      total;           // total size of the managed range
    NvU64      free;            // amount currently free
    NvU64      rangeLo;         // current allocation range restriction
    NvU64      rangeHi;         //   (see eheapSetAllocRange)
    NvBool     bOwnerIsolation; // when set, isolate allocations per owner
    NvU32      ownerGranularity; //  at this granularity (see eheapSetOwnerIsolation)
    PEMEMBLOCK pBlockList;      // address-ordered list of all blocks
    PEMEMBLOCK pFreeBlockList;  // list of free blocks
    NvU32      memHandle;
    NvU32      numBlocks;       // number of blocks in pBlockList
    NvU32      sizeofMemBlock;  // size of each allocated EMEMBLOCK struct
    PNODE      pBlockTree;      // btree index over the blocks
    // user can specify num of EMEMBLOCK structs to
    // be allocated at heap construction time so that
    // we will not call portMemAllocNonPaged during eheapAlloc.
    NvU32      numPreAllocMemStruct;
    PEMEMBLOCK pFreeMemStructList; // free list of pre-allocated EMEMBLOCKs
    PEMEMBLOCK pPreAllocAddr;      // base of the pre-allocated EMEMBLOCK array
};
extern void constructObjEHeap(POBJEHEAP, NvU64, NvU64, NvU32, NvU32);
#endif // _EHEAP_H_

View File

@@ -0,0 +1,331 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CONTAINERS_LIST_H_
#define _NV_CONTAINERS_LIST_H_
// Contains mix of C/C++ declarations.
#include "containers/type_safety.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
#include "nvmisc.h"
#include "nvport/nvport.h"
/**
* @defgroup NV_CONTAINERS_LIST List
*
* @brief List (sequence) of user-defined values.
*
* @details Order of values is not necessarily increasing or sorted, but order is
* preserved across mutation. Please see
* http://en.wikipedia.org/wiki/Sequence for a formal definition.
*
* The provided interface is abstract, decoupling the user from the underlying
* list implementation. Two options are available with regard to memory
* management, intrusive and non-intrusive. Users can select either one based
* on different situations. Despite the two versions of the list, the following
* implementation constraints are guaranteed.
*
* - Time Complexity:
* * Operations are \b O(1),
* * Unless stated otherwise.
*
* - Memory Usage:
* * \b O(N) memory is required for N values.
* * Intrusive and non-intrusive variants are provided.
* See @ref mem-ownership for further details.
*
* - Synchronization:
* * \b None. The container is not thread-safe.
* * Locking must be handled by the user if required.
*/
/**
 * @brief Define a non-intrusive list type named @p listTypeName holding
 *        values of @p dataType, together with its iterator type.
 *
 * The union overlays the real NonIntrusiveList state with CONT_TAG_* dummy
 * members that encode the value type for the type-safe accessor macros; the
 * dummy members are never accessed at run time.
 */
#define MAKE_LIST(listTypeName, dataType)                              \
    typedef union listTypeName##Iter                                   \
    {                                                                  \
        dataType *pValue;                                              \
        ListIterBase iter;                                             \
    } listTypeName##Iter;                                              \
    typedef union listTypeName                                         \
    {                                                                  \
        NonIntrusiveList real;                                         \
        CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter);         \
        CONT_TAG_NON_INTRUSIVE(dataType);                              \
    } listTypeName

/**
 * @brief Forward-declare a list type defined elsewhere with MAKE_LIST.
 */
#define DECLARE_LIST(listTypeName)                                     \
    typedef union listTypeName##Iter listTypeName##Iter;               \
    typedef union listTypeName listTypeName
/**
 * @brief Define an intrusive list type named @p listTypeName for values of
 *        @p dataType that embed a ListNode member @p node, together with its
 *        iterator type.
 *
 * Fix: removed the stray trailing '\' after "} listTypeName". The backslash
 * spliced the following "#define DECLARE_INTRUSIVE_LIST" line into this
 * macro's replacement list, which is ill-formed (a '#' in a replacement list
 * must introduce stringization). The sibling MAKE_INTRUSIVE_MAP macro
 * correctly ends without a continuation.
 */
#define MAKE_INTRUSIVE_LIST(listTypeName, dataType, node)              \
    typedef union listTypeName##Iter                                   \
    {                                                                  \
        dataType *pValue;                                              \
        ListIterBase iter;                                             \
    } listTypeName##Iter;                                              \
    typedef union listTypeName                                         \
    {                                                                  \
        IntrusiveList real;                                            \
        CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter);         \
        CONT_TAG_INTRUSIVE(dataType, node);                            \
    } listTypeName
/**
 * @brief Forward-declare a list type defined elsewhere with MAKE_INTRUSIVE_LIST.
 */
#define DECLARE_INTRUSIVE_LIST(listTypeName)                           \
    typedef union listTypeName##Iter listTypeName##Iter;               \
    typedef union listTypeName listTypeName
/**
* @brief Internal node structure to embed within intrusive list values.
*/
typedef struct ListNode ListNode;
/**
* @brief Base type common to both intrusive and non-intrusive variants.
*/
typedef struct ListBase ListBase;
/**
* @brief Non-intrusive list (container-managed memory).
*/
typedef struct NonIntrusiveList NonIntrusiveList;
/**
* @brief Intrusive list (user-managed memory).
*/
typedef struct IntrusiveList IntrusiveList;
/**
* @brief Iterator over a range of list values.
*
* See @ref iterators for usage details.
*/
typedef struct ListIterBase ListIterBase;
/**
 * Doubly-linked list links stored with every value (embedded in the value for
 * intrusive lists; managed by the container for non-intrusive lists).
 */
struct ListNode
{
    /// @privatesection
    ListNode *pPrev;
    ListNode *pNext;
#if PORT_IS_CHECKED_BUILD
    ListBase *pList; // owning list, used for extra validation in checked builds
#endif
};
/**
 * Iterator state over a [first, last] range of list values.
 */
struct ListIterBase
{
    void     *pValue; // value currently exposed to the caller
    ListBase *pList;  // list being iterated
    ListNode *pNode;  // next node to visit -- confirm exact advance semantics in list.c
    ListNode *pLast;  // last node of the range (inclusive bound)
#if PORT_IS_CHECKED_BUILD
    NvU32 versionNumber; // presumably detects mutation during iteration -- confirm in list.c
#endif
};
ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast);
CONT_VTABLE_DECL(ListBase, ListIterBase);
/**
 * State shared by intrusive and non-intrusive lists.
 */
struct ListBase
{
    CONT_VTABLE_FIELD(ListBase);
    ListNode *pHead;      // first node; NULL when the list is empty
    ListNode *pTail;      // last node; NULL when the list is empty
    NvU32     count;      // number of values currently stored
    NvS32     nodeOffset; // byte offset from a value to its ListNode
                          // (see listValueToNode/listNodeToValue)
#if PORT_IS_CHECKED_BUILD
    NvU32 versionNumber;
#endif
};
/**
 * Non-intrusive list: value storage is allocated and freed by the container.
 */
struct NonIntrusiveList
{
    ListBase            base;
    PORT_MEM_ALLOCATOR *pAllocator; // allocator backing value storage
    NvU32               valueSize;  // size in bytes of one stored value
};

/**
 * Intrusive list: values (with embedded ListNode) are owned by the caller.
 */
struct IntrusiveList
{
    ListBase base;
};
// NOTE: sizeof(*(pList)->valueSize) and sizeof(*(pList)->nodeOffset) recover
// the element size / node offset encoded in the dummy pointer members that
// CONT_TAG_NON_INTRUSIVE / CONT_TAG_INTRUSIVE declare; the pointers are never
// dereferenced at run time.
#define listInit(pList, pAllocator) \
    listInit_IMPL(&((pList)->real), pAllocator, sizeof(*(pList)->valueSize))
#define listInitIntrusive(pList) \
    listInitIntrusive_IMPL(&((pList)->real), sizeof(*(pList)->nodeOffset))
#define listDestroy(pList) \
CONT_DISPATCH_ON_KIND(pList, \
listDestroy_IMPL((NonIntrusiveList*)&((pList)->real)), \
listDestroyIntrusive_IMPL(&((pList)->real.base)), \
contDispatchVoid_STUB())
#define listCount(pList) \
listCount_IMPL(&((pList)->real).base)
#define listInsertNew(pList, pNext) \
CONT_CAST_ELEM(pList, \
listInsertNew_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pNext)))
#define listAppendNew(pList) \
CONT_CAST_ELEM(pList, listAppendNew_IMPL(&(pList)->real))
#define listPrependNew(pList) \
CONT_CAST_ELEM(pList, listPrependNew_IMPL(&(pList)->real))
#define listInsertValue(pList, pNext, pValue) \
CONT_CAST_ELEM(pList, \
listInsertValue_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pNext), \
CONT_CHECK_ARG(pList, pValue)))
#define listAppendValue(pList, pValue) \
CONT_CAST_ELEM(pList, \
listAppendValue_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pValue)))
#define listPrependValue(pList, pValue) \
CONT_CAST_ELEM(pList, \
listPrependValue_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pValue)))
#define listInsertExisting(pList, pNext, pValue) \
listInsertExisting_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pNext), \
CONT_CHECK_ARG(pList, pValue))
#define listAppendExisting(pList, pValue) \
listAppendExisting_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pValue))
#define listPrependExisting(pList, pValue) \
listPrependExisting_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pValue))
#define listRemove(pList, pValue) \
CONT_DISPATCH_ON_KIND(pList, \
listRemove_IMPL((NonIntrusiveList*)&((pList)->real), \
CONT_CHECK_ARG(pList, pValue)), \
listRemoveIntrusive_IMPL(&((pList)->real).base, \
CONT_CHECK_ARG(pList, pValue)), \
contDispatchVoid_STUB())
#define listRemoveFirstByValue(pList, pValue) \
listRemoveFirstByValue_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pValue))
#define listRemoveAllByValue(pList, pValue) \
listRemoveAllByValue_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pValue))
#define listClear(pList) \
listDestroy(pList)
#define listFindByValue(pList, pValue) \
CONT_CAST_ELEM(pList, \
listFindByValue_IMPL(&(pList)->real, \
CONT_CHECK_ARG(pList, pValue)))
#define listHead(pList) \
CONT_CAST_ELEM(pList, listHead_IMPL(&((pList)->real).base))
#define listTail(pList) \
CONT_CAST_ELEM(pList, listTail_IMPL(&((pList)->real).base))
#define listNext(pList, pValue) \
CONT_CAST_ELEM(pList, \
listNext_IMPL(&((pList)->real).base, \
CONT_CHECK_ARG(pList, pValue)))
#define listPrev(pList, pValue) \
CONT_CAST_ELEM(pList, \
listPrev_IMPL(&((pList)->real).base, \
CONT_CHECK_ARG(pList, pValue)))
#define listIterAll(pList) \
listIterRange(pList, listHead(pList), listTail(pList))
#define listIterRange(pList, pFirst, pLast) \
CONT_ITER_RANGE(pList, &listIterRange_IMPL, \
CONT_CHECK_ARG(pList, pFirst), CONT_CHECK_ARG(pList, pLast))
#define listIterNext(pIt) \
listIterNext_IMPL(&((pIt)->iter))
void listInit_IMPL(NonIntrusiveList *pList, PORT_MEM_ALLOCATOR *pAllocator,
NvU32 valueSize);
void listInitIntrusive_IMPL(IntrusiveList *pList, NvS32 nodeOffset);
void listDestroy_IMPL(NonIntrusiveList *pList);
void listDestroyIntrusive_IMPL(ListBase *pList);
NvU32 listCount_IMPL(ListBase *pList);
void *listInsertNew_IMPL(NonIntrusiveList *pList, void *pNext);
void *listAppendNew_IMPL(NonIntrusiveList *pList);
void *listPrependNew_IMPL(NonIntrusiveList *pList);
void *listInsertValue_IMPL(NonIntrusiveList *pList, void *pNext, void *pValue);
void *listAppendValue_IMPL(NonIntrusiveList *pList, void *pValue);
void *listPrependValue_IMPL(NonIntrusiveList *pList, void *pValue);
void listInsertExisting_IMPL(IntrusiveList *pList, void *pNext, void *pValue);
void listAppendExisting_IMPL(IntrusiveList *pList, void *pValue);
void listPrependExisting_IMPL(IntrusiveList *pList, void *pValue);
void listRemove_IMPL(NonIntrusiveList *pList, void *pValue);
void listRemoveIntrusive_IMPL(ListBase *pList, void *pValue);
void listRemoveFirstByValue_IMPL(NonIntrusiveList *pList, void *pValue);
void listRemoveAllByValue_IMPL(NonIntrusiveList *pList, void *pValue);
void *listFindByValue_IMPL(NonIntrusiveList *pList, void *pValue);
void *listHead_IMPL(ListBase *pList);
void *listTail_IMPL(ListBase *pList);
void *listNext_IMPL(ListBase *pList, void *pValue);
void *listPrev_IMPL(ListBase *pList, void *pValue);
ListIterBase listIterAll_IMPL(ListBase *pList);
ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast);
NvBool listIterNext_IMPL(ListIterBase *pIt);
/**
 * Translate a stored value pointer to its ListNode using the list's
 * configured node offset. Returns NULL when either argument is NULL.
 */
static NV_FORCEINLINE ListNode *
listValueToNode(ListBase *pList, void *pValue)
{
    if ((NULL == pList) || (NULL == pValue))
    {
        return NULL;
    }
    return (ListNode *)((NvU8 *)pValue + pList->nodeOffset);
}
/**
 * Translate a ListNode back to the value that contains it (inverse of
 * listValueToNode). Returns NULL when either argument is NULL.
 */
static NV_FORCEINLINE void *
listNodeToValue(ListBase *pList, ListNode *pNode)
{
    if ((NULL == pList) || (NULL == pNode))
    {
        return NULL;
    }
    return (NvU8 *)pNode - pList->nodeOffset;
}
#ifdef __cplusplus
}
#endif
#endif // _NV_CONTAINERS_LIST_H_

View File

@@ -0,0 +1,300 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CONTAINERS_MAP_H_
#define _NV_CONTAINERS_MAP_H_
// Contains mix of C/C++ declarations.
#include "containers/type_safety.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
#include "nvmisc.h"
#include "nvport/nvport.h"
#include "utils/nvassert.h"
/**
* @defgroup NV_CONTAINERS_MAP Map
*
* @brief Map (ordered) from 64-bit integer keys to user-defined values.
*
* @details The provided interface is abstract, decoupling the user from the
* underlying ordered map implementation. Two options are available with regard
* to memory management, intrusive and non-intrusive. Users can select either
* one based on different situations. Despite the two versions of the map,
* the following implementation constraints are guaranteed.
*
* - Time Complexity:
* * Operations are \b O(log N),
* * Unless stated otherwise,
* * Where N is the number of values in the map.
*
* - Memory Usage:
* * \b O(N) memory is required for N values.
* * Intrusive and non-intrusive variants are provided.
* See @ref mem-ownership for further details.
*
* - Synchronization:
* * \b None. The container is not thread-safe.
* * Locking must be handled by the user if required.
*
*/
#define MAKE_MAP(mapTypeName, dataType) \
typedef union mapTypeName##Iter \
{ \
dataType *pValue; \
MapIterBase iter; \
} mapTypeName##Iter; \
typedef union mapTypeName \
{ \
NonIntrusiveMap real; \
CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \
CONT_TAG_NON_INTRUSIVE(dataType); \
} mapTypeName
#define DECLARE_MAP(mapTypeName) \
typedef union mapTypeName##Iter mapTypeName##Iter; \
typedef union mapTypeName mapTypeName
#define MAKE_INTRUSIVE_MAP(mapTypeName, dataType, node) \
typedef union mapTypeName##Iter \
{ \
dataType *pValue; \
MapIterBase iter; \
} mapTypeName##Iter; \
typedef union mapTypeName \
{ \
IntrusiveMap real; \
CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \
CONT_TAG_INTRUSIVE(dataType, node); \
} mapTypeName
#define DECLARE_INTRUSIVE_MAP(mapTypeName) \
typedef union mapTypeName##Iter mapTypeName##Iter; \
typedef union mapTypeName mapTypeName
/**
* @brief Internal node structure to embed within intrusive map values.
*/
typedef struct MapNode MapNode;
/**
* @brief Base type common to both intrusive and non-intrusive variants.
*/
typedef struct MapBase MapBase;
/**
* @brief Non-intrusive map (container-managed memory).
*/
typedef struct NonIntrusiveMap NonIntrusiveMap;
/**
* @brief Intrusive map (user-managed memory).
*/
typedef struct IntrusiveMap IntrusiveMap;
/**
* @brief Iterator over a range of map values.
*
* See @ref iterators for usage details.
*/
typedef struct MapIterBase MapIterBase;
/**
 * Red-black tree node stored with every map value (embedded in the value for
 * intrusive maps; managed by the container for non-intrusive maps).
 */
struct MapNode
{
    /// @privatesection
    NvU64    key;     // ordering key of the value owning this node
    MapNode *pParent; // tree links
    MapNode *pLeft;
    MapNode *pRight;
    NvBool   bIsRed;  // red/black color of this node
#if PORT_IS_CHECKED_BUILD
    MapBase *pMap;    // owning map, used for extra validation in checked builds
#endif
};
/**
 * Iterator state over a [first, last] range of map values (key order).
 */
struct MapIterBase
{
    void    *pValue; // value currently exposed to the caller
    MapBase *pMap;   // map being iterated
    MapNode *pNode;  // next node to visit -- confirm exact advance semantics in map.c
    MapNode *pLast;  // last node of the range (inclusive bound)
#if PORT_IS_CHECKED_BUILD
    NvU32 versionNumber; // presumably detects mutation during iteration -- confirm in map.c
#endif
};
MapIterBase mapIterRange_IMPL(MapBase *pMap, void *pFirst, void *pLast);
CONT_VTABLE_DECL(MapBase, MapIterBase);
/**
 * State shared by intrusive and non-intrusive maps.
 */
struct MapBase
{
    CONT_VTABLE_FIELD(MapBase);
    MapNode *pRoot;      // root of the red-black tree; NULL when empty
    NvS32    nodeOffset; // byte offset from a value to its MapNode
                         // (see mapValueToNode/mapNodeToValue)
    NvU32    count;      // number of values currently stored
#if PORT_IS_CHECKED_BUILD
    NvU32 versionNumber;
#endif
};
/**
 * Non-intrusive map: value storage is allocated and freed by the container.
 */
struct NonIntrusiveMap
{
    MapBase             base;
    PORT_MEM_ALLOCATOR *pAllocator; // allocator backing value storage
    NvU32               valueSize;  // size in bytes of one stored value
};

/**
 * Intrusive map: values (with embedded MapNode) are owned by the caller.
 */
struct IntrusiveMap
{
    MapBase base;
};
#define mapInit(pMap, pAllocator) \
mapInit_IMPL(&((pMap)->real), pAllocator, sizeof(*(pMap)->valueSize))
#define mapInitIntrusive(pMap) \
mapInitIntrusive_IMPL(&((pMap)->real), sizeof(*(pMap)->nodeOffset))
#define mapDestroy(pMap) \
CONT_DISPATCH_ON_KIND(pMap, \
mapDestroy_IMPL((NonIntrusiveMap*)&((pMap)->real)), \
mapDestroyIntrusive_IMPL(&((pMap)->real.base)), \
contDispatchVoid_STUB())
#define mapCount(pMap) \
mapCount_IMPL(&((pMap)->real).base)
#define mapKey(pMap, pValue) \
mapKey_IMPL(&((pMap)->real).base, pValue)
#define mapInsertNew(pMap, key) \
CONT_CAST_ELEM(pMap, mapInsertNew_IMPL(&(pMap)->real, key))
#define mapInsertValue(pMap, key, pValue) \
CONT_CAST_ELEM(pMap, \
mapInsertValue_IMPL(&(pMap)->real, key, \
CONT_CHECK_ARG(pMap, pValue)))
#define mapInsertExisting(pMap, key, pValue) \
mapInsertExisting_IMPL(&(pMap)->real, key, \
CONT_CHECK_ARG(pMap, pValue))
#define mapRemove(pMap, pValue) \
CONT_DISPATCH_ON_KIND(pMap, \
mapRemove_IMPL((NonIntrusiveMap*)&((pMap)->real), \
CONT_CHECK_ARG(pMap, pValue)), \
mapRemoveIntrusive_IMPL(&((pMap)->real).base, \
CONT_CHECK_ARG(pMap, pValue)), \
contDispatchVoid_STUB())
#define mapClear(pMap) \
mapDestroy(pMap)
#define mapRemoveByKey(pMap, key) \
CONT_DISPATCH_ON_KIND(pMap, \
mapRemoveByKey_IMPL((NonIntrusiveMap*)&((pMap)->real), key), \
mapRemoveByKeyIntrusive_IMPL(&((pMap)->real).base, key), \
contDispatchVoid_STUB())
#define mapFind(pMap, key) \
CONT_CAST_ELEM(pMap, mapFind_IMPL(&((pMap)->real).base, key))
#define mapFindGEQ(pMap, keyMin) \
CONT_CAST_ELEM(pMap, \
mapFindGEQ_IMPL(&((pMap)->real).base, keyMin))
#define mapFindLEQ(pMap, keyMax) \
CONT_CAST_ELEM(pMap, \
mapFindLEQ_IMPL(&((pMap)->real).base, keyMax))
#define mapNext(pMap, pValue) \
CONT_CAST_ELEM(pMap, \
mapNext_IMPL(&((pMap)->real).base, \
CONT_CHECK_ARG(pMap, pValue)))
#define mapPrev(pMap, pValue) \
CONT_CAST_ELEM(pMap, \
mapPrev_IMPL(&((pMap)->real).base, \
CONT_CHECK_ARG(pMap, pValue)))
#define mapIterAll(pMap) \
mapIterRange(pMap, mapFindGEQ(pMap, 0), mapFindLEQ(pMap, NV_U64_MAX))
#define mapIterRange(pMap, pFirst, pLast) \
CONT_ITER_RANGE(pMap, &mapIterRange_IMPL, \
CONT_CHECK_ARG(pMap, pFirst), CONT_CHECK_ARG(pMap, pLast))
#define mapIterNext(pIt) \
mapIterNext_IMPL(&((pIt)->iter))
void mapInit_IMPL(NonIntrusiveMap *pMap,
PORT_MEM_ALLOCATOR *pAllocator, NvU32 valueSize);
void mapInitIntrusive_IMPL(IntrusiveMap *pMap, NvS32 nodeOffset);
void mapDestroy_IMPL(NonIntrusiveMap *pMap);
void mapDestroyIntrusive_IMPL(MapBase *pMap);
NvU32 mapCount_IMPL(MapBase *pMap);
NvU64 mapKey_IMPL(MapBase *pMap, void *pValue);
void *mapInsertNew_IMPL(NonIntrusiveMap *pMap, NvU64 key);
void *mapInsertValue_IMPL(NonIntrusiveMap *pMap, NvU64 key, void *pValue);
NvBool mapInsertExisting_IMPL(IntrusiveMap *pMap, NvU64 key, void *pValue);
void mapRemove_IMPL(NonIntrusiveMap *pMap, void *pValue);
void mapRemoveIntrusive_IMPL(MapBase *pMap, void *pValue);
void mapRemoveByKey_IMPL(NonIntrusiveMap *pMap, NvU64 key);
void mapRemoveByKeyIntrusive_IMPL(MapBase *pMap, NvU64 key);
void *mapFind_IMPL(MapBase *pMap, NvU64 key);
void *mapFindGEQ_IMPL(MapBase *pMap, NvU64 keyMin);
void *mapFindLEQ_IMPL(MapBase *pMap, NvU64 keyMax);
void *mapNext_IMPL(MapBase *pMap, void *pValue);
void *mapPrev_IMPL(MapBase *pMap, void *pValue);
MapIterBase mapIterAll_IMPL(MapBase *pMap);
NvBool mapIterNext_IMPL(MapIterBase *pIt);
/**
 * Translate a stored value pointer to its MapNode using the map's configured
 * node offset. Returns NULL when either argument is NULL.
 */
static NV_FORCEINLINE MapNode *
mapValueToNode(MapBase *pMap, void *pValue)
{
    if ((NULL == pMap) || (NULL == pValue))
    {
        return NULL;
    }
    return (MapNode *)((NvU8 *)pValue + pMap->nodeOffset);
}
/**
 * Translate a MapNode back to the value that contains it (inverse of
 * mapValueToNode). Returns NULL when either argument is NULL.
 */
static NV_FORCEINLINE void *
mapNodeToValue(MapBase *pMap, MapNode *pNode)
{
    if ((NULL == pMap) || (NULL == pNode))
    {
        return NULL;
    }
    return (NvU8 *)pNode - pMap->nodeOffset;
}
#ifdef __cplusplus
}
#endif
#endif // _NV_CONTAINERS_MAP_H_

View File

@@ -0,0 +1,296 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CONTAINERS_MULTIMAP_H_
#define _NV_CONTAINERS_MULTIMAP_H_
// Contains mix of C/C++ declarations.
#include "containers/type_safety.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "containers/map.h"
/**
* @defgroup NV_CONTAINERS_MULTIMAP Multimap
*
* @brief Two-layer multimap (ordered) from pairs of 64-bit unsigned integer
* keys to user-defined values.
*
* @details The provided interface is abstract, decoupling the user from the
* underlying ordered multimap implementation. Currently, memory management is
* limited to non-intrusive container-managed memory. The following
* implementation constraints are guaranteed.
*
* - Time Complexity:
* * Operations are \b O(log M + log N),
* * Unless stated otherwise,
* * Where M is the number of submaps and N is the total number of values in
* the map.
*
* - Memory Usage:
* * \b O(M + N) memory is required for M submaps and N values.
* * Only a non-intrusive variant is provided.
* See @ref mem-ownership for further details.
*
* - Synchronization:
* * \b None. The container is not thread-safe.
* * Locking must be handled by the user if required.
*
*/
#define MAKE_MULTIMAP(multimapTypeName, dataType) \
typedef struct multimapTypeName##Leaf \
{ \
dataType data; \
MultimapNode node; \
} multimapTypeName##Leaf; \
MAKE_INTRUSIVE_MAP(multimapTypeName##Submap, multimapTypeName##Leaf, \
node.submapNode); \
MAKE_MAP(multimapTypeName##Supermap, multimapTypeName##Submap); \
typedef union multimapTypeName##Iter \
{ \
dataType *pValue; \
MultimapIterBase iter; \
} multimapTypeName##Iter; \
typedef union multimapTypeName \
{ \
CONT_TAG_TYPE(MultimapBase, dataType, multimapTypeName##Iter); \
struct { MultimapBase base; } real; \
struct \
{ \
/* This field simply aligns map with the one in MultimapBase */ \
CONT_VTABLE_FIELD(MultimapBase); \
multimapTypeName##Supermap map; \
} type; \
CONT_TAG_NON_INTRUSIVE(dataType); \
struct {char _[NV_OFFSETOF(multimapTypeName##Leaf, node)];} *nodeOffset; \
struct {char _[sizeof(multimapTypeName##Submap)];} *submapSize; \
} multimapTypeName;
#define DECLARE_MULTIMAP(multimapTypeName) \
typedef struct multimapTypeName##Leaf multimapTypeName##Leaf; \
DECLARE_INTRUSIVE_MAP(multimapTypeName##Submap); \
DECLARE_MAP(multimapTypeName##Supermap); \
typedef union multimapTypeName##Iter multimapTypeName##Iter; \
typedef union multimapTypeName multimapTypeName
/**
* @brief Internal node structure associated with multimap values.
*/
typedef struct MultimapNode MultimapNode;
/**
* @brief Base type common to all multimap iterator types.
*/
typedef struct MultimapIterBase MultimapIterBase;
/**
* @brief Base type common to all multimap types.
*/
typedef struct MultimapBase MultimapBase;
/**
 * Per-item bookkeeping stored with every multimap value.
 */
struct MultimapNode
{
    void   *pSubmap;    // submap this item currently belongs to
    MapNode submapNode; // map node linking the item into that submap
};
/**
 * Iterator state over a [first, last] range of multimap items, crossing
 * submap boundaries as needed.
 */
struct MultimapIterBase
{
    void         *pValue;    // item currently exposed to the caller
    MultimapBase *pMultimap; // multimap being iterated
    void         *pNext;     // next item to visit -- confirm semantics in multimap.c
    void         *pLast;     // last item of the range (inclusive bound)
};
CONT_VTABLE_DECL(MultimapBase, MultimapIterBase);
/**
 * State shared by all multimap instantiations: a non-intrusive supermap of
 * submap keys to intrusive submaps of items.
 */
struct MultimapBase
{
    CONT_VTABLE_FIELD(MultimapBase);
    NonIntrusiveMap map;                // supermap: submap key -> submap
    NvS32           multimapNodeOffset; // byte offset from an item to its MultimapNode
    NvU32           itemCount;          // total number of items across all submaps
    NvU32           itemSize;           // size in bytes of one item
};
#define multimapInit(pMultimap, pAllocator) \
multimapInit_IMPL(&(pMultimap)->real.base, pAllocator, \
sizeof(*(pMultimap)->valueSize), \
sizeof(*(pMultimap)->nodeOffset), \
sizeof(*(pMultimap)->submapSize))
#define multimapDestroy(pMultimap) \
multimapDestroy_IMPL(&(pMultimap)->real.base)
#define multimapClear(pMultimap) \
multimapClear_IMPL(&(pMultimap)->real.base)
#define multimapCountSubmaps(pMultimap) \
mapCount(&(pMultimap)->type.map)
#define multimapCountItems(pMultimap) \
(pMultimap)->real.base.itemCount
#define multimapFindSubmap(pMultimap, submapKey) \
CONT_CAST_ELEM(&(pMultimap)->type.map, \
multimapFindSubmap_IMPL(&(pMultimap)->real.base, submapKey))
#define multimapFindSubmapLEQ(pMultimap, submapKey) \
CONT_CAST_ELEM(&(pMultimap)->type.map, \
multimapFindSubmapLEQ_IMPL(&(pMultimap)->real.base, submapKey))
#define multimapFindSubmapGEQ(pMultimap, submapKey) \
CONT_CAST_ELEM(&(pMultimap)->type.map, \
multimapFindSubmapGEQ_IMPL(&(pMultimap)->real.base, submapKey))
#define multimapCountSubmapItems(pMultimap, pSubmap) \
mapCount(pSubmap)
#define multimapInsertItemNew(pMultimap, submapKey, itemKey) \
CONT_CAST_ELEM(pMultimap, \
multimapInsertItemNew_IMPL(&(pMultimap)->real.base, submapKey, itemKey))
//
// Type-safe multimap operations. Each macro forwards to the matching
// *_IMPL function on the untyped MultimapBase, using CONT_CAST_ELEM /
// CONT_CHECK_ARG (see type_safety.h) to restore/verify the element type.
//
// Copy *pValue into a new item at (submapKey, itemKey); returns a typed pointer.
#define multimapInsertItemValue(pMultimap, submapKey, itemKey, pValue) \
CONT_CAST_ELEM(pMultimap, \
multimapInsertItemValue_IMPL(&(pMultimap)->real.base, \
submapKey, itemKey, pValue))
// Create an empty submap keyed by submapKey; returns the typed submap.
#define multimapInsertSubmap(pMultimap, submapKey) \
CONT_CAST_ELEM(&(pMultimap)->type.map, \
multimapInsertSubmap_IMPL(&(pMultimap)->real.base, submapKey))
// Look up the item at (submapKey, itemKey).
#define multimapFindItem(pMultimap, submapKey, itemKey) \
CONT_CAST_ELEM(pMultimap, \
multimapFindItem_IMPL(&(pMultimap)->real.base, submapKey, itemKey))
// Remove an item given a pointer to its value.
#define multimapRemoveItem(pMultimap, pValue) \
multimapRemoveItem_IMPL(&(pMultimap)->real.base, pValue)
// Remove an entire submap.
#define multimapRemoveSubmap(pMultimap, pSubmap) \
multimapRemoveSubmap_IMPL(&(pMultimap)->real.base, &(pSubmap)->real.base)
// Remove the item at (submapKey, itemKey).
#define multimapRemoveItemByKey(pMultimap, submapKey, itemKey) \
multimapRemoveItemByKey_IMPL(&(pMultimap)->real.base, submapKey, itemKey)
// Successor of pValue in global (submap-major) iteration order.
#define multimapNextItem(pMultimap, pValue) \
CONT_CAST_ELEM(pMultimap, \
multimapNextItem_IMPL(&(pMultimap)->real.base, pValue))
// Predecessor of pValue in global iteration order.
#define multimapPrevItem(pMultimap, pValue) \
CONT_CAST_ELEM(pMultimap, \
multimapPrevItem_IMPL(&(pMultimap)->real.base, pValue))
// First item across all submaps.
#define multimapFirstItem(pMultimap) \
CONT_CAST_ELEM(pMultimap, multimapFirstItem_IMPL(&(pMultimap)->real.base))
// Last item across all submaps.
#define multimapLastItem(pMultimap) \
CONT_CAST_ELEM(pMultimap, multimapLastItem_IMPL(&(pMultimap)->real.base))
// Iterator over every item in the multimap.
#define multimapItemIterAll(pMultimap) \
multimapItemIterRange(pMultimap, \
multimapFirstItem(pMultimap), multimapLastItem(pMultimap))
// Iterator over items in [pFirst, pLast]; both bounds are type-checked.
#define multimapItemIterRange(pMultimap, pFirst, pLast) \
CONT_ITER_RANGE(pMultimap, multimapItemIterRange_IMPL, \
CONT_CHECK_ARG(pMultimap, pFirst), CONT_CHECK_ARG(pMultimap, pLast))
// Iterator over the items of one submap: from its lowest-keyed node
// (find GEQ 0) to its highest-keyed node (find LEQ NV_U64_MAX).
#define multimapSubmapIterItems(pMultimap, pSubmap) \
multimapItemIterRange(pMultimap, \
&mapFindGEQ(pSubmap, 0)->data, &mapFindLEQ(pSubmap, NV_U64_MAX)->data)
// Advance an item iterator; see multimapItemIterNext_IMPL for semantics.
#define multimapItemIterNext(pIt) \
multimapItemIterNext_IMPL(&(pIt)->iter)
// Submap-level iteration delegates to the underlying map of submaps.
#define multimapSubmapIterAll(pMultimap) \
mapIterAll(&(pMultimap)->type.map)
#define multimapSubmapIterRange(pMultimap, pFirst, pLast) \
mapIterRange(&(pMultimap)->type.map, pFirst, pLast)
#define multimapSubmapIterNext(pIt) \
mapIterNext(pIt)
// Key of an item within its submap (read from the item's embedded node).
#define multimapItemKey(pMultimap, pValue) \
multimapValueToNode(&(pMultimap)->real.base, pValue)->submapNode.key
// Key of a submap within the multimap.
#define multimapSubmapKey(pMultimap, pSubmap) \
mapKey(&(pMultimap)->type.map, pSubmap)
//
// Untyped implementation entry points. Call through the type-safe macros
// above rather than invoking these directly.
//
// Initialize pBase for elements of valueSize bytes whose MultimapNode sits
// at nodeOffset within the element; submapSize sizes each submap object.
void multimapInit_IMPL(MultimapBase *pBase, PORT_MEM_ALLOCATOR *pAllocator,
NvU32 valueSize, NvS32 nodeOffset, NvU32 submapSize);
void multimapRemoveSubmap_IMPL(MultimapBase *pMultimap, MapBase *submap);
void multimapDestroy_IMPL(MultimapBase *pBase);
void multimapClear_IMPL(MultimapBase *pBase);
// Submap insert/lookups return an untyped submap pointer
// (presumably NULL when absent or on failure -- impl not visible here).
void *multimapInsertSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey);
void *multimapFindSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey);
void *multimapFindSubmapLEQ_IMPL(MultimapBase *pBase, NvU64 submapKey);
void *multimapFindSubmapGEQ_IMPL(MultimapBase *pBase, NvU64 submapKey);
// Item insert/find return an untyped pointer to the item's value storage.
void *multimapInsertItemNew_IMPL(MultimapBase *pBase, NvU64 submapKey,
NvU64 itemKey);
void *multimapInsertItemValue_IMPL(MultimapBase *pBase, NvU64 submapKey,
NvU64 itemKey, void *pValue);
void *multimapFindItem_IMPL(MultimapBase *pBase, NvU64 submapKey,
NvU64 itemKey);
void multimapRemoveItem_IMPL(MultimapBase *pBase, void *pLeaf);
void multimapRemoveItemByKey_IMPL(MultimapBase *pBase, NvU64 submapKey,
NvU64 itemKey);
// Global-order traversal over item values.
void *multimapNextItem_IMPL(MultimapBase *pBase, void *pValue);
void *multimapPrevItem_IMPL(MultimapBase *pBase, void *pValue);
void *multimapFirstItem_IMPL(MultimapBase *pBase);
void *multimapLastItem_IMPL(MultimapBase *pBase);
// Build a ranged item iterator over [pFirst, pLast].
MultimapIterBase multimapItemIterRange_IMPL(MultimapBase *pBase,
void *pFirst, void *pLast);
NvBool multimapItemIterNext_IMPL(MultimapIterBase *pIt);
//
// Translate a pointer to a multimap value (client element) into a pointer
// to its embedded MultimapNode, using the node offset recorded in pBase.
// Returns NULL when either argument is NULL.
//
static NV_FORCEINLINE MultimapNode *
multimapValueToNode(MultimapBase *pBase, void *pValue)
{
    MultimapNode *pNode = NULL;

    if ((pBase != NULL) && (pValue != NULL))
    {
        pNode = (MultimapNode *)((NvU8 *)pValue + pBase->multimapNodeOffset);
    }
    return pNode;
}
//
// Inverse of multimapValueToNode: translate an embedded MultimapNode
// pointer back to the client value that contains it.
// Returns NULL when either argument is NULL.
//
static NV_FORCEINLINE void *
multimapNodeToValue(MultimapBase *pBase, MultimapNode *pNode)
{
    void *pValue = NULL;

    if ((pBase != NULL) && (pNode != NULL))
    {
        pValue = (NvU8 *)pNode - pBase->multimapNodeOffset;
    }
    return pValue;
}
#ifdef __cplusplus
}
#endif
#endif // _NV_CONTAINERS_MULTIMAP_H_

/* ======== End of containers/multimap.h; next file: containers/queue.h (new file, 143 lines) ======== */
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_CONTAINERS_QUEUE_H
#define NV_CONTAINERS_QUEUE_H
#include "containers/type_safety.h"
#include "nvtypes.h"
#include "nvmisc.h"
#include "nvport/nvport.h"
#include "utils/nvassert.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Define a type-safe circular queue type for dataType.
 *
 * The generated union overlays the untyped Queue with compile-time type
 * tags (see type_safety.h) so the queue* macros below can check argument
 * types and recover the element size via sizeof(*valueSize).
 * The Iter_UNUSED struct only satisfies CONT_TAG_TYPE's iterator slot;
 * circular queues expose no iterator.
 */
#define MAKE_QUEUE_CIRCULAR(queueTypeName, dataType) \
typedef struct queueTypeName##Iter_UNUSED \
{ \
NvLength dummyElem; \
} queueTypeName##Iter_UNUSED; \
typedef union queueTypeName \
{ \
Queue real; \
CONT_TAG_TYPE(Queue, dataType, queueTypeName##Iter_UNUSED); \
CONT_TAG_NON_INTRUSIVE(dataType); \
} queueTypeName
/**
 * Forward-declare a queue type defined elsewhere with MAKE_QUEUE_CIRCULAR.
 */
#define DECLARE_QUEUE_CIRCULAR(queueTypeName) \
typedef struct queueTypeName##Iter_UNUSED queueTypeName##Iter_UNUSED; \
typedef union queueTypeName queueTypeName
struct Queue;
struct QueueContext;
/**
 * Callback that moves message data between queue storage and client memory.
 *
 * @param msgSize      Size of one message in bytes.
 * @param opIdx        Queue index (slot) at which the operation starts.
 * @param pCtx         Context carrying the private data pointer.
 * @param pClientData  Client-side buffer to copy from/to.
 * @param count        Number of messages to transfer.
 * @param bCopyIn      NV_TRUE to copy into the queue, NV_FALSE to copy out
 *                     -- presumably; confirm against the *_NonManaged impls.
 */
typedef void QueueCopyData(NvLength msgSize, NvLength opIdx,
struct QueueContext *pCtx, void *pClientData,
NvLength count, NvBool bCopyIn);
/**
 * Bundles a copy callback with private state, for queues whose backing
 * memory is not managed by the queue itself (see *_NonManaged APIs).
 */
typedef struct QueueContext {
QueueCopyData *pCopyData; // Function performing accesses to queue memory.
void *pData; // Private data.
} QueueContext;
/**
 * Untyped single-producer/single-consumer circular queue state.
 * getIdx/putIdx are 64-byte aligned, presumably to keep the consumer- and
 * producer-owned indices on separate cache lines (avoid false sharing).
 */
typedef struct Queue {
NvLength capacity; // Queue Capacity
PORT_MEM_ALLOCATOR *pAllocator; // Set of functions used for managing queue memory
void *pData; // Queue memory, if managed by pAllocator
NvLength msgSize; // Message size produced by Producer
NvLength getIdx NV_ALIGN_BYTES(64);// GET index modified by Consumer
NvLength putIdx NV_ALIGN_BYTES(64);// PUT index modified by Producer
} Queue;
//for future use (more possible queues - just an example, currently only CIRCULAR will get implemented)
typedef enum
{
QUEUE_TYPE_CIRCULAR = 1,
//QUEUE_TYPE_LINEAR = 2,
//QUEUE_TYPE_PRIORITY = 3,
}QUEUE_TYPE;
//
// Type-safe queue operations; each forwards to a circularQueue*_IMPL
// function on the untyped Queue. The element size is recovered at compile
// time from the union's valueSize tag (sizeof(*valueSize)).
//
// Initialize with pAllocator-managed storage for `capacity` messages.
#define queueInit(pQueue, pAllocator, capacity) \
circularQueueInit_IMPL(&((pQueue)->real), pAllocator, \
capacity, sizeof(*(pQueue)->valueSize))
// Initialize without allocating storage; accesses go through a QueueContext.
#define queueInitNonManaged(pQueue, capacity) \
circularQueueInitNonManaged_IMPL(&((pQueue)->real), \
capacity, sizeof(*(pQueue)->valueSize))
#define queueDestroy(pQueue) \
circularQueueDestroy_IMPL(&((pQueue)->real))
// Number of messages currently in the queue.
#define queueCount(pQueue) \
circularQueueCount_IMPL(&((pQueue)->real))
// Maximum number of messages the queue can hold.
#define queueCapacity(pQueue) \
circularQueueCapacity_IMPL(&((pQueue)->real))
#define queueIsEmpty(pQueue) \
circularQueueIsEmpty_IMPL(&((pQueue)->real))
// Push numElements messages from pElements (type-checked against the queue).
#define queuePush(pQueue, pElements, numElements) \
circularQueuePush_IMPL(&(pQueue)->real, \
CONT_CHECK_ARG(pQueue, pElements), numElements)
// Push variant for non-managed queues; pCtx supplies the copy callback.
#define queuePushNonManaged(pQueue, pCtx, pElements, numElements) \
circularQueuePushNonManaged_IMPL(&(pQueue)->real, pCtx, \
CONT_CHECK_ARG(pQueue, pElements), numElements)
// Typed pointer to the front message without removing it.
#define queuePeek(pQueue) \
CONT_CAST_ELEM(pQueue, circularQueuePeek_IMPL(&((pQueue)->real)))
// Discard the front message.
#define queuePop(pQueue) \
circularQueuePop_IMPL(&((pQueue)->real))
// Copy the front message into pCopyTo, then discard it.
#define queuePopAndCopy(pQueue, pCopyTo) \
circularQueuePopAndCopy_IMPL(&((pQueue)->real), \
CONT_CHECK_ARG(pQueue, pCopyTo))
// Pop-and-copy variant for non-managed queues.
#define queuePopAndCopyNonManaged(pQueue, pCtx, pCopyTo) \
circularQueuePopAndCopyNonManaged_IMPL(&((pQueue)->real), pCtx, \
CONT_CHECK_ARG(pQueue, pCopyTo))
//
// Untyped implementation entry points; call through the queue* macros above.
//
NV_STATUS circularQueueInit_IMPL(Queue *pQueue, PORT_MEM_ALLOCATOR *pAllocator,
NvLength capacity, NvLength msgSize);
NV_STATUS circularQueueInitNonManaged_IMPL(Queue *pQueue, NvLength capacity,
NvLength msgSize);
void circularQueueDestroy_IMPL(Queue *pQueue);
NvLength circularQueueCapacity_IMPL(Queue *pQueue);
NvLength circularQueueCount_IMPL(Queue *pQueue);
NvBool circularQueueIsEmpty_IMPL(Queue *pQueue);
// Push functions return a count -- presumably the number of elements
// actually enqueued; confirm against the implementation.
NvLength circularQueuePush_IMPL(Queue *pQueue, void* pElements, NvLength numElements);
NvLength circularQueuePushNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx,
void* pElements, NvLength numElements);
void* circularQueuePeek_IMPL(Queue *pQueue);
void circularQueuePop_IMPL(Queue *pQueue);
// Pop-and-copy functions return NV_TRUE on success (NV_FALSE presumably
// means the queue was empty; confirm against the implementation).
NvBool circularQueuePopAndCopy_IMPL(Queue *pQueue, void *pCopyTo);
NvBool circularQueuePopAndCopyNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx,
void *pCopyTo);
#ifdef __cplusplus
}
#endif
#endif // NV_CONTAINERS_QUEUE_H

/* ======== End of containers/queue.h; next file: containers/type_safety.h (new file, 254 lines) ======== */
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CONTAINERS_TYPE_SAFETY_H_
#define _NV_CONTAINERS_TYPE_SAFETY_H_
#include "nvtypes.h"
#include "nvport/nvport.h"
// Check for typeof support. For now restricting to GNUC compilers.
#if defined(__GNUC__)
#define NV_TYPEOF_SUPPORTED 1
#else
#define NV_TYPEOF_SUPPORTED 0
#endif
/**
 * Tag a non-intrusive container union with the following info:
 * valueSize : size of its element type for non-intrusive malloc
 * kind : non-intrusive kind ID for static dispatch
 */
// The tags are pointer members of a union, so they add no storage; the
// integer they encode is recovered at compile time as sizeof(*tag).
#define CONT_TAG_NON_INTRUSIVE(elemType) \
struct {char _[sizeof(elemType)];} *valueSize; \
struct {char _[CONT_KIND_NON_INTRUSIVE];} *kind
/**
 * Tag an intrusive container union with the following info:
 * nodeOffset : offset of the data structure node within element type
 * kind : intrusive kind ID for static dispatch
 */
// FIXME: Do not use this for any structure members with offset 0!
// The size of a 0 length array is undefined according to the C99 standard
// and we've seen non-zero values of sizeof(*nodeOffset) appear at runtime
// leading to corruption. Filed Bug 2858103 to track work against this.
#define CONT_TAG_INTRUSIVE(elemType, node) \
struct {char _[NV_OFFSETOF(elemType, node)];} *nodeOffset; \
struct {char _[CONT_KIND_INTRUSIVE];} *kind
#ifdef __cplusplus
extern "C" {
#endif
/**
* Utility identity function for several type-safety mechanisms.
*/
/*
 * Identity function on void pointers. Serves as the checkRet entry of the
 * runtime vtable and as a cast-laundering hop when converting function
 * pointers in CONT_ITER_RANGE / CONT_ITER_RANGE_INDEX.
 */
static NV_FORCEINLINE void *contId(void *p)
{
    return p;
}
#ifdef __cplusplus
}
#endif
/**
* @def CONT_TAG_ELEM_TYPE
* Tag a container union with element type info.
*/
/**
* @def CONT_CHECK_ARG
* Check that a value has a container's element type.
*/
/**
* @def CONT_CAST_ELEM
* Cast a void pointer to a container's element type.
*/
// With C++ we can use typedef and templates for 100% type safety.
#if defined(__cplusplus) && !defined(NV_CONTAINERS_NO_TEMPLATES)
// C++ path: the tag macro records the container/element/iterator types as
// member typedefs, and the checks below are ordinary function templates.
#define CONT_TAG_TYPE(contType, elemType, iterType) \
CONT_VTABLE_TAG(contType, elemType, iterType); \
typedef contType ContType; \
typedef elemType ElemType; \
typedef iterType IterType
// Compile-time argument check: only an ElemType pointer converts to pValue.
template <class T>
typename T::ElemType *CONT_CHECK_ARG(T *pCont, typename T::ElemType *pValue)
{
return pValue;
}
// Cast an untyped result back to the container's element type.
template <class T>
typename T::ElemType *CONT_CAST_ELEM(T *pCont, void *pValue)
{
return (typename T::ElemType *)pValue;
}
// Invoke a pointer-bounded range constructor and wrap the untyped iterator
// in the container's typed IterType.
template <class T, class It>
typename T::IterType CONT_ITER_RANGE
(
T *pCont,
It (*pFunc)(typename T::ContType *, void *, void *),
void *pFirst,
void *pLast
)
{
typename T::IterType temp;
temp.iter = pFunc(&pCont->real.base, pFirst, pLast);
return temp;
}
// Same as CONT_ITER_RANGE, but for key-index (NvU64) bounds.
template <class T, class It>
typename T::IterType CONT_ITER_RANGE_INDEX
(
T *pCont,
It (*pFunc)(typename T::ContType *, NvU64, NvU64),
NvU64 first,
NvU64 last
)
{
typename T::IterType temp;
temp.iter = pFunc(&pCont->real.base, first, last);
return temp;
}
// Without C++ we need more creativity. :)
#else
// Element tag is a pointer to the element type (no mem overhead in union).
// C path: the tag macro records types as zero-cost pointer members.
#define CONT_TAG_TYPE(contType, elemType, iterType) \
CONT_VTABLE_TAG(contType, elemType, iterType); \
elemType *elem; \
iterType *iter
// Argument check uses sizeof to get error message without runtime overhead.
// (The assignment inside sizeof is never executed; it only type-checks.)
#define CONT_CHECK_ARG(pCont, pValue) \
(sizeof((pCont)->elem = (pValue)) ? (pValue) : NULL)
//
// Return checks are more problematic, but typeof is perfect when available.
// Without typeof we resort to a runtime vtable.
//
#if NV_TYPEOF_SUPPORTED
#define CONT_CAST_ELEM(pCont, ret) ((typeof((pCont)->elem))(ret))
//
// The dummy contId prevents compilers from warning about incompatible
// function casts. This is safe since we know the two return structures
// are identical (modulo alpha-conversion).
//
#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast) \
(((typeof(*(pCont)->iter)(*)(void *, void *, void *))contId(pFunc))( \
pCont, pFirst, pLast))
#define CONT_ITER_RANGE_INDEX(pCont, pFunc, first, last) \
(((typeof(*(pCont)->iter)(*)(void *, NvU64, NvU64))contId(pFunc))( \
pCont, first, last))
#else
// Vtable fallback (no typeof, not C++): dispatch through the per-type vtable.
#define CONT_CAST_ELEM(pCont, ret) ((pCont)->vtable->checkRet(ret))
#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast) \
((pCont)->vtable->iterRange(&(pCont)->real.base, pFirst, pLast))
// Fix: this macro was misspelled CONT_ITER_RANGE_RANGE, which left
// CONT_ITER_RANGE_INDEX (the name defined by the typeof and C++ branches,
// and the one callers use) undefined on this build path.
#define CONT_ITER_RANGE_INDEX(pCont, pFunc, first, last) \
((pCont)->vtable->iterRangeIndex(&(pCont)->real.base, first, last))
#endif
#endif
#if NV_TYPEOF_SUPPORTED
// typeof builds need no runtime vtable: all five macros expand to nothing.
#define CONT_VTABLE_DECL(contType, iterType)
#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex)
#define CONT_VTABLE_TAG(contType, elemType, iterType)
#define CONT_VTABLE_FIELD(contType)
#define CONT_VTABLE_INIT(contType, pCont)
#else
// Declare the vtable struct type for a container type.
// Fix: the closing line previously ended with a stray backslash, which
// spliced the following "#define CONT_VTABLE_DEFN" line into this macro's
// replacement list ('#' not followed by a parameter is a hard error).
#define CONT_VTABLE_DECL(contType, iterType) \
typedef struct \
{ \
void *(*checkRet)(void *pValue); \
iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \
iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last);\
} contType##_VTABLE;
// Define the single static vtable instance for a container type.
// contId serves as the (identity) checkRet entry.
#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex) \
static const contType##_VTABLE g_##contType##_VTABLE = \
{ \
contId, \
contIterRange, \
contIterRangeIndex, \
}
// Anonymous-struct tag placed in the container union; its layout mirrors
// contType##_VTABLE but with a typed checkRet so CONT_CAST_ELEM returns
// the element type.
#define CONT_VTABLE_TAG(contType, elemType, iterType) \
const struct \
{ \
elemType *(*checkRet)(void *pValue); \
iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \
iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last);\
} *vtable
// Vtable pointer member for the untyped container struct itself.
#define CONT_VTABLE_FIELD(contType) const contType##_VTABLE *vtable
// Point a container instance at its type's static vtable.
#define CONT_VTABLE_INIT(contType, pCont) \
((pCont)->vtable = &g_##contType##_VTABLE)
#endif
// Container kind IDs. Each is encoded as the array length of the union's
// `kind` tag (see CONT_TAG_*), so values must be nonzero.
enum CONT_KIND
{
CONT_KIND_NON_INTRUSIVE = 1,
CONT_KIND_INTRUSIVE = 2,
};
/**
 * Static dispatch uses sizeof with dummy arrays to select a path.
 *
 * With optimizations enabled the unused paths should be trimmed, so this
 * should have zero overhead in release builds.
 */
// ret1: non-intrusive path, ret2: intrusive path, ret3: unreachable fallback
// (see contDispatchVoid_STUB below).
#define CONT_DISPATCH_ON_KIND(pCont, ret1, ret2, ret3) \
((sizeof(*(pCont)->kind) == CONT_KIND_NON_INTRUSIVE) ? (ret1) : \
(sizeof(*(pCont)->kind) == CONT_KIND_INTRUSIVE) ? (ret2) : \
(ret3))
/**
* Utility stub useful for the above ret3 argument (unreachable path).
* Add stubs for different return types as needed.
*/
// Void-returning stub for CONT_DISPATCH_ON_KIND's ret3 (unreachable) slot;
// hits a breakpoint if it is ever actually executed.
static NV_FORCEINLINE void contDispatchVoid_STUB(void)
{
PORT_BREAKPOINT();
}
#endif // _NV_CONTAINERS_TYPE_SAFETY_H_