@@ -2,7 +2,7 @@
*
*   rmem - raylib memory pool and objects pool
*
*   A quick, efficient, and minimal free list and stack-based allocator
*   A quick, efficient, and minimal free list and arena-based allocator
*
*   PURPOSE:
*     - A quicker, efficient memory allocator alternative to 'malloc' and friends.
@@ -55,6 +55,8 @@
    #define RMEMAPI     // We are building or using library as a static library (or Linux shared library)
#endif
#define RMEM_VERSION    "v1.3"      // changelog at bottom of header.
//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
@@ -66,39 +68,45 @@ struct MemNode {
    MemNode *next, *prev;
};
// Freelist implementation
typedef struct AllocList {
    MemNode *head, *tail;
    size_t len, maxNodes;
    bool autoDefrag : 1;
    size_t len;
} AllocList;
typedef struct Stack {
    uint8_t *mem, *base;
// Arena allocator.
typedef struct Arena {
    uintptr_t mem, offs;
    size_t size;
} Stack;
} Arena;
#define MEMPOOL_BUCKET_SIZE 8
#define MEMPOOL_BUCKET_BITS 3
enum {
    MEMPOOL_BUCKET_SIZE = 8,
    MEMPOOL_BUCKET_BITS = (sizeof(uintptr_t) >> 1) + 1,
    MEM_SPLIT_THRESHOLD = sizeof(uintptr_t)*4
};
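/* A worked example (illustrative, not part of the header): on a 64-bit target,
   MEMPOOL_BUCKET_BITS is (8 >> 1) + 1 == 5, so bucket slots are spaced 32 bytes
   apart and an aligned allocation size maps to a slot via (size >> MEMPOOL_BUCKET_BITS) - 1:

        64-byte block  -> slot (64 >> 5) - 1 == 1
        256-byte block -> slot (256 >> 5) - 1 == 7      // largest bucketed size

   Anything larger falls through to the 'large' free list declared below. */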
typedef struct MemPool {
    AllocList freeList;
    Stack stack;
    MemNode *buckets[MEMPOOL_BUCKET_SIZE];
    AllocList large, buckets[MEMPOOL_BUCKET_SIZE];
    Arena arena;
} MemPool;
// Object Pool
typedef struct ObjPool {
    Stack stack;
    size_t objSize, freeBlocks;
    uintptr_t mem, offs;
    size_t objSize, freeBlocks, memSize;
} ObjPool;
// Double-Ended Stack aka Deque
typedef struct BiStack {
    uint8_t *mem, *front, *back;
    uintptr_t mem, front, back;
    size_t size;
} BiStack;
#if defined(__cplusplus)
extern "C" {            // Prevents name mangling of functions
#endif
@@ -115,10 +123,7 @@ RMEMAPI void *MemPoolRealloc(MemPool *mempool, void *ptr, size_t bytes);
RMEMAPI void MemPoolFree(MemPool *mempool, void *ptr);
RMEMAPI void MemPoolCleanUp(MemPool *mempool, void **ptrref);
RMEMAPI void MemPoolReset(MemPool *mempool);
RMEMAPI bool MemPoolDefrag(MemPool *mempool);
RMEMAPI size_t GetMemPoolFreeMemory(const MemPool mempool);
RMEMAPI void ToggleMemPoolAutoDefrag(MemPool *mempool);
//------------------------------------------------------------------------------------
// Functions Declaration - Object Pool
@@ -161,7 +166,9 @@ RMEMAPI intptr_t BiStackMargins(BiStack destack);
#if defined(RMEM_IMPLEMENTATION)
#include <stdio.h>      // Required for: malloc(), calloc(), free()
#include <stdio.h>      // Required for:
#include <stdlib.h>     // Required for: malloc(), calloc(), free()
#include <string.h>     // Required for: memset(), memmove()
//----------------------------------------------------------------------------------
// Defines and Macros
@@ -188,6 +195,145 @@ static inline size_t __AlignSize(const size_t size, const size_t align)
    return (size + (align - 1)) & -align;
}
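/* A short worked example (illustrative) of the rounding identity above: for a
   power-of-two align, -align equals ~(align - 1) in two's complement, so

        __AlignSize(13, 8) == (13 + 7) & ~7 == 16
        __AlignSize(16, 8) == (16 + 7) & ~7 == 16

   Every caller in this file passes sizeof(intptr_t), sizeof(size_t), or
   sizeof(uintptr_t) — all powers of two. */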
static MemNode *__SplitMemNode(MemNode *const node, const size_t bytes)
{
    uintptr_t n = (uintptr_t)node;
    MemNode *const r = (MemNode *)(n + (node->size - bytes));
    node->size -= bytes;
    r->size = bytes;
    return r;
}
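/* Illustrative sketch with assumed addresses: splitting a 160-byte free node at
   0x1000 for a 64-byte request shrinks the node in place and returns its tail,
   so the low-address remainder stays linked in the free list untouched:

        MemNode *r = __SplitMemNode(node, 64);  // node->size: 160 -> 96; r == (MemNode *)0x1060, r->size == 64
*/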
static void __InsertMemNodeBefore(AllocList *const list, MemNode *const insert, MemNode *const curr)
{
    insert->next = curr;
    if (curr->prev == NULL) list->head = insert;
    else
    {
        insert->prev = curr->prev;
        curr->prev->next = insert;
    }
    curr->prev = insert;
}
static void __ReplaceMemNode(MemNode *const old, MemNode *const replace)
{
    replace->prev = old->prev;
    replace->next = old->next;
    if (old->prev != NULL) old->prev->next = replace;
    if (old->next != NULL) old->next->prev = replace;
}
static MemNode *__RemoveMemNode(AllocList *const list, MemNode *const node)
{
    if (node->prev != NULL) node->prev->next = node->next;
    else
    {
        list->head = node->next;
        if (list->head != NULL) list->head->prev = NULL;
        else list->tail = NULL;
    }
    if (node->next != NULL) node->next->prev = node->prev;
    else
    {
        list->tail = node->prev;
        if (list->tail != NULL) list->tail->next = NULL;
        else list->head = NULL;
    }
    list->len--;
    return node;
}
static MemNode *__FindMemNode(AllocList *const list, const size_t bytes)
{
    for (MemNode *node = list->head; node != NULL; node = node->next)
    {
        if (node->size < bytes) continue;
        // close in size - reduce fragmentation by not splitting.
        else if (node->size <= bytes + MEM_SPLIT_THRESHOLD) return __RemoveMemNode(list, node);
        else return __SplitMemNode(node, bytes);
    }
    return NULL;
}
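/* A brief sketch of the split threshold (hypothetical sizes, 64-bit target where
   MEM_SPLIT_THRESHOLD == 32): a 72-byte request served from a 96-byte node takes
   the whole node (96 <= 72 + 32) rather than leaving a sliver too small to satisfy
   a later allocation, while a 256-byte node is split via __SplitMemNode(). */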
static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNode *const node, const bool is_bucket)
{
    if (list->head == NULL)
    {
        list->head = node;
        list->len++;
    }
    else
    {
        for (MemNode *iter = list->head; iter != NULL; iter = iter->next)
        {
            if ((uintptr_t)iter == mempool->arena.offs)
            {
                mempool->arena.offs += iter->size;
                __RemoveMemNode(list, iter);
                iter = list->head;
                if (iter == NULL)
                {
                    list->head = node;
                    list->len++;
                    return;
                }
            }
            const uintptr_t inode = (uintptr_t)node;
            const uintptr_t iiter = (uintptr_t)iter;
            const uintptr_t iter_end = iiter + iter->size;
            const uintptr_t node_end = inode + node->size;
            if (iter == node) return;
            else if (iter < node)
            {
                // node was coalesced prior.
                if (iter_end > inode) return;
                else if (iter_end == inode && !is_bucket)
                {
                    // if we can coalesce, do so.
                    iter->size += node->size;
                    return;
                }
            }
            else if (iter > node)
            {
                // Address sort, lowest to highest aka ascending order.
                if (iiter < node_end) return;
                else if (iter == list->head && !is_bucket)
                {
                    if (iter_end == inode) iter->size += node->size;
                    else if (node_end == iiter)
                    {
                        node->size += list->head->size;
                        node->next = list->head->next;
                        node->prev = NULL;
                        list->head = node;
                    }
                    else
                    {
                        node->next = iter;
                        node->prev = NULL;
                        iter->prev = node;
                        list->head = node;
                        list->len++;
                    }
                    return;
                }
                else if (iter_end == inode && !is_bucket)
                {
                    // if we can coalesce, do so.
                    iter->size += node->size;
                    return;
                }
                else
                {
                    __InsertMemNodeBefore(list, node, iter);
                    list->len++;
                    return;
                }
            }
        }
    }
}
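/* Sketch of the invariants kept above (assumed addresses): free nodes stay
   address-sorted so physically adjacent nodes coalesce on insert, and a listed
   node found sitting at arena.offs is absorbed back into the arena instead of
   staying listed:

        arena.offs == 0x2000, listed node @ 0x2000 (size 64)  ->  offs becomes 0x2040, node unlinked
        inserted node @ 0x3000 (size 64) ends where a free node begins @ 0x3040  ->  merged into one 128-byte node

   Bucket lists (is_bucket == true) skip coalescing so their nodes keep a fixed size class. */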
//----------------------------------------------------------------------------------
// Module Functions Definition - Memory Pool
//----------------------------------------------------------------------------------
@@ -196,114 +342,77 @@ MemPool CreateMemPool(const size_t size)
{
    MemPool mempool = { 0 };
    if (size == 0UL) return mempool;
    if (size == 0) return mempool;
    else
    {
        // Align the mempool size to at least the size of an alloc node.
        mempool.stack.size = size;
        mempool.stack.mem = malloc(mempool.stack.size*sizeof *mempool.stack.mem);
        if (mempool.stack.mem == NULL)
        {
            mempool.stack.size = 0UL;
            return mempool;
        }
        uint8_t *const restrict buf = malloc(size*sizeof *buf);
        if (buf == NULL) return mempool;
        else
        {
            mempool.stack.base = mempool.stack.mem + mempool.stack.size;
            mempool.arena.size = size;
            mempool.arena.mem = (uintptr_t)buf;
            mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
            return mempool;
        }
    }
}
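/* A minimal usage sketch (illustrative, not part of the header): create a pool,
   allocate from it, and hand the memory back. The 1 MiB size is arbitrary.

        #define RMEM_IMPLEMENTATION
        #include "rmem.h"

        int main(void)
        {
            MemPool pool = CreateMemPool(1024*1024);
            if (pool.arena.mem == 0) return 1;
            int *nums = MemPoolAlloc(&pool, 64*sizeof *nums);   // returned memory is zeroed
            MemPoolFree(&pool, nums);
            DestroyMemPool(&pool);
            return 0;
        }
*/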
MemPool CreateMemPoolFromBuffer(void *buf, const size_t size)
MemPool CreateMemPoolFromBuffer(void *const restrict buf, const size_t size)
{
    MemPool mempool = { 0 };
    if ((size == 0UL) || (buf == NULL) || (size <= sizeof(MemNode))) return mempool;
    if ((size == 0) || (buf == NULL) || (size <= sizeof(MemNode))) return mempool;
    else
    {
        mempool.stack.size = size;
        mempool.stack.mem = buf;
        mempool.stack.base = mempool.stack.mem + mempool.stack.size;
        mempool.arena.size = size;
        mempool.arena.mem = (uintptr_t)buf;
        mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
        return mempool;
    }
}
void DestroyMemPool(MemPool *const mempool)
void DestroyMemPool(MemPool *const restrict mempool)
{
    if ((mempool == NULL) || (mempool->stack.mem == NULL)) return;
    if (mempool->arena.mem == 0) return;
    else
    {
        free(mempool->stack.mem);
        void *const restrict ptr = (void *)mempool->arena.mem;
        free(ptr);
        *mempool = (MemPool){ 0 };
    }
}
void *MemPoolAlloc(MemPool *const mempool, const size_t size)
{
    if ((mempool == NULL) || (size == 0UL) || (size > mempool->stack.size)) return NULL;
    if ((size == 0) || (size > mempool->arena.size)) return NULL;
    else
    {
        MemNode *new_mem = NULL;
        const size_t ALLOC_SIZE = __AlignSize(size + sizeof *new_mem, sizeof(intptr_t));
        const size_t BUCKET_INDEX = (ALLOC_SIZE >> MEMPOOL_BUCKET_BITS) - 1;
        const size_t BUCKET_SLOT = (ALLOC_SIZE >> MEMPOOL_BUCKET_BITS) - 1;
        // If the size is small enough, let's check if our buckets have a fitting memory block.
        if ((BUCKET_INDEX < MEMPOOL_BUCKET_SIZE) &&
            (mempool->buckets[BUCKET_INDEX] != NULL) &&
            (mempool->buckets[BUCKET_INDEX]->size >= ALLOC_SIZE))
        if (BUCKET_SLOT < MEMPOOL_BUCKET_SIZE)
        {
            new_mem = mempool->buckets[BUCKET_INDEX];
            mempool->buckets[BUCKET_INDEX] = mempool->buckets[BUCKET_INDEX]->next;
            if (mempool->buckets[BUCKET_INDEX] != NULL) mempool->buckets[BUCKET_INDEX]->prev = NULL;
            new_mem = __FindMemNode(&mempool->buckets[BUCKET_SLOT], ALLOC_SIZE);
        }
        else if (mempool->freeList.head != NULL)
        else if (mempool->large.head != NULL)
        {
            const size_t MEM_SPLIT_THRESHOLD = 16;
            // If the freelist is valid, let's allocate FROM the freelist then!
            for (MemNode *inode = mempool->freeList.head; inode != NULL; inode = inode->next)
            {
                if (inode->size < ALLOC_SIZE) continue;
                else if (inode->size <= (ALLOC_SIZE + MEM_SPLIT_THRESHOLD))
                {
                    // Close in size - reduce fragmentation by not splitting.
                    new_mem = inode;
                    (inode->prev != NULL)? (inode->prev->next = inode->next) : (mempool->freeList.head = inode->next);
                    (inode->next != NULL)? (inode->next->prev = inode->prev) : (mempool->freeList.tail = inode->prev);
                    if (mempool->freeList.head != NULL) mempool->freeList.head->prev = NULL;
                    else mempool->freeList.tail = NULL;
                    if (mempool->freeList.tail != NULL) mempool->freeList.tail->next = NULL;
                    mempool->freeList.len--;
                    break;
                }
                else
                {
                    // Split the memory chunk.
                    new_mem = (MemNode *)((uint8_t *)inode + (inode->size - ALLOC_SIZE));
                    inode->size -= ALLOC_SIZE;
                    new_mem->size = ALLOC_SIZE;
                    break;
                }
            }
            new_mem = __FindMemNode(&mempool->large, ALLOC_SIZE);
        }
        if (new_mem == NULL)
        {
            // not enough memory to support the size!
            if ((mempool->stack.base - ALLOC_SIZE) < mempool->stack.mem) return NULL;
            if ((mempool->arena.offs - ALLOC_SIZE) < mempool->arena.mem) return NULL;
            else
            {
                // Couldn't allocate from a freelist, allocate from available mempool.
                // Subtract allocation size from the mempool.
                mempool->stack.base -= ALLOC_SIZE;
                mempool->arena.offs -= ALLOC_SIZE;
                // Use the available mempool space as the new node.
                new_mem = (MemNode *)mempool->stack.base;
                new_mem = (MemNode *)mempool->arena.offs;
                new_mem->size = ALLOC_SIZE;
            }
        }
@@ -313,33 +422,32 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
        //  | mem size  |   lowest addr of block
        //  | next node |   12 byte (32-bit) header
        //  | prev node |   24 byte (64-bit) header
        //  --------------
        //  |------------|
        //  |   alloc'd  |
        //  |   memory   |
        //  |    space   |   highest addr of block
        //  --------------
        new_mem->next = new_mem->prev = NULL;
        uint8_t *const final_mem = (uint8_t *)new_mem + sizeof *new_mem;
        uint8_t *const restrict final_mem = (uint8_t *)new_mem + sizeof *new_mem;
        return memset(final_mem, 0, new_mem->size - sizeof *new_mem);
    }
}
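/* Worked example of the block layout above (64-bit, illustrative): a request for
   100 bytes becomes ALLOC_SIZE == __AlignSize(100 + 24, 8) == 128; the 24-byte
   MemNode header occupies the start of the block and the caller receives
   block + 24, i.e. 104 zeroed bytes, slightly more than asked for. */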
void *MemPoolRealloc(MemPool *const restrict mempool, void *ptr, const size_t size)
void *MemPoolRealloc(MemPool *const restrict mempool, void *const ptr, const size_t size)
{
    if ((mempool == NULL) || (size > mempool->stack.size)) return NULL;
    if (size > mempool->arena.size) return NULL;
    // NULL ptr should make this work like regular Allocation.
    else if (ptr == NULL) return MemPoolAlloc(mempool, size);
    else if ((uintptr_t)ptr - sizeof(MemNode) < (uintptr_t)mempool->stack.mem) return NULL;
    else if ((uintptr_t)ptr - sizeof(MemNode) < mempool->arena.mem) return NULL;
    else
    {
        MemNode *const node = (MemNode *)((uint8_t *)ptr - sizeof *node);
        const size_t NODE_SIZE = sizeof *node;
        uint8_t *const resized_block = MemPoolAlloc(mempool, size);
        if (resized_block == NULL) return NULL;
        else
        {
            MemNode *const resized = (MemNode *)(resized_block - sizeof *resized);
            memmove(resized_block, ptr, (node->size > resized->size)? (resized->size - NODE_SIZE) : (node->size - NODE_SIZE));
            MemPoolFree(mempool, ptr);
            return resized_block;
@@ -347,72 +455,39 @@ void *MemPoolRealloc(MemPool *const restrict mempool, void *ptr, const size_t si
        }
    }
}
void MemPoolFree(MemPool *const restrict mempool, void *ptr)
void MemPoolFree(MemPool *const restrict mempool, void *const ptr)
{
    if ((mempool == NULL) || (ptr == NULL) || ((uintptr_t)ptr - sizeof(MemNode) < (uintptr_t)mempool->stack.mem)) return;
    const uintptr_t p = (uintptr_t)ptr;
    if ((ptr == NULL) || (p - sizeof(MemNode) < mempool->arena.mem)) return;
    else
    {
        // Behind the actual pointer data is the allocation info.
        MemNode *const mem_node = (MemNode *)((uint8_t *)ptr - sizeof *mem_node);
        const size_t BUCKET_INDEX = (mem_node->size >> MEMPOOL_BUCKET_BITS) - 1;
        const uintptr_t block = p - sizeof(MemNode);
        MemNode *const mem_node = (MemNode *)block;
        const size_t BUCKET_SLOT = (mem_node->size >> MEMPOOL_BUCKET_BITS) - 1;
        // Make sure the pointer data is valid.
        if (((uintptr_t)mem_node < (uintptr_t)mempool->stack.base) ||
            (((uintptr_t)mem_node - (uintptr_t)mempool->stack.mem) > mempool->stack.size) ||
            (mem_node->size == 0UL) ||
            (mem_node->size > mempool->stack.size)) return;
        // If the mem_node is right at the stack base ptr, then add it to the stack.
        else if ((uintptr_t)mem_node == (uintptr_t)mempool->stack.base)
        if ((block < mempool->arena.offs) ||
            ((block - mempool->arena.mem) > mempool->arena.size) ||
            (mem_node->size == 0) ||
            (mem_node->size > mempool->arena.size)) return;
        // If the mem_node is right at the arena offs, then merge it back to the arena.
        else if (block == mempool->arena.offs)
        {
            mempool->stack.base += mem_node->size;
            mempool->arena.offs += mem_node->size;
        }
        // attempted stack merge failed, try to place it into the memnode buckets
        else if (BUCKET_INDEX < MEMPOOL_BUCKET_SIZE)
        {
            if (mempool->buckets[BUCKET_INDEX] == NULL) mempool->buckets[BUCKET_INDEX] = mem_node;
            else
            {
                for (MemNode *n = mempool->buckets[BUCKET_INDEX]; n != NULL; n = n->next) if (n == mem_node) return;
                mempool->buckets[BUCKET_INDEX]->prev = mem_node;
                mem_node->next = mempool->buckets[BUCKET_INDEX];
                mempool->buckets[BUCKET_INDEX] = mem_node;
            }
        }
        // Otherwise, we add it to the free list.
        // We also check if the freelist already has the pointer so we can prevent double frees.
        else /*if ((mempool->freeList.len == 0UL) || ((uintptr_t)mempool->freeList.head >= (uintptr_t)mempool->stack.mem && (uintptr_t)mempool->freeList.head - (uintptr_t)mempool->stack.mem < mempool->stack.size))*/
        else
        {
            for (MemNode *n = mempool->freeList.head; n != NULL; n = n->next) if (n == mem_node) return;
            // This code insertion sorts where largest size is last.
            if (mempool->freeList.head == NULL)
            {
                mempool->freeList.head = mempool->freeList.tail = mem_node;
                mempool->freeList.len++;
            }
            else if (mempool->freeList.head->size >= mem_node->size)
            {
                mem_node->next = mempool->freeList.head;
                mem_node->next->prev = mem_node;
                mempool->freeList.head = mem_node;
                mempool->freeList.len++;
            }
            else //if (mempool->freeList.tail->size <= mem_node->size)
            {
                mem_node->prev = mempool->freeList.tail;
                mempool->freeList.tail->next = mem_node;
                mempool->freeList.tail = mem_node;
                mempool->freeList.len++;
            }
            if (mempool->freeList.autoDefrag && (mempool->freeList.maxNodes != 0UL) && (mempool->freeList.len > mempool->freeList.maxNodes)) MemPoolDefrag(mempool);
            // try to place it into bucket or large freelist.
            struct AllocList *const l = (BUCKET_SLOT < MEMPOOL_BUCKET_SIZE)? &mempool->buckets[BUCKET_SLOT] : &mempool->large;
            __InsertMemNode(mempool, l, mem_node, (BUCKET_SLOT < MEMPOOL_BUCKET_SIZE));
        }
    }
}
void MemPoolCleanUp(MemPool *const restrict mempool, void **ptrref)
void MemPoolCleanUp(MemPool *const restrict mempool, void **const ptrref)
{
    if ((mempool == NULL) || (ptrref == NULL) || (*ptrref == NULL)) return;
    if ((ptrref == NULL) || (*ptrref == NULL)) return;
    else
    {
        MemPoolFree(mempool, *ptrref);
@@ -422,264 +497,127 @@ void MemPoolCleanUp(MemPool *const restrict mempool, void **ptrref)
size_t GetMemPoolFreeMemory(const MemPool mempool)
{
    size_t total_remaining = (uintptr_t)mempool.stack.base - (uintptr_t)mempool.stack.mem;
    size_t total_remaining = mempool.arena.offs - mempool.arena.mem;
    for (MemNode *n = mempool.freeList.head; n != NULL; n = n->next) total_remaining += n->size;
    for (MemNode *n = mempool.large.head; n != NULL; n = n->next) total_remaining += n->size;
    for (int i = 0; i < MEMPOOL_BUCKET_SIZE; i++) for (MemNode *n = mempool.buckets[i]; n != NULL; n = n->next) total_remaining += n->size;
    for (size_t i = 0; i < MEMPOOL_BUCKET_SIZE; i++) for (MemNode *n = mempool.buckets[i].head; n != NULL; n = n->next) total_remaining += n->size;
    return total_remaining;
}
void MemPoolReset(MemPool *const mempool)
{
    if (mempool == NULL) return;
    mempool->freeList.head = mempool->freeList.tail = NULL;
    mempool->freeList.len = 0;
    for (int i = 0; i < MEMPOOL_BUCKET_SIZE; i++) mempool->buckets[i] = NULL;
    mempool->stack.base = mempool->stack.mem + mempool->stack.size;
}
bool MemPoolDefrag(MemPool *const mempool)
{
    if (mempool == NULL) return false;
    else
    mempool->large.head = mempool->large.tail = NULL;
    mempool->large.len = 0;
    for (size_t i = 0; i < MEMPOOL_BUCKET_SIZE; i++)
    {
        // If the memory pool has been entirely released, fully defrag it.
        if (mempool->stack.size == GetMemPoolFreeMemory(*mempool))
        {
            MemPoolReset(mempool);
            return true;
        }
        else
        {
            for (int i = 0; i < MEMPOOL_BUCKET_SIZE; i++)
            {
                while (mempool->buckets[i] != NULL)
                {
                    if ((uintptr_t)mempool->buckets[i] == (uintptr_t)mempool->stack.base)
                    {
                        mempool->stack.base += mempool->buckets[i]->size;
                        mempool->buckets[i]->size = 0;
                        mempool->buckets[i] = mempool->buckets[i]->next;
                        if (mempool->buckets[i] != NULL) mempool->buckets[i]->prev = NULL;
                    }
                    else break;
                }
            }
            const size_t PRE_DEFRAG_LEN = mempool->freeList.len;
            MemNode **node = &mempool->freeList.head;
            while (*node != NULL)
            {
                if ((uintptr_t)*node == (uintptr_t)mempool->stack.base)
                {
                    // If node is right at the stack, merge it back into the stack.
                    mempool->stack.base += (*node)->size;
                    (*node)->size = 0UL;
                    ((*node)->prev != NULL)? ((*node)->prev->next = (*node)->next) : (mempool->freeList.head = (*node)->next);
                    ((*node)->next != NULL)? ((*node)->next->prev = (*node)->prev) : (mempool->freeList.tail = (*node)->prev);
                    if (mempool->freeList.head != NULL) mempool->freeList.head->prev = NULL;
                    else mempool->freeList.tail = NULL;
                    if (mempool->freeList.tail != NULL) mempool->freeList.tail->next = NULL;
                    mempool->freeList.len--;
                    node = &mempool->freeList.head;
                }
                else if (((uintptr_t)*node + (*node)->size) == (uintptr_t)(*node)->next)
                {
                    // Next node is at a higher address.
                    (*node)->size += (*node)->next->size;
                    (*node)->next->size = 0UL;
                    // <-[P Curr N]-> <-[P Next N]-> <-[P NextNext N]->
                    //
                    //           |--------------------|
                    // <-[P Curr N]-> <-[P Next N]-> [P NextNext N]->
                    if ((*node)->next->next != NULL) (*node)->next->next->prev = *node;
                    // <-[P Curr N]-> <-[P NextNext N]->
                    (*node)->next = (*node)->next->next;
                    mempool->freeList.len--;
                    node = &mempool->freeList.head;
                }
                else if ((((uintptr_t)*node + (*node)->size) == (uintptr_t)(*node)->prev) && ((*node)->prev->prev != NULL))
                {
                    // Prev node is at a higher address.
                    (*node)->size += (*node)->prev->size;
                    (*node)->prev->size = 0UL;
                    // <-[P PrevPrev N]-> <-[P Prev N]-> <-[P Curr N]->
                    //
                    //               |--------------------|
                    // <-[P PrevPrev N] <-[P Prev N]-> <-[P Curr N]->
                    (*node)->prev->prev->next = *node;
                    // <-[P PrevPrev N]-> <-[P Curr N]->
                    (*node)->prev = (*node)->prev->prev;
                    mempool->freeList.len--;
                    node = &mempool->freeList.head;
                }
                else if ((*node)->prev != NULL && (*node)->next != NULL && ((uintptr_t)*node - (*node)->next->size == (uintptr_t)(*node)->next))
                {
                    // Next node is at a lower address.
                    (*node)->next->size += (*node)->size;
                    (*node)->size = 0UL;
                    (*node)->next->prev = (*node)->prev;
                    (*node)->prev->next = (*node)->next;
                    *node = (*node)->next;
                    mempool->freeList.len--;
                    node = &mempool->freeList.head;
                }
                else if ((*node)->prev != NULL && (*node)->next != NULL && ((uintptr_t)*node - (*node)->prev->size == (uintptr_t)(*node)->prev))
                {
                    // Prev node is at a lower address.
                    (*node)->prev->size += (*node)->size;
                    (*node)->size = 0UL;
                    (*node)->next->prev = (*node)->prev;
                    (*node)->prev->next = (*node)->next;
                    *node = (*node)->prev;
                    mempool->freeList.len--;
                    node = &mempool->freeList.head;
                }
                else
                {
                    node = &(*node)->next;
                }
            }
            return PRE_DEFRAG_LEN > mempool->freeList.len;
        }
        mempool->buckets[i].head = mempool->buckets[i].tail = NULL;
        mempool->buckets[i].len = 0;
    }
}
void ToggleMemPoolAutoDefrag(MemPool *const mempool)
{
    if (mempool == NULL) return;
    else mempool->freeList.autoDefrag ^= true;
    mempool->arena.offs = mempool->arena.mem + mempool->arena.size;
}
//----------------------------------------------------------------------------------
// Module Functions Definition - Object Pool
//----------------------------------------------------------------------------------
union ObjInfo {
    uint8_t *const byte;
    size_t *const index;
};
ObjPool CreateObjPool(const size_t objsize, const size_t len)
{
    ObjPool objpool = { 0 };
    if ((len == 0UL) || (objsize == 0UL)) return objpool;
    if ((len == 0) || (objsize == 0)) return objpool;
    else
    {
        objpool.objSize = __AlignSize(objsize, sizeof(size_t));
        objpool.stack.size = objpool.freeBlocks = len;
        objpool.stack.mem = calloc(objpool.stack.size, objpool.objSize);
        if (objpool.stack.mem == NULL)
        const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
        uint8_t *const restrict buf = calloc(len, aligned_size);
        if (buf == NULL) return objpool;
        objpool.objSize = aligned_size;
        objpool.memSize = objpool.freeBlocks = len;
        objpool.mem = (uintptr_t)buf;
        for (size_t i = 0; i < objpool.freeBlocks; i++)
        {
            objpool.stack.size = 0UL;
            return objpool;
            size_t *const restrict index = (size_t *)(objpool.mem + (i*aligned_size));
            *index = i + 1;
        }
        else
        {
            for (int i = 0; i < objpool.freeBlocks; i++)
            {
                union ObjInfo block = { .byte = &objpool.stack.mem[i*objpool.objSize] };
                *block.index = i + 1;
            }
            objpool.stack.base = objpool.stack.mem;
            return objpool;
        }
        objpool.offs = objpool.mem;
        return objpool;
    }
}
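/* Sketch of the embedded free list built above, for a hypothetical pool of four
   16-byte objects: each free slot stores the index of the next free slot in its
   first size_t, so no side table is needed.

        ObjPool p = CreateObjPool(16, 4);   // slots initially store 1,2,3,4; offs -> slot 0
        void *a = ObjPoolAlloc(&p);         // returns slot 0 zeroed, offs -> slot 1
        void *b = ObjPoolAlloc(&p);         // returns slot 1 zeroed, offs -> slot 2
        ObjPoolFree(&p, a);                 // slot 0 stores index 2 (the old head), offs -> slot 0
        DestroyObjPool(&p);
*/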
ObjPool CreateObjPoolFromBuffer(void *const buf, const size_t objsize, const size_t len)
ObjPool CreateObjPoolFromBuffer(void *const restrict buf, const size_t objsize, const size_t len)
{
    ObjPool objpool = { 0 };
    // If the object size isn't large enough to align to a size_t, then we can't use it.
    if ((buf == NULL) || (len == 0UL) || (objsize < sizeof(size_t)) || (objsize*len != __AlignSize(objsize, sizeof(size_t))*len)) return objpool;
    const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
    if ((buf == NULL) || (len == 0) || (objsize < sizeof(size_t)) || (objsize*len != aligned_size*len)) return objpool;
    else
    {
        objpool.objSize = __AlignSize(objsize, sizeof(size_t));
        objpool.stack.size = objpool.freeBlocks = len;
        objpool.stack.mem = buf;
        objpool.objSize = aligned_size;
        objpool.memSize = objpool.freeBlocks = len;
        objpool.mem = (uintptr_t)buf;
        for (int i = 0; i < objpool.freeBlocks; i++)
        for (size_t i = 0; i < objpool.freeBlocks; i++)
        {
            union ObjInfo block = { .byte = &objpool.stack.mem[i*objpool.objSize] };
            *block.index = i + 1;
            size_t *const restrict index = (size_t *)(objpool.mem + (i*aligned_size));
            *index = i + 1;
        }
        objpool.stack.base = objpool.stack.mem;
        objpool.offs = objpool.mem;
        return objpool;
    }
}
void DestroyObjPool(ObjPool *const objpool)
void DestroyObjPool(ObjPool *const restrict objpool)
{
    if ((objpool == NULL) || (objpool->stack.mem == NULL)) return;
    if (objpool->mem == 0) return;
    else
    {
        free(objpool->stack.mem);
        void *const restrict ptr = (void *)objpool->mem;
        free(ptr);
        *objpool = (ObjPool){ 0 };
    }
}
void *ObjPoolAlloc(ObjPool *const objpool)
{
    if (objpool == NULL) return NULL;
    else
    if (objpool->freeBlocks > 0)
    {
        if (objpool->freeBlocks > 0UL)
        {
            // For first allocation, head points to the very first index.
            // Head = &pool[0];
            // ret = Head == ret = &pool[0];
            union ObjInfo ret = { .byte = objpool->stack.base };
            objpool->freeBlocks--;
            // after allocating, we set head to the address of the index that *Head holds.
            // Head = &pool[*Head * pool.objsize];
            objpool->stack.base = (objpool->freeBlocks != 0UL)? objpool->stack.mem + (*ret.index*objpool->objSize) : NULL;
            memset(ret.byte, 0, objpool->objSize);
            return ret.byte;
        }
        else return NULL;
        // For first allocation, head points to the very first index.
        // Head = &pool[0];
        // ret = Head == ret = &pool[0];
        size_t *const restrict block = (size_t *)objpool->offs;
        objpool->freeBlocks--;
        // after allocating, we set head to the address of the index that *Head holds.
        // Head = &pool[*Head * pool.objsize];
        objpool->offs = (objpool->freeBlocks != 0)? objpool->mem + (*block*objpool->objSize) : 0;
        return memset(block, 0, objpool->objSize);
    }
    else return NULL;
}
void ObjPoolFree(ObjPool *const restrict objpool, void *ptr)
void ObjPoolFree(ObjPool *const restrict objpool, void *const ptr)
{
    union ObjInfo p = { .byte = ptr };
    if ((objpool == NULL) || (ptr == NULL) || (p.byte < objpool->stack.mem) || (p.byte > objpool->stack.mem + objpool->stack.size*objpool->objSize)) return;
    uintptr_t block = (uintptr_t)ptr;
    if ((ptr == NULL) || (block < objpool->mem) || (block > objpool->mem + objpool->memSize*objpool->objSize)) return;
    else
    {
        // When we free our pointer, we recycle the pointer space to store the previous index and then we push it as our new head.
        // *p = index of Head in relation to the buffer;
        // Head = p;
        *p.index = (objpool->stack.base != NULL)? (objpool->stack.base - objpool->stack.mem)/objpool->objSize : objpool->stack.size;
        objpool->stack.base = p.byte;
        size_t *const restrict index = (size_t *)block;
        *index = (objpool->offs != 0)? (objpool->offs - objpool->mem)/objpool->objSize : objpool->memSize;
        objpool->offs = block;
        objpool->freeBlocks++;
    }
}
void ObjPoolCleanUp(ObjPool *const restrict objpool, void **ptrref)
void ObjPoolCleanUp(ObjPool *const restrict objpool, void **const restrict ptrref)
{
    if ((objpool == NULL) || (ptrref == NULL) || (*ptrref == NULL)) return;
    if (ptrref == NULL) return;
    else
    {
        ObjPoolFree(objpool, *ptrref);
@@ -694,71 +632,85 @@ void ObjPoolCleanUp(ObjPool *const restrict objpool, void **ptrref)
BiStack CreateBiStack(const size_t len)
{
    BiStack destack = { 0 };
    if (len == 0UL) return destack;
    if (len == 0) return destack;
    uint8_t *const buf = malloc(len*sizeof *buf);
    if (buf == NULL) return destack;
    destack.size = len;
    destack.mem = malloc(len*sizeof *destack.mem);
    if (destack.mem == NULL) destack.size = 0UL;
    else
    {
        destack.front = destack.mem;
        destack.back = destack.mem + len;
    }
    destack.mem = (uintptr_t)buf;
    destack.front = destack.mem;
    destack.back = destack.mem + len;
    return destack;
}
BiStack CreateBiStackFromBuffer(void *const buf, const size_t len)
{
    BiStack destack = { 0 };
    if (len == 0UL || buf == NULL) return destack;
    destack.size = len;
    destack.mem = destack.front = buf;
    destack.back = destack.mem + len;
    return destack;
    if (len == 0 || buf == NULL) return destack;
    else
    {
        destack.size = len;
        destack.mem = destack.front = (uintptr_t)buf;
        destack.back = destack.mem + len;
        return destack;
    }
}
void DestroyBiStack(BiStack *const destack)
void DestroyBiStack(BiStack *const restrict destack)
{
    if ((destack == NULL) || (destack->mem == NULL)) return;
    free(destack->mem);
    *destack = (BiStack){ 0 };
    if (destack->mem == 0) return;
    else
    {
        uint8_t *const restrict buf = (uint8_t *)destack->mem;
        free(buf);
        *destack = (BiStack){ 0 };
    }
}
void *BiStackAllocFront(BiStack *const destack, const size_t len)
void *BiStackAllocFront(BiStack *const restrict destack, const size_t len)
{
    if ((destack == NULL) || (destack->mem == NULL)) return NULL;
    const size_t ALIGNED_LEN = __AlignSize(len, sizeof(uintptr_t));
    // front end stack is too high!
    if (destack->front + ALIGNED_LEN >= destack->back) return NULL;
    uint8_t *ptr = destack->front;
    destack->front += ALIGNED_LEN;
    return ptr;
    if (destack->mem == 0) return NULL;
    else
    {
        const size_t ALIGNED_LEN = __AlignSize(len, sizeof(uintptr_t));
        // front end arena is too high!
        if (destack->front + ALIGNED_LEN >= destack->back) return NULL;
        else
        {
            uint8_t *const restrict ptr = (uint8_t *)destack->front;
            destack->front += ALIGNED_LEN;
            return ptr;
        }
    }
}
void *BiStackAllocBack(BiStack *const destack, const size_t len)
void *BiStackAllocBack(BiStack *const restrict destack, const size_t len)
{
    if ((destack == NULL) || (destack->mem == NULL)) return NULL;
    const size_t ALIGNED_LEN = __AlignSize(len, sizeof(uintptr_t));
    // back end stack is too low
    if (destack->back - ALIGNED_LEN <= destack->front) return NULL;
    destack->back -= ALIGNED_LEN;
    return destack->back;
    if (destack->mem == 0) return NULL;
    else
    {
        const size_t ALIGNED_LEN = __AlignSize(len, sizeof(uintptr_t));
        // back end arena is too low
        if (destack->back - ALIGNED_LEN <= destack->front) return NULL;
        else
        {
            destack->back -= ALIGNED_LEN;
            uint8_t *const restrict ptr = (uint8_t *)destack->back;
            return ptr;
        }
    }
}
void BiStackResetFront(BiStack *const destack)
{
    if ((destack == NULL) || (destack->mem == NULL)) return;
    destack->front = destack->mem;
    if (destack->mem == 0) return;
    else destack->front = destack->mem;
}
void BiStackResetBack(BiStack *const destack)
{
    if ((destack == NULL) || (destack->mem == NULL)) return;
    destack->back = destack->mem + destack->size;
    if (destack->mem == 0) return;
    else destack->back = destack->mem + destack->size;
}
void BiStackResetAll(BiStack *const destack)
@@ -767,9 +719,21 @@ void BiStackResetAll(BiStack *const destack)
    BiStackResetFront(destack);
}
intptr_t BiStackMargins(const BiStack destack)
inline intptr_t BiStackMargins(const BiStack destack)
{
    return destack.back - destack.front;
}
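/* A minimal usage sketch (illustrative, not part of the header): the two ends grow
   toward each other, so short-lived data can live at the front while longer-lived
   data sits at the back, and resetting one end never disturbs the other.

        BiStack ds = CreateBiStack(4096);
        if (ds.mem != 0)
        {
            char *frame = BiStackAllocFront(&ds, 256);  // per-frame scratch
            char *level = BiStackAllocBack(&ds, 512);   // longer-lived data
            intptr_t left = BiStackMargins(ds);         // free bytes remaining between the two ends
            BiStackResetFront(&ds);                     // releases every front allocation at once
            DestroyBiStack(&ds);
        }
*/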
#endif  // RMEM_IMPLEMENTATION
/*******
 * Changelog
 * v1.0: First Creation.
 * v1.1: bug patches for the mempool and addition of object pool.
 * v1.2: addition of bidirectional arena.
 * v1.3:
 *     optimizations of allocators.
 *     renamed 'Stack' to 'Arena'.
 *     replaced certain define constants with an anonymous enum.
 *     refactored MemPool to no longer require active or deferred defragging.
 *******/