
Removed extras directory

pull/2521/head
Ray committed 3 years ago · commit 043fa4cba7
5 changed files with 109 additions and 77 deletions

1. examples/Makefile (+1 -1)
2. examples/Makefile.Web (+1 -1)
3. parser/Makefile (+2 -2)
4. src/easings.h (+0 -0)
5. src/rmem.h (+105 -73)

examples/Makefile (+1 -1)

@@ -228,7 +228,7 @@ endif
 # Define include paths for required headers: INCLUDE_PATHS
 # NOTE: Some external/extras libraries could be required (stb, easings...)
 #------------------------------------------------------------------------------------------------
-INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external -I$(RAYLIB_PATH)/src/extras
+INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external
 # Define additional directories containing required header files
 ifeq ($(PLATFORM),PLATFORM_DESKTOP)

examples/Makefile.Web (+1 -1)

@@ -239,7 +239,7 @@ endif
 # Define include paths for required headers
 # NOTE: Some external/extras libraries could be required (stb, easings...)
-INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external -I$(RAYLIB_PATH)/src/extras
+INCLUDE_PATHS = -I. -I$(RAYLIB_PATH)/src -I$(RAYLIB_PATH)/src/external
 # Define additional directories containing required header files
 ifeq ($(PLATFORM),PLATFORM_RPI)

parser/Makefile (+2 -2)

@@ -19,8 +19,8 @@ parse:
     ./raylib_parser -i ../src/raylib.h -o raylib_api.$(EXTENSION) -f $(FORMAT) -d RLAPI
     ./raylib_parser -i ../src/raymath.h -o raymath_api.$(EXTENSION) -f $(FORMAT) -d RMAPI
     ./raylib_parser -i ../src/rlgl.h -o rlgl_api.$(EXTENSION) -f $(FORMAT) -d RLAPI -t "RLGL IMPLEMENTATION"
-    ./raylib_parser -i ../src/extras/easings.h -o easings_api.$(EXTENSION) -f $(FORMAT) -d EASEDEF
-    ./raylib_parser -i ../src/extras/rmem.h -o rmem_api.$(EXTENSION) -f $(FORMAT) -d RMEMAPI -t "RMEM IMPLEMENTATION"
+    ./raylib_parser -i ../src/easings.h -o easings_api.$(EXTENSION) -f $(FORMAT) -d EASEDEF
+    ./raylib_parser -i ../src/rmem.h -o rmem_api.$(EXTENSION) -f $(FORMAT) -d RMEMAPI -t "RMEM IMPLEMENTATION"
     ./raylib_parser -i ../physac.h -o physac_api.$(EXTENSION) -f $(FORMAT) -d PHYSACDEF -t "PHYSAC IMPLEMENTATION"
     ./raylib_parser -i ../raygui.h -o raygui_api.$(EXTENSION) -f $(FORMAT) -d RAYGUIAPI -t "RAYGUI IMPLEMENTATION"

src/extras/easings.h → src/easings.h (renamed, +0 -0)


src/extras/rmem.h → src/rmem.h (+105 -73)

@@ -1,12 +1,12 @@
 /**********************************************************************************************
 *
-*   rmem - raylib memory pool and objects pool
+*   rmem v1.3 - raylib memory pool and objects pool
 *
 *   A quick, efficient, and minimal free list and arena-based allocator
 *
 *   PURPOSE:
-*     - A quicker, efficient memory allocator alternative to 'malloc' and friends.
-*     - Reduce the possibilities of memory leaks for beginner developers using Raylib.
+*     - A quicker, efficient memory allocator alternative to 'malloc()' and friends.
+*     - Reduce the possibilities of memory leaks for beginner developers using raylib.
 *     - Being able to flexibly range check memory if necessary.
 *
 *   CONFIGURATION:
@@ -16,6 +16,17 @@
 *   If not defined, the library is in header only mode and can be included in other headers
 *   or source files without problems. But only ONE file should hold the implementation.
 *
+*   CHANGELOG:
+*
+*     v1.0: First version
+*     v1.1: Bug patches for the mempool and addition of object pool
+*     v1.2: Addition of bidirectional arena
+*     v1.3: Several changes:
+*           Optimizations of allocators
+*           Renamed 'Stack' to 'Arena'
+*           Replaced certain define constants with an anonymous enum
+*           Refactored MemPool to no longer require active or deferred defragging
+*
 *
 *   LICENSE: zlib/libpng
 *
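
For orientation while reading the rest of the diff: rmem.h is a single-header library, so exactly one translation unit defines RMEM_IMPLEMENTATION before including it. A minimal usage sketch built only from functions visible in this diff (the include path and pool size are illustrative assumptions):

    #define RMEM_IMPLEMENTATION     // in exactly ONE source file
    #include "rmem.h"               // assumed path after this commit: src/rmem.h
    #include <stdio.h>

    int main(void)
    {
        MemPool pool = CreateMemPool(1024);                 // pool mallocs its own backing buffer
        int *nums = MemPoolAlloc(&pool, 32*sizeof *nums);   // returns a zeroed block
        if (nums != NULL) nums[0] = 42;
        printf("free bytes: %zu\n", GetMemPoolFreeMemory(pool));
        MemPoolFree(&pool, nums);                           // block goes back to the free lists
        DestroyMemPool(&pool);                              // releases the backing buffer
        return 0;
    }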
@@ -61,7 +72,13 @@
 // Types and Structures Definition
 //----------------------------------------------------------------------------------
+// Memory Pool
+enum {
+    MEMPOOL_BUCKET_SIZE = 8,
+    MEMPOOL_BUCKET_BITS = (sizeof(uintptr_t) >> 1) + 1,
+    MEM_SPLIT_THRESHOLD = sizeof(uintptr_t) * 4
+};
+// Memory pool node
 typedef struct MemNode MemNode;
 struct MemNode {
     size_t size;
@@ -74,33 +91,25 @@ typedef struct AllocList {
     size_t len;
 } AllocList;
 
-// Arena allocator.
+// Arena allocator
 typedef struct Arena {
     uintptr_t mem, offs;
     size_t size;
 } Arena;
 
-enum {
-    MEMPOOL_BUCKET_SIZE = 8,
-    MEMPOOL_BUCKET_BITS = (sizeof(uintptr_t) >> 1) + 1,
-    MEM_SPLIT_THRESHOLD = sizeof(uintptr_t) * 4
-};
-
+// Memory pool
 typedef struct MemPool {
     AllocList large, buckets[MEMPOOL_BUCKET_SIZE];
     Arena arena;
 } MemPool;
 
-// Object Pool
+// Object pool
 typedef struct ObjPool {
     uintptr_t mem, offs;
     size_t objSize, freeBlocks, memSize;
 } ObjPool;
 
-// Double-Ended Stack aka Deque
+// Double-ended stack (aka Deque)
 typedef struct BiStack {
     uintptr_t mem, front, back;
     size_t size;
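
A quick sanity check on the constants that moved into the anonymous enum, evaluated for a typical 64-bit target where sizeof(uintptr_t) == 8 (the right-hand notes are inferences from how the constants are used elsewhere in this diff, not statements from the source):

    // MEMPOOL_BUCKET_SIZE = 8                    -> MemPool keeps 8 bucketed free lists
    // MEMPOOL_BUCKET_BITS = (8 >> 1) + 1 = 5     -> presumably the bit width for the bucket
    //                                               size classes (its use isn't shown here)
    // MEM_SPLIT_THRESHOLD = 8 * 4 = 32 bytes     -> free nodes within 32 bytes of a request
    //                                               are handed out whole instead of split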
@@ -166,9 +175,8 @@ RMEMAPI intptr_t BiStackMargins(BiStack destack);
 
 #if defined(RMEM_IMPLEMENTATION)
 
-#include <stdio.h>      // Required for:
-#include <stdlib.h>     // Required for:
-#include <string.h>     // Required for:
+#include <stdlib.h>     // Required for: malloc(), calloc(), free()
+#include <string.h>     // Required for: memset(), memcpy(), memmove()
 
 //----------------------------------------------------------------------------------
 // Defines and Macros
@@ -201,18 +209,21 @@ static MemNode *__SplitMemNode(MemNode *const node, const size_t bytes)
     MemNode *const r = ( MemNode* )(n + (node->size - bytes));
     node->size -= bytes;
     r->size = bytes;
+
     return r;
 }
 
 static void __InsertMemNodeBefore(AllocList *const list, MemNode *const insert, MemNode *const curr)
 {
     insert->next = curr;
+
     if (curr->prev==NULL) list->head = insert;
     else
     {
         insert->prev = curr->prev;
         curr->prev->next = insert;
     }
+
     curr->prev = insert;
 }
@@ -220,10 +231,9 @@ static void __ReplaceMemNode(MemNode *const old, MemNode *const replace)
 {
     replace->prev = old->prev;
     replace->next = old->next;
-    if( old->prev != NULL )
-        old->prev->next = replace;
-    if( old->next != NULL )
-        old->next->prev = replace;
+
+    if (old->prev != NULL) old->prev->next = replace;
+    if (old->next != NULL) old->next->prev = replace;
 }
@@ -244,7 +254,9 @@ static MemNode *__RemoveMemNode(AllocList *const list, MemNode *const node)
         if (list->tail != NULL) list->tail->next = NULL;
         else list->head = NULL;
     }
+
     list->len--;
+
     return node;
 }
@@ -253,10 +265,12 @@ static MemNode *__FindMemNode(AllocList *const list, const size_t bytes)
     for (MemNode *node = list->head; node != NULL; node = node->next)
     {
         if (node->size < bytes) continue;
-        // close in size - reduce fragmentation by not splitting.
+        // Close in size - reduce fragmentation by not splitting
         else if (node->size <= bytes + MEM_SPLIT_THRESHOLD) return __RemoveMemNode(list, node);
         else return __SplitMemNode(node, bytes);
     }
+
     return NULL;
 }
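
The renamed comment above is the allocation policy worth spelling out: __FindMemNode hands a free node out whole whenever splitting would leave a fragment no bigger than MEM_SPLIT_THRESHOLD. A worked example, assuming the 64-bit value of 32 bytes from the enum earlier in the diff:

    // Request bytes = 40, free node of size 64:
    //   64 <= 40 + 32  -> true  -> __RemoveMemNode(): caller gets all 64 bytes,
    //                              no 24-byte sliver is left on the free list
    // Request bytes = 40, free node of size 128:
    //   128 <= 40 + 32 -> false -> __SplitMemNode(): 40 bytes are carved off the
    //                              node's high end and returned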
@@ -271,29 +285,34 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
 {
     for (MemNode *iter = list->head; iter != NULL; iter = iter->next)
     {
-        if (( uintptr_t )iter == mempool->arena.offs)
+        if ((uintptr_t)iter == mempool->arena.offs)
         {
             mempool->arena.offs += iter->size;
             __RemoveMemNode(list, iter);
             iter = list->head;
-            if (iter == NULL) {
+
+            if (iter == NULL)
+            {
                 list->head = node;
                 return;
             }
         }
 
-        const uintptr_t inode = ( uintptr_t )node;
-        const uintptr_t iiter = ( uintptr_t )iter;
+        const uintptr_t inode = (uintptr_t)node;
+        const uintptr_t iiter = (uintptr_t)iter;
         const uintptr_t iter_end = iiter + iter->size;
         const uintptr_t node_end = inode + node->size;
-        if (iter==node) return;
+
+        if (iter == node) return;
         else if (iter < node)
        {
             // node was coalesced prior.
             if (iter_end > inode) return;
-            else if (iter_end==inode && !is_bucket)
+            else if ((iter_end == inode) && !is_bucket)
             {
                 // if we can coalesce, do so.
                 iter->size += node->size;
+
                 return;
             }
             else if (iter->next == NULL)

@@ -302,6 +321,7 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
             iter->next = node;
             node->prev = iter;
             list->len++;
+
             return;
         }
     }
@@ -309,10 +329,10 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
     {
         // Address sort, lowest to highest aka ascending order.
         if (iiter < node_end) return;
-        else if (iter==list->head && !is_bucket)
+        else if ((iter == list->head) && !is_bucket)
         {
-            if (iter_end==inode) iter->size += node->size;
-            else if (node_end==iiter)
+            if (iter_end == inode) iter->size += node->size;
+            else if (node_end == iiter)
             {
                 node->size += list->head->size;
                 node->next = list->head->next;
@@ -327,9 +347,10 @@ static void __InsertMemNode(MemPool *const mempool, AllocList *const list, MemNo
             list->head = node;
             list->len++;
         }
+
         return;
     }
-    else if (iter_end==inode && !is_bucket)
+    else if ((iter_end == inode) && !is_bucket)
     {
         // if we can coalesce, do so.
         iter->size += node->size;
@@ -359,12 +380,14 @@ MemPool CreateMemPool(const size_t size)
     {
         // Align the mempool size to at least the size of an alloc node.
         uint8_t *const restrict buf = malloc(size*sizeof *buf);
+
         if (buf==NULL) return mempool;
         else
         {
             mempool.arena.size = size;
-            mempool.arena.mem = ( uintptr_t )buf;
+            mempool.arena.mem = (uintptr_t)buf;
             mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
+
             return mempool;
         }
     }
@@ -373,12 +396,14 @@ MemPool CreateMemPool(const size_t size)
 MemPool CreateMemPoolFromBuffer(void *const restrict buf, const size_t size)
 {
     MemPool mempool = { 0 };
+
     if ((size == 0) || (buf == NULL) || (size <= sizeof(MemNode))) return mempool;
     else
     {
         mempool.arena.size = size;
-        mempool.arena.mem = ( uintptr_t )buf;
+        mempool.arena.mem = (uintptr_t)buf;
         mempool.arena.offs = mempool.arena.mem + mempool.arena.size;
+
         return mempool;
     }
 }
@@ -388,7 +413,7 @@ void DestroyMemPool(MemPool *const restrict mempool)
     if (mempool->arena.mem == 0) return;
     else
     {
-        void *const restrict ptr = ( void* )mempool->arena.mem;
+        void *const restrict ptr = (void *)mempool->arena.mem;
         free(ptr);
         *mempool = (MemPool){ 0 };
     }
@@ -440,7 +465,8 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
             // | space | highest addr of block
             // --------------
             new_mem->next = new_mem->prev = NULL;
-            uint8_t *const restrict final_mem = ( uint8_t* )new_mem + sizeof *new_mem;
+            uint8_t *const restrict final_mem = (uint8_t *)new_mem + sizeof *new_mem;
+
             return memset(final_mem, 0, new_mem->size - sizeof *new_mem);
         }
     }
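
The final_mem arithmetic above encodes the pool's block layout: every allocation is preceded in memory by its MemNode header, and the user pointer begins sizeof(MemNode) bytes after it, which is why MemPoolFree() and MemPoolRealloc() below step back by sizeof(MemNode) to recover the header. A sketch of the layout (drawn for illustration, not taken from the source):

    //  low addr   +------------------+ <- MemNode *new_mem (size, next, prev)
    //             |  MemNode header  |
    //             +------------------+ <- final_mem = (uint8_t *)new_mem + sizeof *new_mem
    //             |  user data ...   |    zeroed via memset(final_mem, 0, size - header)
    //  high addr  +------------------+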
@@ -448,20 +474,22 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
 void *MemPoolRealloc(MemPool *const restrict mempool, void *const ptr, const size_t size)
 {
     if (size > mempool->arena.size) return NULL;
-    // NULL ptr should make this work like regular Allocation.
+    // NULL ptr should make this work like regular Allocation
     else if (ptr == NULL) return MemPoolAlloc(mempool, size);
     else if ((uintptr_t)ptr - sizeof(MemNode) < mempool->arena.mem) return NULL;
     else
     {
-        MemNode *const node = ( MemNode* )(( uint8_t* )ptr - sizeof *node);
+        MemNode *const node = (MemNode *)((uint8_t *)ptr - sizeof *node);
         const size_t NODE_SIZE = sizeof *node;
         uint8_t *const resized_block = MemPoolAlloc(mempool, size);
+
         if (resized_block == NULL) return NULL;
         else
         {
-            MemNode *const resized = ( MemNode* )(resized_block - sizeof *resized);
+            MemNode *const resized = (MemNode *)(resized_block - sizeof *resized);
             memmove(resized_block, ptr, (node->size > resized->size)? (resized->size - NODE_SIZE) : (node->size - NODE_SIZE));
             MemPoolFree(mempool, ptr);
+
             return resized_block;
         }
     }
@@ -469,7 +497,8 @@ void *MemPoolRealloc(MemPool *const restrict mempool, void *const ptr, const siz
 void MemPoolFree(MemPool *const restrict mempool, void *const ptr)
 {
-    const uintptr_t p = ( uintptr_t )ptr;
+    const uintptr_t p = (uintptr_t)ptr;
+
     if ((ptr == NULL) || (p - sizeof(MemNode) < mempool->arena.mem)) return;
     else
     {
@@ -511,9 +540,9 @@ size_t GetMemPoolFreeMemory(const MemPool mempool)
 {
     size_t total_remaining = mempool.arena.offs - mempool.arena.mem;
-    for (MemNode *n=mempool.large.head; n != NULL; n = n->next) total_remaining += n->size;
-    for (size_t i=0; i<MEMPOOL_BUCKET_SIZE; i++) for (MemNode *n = mempool.buckets[i].head; n != NULL; n = n->next) total_remaining += n->size;
+    for (MemNode *n = mempool.large.head; n != NULL; n = n->next) total_remaining += n->size;
+    for (size_t i = 0; i < MEMPOOL_BUCKET_SIZE; i++) for (MemNode *n = mempool.buckets[i].head; n != NULL; n = n->next) total_remaining += n->size;
     return total_remaining;
 }
@@ -522,11 +551,13 @@ void MemPoolReset(MemPool *const mempool)
 {
     mempool->large.head = mempool->large.tail = NULL;
     mempool->large.len = 0;
+
     for (size_t i = 0; i < MEMPOOL_BUCKET_SIZE; i++)
     {
         mempool->buckets[i].head = mempool->buckets[i].tail = NULL;
         mempool->buckets[i].len = 0;
     }
+
     mempool->arena.offs = mempool->arena.mem + mempool->arena.size;
 }
@@ -537,19 +568,21 @@ void MemPoolReset(MemPool *const mempool)
 ObjPool CreateObjPool(const size_t objsize, const size_t len)
 {
     ObjPool objpool = { 0 };
+
     if ((len == 0) || (objsize == 0)) return objpool;
     else
     {
         const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
         uint8_t *const restrict buf = calloc(len, aligned_size);
+
         if (buf == NULL) return objpool;
         objpool.objSize = aligned_size;
         objpool.memSize = objpool.freeBlocks = len;
-        objpool.mem = ( uintptr_t )buf;
+        objpool.mem = (uintptr_t)buf;
 
         for (size_t i=0; i<objpool.freeBlocks; i++)
         {
-            size_t *const restrict index = ( size_t* )(objpool.mem + (i*aligned_size));
+            size_t *const restrict index = (size_t *)(objpool.mem + (i*aligned_size));
             *index = i + 1;
         }
@@ -562,8 +595,9 @@ ObjPool CreateObjPoolFromBuffer(void *const restrict buf, const size_t objsize,
 {
     ObjPool objpool = { 0 };
-    // If the object size isn't large enough to align to a size_t, then we can't use it.
+
+    // If the object size isn't large enough to align to a size_t, then we can't use it
     const size_t aligned_size = __AlignSize(objsize, sizeof(size_t));
     if ((buf == NULL) || (len == 0) || (objsize < sizeof(size_t)) || (objsize*len != aligned_size*len)) return objpool;
     else
     {

@@ -573,7 +607,7 @@ ObjPool CreateObjPoolFromBuffer(void *const restrict buf, const size_t objsize,
         for (size_t i=0; i<objpool.freeBlocks; i++)
         {
-            size_t *const restrict index = ( size_t* )(objpool.mem + (i*aligned_size));
+            size_t *const restrict index = (size_t *)(objpool.mem + (i*aligned_size));
             *index = i + 1;
         }
@@ -587,9 +621,10 @@ void DestroyObjPool(ObjPool *const restrict objpool)
     if (objpool->mem == 0) return;
     else
     {
-        void *const restrict ptr = ( void* )objpool->mem;
+        void *const restrict ptr = (void *)objpool->mem;
         free(ptr);
-        *objpool = (ObjPool){0};
+
+        *objpool = (ObjPool){ 0 };
     }
 }
@@ -600,12 +635,13 @@ void *ObjPoolAlloc(ObjPool *const objpool)
         // For first allocation, head points to the very first index.
         // Head = &pool[0];
         // ret = Head == ret = &pool[0];
-        size_t *const restrict block = ( size_t* )objpool->offs;
+        size_t *const restrict block = (size_t *)objpool->offs;
         objpool->freeBlocks--;
-        // after allocating, we set head to the address of the index that *Head holds.
+
+        // After allocating, we set head to the address of the index that *Head holds.
         // Head = &pool[*Head * pool.objsize];
         objpool->offs = (objpool->freeBlocks != 0)? objpool->mem + (*block*objpool->objSize) : 0;
         return memset(block, 0, objpool->objSize);
     }
     else return NULL;
@@ -614,13 +650,14 @@ void *ObjPoolAlloc(ObjPool *const objpool)
 void ObjPoolFree(ObjPool *const restrict objpool, void *const ptr)
 {
     uintptr_t block = (uintptr_t)ptr;
+
     if ((ptr == NULL) || (block < objpool->mem) || (block > objpool->mem + objpool->memSize*objpool->objSize)) return;
     else
     {
         // When we free our pointer, we recycle the pointer space to store the previous index and then we push it as our new head.
         // *p = index of Head in relation to the buffer;
         // Head = p;
-        size_t *const restrict index = ( size_t* )block;
+        size_t *const restrict index = (size_t *)block;
         *index = (objpool->offs != 0)? (objpool->offs - objpool->mem)/objpool->objSize : objpool->memSize;
         objpool->offs = block;
         objpool->freeBlocks++;
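
The comments above describe an intrusive free list: while a slot is unused, its own first bytes store the index of the next free slot, so the pool needs no separate bookkeeping array. A minimal sketch using only calls visible in this diff (Vec2 and the pool length are illustrative assumptions):

    #define RMEM_IMPLEMENTATION
    #include "rmem.h"

    typedef struct { float x, y; } Vec2;    // hypothetical element type, not from the source

    int main(void)
    {
        ObjPool pool = CreateObjPool(sizeof(Vec2), 128); // 128 fixed-size slots
        Vec2 *v = ObjPoolAlloc(&pool);                   // pops the free-list head, O(1)
        if (v != NULL) { v->x = 1.0f; v->y = 2.0f; }     // block arrives zeroed (memset above)
        ObjPoolFree(&pool, v);                           // slot records the old head index and becomes the new head
        DestroyObjPool(&pool);
        return 0;
    }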
@@ -641,29 +678,34 @@ void ObjPoolCleanUp(ObjPool *const restrict objpool, void **const restrict ptrre
 //----------------------------------------------------------------------------------
 // Module Functions Definition - Double-Ended Stack
 //----------------------------------------------------------------------------------
 BiStack CreateBiStack(const size_t len)
 {
     BiStack destack = { 0 };
+
     if (len == 0) return destack;
 
     uint8_t *const buf = malloc(len*sizeof *buf);
-    if (buf==NULL) return destack;
+    if (buf == NULL) return destack;
+
     destack.size = len;
-    destack.mem = ( uintptr_t )buf;
+    destack.mem = (uintptr_t)buf;
     destack.front = destack.mem;
     destack.back = destack.mem + len;
+
     return destack;
 }
 
 BiStack CreateBiStackFromBuffer(void *const buf, const size_t len)
 {
     BiStack destack = { 0 };
-    if (len == 0 || buf == NULL) return destack;
+
+    if ((len == 0) || (buf == NULL)) return destack;
     else
     {
         destack.size = len;
-        destack.mem = destack.front = ( uintptr_t )buf;
+        destack.mem = destack.front = (uintptr_t)buf;
         destack.back = destack.mem + len;
+
         return destack;
     }
 }
@@ -673,9 +715,9 @@ void DestroyBiStack(BiStack *const restrict destack)
     if (destack->mem == 0) return;
     else
     {
-        uint8_t *const restrict buf = ( uint8_t* )destack->mem;
+        uint8_t *const restrict buf = (uint8_t *)destack->mem;
         free(buf);
-        *destack = (BiStack){0};
+        *destack = (BiStack){ 0 };
     }
 }
@@ -689,8 +731,9 @@ void *BiStackAllocFront(BiStack *const restrict destack, const size_t len)
     if (destack->front + ALIGNED_LEN >= destack->back) return NULL;
     else
     {
-        uint8_t *const restrict ptr = ( uint8_t* )destack->front;
+        uint8_t *const restrict ptr = (uint8_t *)destack->front;
         destack->front += ALIGNED_LEN;
+
         return ptr;
     }
 }
@@ -707,7 +750,8 @@ void *BiStackAllocBack(BiStack *const restrict destack, const size_t len)
     else
     {
         destack->back -= ALIGNED_LEN;
-        uint8_t *const restrict ptr = ( uint8_t* )destack->back;
+        uint8_t *const restrict ptr = (uint8_t *)destack->back;
+
         return ptr;
     }
 }
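
BiStack is the bidirectional arena added in v1.2: two bump pointers grow toward each other inside one buffer, and an allocation fails once they would cross (the front + ALIGNED_LEN >= back guard above). A sketch restricted to functions that appear in this diff; the exact meaning of BiStackMargins() is an assumption from its name:

    #define RMEM_IMPLEMENTATION
    #include "rmem.h"
    #include <stdint.h>

    int main(void)
    {
        BiStack ds = CreateBiStack(4096);             // one buffer, bump pointers at both ends
        uint8_t *perm = BiStackAllocFront(&ds, 256);  // longer-lived data grows from the front
        uint8_t *temp = BiStackAllocBack(&ds, 512);   // scratch data grows from the back
        intptr_t gap = BiStackMargins(ds);            // presumably the bytes still free between the ends
        (void)perm; (void)temp; (void)gap;
        DestroyBiStack(&ds);
        return 0;
    }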
@@ -737,15 +781,3 @@ inline intptr_t BiStackMargins(const BiStack destack)
 }
 
 #endif // RMEM_IMPLEMENTATION
-
-/*******
- * Changelog
- * v1.0: First Creation.
- * v1.1: bug patches for the mempool and addition of object pool.
- * v1.2: addition of bidirectional arena.
- * v1.3:
- *      optimizations of allocators.
- *      renamed 'Stack' to 'Arena'.
- *      replaced certain define constants with an anonymous enum.
- *      refactored MemPool to no longer require active or deferred defragging.
- ********/
