Sepehr Taghdisian committed 4475676

working on multi-threading


Files changed (14)

include/core/atomic.h

  * @b ATOMIC_SET_PTR(dest, ptr): set atomic pointer\n
  * @ingroup atomic
  */
-typedef long atomint_t;
-/**
- * @ingroup atomic
- */
-typedef void* volatile atomptr_t;
 
 #if defined(_WIN_)
 /* windows specific */
 #include "win.h"
 #define ATOMIC_CAS(dest, cmp_value, swap_value)     \
     InterlockedCompareExchange(&(dest), (swap_value), (cmp_value))
+#define ATOMIC_CAS64(dest, cmp_value, swap_value)   \
+    InterlockedCompareExchange64(&(dest), (swap_value), (cmp_value))
 #define ATOMIC_SET(dest, value)     \
     InterlockedExchange(&(dest), (value))
+#define ATOMIC_SET64(dest, value)   \
+    InterlockedExchange64(&(dest), (value))
 #define ATOMIC_INCR(dest)   \
     InterlockedIncrement(&(dest))
 #define ATOMIC_DECR(dest_ptr)   \
 /* unix/linux specific */
 #define ATOMIC_CAS(dest, cmp_value, swap_value)     \
     __sync_val_compare_and_swap(&(dest), (cmp_value), (swap_value))
+#define ATOMIC_CAS64 ATOMIC_CAS
 #define ATOMIC_SET(dest, value)     \
     __sync_lock_test_and_set(&(dest), (value))
+#define ATOMIC_SET64 ATOMIC_SET
 #define ATOMIC_INCR(dest)   \

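As a usage sketch (not part of this commit), the CAS macro above composes into the usual retry loop; g_counter and counter_add are hypothetical names:

    /* lock-free add: retry until no other thread touched the value in between */
    long volatile g_counter = 0;

    void counter_add(long value)
    {
        long cur;
        do {
            cur = g_counter;
        } while (ATOMIC_CAS(g_counter, cur, cur + value) != cur);
    }

On both platforms ATOMIC_CAS returns the value of dest before the exchange, so the loop exits only when the swap actually committed.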
include/core/freelist-alloc.h

 #include "allocator.h"
 #include "linked-list.h"
 #include "core-api.h"
+#include "mutex.h"
 
 /**
  * freelist allocator: variable-sized small block memory allocator\n
  * @return allocated memory block   @ingroup alloc
  */
 CORE_API void* mem_freelist_alloc(struct freelist_alloc* freelist, size_t size, uint32 mem_id);
+
 /**
+ * Aligned allocation from freelist
+ * @see mem_freelist_alloc
  * @ingroup alloc
  */
 CORE_API void* mem_freelist_alignedalloc(struct freelist_alloc* freelist, size_t size, 
  * @ingroup alloc
  */
 CORE_API void mem_freelist_free(struct freelist_alloc* freelist, void* ptr);
+
 /**
  * @ingroup alloc
  */
 CORE_API void mem_freelist_alignedfree(struct freelist_alloc* freelist, void* ptr);
+
 /**
  * get freelist memory leaks
  * @param pptrs array of pointers to the leaks, if =NULL function only returns number of leaks
  */
 CORE_API void mem_freelist_bindalloc(struct freelist_alloc* freelist, struct allocator* alloc);
 
+/*************************************************************************************************/
+/**
+ * freelist allocator (thread-safe): variable-sized small block memory allocator\n
+ * memory blocks larger than 8k will be allocated from the heap
+ * @ingroup alloc
+ */
+struct freelist_alloc_ts
+{
+    struct freelist_alloc fl;
+    mutex_t lock;
+};
+
+/**
+ * Freelist create/destroy (thread-safe)
+ * @param alloc allocator for internal freelist memory
+ * @param size size (in bytes) for freelist buffer
+ * @see mem_freelist_destroy    @ingroup alloc
+ */
+CORE_API result_t mem_freelist_create_ts(struct allocator* alloc, 
+                                         struct freelist_alloc_ts* freelist,
+                                         size_t size, uint32 mem_id);
+/**
+ * Destroy freelist (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_freelist_destroy_ts(struct freelist_alloc_ts* freelist);
+
+/**
+ * Allocate memory from freelist (thread-safe)
+ * @param size size (in bytes) of the requested memory; if it is larger than 8k
+ * (see freelist-alloc.c), memory is allocated from the heap instead of the freelist
+ * @return allocated memory block   @ingroup alloc
+ */
+CORE_API void* mem_freelist_alloc_ts(struct freelist_alloc_ts* freelist, size_t size, uint32 mem_id);
+
+/**
+ * @ingroup alloc
+ */
+CORE_API void* mem_freelist_alignedalloc_ts(struct freelist_alloc_ts* freelist, size_t size, 
+                                            uint8 alignment, uint32 mem_id);
+
+/**
+ * @ingroup alloc
+ */
+CORE_API void mem_freelist_bindalloc_ts(struct freelist_alloc_ts* freelist, struct allocator* alloc);
+
+/**
+ * @ingroup alloc
+ */
+CORE_API void mem_freelist_free_ts(struct freelist_alloc_ts* freelist, void* ptr);
+
+/**
+ * @ingroup alloc
+ */
+CORE_API void mem_freelist_alignedfree_ts(struct freelist_alloc_ts* freelist, void* ptr);
+
+/**
+ * get freelist memory leaks (thread-safe)
+ * @param pptrs array of pointers to the leaks, if =NULL function only returns number of leaks
+ * @return number of leaks
+ * @ingroup alloc
+ */
+CORE_API uint32 mem_freelist_getleaks_ts(struct freelist_alloc_ts* freelist, void** pptrs);
+
+/**
+ * get size of the allocated memory from freelist (thread-safe)
+ */
+CORE_API size_t mem_freelist_getsize_ts(struct freelist_alloc_ts* freelist, void* ptr);
+
 #endif

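A minimal usage sketch of the new thread-safe freelist API (the 64k buffer size and mem_id 0 are arbitrary; error handling elided):

    struct freelist_alloc_ts fl;
    result_t r = mem_freelist_create_ts(mem_heap(), &fl, 64*1024, 0);
    if (IS_FAIL(r))
        return r;

    void* p = mem_freelist_alloc_ts(&fl, 128, 0);   /* safe to call from any thread */
    mem_freelist_free_ts(&fl, p);

    ASSERT(mem_freelist_getleaks_ts(&fl, NULL) == 0);
    mem_freelist_destroy_ts(&fl);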
include/core/pool-alloc.h

 #include "linked-list.h"
 #include "allocator.h"
 #include "core-api.h"
+#include "mutex.h"
 
 /**
  * pool allocator: fixed-size pool allocation\n
  */
 CORE_API void mem_pool_bindalloc(struct pool_alloc* pool, struct allocator* alloc);
 
+/*************************************************************************************************/
+/**
+ * pool allocator: fixed-size pool allocation (thread-safe)\n
+ * it is fast and can grow dynamically on demand, but is limited to fixed-size blocks\n
+ * if the number of allocations exceeds 'block_size', another block is created
+ * @see mem_pool_create
+ * @ingroup alloc
+ */
+struct pool_alloc_ts
+{
+    struct pool_alloc p;
+    mutex_t lock;
+};
+
+/**
+ * creates a fixed item-size pool and its buffer (thread-safe)
+ * @param item_size size of each item (bytes) in the pool
+ * @param block_size number of items in each pool block
+ * @ingroup alloc
+ */
+CORE_API result_t mem_pool_create_ts(struct allocator* alloc, struct pool_alloc_ts* pool,
+                                     uint32 item_size, uint32 block_size, uint32 mem_id);
+
+/**
+ * destroys pool allocator (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_pool_destroy_ts(struct pool_alloc_ts* pool);
+
+/**
+ * allocate an item (fixed-size) from the pool (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void* mem_pool_alloc_ts(struct pool_alloc_ts* pool);
+
+/**
+ * free an item from the pool (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_pool_free_ts(struct pool_alloc_ts* pool, void* ptr);
+
+/**
+ * get memory pool leaks (thread-safe)
+ * @return number of leaks
+ * @ingroup alloc
+ */
+CORE_API uint32 mem_pool_getleaks_ts(struct pool_alloc_ts* pool);
+
+/**
+ * clear memory pool (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_pool_clear_ts(struct pool_alloc_ts* pool);
+
+/**
+ * pool binding to generic allocator (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_pool_bindalloc_ts(struct pool_alloc_ts* pool, struct allocator* alloc);
+
 #endif /* __POOLALLOC_H__ */

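A corresponding sketch for the thread-safe pool (struct my_item and the block size of 256 are hypothetical):

    struct pool_alloc_ts pool;
    result_t r = mem_pool_create_ts(mem_heap(), &pool, sizeof(struct my_item), 256, 0);
    if (IS_FAIL(r))
        return r;

    struct my_item* item = (struct my_item*)mem_pool_alloc_ts(&pool);  /* fixed-size item */
    mem_pool_free_ts(&pool, item);

    ASSERT(mem_pool_getleaks_ts(&pool) == 0);
    mem_pool_destroy_ts(&pool);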
include/core/stack-alloc.h

 #include "types.h"
 #include "allocator.h"
 #include "core-api.h"
+#include "atomic.h"
 
 /**
  * stack allocator: variable-size sequential stack allocator\n
  */
 struct stack_alloc
 {
-    uint8*            buffer;      
-    size_t            offset;     /* in bytes */
-    size_t            size;       /* in bytes */
-    size_t            alloc_max;
-    size_t            save_offset;
+    uint8* buffer;      
+    size_t offset;     /* in bytes */
+    size_t size;       /* in bytes */
+    size_t alloc_max;
+    size_t save_offset;
     struct allocator* alloc;
 };
 
  */
 CORE_API void mem_stack_bindalloc(struct stack_alloc* stack, struct allocator* alloc);
 
+
+/**
+ * stack allocator (thread-safe): variable-size sequential stack allocator\n
+ * it is the fastest allocator, but it is sequential and does not support dynamic free\n
+ * This variant of the stack allocator is thread-safe:\n
+ * its buffer should be created and destroyed from a single thread, while allocations can be
+ * made from different threads
+ * @ingroup alloc
+ */
+struct stack_alloc_ts
+{
+    uint8* buffer;      
+    size_t volatile offset;     /* in bytes */
+    size_t size;       /* in bytes */
+    size_t volatile alloc_max;
+    size_t volatile save_offset;
+    struct allocator* alloc;
+};
+
+/**
+ * create stack allocator (thread-safe)
+ * @param alloc allocator for internal stack allocator buffer
+ * @param size size of stack allocator buffer (bytes)
+ * @ingroup alloc
+ */
+CORE_API result_t mem_stack_create_ts(struct allocator* alloc, 
+                                      struct stack_alloc_ts* stack, size_t size, uint32 mem_id);
+
+/**
+ * destroy stack allocator (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_stack_destroy_ts(struct stack_alloc_ts* stack);
+
+/**
+ * stack alloc (thread-safe)
+ * @see mem_stack_bindalloc @ingroup alloc
+ */
+CORE_API void* mem_stack_alloc_ts(struct stack_alloc_ts* stack, size_t size, uint32 mem_id);
+
+/**
+ * stack aligned alloc (thread-safe)
+ * @see mem_stack_bindalloc @ingroup alloc
+ */
+CORE_API void* mem_stack_alignedalloc_ts(struct stack_alloc_ts* stack, size_t size, 
+                                         uint8 alignment, uint32 mem_id);
+
+/**
+ * save stack allocator state in order to load it later (thread-safe)
+ * @see mem_stack_load
+ * @ingroup alloc
+ */
+CORE_API void mem_stack_save_ts(struct stack_alloc_ts* stack);
+
+/**
+ * load previously saved stack allocator state (thread-safe)\n
+ * memory allocated after the saved state is discarded on 'load'
+ * @see mem_stack_save
+ * @ingroup alloc
+ */
+CORE_API void mem_stack_load_ts(struct stack_alloc_ts* stack);
+
+/**
+ * reset stack allocator state, discarding any memory that is allocated (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_stack_reset_ts(struct stack_alloc_ts* stack);
+
+/**
+ * bind stack-alloc to generic allocator (thread-safe)
+ * @ingroup alloc
+ */
+CORE_API void mem_stack_bindalloc_ts(struct stack_alloc_ts* stack, struct allocator* alloc);
+
+
 #endif /*__STACKALLOC_H__*/

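A sketch of the intended ownership model for the thread-safe stack allocator (buffer size is arbitrary): create/destroy on the owning thread, allocate from workers, and use save/load to roll back in bulk:

    struct stack_alloc_ts stack;
    result_t r = mem_stack_create_ts(mem_heap(), &stack, 1024*1024, 0);  /* owning thread */
    if (IS_FAIL(r))
        return r;

    mem_stack_save_ts(&stack);                       /* mark the current offset */
    void* tmp = mem_stack_alloc_ts(&stack, 256, 0);  /* may be called from worker threads */
    mem_stack_load_ts(&stack);                       /* discard everything since the save */

    mem_stack_destroy_ts(&stack);                    /* owning thread again */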
include/core/thread.h

 };
 
 /**
- * thread structure: holds thread events and properties
+ * Thread structure: holds thread events and properties
  * @ingroup thr
  */
 struct thread
 {
-    thread_t                t;          /* thread handle */
-    enum thread_level       level;      /* priority */
-    struct freelist_alloc   local_mem;  /* local dynamic memory besides thread's own stack */
-    struct allocator        local_alloc;    /* allocator for local memory */
-    pfn_thread_kernel       kernel_fn;  /* kernel function, runs in loop unless RET_ABORT is returned */
-    pfn_thread_init         init_fn;    /* init function (happens in thread process) */
-    pfn_thread_release      release_fn; /* release function (happens in thread process) */
-    void*                   param1;     /* custom param1 */
-    void*                   param2;     /* custom param2 */
+    thread_t t;          /* thread handle */
+    enum thread_level level;      /* priority */
+    struct freelist_alloc local_mem;  /* local dynamic memory besides thread's own stack */
+    struct allocator local_alloc;    /* allocator for local memory */
+    pfn_thread_kernel kernel_fn;  /* kernel function, runs in loop unless RET_ABORT is returned */
+    pfn_thread_init init_fn;    /* init function (happens in thread process) */
+    pfn_thread_release release_fn; /* release function (happens in thread process) */
+    void* param1;     /* custom param1 */
+    void* param2;     /* custom param2 */
     
 #if defined(_POSIXLIB_)
-    mutex_t                 state_mutex;   
-    thread_event_t          state_event;
-    thread_attr_t           attr;
-    enum thread_state       state;
+    mutex_t state_mutex;   
+    thread_event_t state_event;
+    thread_attr_t attr;
+    enum thread_state state;
 #elif defined(_WIN_)
-    thread_event_t          events[2];  /* 0=stop, 1=resume */
-    uint32                  id;
+    thread_event_t events[2];  /* 0=stop, 1=resume */
+    uint32 id;
 #endif
 };
 
 /**
- * creates a thread and start running it's kernel immediately
+ * Creates a thread and starts running its kernel immediately
  * @param kernel_fn kernel function callback which executes in an infinite loop
  * @param init_fn initialize function (OPTIONAL) that implements thread initialization code
  * @param release_fn release function (OPTIONAL) that implements thread release code
     enum thread_level level, uint32 local_mem_sz, void* param1, void* param2);
 
 /**
- * destroys a thread. blocks the program until thread is stopped and exited 
+ * Destroys a thread. Blocks the program until the thread has stopped and exited
  * @ingroup thr
  */
 CORE_API void mt_destroythread(struct thread* thr);
 
 /** 
- * stops execution of kernel code, the thread does not exit, but waits for user to resume the thread
+ * Stops execution of the kernel code; the thread does not exit, but waits for the user to resume it
  * @see mt_resumethread
  * @ingroup thr
  */
 CORE_API void mt_pausethread(struct thread* thr);
 
 /**
- * resume execution of kernel code 
+ * Resumes execution of the kernel code
  * @see mt_pausethread
  * @ingroup thr
  */
 CORE_API void mt_resumethread(struct thread* thr);
 
 /** 
- * stop execution of thread, this function does not wait for thread to finish work, just sends stop message
+ * Stops execution of the thread; this function does not wait for the thread to finish, it only sends a stop message
  * @ingroup thr
  */
 CORE_API void mt_stopthread(struct thread* thr);

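A lifecycle sketch for the thread API (mt_createthread's leading parameters are not visible in this diff, so the call is abbreviated; my_kernel is a hypothetical pfn_thread_kernel that returns RET_ABORT to leave the loop):

    struct thread* thr = /* mt_createthread(my_kernel, NULL, NULL, ...) */;

    mt_pausethread(thr);    /* kernel loop blocks until resumed */
    mt_resumethread(thr);
    mt_stopthread(thr);     /* sends the stop message, does not wait */
    mt_destroythread(thr);  /* blocks until the thread has stopped and exited */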
src/core/file-mgr.c

     char path[DH_PATH_MAX];
     efsw_watchid watchid;
     mutex_t mtx;
-    atomptr_t backbuff;
-    atomptr_t frontbuff;
+    void* volatile backbuff;
+    void* volatile frontbuff;
     struct array backarr;  /* item: char[DH_PATH_MAX] */
     struct array frontarr; /* item: char[DH_PATH_MAX] */
 };

src/core/freelist-alloc.c

     }
     return count;
 }
+
+/*************************************************************************************************/
+void* fl_alloc_ts(size_t size, const char* source, uint32 line, uint32 mem_id, void* param)
+{
+    return mem_freelist_alloc_ts((struct freelist_alloc_ts*)param, size, mem_id);
+}
+
+void fl_free_ts(void* p, void* param)
+{
+    mem_freelist_free_ts((struct freelist_alloc_ts*)param, p);
+}
+
+void* fl_alignedalloc_ts(size_t size, uint8 alignment, const char* source, 
+                         uint32 line, uint32 mem_id, void* param)
+{
+    return mem_freelist_alignedalloc_ts((struct freelist_alloc_ts*)param, size, alignment, mem_id);
+}
+
+void fl_alignedfree_ts(void* p, void* param)
+{
+    mem_freelist_alignedfree_ts((struct freelist_alloc_ts*)param, p);
+}
+
+/* */
+result_t mem_freelist_create_ts(struct allocator* alloc, struct freelist_alloc_ts* freelist,
+                                size_t size, uint32 mem_id)
+{
+    memset(freelist, 0x00, sizeof(struct freelist_alloc_ts));
+    mt_createmutex(&freelist->lock);
+    return mem_freelist_create(alloc, &freelist->fl, size, mem_id);
+}
+
+void mem_freelist_destroy_ts(struct freelist_alloc_ts* freelist)
+{
+    mt_destroymutex(&freelist->lock);
+    mem_freelist_destroy(&freelist->fl);
+}
+
+void* mem_freelist_alloc_ts(struct freelist_alloc_ts* freelist, size_t size, uint32 mem_id)
+{
+    mt_lockmutex(&freelist->lock);
+    void* ptr = mem_freelist_alloc(&freelist->fl, size, mem_id);   
+    mt_unlockmutex(&freelist->lock);
+    return ptr;
+}
+
+void* mem_freelist_alignedalloc_ts(struct freelist_alloc_ts* freelist, size_t size, 
+                                   uint8 alignment, uint32 mem_id)
+{
+    size_t ns = size + alignment;
+    uptr_t raw_addr = (uptr_t)mem_freelist_alloc_ts(freelist, ns, mem_id);
+    if (raw_addr == 0)    
+        return NULL;
+
+    uptr_t misalign = raw_addr & (alignment - 1);
+    uint8 adjust = alignment - (uint8)misalign;
+    uptr_t aligned_addr = raw_addr + adjust;
+    uint8* a = (uint8*)(aligned_addr - sizeof(uint8));
+    *a = adjust;
+    return (void*)aligned_addr;
+}
+
+void mem_freelist_bindalloc_ts(struct freelist_alloc_ts* freelist, struct allocator* alloc)
+{
+    alloc->param = freelist;
+    alloc->alloc_fn = fl_alloc_ts;
+    alloc->alignedalloc_fn = fl_alignedalloc_ts;
+    alloc->free_fn = fl_free_ts;
+    alloc->alignedfree_fn = fl_alignedfree_ts;
+}
+
+void mem_freelist_free_ts(struct freelist_alloc_ts* freelist, void* ptr)
+{
+    mt_lockmutex(&freelist->lock);
+    mem_freelist_free(&freelist->fl, ptr);
+    mt_unlockmutex(&freelist->lock);
+}
+
+void mem_freelist_alignedfree_ts(struct freelist_alloc_ts* freelist, void* ptr)
+{
+    uptr_t aligned_addr = (uptr_t)ptr;
+    uint8 adjust = *((uint8*)(aligned_addr - sizeof(uint8)));
+    uptr_t raw_addr = aligned_addr - adjust;
+
+    mem_freelist_free_ts(freelist, (void*)raw_addr);
+}
+
+uint32 mem_freelist_getleaks_ts(struct freelist_alloc_ts* freelist, void** pptrs)
+{
+    return mem_freelist_getleaks(&freelist->fl, pptrs);
+}
+
+size_t mem_freelist_getsize_ts(struct freelist_alloc_ts* freelist, void* ptr)
+{
+    return mem_freelist_getsize(&freelist->fl, ptr);
+}
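To make the adjust-byte scheme above concrete: with alignment = 16 and raw_addr = 0x1004, misalign = 0x1004 & 15 = 4, so adjust = 12 and aligned_addr = 0x1010; the byte at 0x100F stores 12, which mem_freelist_alignedfree_ts reads back to recover raw_addr = 0x1010 - 12 = 0x1004 before handing it to mem_freelist_free_ts.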
  */
 struct json_mgr
 {
-    struct freelist_alloc alloc;
+    struct freelist_alloc_ts alloc;
     bool_t init;
-    mutex_t lock;
 };
 
 static struct json_mgr g_json;
 /* callbacks for memory allocation/deallocation */
 void* json_malloc(size_t size)
 {
-    mt_lockmutex(&g_json.lock);
-    void* p = mem_freelist_alloc(&g_json.alloc, size, 0);
-    mt_unlockmutex(&g_json.lock);
+    void* p = mem_freelist_alloc_ts(&g_json.alloc, size, 0);
     ASSERT(p);
     return p;
 }
 
 void json_free(void* p)
 {
-    mt_lockmutex(&g_json.lock);
-    mem_freelist_free(&g_json.alloc, p);
-    mt_unlockmutex(&g_json.lock);
+    mem_freelist_free_ts(&g_json.alloc, p);
 }
 
 /* */
 {
     if (buf_size == 0)
     	buf_size = DEFAULT_BUFFER_SIZE;
-    result_t r = mem_freelist_create(mem_heap(), &g_json.alloc, buf_size, 0);
+    result_t r = mem_freelist_create_ts(mem_heap(), &g_json.alloc, buf_size, 0);
     if (IS_FAIL(r))
         return r;
     
     hooks.free_fn = json_free;
     cJSON_InitHooks(&hooks);
 
-    mt_createmutex(&g_json.lock);
-
     g_json.init = TRUE;
     return RET_OK;
 }
 void json_release()
 {
     g_json.init = FALSE;
-    mt_destroymutex(&g_json.lock);
-    mem_freelist_destroy(&g_json.alloc);
+    mem_freelist_destroy_ts(&g_json.alloc);
     cJSON_InitHooks(NULL);
 }
 
     }
     
     fwrite(buffer, 1, strlen(buffer) + 1, f);   
-    mem_freelist_free(&g_json.alloc, buffer);
+    mem_freelist_free_ts(&g_json.alloc, buffer);
     return RET_OK;
 }
 
     }
     
     io_file_write(f, buffer, 1, strlen(buffer)+1);
-    mem_freelist_free(&g_json.alloc, buffer);
+    mem_freelist_free_ts(&g_json.alloc, buffer);
     return RET_OK;   
 }
 

src/core/memory.c

     mutex_t lock;
 };
 
-static atomint_t g_meminit = FALSE;
+static long volatile g_meminit = FALSE;
 static struct mem_data g_memdata;
 
 /*************************************************************************************************/

src/core/pool-alloc.c

     }
     return count;
 }
+
+/*************************************************************************************************
+ * Thread-safe
+ */
+/* callback functions for binding pool-alloc to generic allocator */
+void* p_alloc_ts(size_t size, const char* source, uint32 line, uint32 mem_id, void* param)
+{
+    ASSERT(((struct pool_alloc_ts*)param)->p.item_sz == size);
+    return mem_pool_alloc_ts((struct pool_alloc_ts*)param);
+}
+
+void p_free_ts(void* p, void* param)
+{
+    mem_pool_free_ts((struct pool_alloc_ts*)param, p);
+}
+
+void* p_alignedalloc_ts(size_t size, uint8 alignment, const char* source, 
+                        uint32 line, uint32 mem_id, void* param)
+{
+    ASSERT(((struct pool_alloc_ts*)param)->p.item_sz == size);
+    return mem_pool_alloc_ts((struct pool_alloc_ts*)param);
+}
+
+void p_alignedfree_ts(void* p, void* param)
+{
+    mem_pool_free_ts((struct pool_alloc_ts*)param, p);
+}
+
+/* */
+result_t mem_pool_create_ts(struct allocator* alloc, struct pool_alloc_ts* pool,
+                            uint32 item_size, uint32 block_size, uint32 mem_id)
+{
+    memset(pool, 0x00, sizeof(struct pool_alloc_ts));
+    mt_createmutex(&pool->lock);
+    return mem_pool_create(alloc, &pool->p, item_size, block_size, mem_id);
+}
+
+void mem_pool_destroy_ts(struct pool_alloc_ts* pool)
+{
+    mt_destroymutex(&pool->lock);
+    mem_pool_destroy(&pool->p);
+}
+
+void* mem_pool_alloc_ts(struct pool_alloc_ts* pool)
+{
+    mt_lockmutex(&pool->lock);
+    void* ptr = mem_pool_alloc(&pool->p);
+    mt_unlockmutex(&pool->lock);
+    return ptr;
+}
+
+void mem_pool_free_ts(struct pool_alloc_ts* pool, void* ptr)
+{
+    mt_lockmutex(&pool->lock);
+    mem_pool_free(&pool->p, ptr);
+    mt_unlockmutex(&pool->lock);
+}
+
+uint32 mem_pool_getleaks_ts(struct pool_alloc_ts* pool)
+{
+    return mem_pool_getleaks(&pool->p);
+}
+
+void mem_pool_clear_ts(struct pool_alloc_ts* pool)
+{
+    mem_pool_clear(&pool->p);
+}
+
+void mem_pool_bindalloc_ts(struct pool_alloc_ts* pool, struct allocator* alloc)
+{
+    alloc->param = pool;
+    alloc->alloc_fn = p_alloc_ts;
+    alloc->free_fn = p_free_ts;
+    alloc->alignedalloc_fn = p_alignedalloc_ts;
+    alloc->alignedfree_fn = p_alignedfree_ts;
+}

src/core/stack-alloc.c

 #include "err.h"
 #include "log.h"
 
+/*************************************************************************************************/
 /* functions for binding allocators to stack-alloc */
 void* s_alloc(size_t size, const char* source, uint32 line, uint32 mem_id, void* param)
 {
     /* zero memory */
     memset(stack->buffer, 0x00, stack->size);
 }
+
+/*************************************************************************************************
+ * stack allocator: thread-safe
+ */
+void* s_alloc_ts(size_t size, const char* source, uint32 line, uint32 mem_id, void* param)
+{
+    return mem_stack_alloc_ts((struct stack_alloc_ts*)param, size, mem_id);
+}
+
+void s_free_ts(void* p, void* param)
+{
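+    /* no-op: the stack allocator is sequential and does not support per-pointer free */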
+}
+
+void* s_alignedalloc_ts(size_t size, uint8 alignment, const char* source, 
+                        uint32 line, uint32 mem_id, void* param)
+{
+    return mem_stack_alignedalloc_ts((struct stack_alloc_ts*)param, size, alignment, mem_id);
+}
+
+void s_alignedfree_ts(void* p, void* param)
+{
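+    /* no-op: aligned blocks are reclaimed via load/reset as well */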
+}
+
+/* */
+result_t mem_stack_create_ts(struct allocator* alloc, struct stack_alloc_ts* stack, 
+                             size_t size, uint32 mem_id)
+{
+    memset(stack, 0x00, sizeof(struct stack_alloc_ts));
+    stack->buffer = (uint8*)A_ALIGNED_ALLOC(alloc, size, mem_id);
+    if (stack->buffer == NULL)
+        return RET_OUTOFMEMORY;
+
+    stack->size = size;
+    stack->alloc = alloc;
+    stack->alloc_max = 0;
+
+    return RET_OK;
+}
+
+void mem_stack_destroy_ts(struct stack_alloc_ts* stack)
+{
+    ASSERT(stack != NULL);
+
+    if (stack->buffer != NULL)  {
+        A_ALIGNED_FREE(stack->alloc, stack->buffer);
+    }
+
+    memset(stack, 0x00, sizeof(struct stack_alloc_ts));
+}
+
+void* mem_stack_alignedalloc_ts(struct stack_alloc_ts* stack, size_t size, 
+                                uint8 alignment, uint32 mem_id)
+{
+    size_t ns = size + alignment;
+    uptr_t raw_addr = (uptr_t)mem_stack_alloc_ts(stack, ns, mem_id);
+    if (raw_addr == 0)     
+        return NULL;
+
+    uptr_t misalign = raw_addr & (alignment - 1);
+    uint8 adjust = alignment - (uint8)misalign;
+    uptr_t aligned_addr = raw_addr + adjust;
+    uint8* a = (uint8*)(aligned_addr - sizeof(uint8));
+    *a = adjust;
+    return (void*)aligned_addr;
+}
+
+void* mem_stack_alloc_ts(struct stack_alloc_ts* stack, size_t size, uint32 mem_id)
+{
+    ASSERT(stack->buffer != NULL);
+
+    void* ptr;
+    while (TRUE)    {
+        size_t cur_offset = stack->offset;
+        if ((cur_offset + size) > stack->size)  {
+            log_printf(LOG_WARNING, "stack allocator '%p' (req-size: %d, id: %d) is overloaded\n", 
+                stack, size, mem_id);
+            return ALLOC(size, mem_id);
+        }
+
+        ptr = stack->buffer + cur_offset;
+        size_t new_offset = cur_offset + size;
+
+        /* set maximum bytes fetched from stack alloc, not really important in multi-threading */
+#if defined(_X86_)
+        if (new_offset > stack->alloc_max)
+            ATOMIC_SET(stack->alloc_max, new_offset);
+#elif defined(_X64_)
+        if (new_offset > stack->alloc_max)
+            ATOMIC_SET64(stack->alloc_max, new_offset);
+#endif
+
+        /* commit changes */
+#if defined(_X86_)
+        if (ATOMIC_CAS(stack->offset, cur_offset, new_offset) == cur_offset)
+            return ptr;
+#elif defined(_X64_)
+        if (ATOMIC_CAS64(stack->offset, cur_offset, new_offset) == cur_offset)
+            return ptr;
+#endif
+    }
+
+    return ptr;
+}
+
+void mem_stack_bindalloc_ts(struct stack_alloc_ts* stack, struct allocator* alloc)
+{
+    alloc->param = stack;
+    alloc->alloc_fn = s_alloc_ts;
+    alloc->alignedalloc_fn = s_alignedalloc_ts;
+    alloc->free_fn = s_free_ts;
+    alloc->alignedfree_fn = s_alignedfree_ts;
+}
+
+void mem_stack_save_ts(struct stack_alloc_ts* stack)
+{
+#if defined(_X86_)
+    ATOMIC_SET(stack->save_offset, stack->offset);
+#elif defined(_X64_)
+    ATOMIC_SET64(stack->save_offset, stack->offset);
+#endif
+}
+
+void mem_stack_load_ts(struct stack_alloc_ts* stack)
+{
+    ASSERT(stack->save_offset <= stack->offset);
+
+    /* zero memory the range and switch back */
+    memset(stack->buffer + stack->save_offset, 0x00, stack->offset - stack->save_offset);
+
+#if defined(_X86_)
+    ATOMIC_SET(stack->offset, stack->save_offset);
+#elif defined(_X64_)
+    ATOMIC_SET64(stack->offset, stack->save_offset);
+#endif
+}
+
+void mem_stack_reset_ts(struct stack_alloc_ts* stack)
+{
+#if defined(_X86_)
+    ATOMIC_SET(stack->offset, 0);
+    ATOMIC_SET(stack->save_offset, 0);
+#elif defined(_X64_)
+    ATOMIC_SET64(stack->offset, 0);
+    ATOMIC_SET64(stack->save_offset, 0);
+#endif
+
+    /* zero memory */
+    memset(stack->buffer, 0x00, stack->size);
+}

src/engine/engine.c

     struct stack_alloc frame_stack;
 
     struct allocator data_alloc;
-    struct stack_alloc data_stack;
-    struct freelist_alloc data_freelist;
+    struct stack_alloc_ts data_stack;
+    struct freelist_alloc_ts data_freelist;
 
     struct allocator lsr_alloc;
     struct stack_alloc lsr_stack;
 
     /* dynamic allocator for data in dev (editor) mode, stack allocator in game (normal) mode */
     if (BIT_CHECK(params->flags, ENG_FLAG_DEV)) {
-        r |= mem_freelist_create(mem_heap(), &g_eng.data_freelist, data_sz, MID_DATA);
-        mem_freelist_bindalloc(&g_eng.data_freelist, &g_eng.data_alloc);
+        r |= mem_freelist_create_ts(mem_heap(), &g_eng.data_freelist, data_sz, MID_DATA);
+        mem_freelist_bindalloc_ts(&g_eng.data_freelist, &g_eng.data_alloc);
     }   else    {
-        r |= mem_stack_create(mem_heap(), &g_eng.data_stack, data_sz, MID_DATA);
-        mem_stack_bindalloc(&g_eng.data_stack, &g_eng.data_alloc);
+        r |= mem_stack_create_ts(mem_heap(), &g_eng.data_stack, data_sz, MID_DATA);
+        mem_stack_bindalloc_ts(&g_eng.data_stack, &g_eng.data_alloc);
     }
 
     if (IS_FAIL(r)) {
 
     /* check for main memory leaks */
     if (BIT_CHECK(g_eng.params.flags, ENG_FLAG_DEV))    {
-        uint32 leak_cnt = mem_freelist_getleaks(&g_eng.data_freelist, NULL);
+        uint32 leak_cnt = mem_freelist_getleaks_ts(&g_eng.data_freelist, NULL);
         if (leak_cnt > 0)
             log_printf(LOG_WARNING, "%d leaks found on dynamic 'data' memory", leak_cnt);
     }
 
     mem_stack_destroy(&g_eng.frame_stack);
-    mem_freelist_destroy(&g_eng.data_freelist);
-    mem_stack_destroy(&g_eng.data_stack);
+    mem_freelist_destroy_ts(&g_eng.data_freelist);
+    mem_stack_destroy_ts(&g_eng.data_stack);
     mem_stack_destroy(&g_eng.lsr_stack);
 
     log_print(LOG_TEXT, "engine released.");
 {
     memset(stats, 0x00, sizeof(struct eng_mem_stats));
     if (BIT_CHECK(g_eng.params.flags, ENG_FLAG_DEV))    {
-        stats->data_max = g_eng.data_freelist.size;
-        stats->data_size = g_eng.data_freelist.alloc_size;
+        stats->data_max = g_eng.data_freelist.fl.size;
+        stats->data_size = g_eng.data_freelist.fl.alloc_size;
     }   else    {
         stats->data_max = g_eng.data_stack.size;
         stats->data_size = g_eng.data_stack.offset;

src/engine/prf-mgr.c

 
 struct prf_mgr
 {
-    atomint_t init;
+    long volatile init;
 	struct mg_context* ctx;	/* context for web-server */
 	char root_dir[DH_PATH_MAX]; /* root-directory for web-server */
 	struct array cmds; /* commands (item: cmd_desc)*/
-    atomptr_t samples_back; /* type: prf_samples*, the one that is being created by engine */
-    atomptr_t samples_front; /* type: prf_samples*, the one that is presentable to user */
+    void* volatile samples_back; /* type: prf_samples*, the one that is being created by engine */
+    void* volatile samples_front; /* type: prf_samples*, the one that is presentable to user */
     mutex_t samples_mtx;    /* mutex for front-buffer protection */
 };
 
 
     /* block presenting front buffer until we are done with json data creation */
     mt_lockmutex(&g_prf.samples_mtx);
-    atomptr_t tmp = g_prf.samples_front;
+    void* tmp = g_prf.samples_front;
     ATOMIC_SET_PTR(g_prf.samples_front, g_prf.samples_back);
     g_prf.samples_back = tmp;
     mt_unlockmutex(&g_prf.samples_mtx);

src/engine/res-mgr.c

     struct hashtable_chained dict;
     struct pool_alloc dict_itempool;
     struct allocator dict_itemalloc;
-    atomptr_t alloc;    /* data allocator */
+    void* volatile alloc;    /* data allocator */
 };
 
 /*************************************************************************************************