Sebastian Hubbard committed 6e1f699 Draft

some notes for lockfree changes

  • Parent commits 30149a2

Files changed (1)

 #define CHUNK_ALLOCATOR(name, size_function) struct t_amtlist * name ## _new_chunk(int size, int nodecount) { \
     size_t node_and_list = sizeof(struct t_amtlist) + size_function(size); \
     int alloc_count = nodecount;					\
-    									\
+									\
     if (AMT_ALIGNMENT > 4) {						\
       node_and_list += AMT_ALIGNMENT - (node_and_list % AMT_ALIGNMENT);	\
       alloc_count++;							\
     }									\
-    									\
+									\
     void *mem = AMT_SBRK(node_and_list * alloc_count);			\
     struct t_amtlist *links;						\
     int i;								\
   return t;
 }
 
+/* ensure_free_list should use __sync_bool_compare_and_swap to add allocated chunks to the free
+   list. if the initial CAS fails, it will be necessary to retrieve the free pointer, direct the
+   last item in the freshly allocated chunk to point to the free list, and attempt the CAS again
+   (loop ad nauseam) */
+
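
A minimal sketch of that push loop, with a bare lf_node standing in for struct t_amtlist and a plain head pointer standing in for AMT(t)->free[size_bucket]; only the GCC __sync builtin is a real API, every other name here is illustrative:

    struct lf_node { struct lf_node *next; };

    /* push a freshly allocated chunk [first .. last] onto the shared free list;
       the chunk's internal next pointers are assumed to be linked already */
    static void lf_push_chunk(struct lf_node **head, struct lf_node *first,
                              struct lf_node *last)
    {
      struct lf_node *old;
      do {
        old = *head;          /* retrieve the current free-list head      */
        last->next = old;     /* point the chunk's last item at that head */
      } while (!__sync_bool_compare_and_swap(head, old, first));
    }
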
 #define ENSURE_FREE_LIST(chunk_type, size_bucket, free)			\
   amtnode *new;								\
   struct t_amtlist *nl;							\
+  struct t_amtlist *temp;						\
 									\
+sync_ensure_free:							\
   if (!AMT(t)->free[size_bucket]) {					\
     nl = AMT_MARSHAL(chunk_type ## _new_chunk(size_bucket, AMT_CHUNKSIZE)); \
     AMT(t)->free[size_bucket] = nl;					\
   }
 
 
+/* now, having seen that there is a node available on the free list, one must read its next
+   pointer and attempt to CAS it into the free-list head. if the CAS fails, one must repeat
+   until a node is successfully pulled off, and if the head read comes back null,
+   ENSURE_FREE_LIST must be executed again */
+
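
A matching sketch of the pop loop described in the note, reusing the lf_node stand-in from the sketch above; it returns 0 when the list is observed empty so the caller can re-run the ENSURE_FREE_LIST step and retry. This simple form ignores the ABA hazard that a production lock-free pop would also need to address.

    /* pop one node off the shared free list, or return 0 if it is empty */
    static struct lf_node *lf_pop(struct lf_node **head)
    {
      struct lf_node *old, *next;
      do {
        old = *head;
        if (!old)
          return 0;           /* empty: caller must refill via ENSURE_FREE_LIST */
        next = old->next;     /* read the next pointer ...                      */
      } while (!__sync_bool_compare_and_swap(head, old, next));
      return old;             /* ... and CAS it in; the node is now ours        */
    }
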
 #define ALLOC_FROM_FREE_LIST(size_bucket, free, used)			\
   t = AMT(t);								\
   nl = AMT_UNMARSHAL(t->free[size_bucket]);				\
   new = (void *) nl + sizeof(struct t_amtlist);				\
   									\
-  struct t_amtlist *temp;						\
   temp = nl->next;							\
   nl->next = t->used[size_bucket];					\
   t->used[size_bucket] = t->free[size_bucket];				\
   t = AMT_MARSHAL(t);
   struct t_vallocators *vsrc = AMT_UNMARSHAL(AMT(t)->vals);
 
+  /* search for a pool of the desired size */
   while (vsrc) {
     if (vsrc->vsize == nbytes) 
       break;
     vsrc = AMT_UNMARSHAL(vsrc->next);
   }
 
+  /* if the pool is not found, allocate a pool of the desired size */
   if (!vsrc) {
     vsrc = AMT_SBRK(sizeof(struct t_vallocators));
     vsrc->next = AMT(t)->vals;
     AMT(t)->vals = AMT_MARSHAL(vsrc);
+    /* above, slip the new pool into the list of pools */
 
     vsrc->vsize = nbytes;
     vsrc->free = 0;
 
   struct t_vitems *temp, *vfree = AMT_UNMARSHAL(vsrc->free);
 
+  /* search for a non-exhausted pool in the list of pools of the desired size */
   while (vfree && vfree->used == vfree->nitems) {
     temp = AMT_UNMARSHAL(vfree->next);
     vfree->next = vsrc->used;
     vfree = temp;
   }
 
+  /* if no pool with a free entry has been found, allocate a new pool of the desired size */
   if (!vfree) {
     vsrc = AMT_MARSHAL(vsrc);
     vfree = amt_alloc_vitems(nbytes, AMT_VALCHUNKSIZE);
     vsrc = AMT_UNMARSHAL(vsrc);
     vsrc->free = AMT_MARSHAL(vfree);
+
+    /* slip the pool into the free list */
   }
 
+  /* we can use a __sync_bool_compare_and_swap to increment used first (as a guard), and then
+     use __sync_fetch_and_add to grab the allocated memory off of the pool */
+
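
A sketch of that guarded increment, with used and nitems standing in for the corresponding fields of struct t_vitems (the names and the int return convention are illustrative); in this simplified form one CAS loop both enforces the bound and claims the slot index:

    /* claim one slot in a pool of nitems entries; -1 means the pool is
       exhausted and a fresh pool has to be allocated */
    static int claim_slot(int *used, int nitems)
    {
      int seen;
      do {
        seen = *used;
        if (seen >= nitems)
          return -1;          /* guard: never step past nitems          */
      } while (!__sync_bool_compare_and_swap(used, seen, seen + 1));
      return seen;            /* index seen now belongs to this thread  */
    }
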
   void *v = (void *) vfree + sizeof(struct t_vitems) + (sizeof(void *) + nbytes) * vfree->used;
   vfree->used += 1;
   *((struct t_vallocators **) v) = AMT_MARSHAL(vsrc);