curr_set(void);
+//accessors into the parser's current allocation state; definitions further down
struct ref*
prev_ref(void);
-struct model
+struct model*
curr_model(void);
void
inc_posts(void);
+//each chunk spans PAGES_PER_CHUNK pages; one malloc serves that many dats
+#define PAGES_PER_CHUNK 16
+
#define CURR_CDAT (*cdat_stackp)
+//the per-buffer index macros below are superseded by the chunk stacks
-#define CURR_SET set_list[CURR_CDAT->num_sets]
-#define REF_IDX (num_refs % (refs_per_page * pages_per_chunk))
-#define PREV_REF (ref_buf[num_ref_chunks] + (REF_IDX * (sizeof (struct ref)) + pagesize - (sizeof (struct ref))))
-#define CURR_REF (ref_buf[num_ref_chunks] + (REF_IDX * (sizeof (struct ref)) + pagesize))
-#define ODAT_IDX (num_odats % (odats_per_page * pages_per_chunk))
-#define CURR_ODAT (odat_buf[num_odat_chunks] + (ODAT_IDX * (sizeof (struct odat)) + pagesize))
-#define VDAT_IDX (num_vdats % (vdats_per_page * pages_per_chunk))
-#define CURR_VDAT (vdat_buf[num_vdat_chunks] + (VDAT_IDX * (sizeof (struct vdat)) + pagesize))
-#define CURR_MODEL (CURR_VDAT.model_list[CURR_VDAT.num_models])
-#define CURR_LINK (link_buf[num_links])
-#define CURR_POST (post_buf[num_posts])
+//"type free" chunk stacking
+struct chunk_stack
+{ void* chunks[MAX_CHUNKS];
+  void* *csp;            //chunk stack pointer: one past the last pushed chunk
+  void* dsp[MAX_CHUNKS]; //dat stack pointer (per chunk): most recently allocated dat
+  int chunk_size;        //size of a chunk in bytes (including its forfeited page)
+  int max_dats;          //number of dats per chunk for this stack
+} ocs, vcs, ccs, rcs, lcs, pcs; //odat, vdat, and cdat, ref, link, post stacks
+
+//type safety handled by macro expansion (do not call these directly from code, make dependent macros for access to these)
+#define CHUNKS_LEN(STACK) ((STACK).csp - (STACK).chunks)
+#define CURRENT_CHUNK(STACK) ((STACK).chunks[CHUNKS_LEN(STACK) - 1])
+#define CURRENT_DSP(STACK,TYPE) ((TYPE*) ((STACK).dsp[CHUNKS_LEN(STACK) - 1]))
+//full when the dat AFTER the current one would run past the chunk end
+//(char* arithmetic: void* arithmetic is a GNU extension, not ISO C)
+#define DATA_FULL(STACK,TYPE) ((char*) CURRENT_DSP(STACK,TYPE) + 2 * (sizeof (TYPE)) > \
+    ((char*) CURRENT_CHUNK(STACK) + (STACK).chunk_size))
+//push a fresh chunk: record it, aim its dat pointer at the chunk base,
+//then advance csp (malloc failure reported like the old per-chunk allocs)
+#define CSP_PUSH(STACK)						\
+  do {								\
+    if ((*(STACK).csp = malloc((STACK).chunk_size)) == NULL)	\
+      perror("malloc chunk failed");				\
+    (STACK).dsp[CHUNKS_LEN(STACK)] = *(STACK).csp;		\
+    (STACK).csp++;						\
+  } while (0)
+#define CURRENT_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 1])
+#define PREVIOUS_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 2])
+#define ALLOC_DAT(STACK,TYPE) (++CURRENT_DATP(STACK,TYPE))
+#define INIT_STACK(STACK,TYPE)					\
+  do {								\
+    (STACK).csp = (STACK).chunks;  /*empty before first push*/	\
+    (STACK).chunk_size = PAGES_PER_CHUNK * pagesize; /*bytes*/	\
+    (STACK).max_dats = (STACK).chunk_size / sizeof (TYPE);	\
+    CSP_PUSH(STACK);						\
+  } while (0)
+//Stack-specific macros (called directly from code; these enforce type safety)
+#define CURRENT_ODAT() (CURRENT_DATP(ocs,struct odat))
+#define ODAT_FULL() (DATA_FULL(ocs,struct odat))
+#define ODAT_ALLOC() (ALLOC_DAT(ocs,struct odat))
+#define CURRENT_VDAT() (CURRENT_DATP(vcs,struct vdat))
+#define VDAT_FULL() (DATA_FULL(vcs,struct vdat))
+#define VDAT_ALLOC() (ALLOC_DAT(vcs,struct vdat))
+#define CURRENT_CDAT() (CURRENT_DATP(ccs,struct cdat))
+#define CDAT_FULL() (DATA_FULL(ccs, struct cdat))
+#define CDAT_ALLOC() (ALLOC_DAT(ccs, struct cdat))
+#define CURRENT_LINK() (CURRENT_DATP(lcs,struct link))
+#define LDAT_FULL() (DATA_FULL(lcs, struct link))
+#define LDAT_ALLOC() (ALLOC_DAT(lcs, struct link))
+//posts are every-16th-ref bookmarks, so the post stack holds struct ref
+#define CURRENT_POST() (CURRENT_DATP(pcs,struct ref))
+#define POST_FULL() (DATA_FULL(pcs,struct ref))
+#define POST_ALLOC() (ALLOC_DAT(pcs,struct ref))
+#define CURRENT_REF() (CURRENT_DATP(rcs,struct ref))
+#define PREVIOUS_REF() (PREVIOUS_DATP(rcs, struct ref))
+#define REF_FULL() (DATA_FULL(rcs,struct ref))
+#define REF_ALLOC() (ALLOC_DAT(rcs,struct ref))
+//Metadata: the set/model currently under construction
+#define CURRENT_SET() (CURRENT_CDAT()->set_list[CURRENT_CDAT()->num_sets])
+#define CURRENT_MODEL() (CURRENT_VDAT()->model_list[CURRENT_VDAT()->num_models])
+
+
-#define CURR_QUAD (CURR_ODAT->quad_file)
+//CURR_ODAT is deleted above; route through the chunk-stack accessor
+#define CURR_QUAD (CURRENT_ODAT()->quad_file)
long pagesize;
struct cdat* cdat_stack[PTRS_IN_PAGE];
struct cdat** cdat_stackp;
+//NOTE(review): the num_* counters below survive the chunk-stack rewrite only
+//as bookkeeping; nothing indexes storage with them any more — confirm wanted
-
-int num_odat_chunks = 0;
int num_odats = 0;
-void* odat_buf[MAX_CHUNKS];
-long odats_per_page;
-int num_vdat_chunks = 0;
int num_vdats = 0;
-void* vdat_buf[MAX_CHUNKS];
-long vdats_per_page;
-int num_ref_chunks = 0;
int num_refs = 0;
-void* ref_buf[MAX_CHUNKS];
-long refs_per_page;
uint64_t ss_ref_id = 0x00FFFFFF; /* system space for ref_ids */
int num_posts = -1;
*cdat_stackp++ = cdat_buf[num_cdats++];
pagesize = sysconf(_SC_PAGESIZE);
- odats_per_page = (sizeof (struct odat)/pagesize);
- vdats_per_page = (sizeof (struct vdat)/pagesize);
- refs_per_page = (sizeof (struct ref)/pagesize);
return 0;
-for(i = 0; i <= num_odats ; i++)
-  {
-    free(odat_buf[i]);
-  }
-for(i = 0; i <= num_cdats; i++)
-  {
-    free(cdat_buf[i]);
-  }
-for(i = 0; i <= num_vdats; i++)
-  {
-    free(vdat_buf[i]);
-  }
-for(i = 0; i <= num_refs; i++)
-  {
-    free(ref_buf[i]);
-  }
-for(i = 0; i<= num_links; i++)
-  {
-    free(link_buf[i]);
-  }
+//the per-dat buffers are gone: storage lives in whole chunks now,
+//so release every chunk each stack has pushed (emptied loops leaked them)
+for(i = 0; i < CHUNKS_LEN(ocs); i++)
+  free(ocs.chunks[i]);
+for(i = 0; i < CHUNKS_LEN(ccs); i++)
+  free(ccs.chunks[i]);
+for(i = 0; i < CHUNKS_LEN(vcs); i++)
+  free(vcs.chunks[i]);
+for(i = 0; i < CHUNKS_LEN(rcs); i++)
+  free(rcs.chunks[i]);
+for(i = 0; i < CHUNKS_LEN(lcs); i++)
+  free(lcs.chunks[i]);
+for(i = 0; i < CHUNKS_LEN(pcs); i++)
+  free(pcs.chunks[i]);
}
return CURR_CDAT;
}
+
+//these should probably be inline
+//get a pointer to a fresh odat, growing the odat chunk stack when the
+//current chunk is full (parallels alloc_vdat/alloc_link/alloc_ref)
struct odat*
alloc_odat
()
-{
-  num_odats++;
-
-  if(!(num_odats % (odats_per_page * pages_per_chunk))) //chunk is full
-    {
-      num_odat_chunks++;
-      if( ((odat_buf[num_odat_chunks] = malloc(odats_per_page * pages_per_chunk)) == NULL) )
-	perror("malloc odat chunk failed");
-    }
-
-  return CURR_ODAT;
+{ num_odats++; //keep the count in step with the other allocators
+  if(ODAT_FULL())
+    { CSP_PUSH(ocs);
+      //the fresh chunk's first dat lives at its base
+      ocs.dsp[CHUNKS_LEN(ocs) - 1] = CURRENT_CHUNK(ocs);
+    }
+  else
+    ODAT_ALLOC();
+  return CURRENT_ODAT();
}
+//advance to a fresh vdat slot (callers retrieve it via curr_vdat())
void
alloc_vdat
()
-{
-  num_vdats++;
-
-  if(!(num_vdats % (vdats_per_page * pages_per_chunk))) //chunk is full
-    {
-      num_vdat_chunks++;
-      if( ((vdat_buf[num_vdat_chunks] = malloc(vdats_per_page * pages_per_chunk)) == NULL) )
-	perror("malloc vdat chunk failed");
-    }
-
+{ num_vdats++;
+  if(VDAT_FULL())
+    { CSP_PUSH(vcs);
+      //the fresh chunk's first dat lives at its base
+      vcs.dsp[CHUNKS_LEN(vcs) - 1] = CURRENT_CHUNK(vcs);
+    }
+  else
+    VDAT_ALLOC();
}
+//get a pointer to a fresh link, growing the link chunk stack when full
struct link*
alloc_link
()
-{
-  num_links++;
-
-  if(num_links >= curr_max_links)
-    { if( (realloc((void*) link_buf, PTRS_IN_PAGE * 4)) == NULL)
-	perror("realloc vdat_buf failed");
-      curr_max_links += PTRS_IN_PAGE;
-    }
-  if((CURR_LINK = (struct link*) malloc(sizeof (struct link))) == NULL)
-    perror("malloc link failed");
-
-  return CURR_LINK;
+{ num_links++;
+  if(LDAT_FULL())
+    { CSP_PUSH(lcs);
+      //the fresh chunk's first dat lives at its base
+      lcs.dsp[CHUNKS_LEN(lcs) - 1] = CURRENT_CHUNK(lcs);
+    }
+  else
+    LDAT_ALLOC();
+  return CURRENT_LINK();
}
+//get a pointer to a fresh ref; every 16th ref is bookmarked as a "post"
struct ref*
alloc_ref
()
-{
-  num_refs++;
+{ num_refs++;
+  if(REF_FULL())
+    { CSP_PUSH(rcs);
+      //the fresh chunk's first dat lives at its base
+      rcs.dsp[CHUNKS_LEN(rcs) - 1] = CURRENT_CHUNK(rcs);
+    }
+  else
+    REF_ALLOC();
+  //NOTE(review): CURRENT_POST() expands to the pcs dat-pointer slot itself,
+  //so this stores the ref pointer into pcs.dsp rather than inside a post
+  //chunk — confirm this is the intended representation for posts
  if(num_refs % 16 == 0)
-    { CURR_POST = CURR_REF;
+    { CURRENT_POST() = CURRENT_REF();
      inc_posts();
    }
-  if(!(num_refs % (refs_per_page * pages_per_chunk))) //chunk is full
-    {
-      num_ref_chunks++;
-      if( ((ref_buf[num_ref_chunks] = malloc(refs_per_page * pages_per_chunk)) == NULL) )
-	perror("malloc ref chunk failed");
-    }
-  return CURR_REF;
+  return CURRENT_REF();
}
+//advance the post stack after alloc_ref records a post
void
inc_posts()
-{
-  if(num_posts >= curr_max_posts)
-    { if( (realloc((void*) ref_buf, PTRS_IN_PAGE * 4)) == NULL)
-	perror("realoc post_buf failed");
-      curr_max_posts += PTRS_IN_PAGE;
-    }
-  if ((CURR_POST = (struct ref*) malloc (sizeof (struct ref))) == NULL)
-    perror("malloc post failed");
-
+{ num_posts++;
+  if(POST_FULL())
+    { CSP_PUSH(pcs);
+      //the fresh chunk's first dat lives at its base
+      pcs.dsp[CHUNKS_LEN(pcs) - 1] = CURRENT_CHUNK(pcs);
+    }
+  else
+    POST_ALLOC();
}
+//returns the most recently allocated odat
+//(was declared struct cdat* — incompatible with the struct odat* it returns)
-struct cdat*
+struct odat*
curr_odat
()
{
-  return CURR_ODAT;
+  return CURRENT_ODAT();
}
+//hand back the most recently allocated vdat
struct vdat*
curr_vdat
()
{
-  return CURR_VDAT;
+  struct vdat *vp = CURRENT_VDAT();
+  return vp;
}
+//address of the set currently being built inside the current cdat
struct set*
curr_set
()
{
-  return &CURR_CDAT->CURR_SET;
+  struct cdat *cp = CURRENT_CDAT();
+  return &cp->set_list[cp->num_sets];
}
+//the ref allocated just before the current one: step back within the
+//current chunk, or fall back to the last ref of the previous chunk at a
+//boundary (PREVIOUS_REF() alone is only right at a chunk boundary)
struct ref*
prev_ref
()
{
-  return PREV_REF;
+  if((void*) CURRENT_REF() > CURRENT_CHUNK(rcs))
+    return CURRENT_REF() - 1;
+  return PREVIOUS_REF();
}
+//address of the model currently being built inside the current vdat
-struct model
+struct model*
curr_model
()
{
-  return CURR_MODEL;
+  struct vdat *vp = CURRENT_VDAT();
+  return &vp->model_list[vp->num_models];
}