//"type free" chunk stacking: each stack owns up to MAX_CHUNKS malloc'd
//chunks; dats of one type are carved sequentially out of the current chunk.
struct chunk_stack
{
  void *chunks[MAX_CHUNKS]; //malloc'd chunk pointers, filled bottom-up
  void **csp;               //chunk stack pointer (cursor into chunks[])
  void *dsp[MAX_CHUNKS];    //dat stack pointer, one per chunk
  int chunk_size;           //size of a chunk (including its forfeited page)
  int max_dats;             //number of dats per chunk for this stack
} ocs, vcs, ccs, rcs, lcs, pcs; //odat, vdat, and cdat, ref, link, post stacks
+
/* Type safety handled by macro expansion.  Do not call these generic macros
 * directly from code; use the per-stack wrappers below (safety enforcement).
 *
 * Invariants:
 *   - csp points ONE PAST the last chunk in use, so CHUNKS_LEN() is a count
 *     and CURRENT_CHUNK() is chunks[count - 1];
 *   - dsp[i] tracks the most recently allocated dat inside chunks[i].
 */
#define CHUNKS_LEN(STACK) ((STACK).csp - (STACK).chunks)
#define CURRENT_CHUNK(STACK) ((STACK).chunks[CHUNKS_LEN(STACK) - 1])
#define CURRENT_DSP(STACK,TYPE) ((TYPE*) ((STACK).dsp[CHUNKS_LEN(STACK) - 1]))
/* Chunk is full once its dat pointer has walked to the end of the chunk.
 * char* casts: pointer arithmetic on void* is a GNU extension, not ISO C.
 * NOTE(review): a dat straddling the boundary relies on the chunk's
 * forfeited page to absorb the overrun -- confirm chunk_size accounting. */
#define DATA_FULL(STACK,TYPE) ((char*) CURRENT_DSP(STACK,TYPE) >= \
                               ((char*) CURRENT_CHUNK(STACK) + (STACK).chunk_size))
/* Mount a fresh chunk and seat its dat pointer at the chunk base; evaluates
 * to the new chunk pointer.  Fixes two defects in the previous version:
 * dsp[] for the new chunk was never initialized (CURRENT_DATP read garbage
 * after every roll-over), and the pre-increment *(++csp) left CURRENT_CHUNK
 * pointing one chunk behind the one just pushed.  Comma operators sequence
 * the csp bump after the stores.  malloc is unchecked -- a NULL return will
 * crash on first use; TODO: handle allocation failure. */
#define CSP_PUSH(STACK) ((STACK).dsp[CHUNKS_LEN(STACK)] = \
                           (*(STACK).csp = malloc((STACK).chunk_size)), \
                         (STACK).csp++, \
                         CURRENT_CHUNK(STACK))
#define CURRENT_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 1])
/* NOTE(review): this yields the dat pointer of the PREVIOUS CHUNK, not the
 * previous dat within the current chunk (which the pre-rewrite PREVIOUS_DAT
 * returned) -- confirm PREVIOUS_REF() callers expect the new semantics. */
#define PREVIOUS_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 2])
#define ALLOC_DAT(STACK,TYPE) (++CURRENT_DATP(STACK,TYPE))
/* chunk_size is a BYTE count: pagesize * PAGES_PER_CHUNK.  (It was set to
 * the bare page count, which made max_dats truncate to 0 and every chunk
 * malloc absurdly small.)  csp must be seated on chunks[] before the first
 * CSP_PUSH: these stacks are zero-initialized globals, so csp starts NULL. */
#define INIT_STACK(STACK,TYPE) \
  do { \
    (STACK).chunk_size = pagesize * PAGES_PER_CHUNK; \
    (STACK).max_dats = (STACK).chunk_size / sizeof (TYPE); \
    (STACK).csp = (STACK).chunks; /* empty stack: count == 0 */ \
    CSP_PUSH(STACK); \
  } while (0)
//Stack-specific macros (called directly from code; safety enforcement)
#define CURRENT_ODAT() (CURRENT_DATP(ocs,struct odat))
#define ODAT_FULL() (DATA_FULL(ocs,struct odat))
#define ODAT_ALLOC() (ALLOC_DAT(ocs,struct odat))
#define CURRENT_VDAT() (CURRENT_DATP(vcs,struct vdat))
#define VDAT_FULL() (DATA_FULL(vcs,struct vdat))
#define VDAT_ALLOC() (ALLOC_DAT(vcs,struct vdat))
#define CURRENT_CDAT() (CURRENT_DATP(ccs,struct cdat))
#define CDAT_FULL() (DATA_FULL(ccs, struct cdat))
#define CDAT_ALLOC() (ALLOC_DAT(ccs, struct cdat))
#define CURRENT_LINK() (CURRENT_DATP(lcs,struct link))
#define LDAT_FULL() (DATA_FULL(lcs, struct link))
#define LDAT_ALLOC() (ALLOC_DAT(lcs, struct link))
#define CURRENT_POST() (CURRENT_DATP(pcs,struct ref))
#define POST_FULL() (DATA_FULL(pcs,struct ref))
#define POST_ALLOC() (ALLOC_DAT(pcs,struct ref))
#define CURRENT_REF() (CURRENT_DATP(rcs,struct ref))
#define PREVIOUS_REF() (PREVIOUS_DATP(rcs, struct ref))
#define REF_FULL() (DATA_FULL(rcs,struct ref))
#define REF_ALLOC() (ALLOC_DAT(rcs,struct ref))
//Metadata
#define CURRENT_SET() (CURRENT_CDAT()->set_list[CURRENT_CDAT()->num_sets])
#define CURRENT_MODEL() (CURRENT_VDAT()->model_list[CURRENT_VDAT()->num_models])
return CURR_CDAT;
}
+
+//these should probably be inline
struct odat*
alloc_odat
()
-{
- if(CHUNK_FULL(ocs, struct odat*))
+{ if(ODAT_FULL())
CSP_PUSH(ocs);
else
- INCREMENT_DAT(ocs, struct odat*);
-
+ ODAT_ALLOC();
return CURRENT_ODAT();
}
void
alloc_vdat
()
-{
- num_vdats++;
-
- if(CHUNK_FULL(vcs, struct vdat*))
+{ num_vdats++;
+ if(VDAT_FULL())
CSP_PUSH(vcs);
else
- INCREMENT_DAT(vcs, struct vdat*);
-
+ VDAT_ALLOC();
}
struct link*
alloc_link
()
-{
- num_links++;
-
- if(CHUNK_FULL(lcs, struct link*))
+{ num_links++;
+ if(LDAT_FULL())
CSP_PUSH(lcs);
else
- INCREMENT_DAT(lcs, struct link*);
-
+ LDAT_ALLOC();
return CURRENT_LINK();
}
struct ref*
alloc_ref
()
-{
- num_refs++;
- if(CHUNK_FULL(rcs, struct link*))
+{ num_refs++;
+ if(REF_FULL())
CSP_PUSH(rcs);
else
- INCREMENT_DAT(rcs, struct link*);
+ REF_ALLOC();
if(num_refs % 16 == 0)
{ CURRENT_POST() = CURRENT_REF();
void
inc_posts()
-{
- num_posts++;
-
- if(CHUNK_FULL(pcs, struct ref*))
- {CSP_PUSH(pcs);}
+{ num_posts++;
+ if(POST_FULL())
+ CSP_PUSH(pcs);
else
- INCREMENT_DAT(pcs, struct ref*);
-
-
+ POST_ALLOC();
}
struct cdat*