c241a29aa3fa78849164a8f979bd14b30963d7a9
[henge/webcc.git] / src / apc / irmem.c
1 #include <stdio.h>
2 #include <stdlib.h>
3 #include <stdint.h>
4 #include <errno.h>
5 #include <string.h>
6 #include <stdlib.h>
7 #include <apc/ir.h>
8 #include <unistd.h>
9
/* Forward declarations for the allocators (alloc_*), the
   current-element accessors (curr_*), and the post-stack bump
   (inc_posts) defined below in this file. */
struct cdat*
alloc_cdat(void);
struct odat*
alloc_odat(void);
struct vdat*
alloc_vdat(void);
struct link*
alloc_link(void);
struct ref*
alloc_ref(void);
struct cdat*
curr_cdat(void);
struct odat*
curr_odat(void);
struct vdat*
curr_vdat(void);
struct variant*
curr_variant(void);
struct set*
curr_set(void);
struct ref*
curr_ref(void);
struct model*
curr_model(void);
void
inc_posts(void);
36
37 #define PAGES_PER_CHUNK 16
38
39 //"type free" chunk stacking
struct chunk_stack
{ void* chunks[MAX_CHUNKS];  /* base pointer of each malloc'd chunk */
  void* *csp; //chunk stack pointer (one past the last pushed chunk)
  void* dsp[MAX_CHUNKS]; //dat stack pointer (per chunk)
  int chunk_size; //size of a chunk (including its forfeited page)
  int max_dats; //number of dats per chunk for this stack
} ocs, vcs, ccs, rcs, lcs, pcs, varcs; //odat, vdat, cdat,variant, ref, link, post stacks
/* NOTE(review): these stacks are file-scope and zero-initialized; csp
   and dsp are only meaningful after the corresponding INIT_* macro
   has run (see ir_init). */
47
//type safety handled by macro expansion (do not call these directly from code; make dependent macros for access to these)

/* Number of chunks that have been pushed onto STACK. */
#define CHUNKS_LEN(STACK) ((STACK).csp - (STACK).chunks)
/* Most recently pushed chunk (undefined before the first CSP_PUSH). */
#define CURRENT_CHUNK(STACK) ((STACK).chunks[CHUNKS_LEN(STACK) - 1])
/* True when every slot of chunks[] is in use.  BUG FIX: the bound is
   MAX_CHUNKS pointers, not MAX_CHUNKS * chunk_size -- the old test
   could never fire, so overflow of chunks[] went undetected. */
#define CHUNKS_FULL(STACK) ((STACK).csp >= (STACK).chunks + MAX_CHUNKS)
/* Dat stack pointer of the current chunk, typed. */
#define CURRENT_DSP(STACK,TYPE) ((TYPE*) ((STACK).dsp[CHUNKS_LEN(STACK) - 1]))
/* True when the current chunk has no room left for another dat.
   (char* arithmetic: arithmetic on void* is a GNU extension.)
   NOTE(review): a dat straddling the end is not detected until the
   *next* allocation -- confirm chunk_size is a multiple of the dat
   size or accept the slack. */
#define DATA_FULL(STACK,TYPE) ((char*) CURRENT_DSP(STACK,TYPE) >= \
                               (char*) CURRENT_CHUNK(STACK) + (STACK).chunk_size)
/* Push a fresh chunk: malloc it into the next chunks[] slot, seat the
   chunk's dat stack pointer one page past its base (the first page of
   every chunk is forfeited -- see chunk_size comment), then advance
   csp.  BUG FIX: the old version pre-incremented csp, which was never
   seated on chunks[] (NULL at start-up), and never set dsp for the
   new chunk.  NOTE(review): malloc remains unchecked here, as before. */
#define CSP_PUSH(STACK) ( *(STACK).csp = malloc((STACK).chunk_size),   \
                          (STACK).dsp[CHUNKS_LEN(STACK)] =             \
                              (char*) *(STACK).csp + pagesize,         \
                          ++(STACK).csp )
#define CURRENT_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 1])
#define PREVIOUS_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 2])
/* Bump the current chunk's dat pointer to the next TYPE slot. */
#define ALLOC_DAT(STACK,TYPE) (++CURRENT_DATP(STACK,TYPE))
/* Initialize a stack and push its first chunk.  BUG FIX: csp is now
   seated on chunks[] before the first push (it used to stay NULL),
   and the bogus loop that added pagesize to every dsp[] slot of a
   NULL base is gone -- CSP_PUSH seats each chunk's dsp as it is
   pushed.  Kept as a brace block so the existing `(INIT_STACK(...))`
   wrappers remain GNU statement expressions. */
#define INIT_STACK(STACK,TYPE)                                \
  { (STACK).csp = (STACK).chunks;                             \
    (STACK).chunk_size = PAGES_PER_CHUNK * pagesize;          \
    (STACK).max_dats = (STACK).chunk_size / sizeof (TYPE);    \
    CSP_PUSH(STACK);                                          \
  }
//Stack-specific macros (called directly from code; safety enforcement)
#define INIT_ODAT() (INIT_STACK(ocs, struct odat))
#define CURRENT_ODAT() (CURRENT_DATP(ocs,struct odat))
#define ODAT_FULL() (DATA_FULL(ocs,struct odat))
#define ODAT_ALLOC() (ALLOC_DAT(ocs,struct odat))
#define OCS_FULL() (CHUNKS_FULL(ocs))
#define INIT_VDAT() (INIT_STACK(vcs, struct vdat))
#define CURRENT_VDAT() (CURRENT_DATP(vcs,struct vdat))
#define VDAT_FULL() (DATA_FULL(vcs,struct vdat))
#define VDAT_ALLOC() (ALLOC_DAT(vcs,struct vdat))
#define VCS_FULL() (CHUNKS_FULL(vcs))
#define INIT_CDAT() (INIT_STACK(ccs, struct cdat))
#define CURRENT_CDAT() (CURRENT_DATP(ccs,struct cdat))
#define CDAT_FULL() (DATA_FULL(ccs, struct cdat))
#define CDAT_ALLOC() (ALLOC_DAT(ccs, struct cdat))
#define CCS_FULL() (CHUNKS_FULL(ccs))
#define INIT_VARIANT() (INIT_STACK(varcs, struct variant))
#define CURRENT_VARIANT() (CURRENT_DATP(varcs, struct variant))
#define VARIANT_FULL() (DATA_FULL(varcs, struct variant))
#define VARIANT_ALLOC() (ALLOC_DAT(varcs, struct variant))
#define VARCS_FULL() (CHUNKS_FULL(varcs))
#define INIT_LINK() (INIT_STACK(lcs, struct link))
#define CURRENT_LINK() (CURRENT_DATP(lcs,struct link))
#define LDAT_FULL() (DATA_FULL(lcs, struct link))
#define LDAT_ALLOC() (ALLOC_DAT(lcs, struct link))
#define LCS_FULL() (CHUNKS_FULL(lcs))
/* BUG FIX: INIT_POST used to initialize rcs (the ref stack) a second
   time and left pcs untouched, while every other POST_* macro
   operates on pcs. */
#define INIT_POST() (INIT_STACK(pcs, struct ref))
#define CURRENT_POST() (CURRENT_DATP(pcs,struct ref))
#define POST_FULL() (DATA_FULL(pcs,struct ref))
#define POST_ALLOC() (ALLOC_DAT(pcs,struct ref))
#define PCS_FULL() (CHUNKS_FULL(pcs))
#define INIT_REF() (INIT_STACK(rcs, struct ref))
#define CURRENT_REF() (CURRENT_DATP(rcs,struct ref))
#define PREVIOUS_REF() (PREVIOUS_DATP(rcs, struct ref))
#define REF_FULL() (DATA_FULL(rcs,struct ref))
#define REF_ALLOC() (ALLOC_DAT(rcs,struct ref))
#define RCS_FULL() (CHUNKS_FULL(rcs))
//Metadata
#define CURRENT_SET() (CURRENT_CDAT()->set_list[CURRENT_CDAT()->num_sets])
//#define CURRENT_QUAD() (CURRENT_VARIANT()->quad_list[CURRENT_VARIANT()->num_quads])
//#define CURRENT_MODEL() (CURRENT_VDAT()->model_list[CURRENT_VDAT()->num_models])
110
111
112
long pagesize;                  /* set from sysconf(_SC_PAGESIZE) in ir_init */

int num_cdats = 0;              /* running count of allocated cdats */

struct cdat* cdat_stack[MAX_CLASSES];   /* stack of open class dats */
struct cdat** cdat_stackp;      /* top of cdat_stack; NOTE(review): not
                                   seated on cdat_stack before ir_init
                                   dereferences it -- confirm */

int num_odats = 0;              /* running count of allocated odats */

int num_vdats = 0;              /* running count of allocated vdats */

int num_variants = 0;           /* running count of allocated variants */

int num_refs = 0;               /* running count of allocated refs */
int ss_ref_id = 0x0FFFFFFF; /* system space for ref_ids */

int num_posts = 0;              /* running count of post (signpost) entries */

int num_links = 0;              /* running count of allocated links */
132
133
134 /* The initalization function of the IR. */
135 int
136 ir_init()
137 {
138
139 char root[4] = "root";
140
141 pagesize = sysconf(_SC_PAGESIZE);
142
143 INIT_CDAT();
144 *cdat_stackp = CURRENT_CDAT();
145
146 memmove((*cdat_stackp)->name, root, 32);
147
148 INIT_ODAT();
149 INIT_VDAT();
150 INIT_VARIANT();
151 INIT_LINK();
152 INIT_REF();
153 INIT_POST();
154
155
156 return 0;
157
158 }
159
160 void
161 ir_quit()
162 {
163 int i;
164
165 for(i = 0; i < CHUNKS_LEN(ccs) ; i++)
166 {
167 free(ccs.chunks[i]);
168 }
169 for(i = 0; i < CHUNKS_LEN(ocs); i++)
170 {
171 free(ocs.chunks[i]);
172 }
173 for(i = 0; i < CHUNKS_LEN(vcs) ; i++)
174 {
175 free(vcs.chunks[i]);
176 }
177 for(i = 0; i < CHUNKS_LEN(rcs); i++)
178 {
179 free(rcs.chunks[i]);
180 }
181 for(i = 0; i < CHUNKS_LEN(lcs); i++)
182 {
183 free(lcs.chunks[i]);
184 }
185 for(i = 0; i < CHUNKS_LEN(pcs); i++)
186 {
187 free(pcs.chunks[i]);
188 }
189
190 }
191
192 struct cdat*
193 alloc_cdat()
194 {
195 num_cdats++;
196 if(CDAT_FULL())
197 { if(CCS_FULL())
198 { fprintf(stderr, "You have allocated to many (%d) cdats ", num_cdats);
199 exit(EXIT_FAILURE);
200 }
201 else
202 CSP_PUSH(ccs);
203 }
204 else
205 CDAT_ALLOC();
206
207 return CURRENT_CDAT();
208 }
209
210 //these should probably be inline
211 struct odat*
212 alloc_odat
213 ()
214 {
215 num_odats++;
216 if(ODAT_FULL())
217 { if(!OCS_FULL())
218 { fprintf(stderr, "You have allocated to many (%d) odats ", num_odats);
219 exit(EXIT_FAILURE);
220 }
221 else
222 CSP_PUSH(ocs);
223 }
224 else
225 ODAT_ALLOC();
226
227 return CURRENT_ODAT();
228 }
229
230 struct vdat*
231 alloc_vdat
232 ()
233 { num_vdats++;
234 if(VDAT_FULL())
235 { if(!VCS_FULL())
236 { fprintf(stderr, "You have allocated to many (%d) vdats ", num_vdats);
237 exit(EXIT_FAILURE);
238 }
239 else
240 CSP_PUSH(vcs);
241 }
242 else
243 VDAT_ALLOC();
244
245 return CURRENT_VDAT();
246 }
247
248 struct variant*
249 alloc_variant
250 ()
251 { num_variants++;
252 if(VARIANT_FULL())
253 { if(!VARCS_FULL())
254 { fprintf(stderr, "You have allocated to many (%d) variants ", num_variants);
255 exit(EXIT_FAILURE);
256 }
257 else
258 CSP_PUSH(varcs);
259 }
260 else
261 VARIANT_ALLOC();
262
263 return CURRENT_VARIANT();
264 }
265
266
267 struct link*
268 alloc_link
269 ()
270 { num_links++;
271 if(LDAT_FULL())
272 { if(!LCS_FULL())
273 { fprintf(stderr, "You have allocated to many (%d) links ", num_links);
274 exit(EXIT_FAILURE);
275 }
276 else
277 CSP_PUSH(lcs);
278 }
279 else
280 LDAT_ALLOC();
281
282 return CURRENT_LINK();
283
284 }
285
286 struct ref*
287 alloc_ref
288 ()
289 { num_refs++;
290 if(REF_FULL())
291 { if(!RCS_FULL())
292 { fprintf(stderr, "You have allocated to many (%d) refs ", num_refs);
293 exit(EXIT_FAILURE);
294 }
295 else
296 CSP_PUSH(rcs);
297 }
298 else
299 REF_ALLOC();
300
301
302 if(num_refs % 16 == 0)
303 { CURRENT_POST() = CURRENT_REF();
304 inc_posts();
305 }
306
307 return CURRENT_REF();
308 }
309
310 void
311 inc_posts()
312 { num_posts++;
313 if(POST_FULL())
314 { if(!PCS_FULL())
315 { fprintf(stderr, "You have allocated to many (%d) refs ", num_posts);
316 exit(EXIT_FAILURE);
317 }
318 else
319 CSP_PUSH(pcs);
320 }
321 else
322 POST_ALLOC();
323
324 }
325
326 struct cdat*
327 curr_cdat
328 ()
329 {
330 return (*cdat_stackp);
331 }
332
/* Accessor: the odat most recently reserved. */
struct odat*
curr_odat
()
{
  struct odat *od = CURRENT_ODAT();
  return od;
}
/* Accessor: the vdat most recently reserved. */
struct vdat*
curr_vdat
()
{
  struct vdat *vd = CURRENT_VDAT();
  return vd;
}
345 struct set*
346 curr_set
347 ()
348 {
349 return &CURRENT_SET();
350 }
/* Accessor: the ref most recently reserved. */
struct ref*
curr_ref
()
{
  struct ref *rp = CURRENT_REF();
  return rp;
}
/* Accessor: the variant most recently reserved. */
struct variant*
curr_variant
()
{
  struct variant *vp = CURRENT_VARIANT();
  return vp;
}
363 /* struct quad* */
364 /* curr_quad */
365 /* () */
366 /* { */
367 /* return &CURRENT_QUAD(); */
368 /* } */
369 /* struct model* */
370 /* curr_model */
371 /* () */
372 /* { */
373 /* return &CURRENT_MODEL(); */
374 /* } */