5b4fb574981ff55b6e140d22cba758eb8c89b52b
[henge/webcc.git] / src / apc / irmem.c
1 #include <stdio.h>
2 #include <stdlib.h>
3 #include <stdint.h>
4 #include <errno.h>
5 #include <string.h>
6 #include <stdlib.h>
7 #include <apc/ir.h>
8 #include <unistd.h>
9
/* Forward declarations for the IR allocation pools.
   Each alloc_* hands out storage for one IR record; curr_* / prev_*
   return cursors into the pools; inc_posts advances the post cursor. */
struct cdat*
alloc_cdat(void);   /* class dat */
struct odat*
alloc_odat(void);   /* object dat, from the chunked odat pool */
void
alloc_vdat(void);   /* video dat, from the chunked vdat pool */
struct link*
alloc_link(void);
struct ref*
alloc_ref(void);    /* ref, from the chunked ref pool */
struct cdat*
curr_cdat(void);    /* cdat on top of the class stack */
struct odat*
curr_odat(void);    /* most recently allocated odat */
struct vdat*
curr_vdat(void);    /* most recently allocated vdat */
struct set*
curr_set(void);     /* current set inside the current cdat */
struct ref*
prev_ref(void);     /* ref allocated just before the current one */
struct model
curr_model(void);   /* current model of the current vdat (by value) */
void
inc_posts(void);    /* advance the post cursor */
34
/* Cursors into the allocation pools.
 *
 * CURR_CDAT  - cdat slot addressed by the class-stack pointer
 * *_IDX      - index of the newest dat within its current chunk
 * CURR_*     - pointer to the newest dat of each pool
 *
 * BUG FIX: the previous versions offset typed pointers by
 * sizeof(...)-byte amounts plus a stray pagesize (double-scaling the
 * pointer arithmetic), and CURR_MODEL used `.` on a pointer
 * expression, which does not compile.  They are now plain element
 * indexing into the current chunk.
 */
#define CURR_CDAT (*cdat_stackp)
#define CURR_SET set_list[CURR_CDAT->num_sets]
#define REF_IDX (num_refs % (refs_per_page * pages_per_chunk))
#define CURR_REF (((struct ref*) ref_buf[num_ref_chunks]) + REF_IDX)
#define PREV_REF (CURR_REF - 1)
#define ODAT_IDX (num_odats % (odats_per_page * pages_per_chunk))
#define CURR_ODAT (((struct odat*) odat_buf[num_odat_chunks]) + ODAT_IDX)
#define VDAT_IDX (num_vdats % (vdats_per_page * pages_per_chunk))
#define CURR_VDAT (vdat_buf[num_vdat_chunks] + VDAT_IDX)
#define CURR_MODEL (CURR_VDAT->model_list[CURR_VDAT->num_models])
#define CURR_LINK (link_buf[num_links])
#define CURR_POST (post_buf[num_posts])
#define CURR_QUAD (CURR_ODAT->quad_file)
48
long pagesize;               /* system page size, cached by ir_init() */

int pages_per_chunk = 10;    /* pages worth of dats per pool chunk */

int num_cdats = 0;                   /* cdats allocated so far */
int curr_max_cdats = PTRS_IN_PAGE;   /* capacity of cdat_buf / cdat_stack */

struct cdat* cdat_buf[PTRS_IN_PAGE];    /* individually malloc'd cdats */
struct cdat* cdat_stack[PTRS_IN_PAGE];  /* enclosing-class stack */
struct cdat** cdat_stackp;              /* top of cdat_stack (next free slot) */


int num_odat_chunks = 0;     /* index of the chunk currently being filled */
int num_odats = 0;           /* odats handed out so far (pre-incremented) */
void* odat_buf[MAX_CHUNKS];  /* chunked odat pool; one malloc per chunk */
long odats_per_page;         /* odats that fit in one page — set by ir_init() */
66
67 //"type free" chunk stacking
68 struct chunk_stack
69 { void* chunks[MAX_CHUNKS];
70 void* csp; //chunk stack pointer
71 void* dsp[MAX_CHUNKS]; //dat stack pointer (per chunk)
72 } ocs, vcs, ccs; //odat, vdat, and cdat chunk stacks
73
74 //type safety handled by macro expansion
75 #define CHUNK_LEN(STACK) ((STACK).csp - (STACK).chunks)
76 #define CURRENT_DAT(STACK,TYPE) ((TYPE)(STACK.dsp[CHUNK_LEN(STACK)])
77 #define INCREMENT_DAT(STACK,TYPE) (++CURRENT_DAT(STACK,TYPE))
78 //Stack-specific macros
79 #define CURRENT_ODAT() (CURRENT_DAT(ocs,struct odat*))
80 #define CURRENT_VDAT() (CURRENT_DAT(vcs,struct vdat*))
81 #define CURRENT_CDAT() (CURRENT_DAT(ccs,struct cdat*))
82 //Metadata
83 #define CURRENT_MODEL() (CURRENT_VDAT()->model_list[CURRENT_VDAT()->num_models])
84
85
int num_vdat_chunks = 0;         /* index of the vdat chunk being filled */
int num_vdats = 0;               /* vdats handed out so far (pre-incremented) */
struct vdat* vdat_buf[MAX_CHUNKS];  /* chunked vdat pool */
long vdats_per_page;             /* vdats per page — set by ir_init() */

int num_ref_chunks = 0;          /* index of the ref chunk being filled */
int num_refs = 0;                /* refs handed out so far (pre-incremented) */
void* ref_buf[MAX_CHUNKS];       /* chunked ref pool */
long refs_per_page;              /* refs per page — set by ir_init() */
uint64_t ss_ref_id = 0x00FFFFFF; /* system space for ref_ids */

/* posts: bookmark pointers taken every 16th ref.
   num_posts starts at -1: it is incremented before first use. */
int num_posts = -1;
int curr_max_posts = PTRS_IN_PAGE;
struct ref* post_buf[PTRS_IN_PAGE];


/* num_links also starts at -1 (pre-increment convention). */
int num_links = -1;
int curr_max_links = PTRS_IN_PAGE;
struct link* link_buf[PTRS_IN_PAGE];
106
107 /* The initalization function of the IR. */
108 int
109 ir_init()
110 {
111
112 /* Init root cdat and stack */
113 char root[4] = "root";
114
115 if( (cdat_buf[num_cdats] = (struct cdat*) malloc(sizeof(struct cdat))) == NULL)
116 {
117 perror("malloc root class failed\n");
118 return -1;
119 }
120 cdat_buf[num_cdats]->idx = num_cdats;
121 memmove(cdat_buf[num_cdats]->name, root, 4);
122
123 cdat_stackp = cdat_stack;
124 *cdat_stackp++ = cdat_buf[num_cdats++];
125
126 pagesize = sysconf(_SC_PAGESIZE);
127 odats_per_page = (sizeof (struct odat)/pagesize);
128 vdats_per_page = (sizeof (struct vdat)/pagesize);
129 refs_per_page = (sizeof (struct ref)/pagesize);
130
131 return 0;
132
133 }
134
135 void
136 ir_quit()
137 {
138 int i;
139
140 for(i = 0; i <= num_odats ; i++)
141 {
142 free(odat_buf[i]);
143 }
144 for(i = 0; i <= num_cdats; i++)
145 {
146 free(cdat_buf[i]);
147 }
148 for(i = 0; i <= num_vdats; i++)
149 {
150 free(vdat_buf[i]);
151 }
152 for(i = 0; i <= num_refs; i++)
153 {
154 free(ref_buf[i]);
155 }
156 for(i = 0; i<= num_links; i++)
157 {
158 free(link_buf[i]);
159 }
160
161 }
162
163 //TODO: FREE MEMORY!
164 struct cdat*
165 alloc_cdat()
166 {
167 num_cdats++;
168 if(curr_max_cdats <= num_cdats)
169 { if( (realloc((void*) cdat_buf, PTRS_IN_PAGE * 4)) == NULL)
170 perror("realloc cdat_buf failed");
171 curr_max_cdats += PTRS_IN_PAGE;
172 if( (realloc( (void*) cdat_stack, PTRS_IN_PAGE * 4)) == NULL) //increase cdat_stack also
173 perror("realloc cdat_stack failed");
174 }
175 if( (CURR_CDAT = (struct cdat*) malloc(sizeof (struct cdat)) ) == NULL )
176 perror("malloc cdat failed");
177
178 return CURR_CDAT;
179
180 }
181 struct odat*
182 alloc_odat
183 ()
184 {
185 num_odats++;
186
187 if(!(num_odats % (odats_per_page * pages_per_chunk))) //chunk is full
188 {
189 num_odat_chunks++;
190 if( ((odat_buf[num_odat_chunks] = malloc(odats_per_page * pages_per_chunk)) == NULL) )
191 perror("malloc odat chunk failed");
192 }
193
194 return CURR_ODAT;
195 }
196
197 void
198 alloc_vdat
199 ()
200 {
201 num_vdats++;
202
203 if(!(num_vdats % (vdats_per_page * pages_per_chunk))) //chunk is full
204 {
205 num_vdat_chunks++;
206 if( ((vdat_buf[num_vdat_chunks] = malloc(vdats_per_page * pages_per_chunk)) == NULL) )
207 perror("malloc vdat chunk failed");
208 }
209
210 }
211
212 struct link*
213 alloc_link
214 ()
215 {
216 num_links++;
217
218 if(num_links >= curr_max_links)
219 { if( (realloc((void*) link_buf, PTRS_IN_PAGE * 4)) == NULL)
220 perror("realloc vdat_buf failed");
221 curr_max_links += PTRS_IN_PAGE;
222 }
223 if((CURR_LINK = (struct link*) malloc(sizeof (struct link))) == NULL)
224 perror("malloc link failed");
225
226 return CURR_LINK;
227 }
228
229 struct ref*
230 alloc_ref
231 ()
232 {
233 num_refs++;
234
235 if(num_refs % 16 == 0)
236 { CURR_POST = CURR_REF;
237 inc_posts();
238 }
239 if(!(num_refs % (refs_per_page * pages_per_chunk))) //chunk is full
240 {
241 num_ref_chunks++;
242 if( ((ref_buf[num_ref_chunks] = malloc(refs_per_page * pages_per_chunk)) == NULL) )
243 perror("malloc ref chunk failed");
244 }
245
246 return CURR_REF;
247 }
248
249 void
250 inc_posts()
251 {
252 if(num_posts >= curr_max_posts)
253 { if( (realloc((void*) ref_buf, PTRS_IN_PAGE * 4)) == NULL)
254 perror("realoc post_buf failed");
255 curr_max_posts += PTRS_IN_PAGE;
256 }
257 if ((CURR_POST = (struct ref*) malloc (sizeof (struct ref))) == NULL)
258 perror("malloc post failed");
259
260 }
261
262 struct cdat*
263 curr_cdat
264 ()
265 {
266 return CURR_CDAT;
267 }
268
269 struct odat*
270 curr_odat
271 ()
272 {
273 return CURR_ODAT;
274 }
275 struct vdat*
276 curr_vdat
277 ()
278 {
279 return CURR_VDAT;
280 }
281 struct set*
282 curr_set
283 ()
284 {
285 return &CURR_CDAT->CURR_SET;
286 }
287 struct ref*
288 prev_ref
289 ()
290 {
291 return PREV_REF;
292 }
293 struct model
294 curr_model
295 ()
296 {
297 return CURR_MODEL;
298 }