// stb/tests/caveview/cave_parse.c
1 #include <assert.h>
2 #include <stdio.h>
3 #include <limits.h>
4 #include <stdlib.h>
5
6 #define FAST_CHUNK // disabling this enables the old, slower path that deblocks into a regular form
7
8 #include "cave_parse.h"
9
10 #include "stb_image.h"
11 #include "stb.h"
12
13 #define NUM_CHUNKS_PER_REGION 32 // only on one axis
14 #define NUM_CHUNKS_PER_REGION_LOG2 5
15
16 #define NUM_COLUMNS_PER_CHUNK 16
17 #define NUM_COLUMNS_PER_CHUNK_LOG2 4
18
19 uint32 read_uint32_be(FILE *f)
20 {
21 unsigned char data[4];
22 fread(data, 1, 4, f);
23 return (data[0]<<24) + (data[1]<<16) + (data[2]<<8) + data[3];
24 }
25
// A raw, still-zlib-compressed chunk record as read from a region file,
// plus its identity and a refcount so a caller can hold it across eviction.
typedef struct
{
   uint8 *data;   // compressed record bytes; NULL when the chunk is absent
   size_t len;    // byte count at 'data'; 0 means the chunk is absent
   int x,z; // chunk index
   int refcount; // for multi-threading
} compressed_chunk;

// Header table of one region file (32x32 chunks).
typedef struct
{
   int x,z;   // region coordinates this table belongs to
   // per-chunk location code: high 24 bits = sector offset, low 8 bits = sector count
   uint32 sector_data[NUM_CHUNKS_PER_REGION][NUM_CHUNKS_PER_REGION];
} region;
39
size_t cached_compressed=0;   // NOTE(review): never updated in this file; appears unused here

// Most-recently-opened region file, cached so consecutive reads of the
// same region avoid re-opening it.
FILE *last_region;
int last_region_x;
int last_region_z;
int opened=0;   // nonzero once open_file() has run at least once
46
47 static void open_file(int reg_x, int reg_z)
48 {
49 if (!opened || last_region_x != reg_x || last_region_z != reg_z) {
50 char filename[256];
51 if (last_region != NULL)
52 fclose(last_region);
53 sprintf(filename, "r.%d.%d.mca", reg_x, reg_z);
54 last_region = fopen(filename, "rb");
55 last_region_x = reg_x;
56 last_region_z = reg_z;
57 opened = 1;
58 }
59 }
60
61 static region *load_region(int reg_x, int reg_z)
62 {
63 region *r;
64 int x,z;
65
66 open_file(reg_x, reg_z);
67
68 r = malloc(sizeof(*r));
69
70 if (last_region == NULL) {
71 memset(r, 0, sizeof(*r));
72 } else {
73 fseek(last_region, 0, SEEK_SET);
74 for (z=0; z < NUM_CHUNKS_PER_REGION; ++z)
75 for (x=0; x < NUM_CHUNKS_PER_REGION; ++x)
76 r->sector_data[z][x] = read_uint32_be(last_region);
77 }
78 r->x = reg_x;
79 r->z = reg_z;
80
81 return r;
82 }
83
// Release a region header table allocated by load_region().
void free_region(region *r)
{
   free(r);
}
88
#define MAX_MAP_REGIONS 64 // in one axis: 64 regions * 32 chunk/region * 16 columns/chunk = 16384 columns
// direct-mapped cache of region header tables, indexed by the low bits of the region coords
region *regions[MAX_MAP_REGIONS][MAX_MAP_REGIONS];
91
92 static region *get_region(int reg_x, int reg_z)
93 {
94 int slot_x = reg_x & (MAX_MAP_REGIONS-1);
95 int slot_z = reg_z & (MAX_MAP_REGIONS-1);
96 region *r;
97
98 r = regions[slot_z][slot_x];
99
100 if (r) {
101 if (r->x == reg_x && r->z == reg_z)
102 return r;
103 free_region(r);
104 }
105
106 r = load_region(reg_x, reg_z);
107 regions[slot_z][slot_x] = r;
108
109 return r;
110 }
111
// about one region, so size should be ok
#define NUM_CACHED_X 64
#define NUM_CACHED_Z 64

// @TODO: is it really worth caching these? we probably can just
// pull them from the disk cache nearly as efficiently.
// Can test that by setting to 1x1?
// direct-mapped cache of compressed chunks, indexed by the low bits of the chunk coords
compressed_chunk *cached_chunk[NUM_CACHED_Z][NUM_CACHED_X];
120
121 static void deref_compressed_chunk(compressed_chunk *cc)
122 {
123 assert(cc->refcount > 0);
124 --cc->refcount;
125 if (cc->refcount == 0) {
126 if (cc->data)
127 free(cc->data);
128 free(cc);
129 }
130 }
131
// Return the compressed chunk record for (chunk_x, chunk_z), via a
// direct-mapped cache keyed on the low bits of the chunk coordinates.
// The cache holds one reference to the returned record; callers that keep
// it across cache operations must take their own reference.
static compressed_chunk *get_compressed_chunk(int chunk_x, int chunk_z)
{
   int slot_x = chunk_x & (NUM_CACHED_X-1);
   int slot_z = chunk_z & (NUM_CACHED_Z-1);
   compressed_chunk *cc = cached_chunk[slot_z][slot_x];

   if (cc && cc->x == chunk_x && cc->z == chunk_z)
      return cc;   // cache hit
   else {
      // cache miss: locate the region file holding this chunk
      int reg_x = chunk_x >> NUM_CHUNKS_PER_REGION_LOG2;
      int reg_z = chunk_z >> NUM_CHUNKS_PER_REGION_LOG2;
      region *r = get_region(reg_x, reg_z);
      if (cc) {
         // evict slot occupant; memory is released only once the last
         // outstanding reference is dropped
         deref_compressed_chunk(cc);
         cached_chunk[slot_z][slot_x] = NULL;
      }
      cc = malloc(sizeof(*cc));   // NOTE(review): malloc result unchecked
      cc->x = chunk_x;
      cc->z = chunk_z;
      {
         int subchunk_x = chunk_x & (NUM_CHUNKS_PER_REGION-1);
         int subchunk_z = chunk_z & (NUM_CHUNKS_PER_REGION-1);
         // header code: high 24 bits = sector offset, low 8 bits = sector count
         uint32 code = r->sector_data[subchunk_z][subchunk_x];

         if (code & 255) {
            // chunk present: read its sectors (4096 bytes each)
            open_file(reg_x, reg_z);
            fseek(last_region, (code>>8)*4096, SEEK_SET);
            cc->len = (code&255)*4096;
            cc->data = malloc(cc->len);
            fread(cc->data, 1, cc->len, last_region);   // NOTE(review): short read not detected
         } else {
            // chunk absent from this region file
            cc->len = 0;
            cc->data = 0;
         }
      }
      cc->refcount = 1;   // reference held by the cache itself
      cached_chunk[slot_z][slot_x] = cc;
      return cc;
   }
}
172
173
174 // NBT parser -- can automatically parse stuff we don't
175 // have definitions for, but want to explicitly parse
176 // stuff we do have definitions for.
177 //
178 // option 1: auto-parse everything into data structures,
179 // then read those
180 //
181 // option 2: have a "parse next object" which
182 // doesn't resolve whether it expands its children
183 // yet, and then the user either says "expand" or
184 // "skip" after looking at the name. Anything with
185 // "children" without names can't go through this
186 // interface.
187 //
188 // Let's try option 2.
189
190
// Read cursor over an in-memory NBT (Named Binary Tag) buffer.
typedef struct
{
   unsigned char *buffer_start;   // first byte of the decoded NBT data
   unsigned char *buffer_end;     // one past the last byte (bounds checks)
   unsigned char *cur;            // current read position
   int nesting;                   // compound/list depth, for sanity checking
   char temp_buffer[256];         // holds the last name returned by nbt_peek()
} nbt;

// NBT tag type ids.
enum { TAG_End=0, TAG_Byte=1, TAG_Short=2, TAG_Int=3, TAG_Long=4,
       TAG_Float=5, TAG_Double=6, TAG_Byte_Array=7, TAG_String=8,
       TAG_List=9, TAG_Compound=10, TAG_Int_Array=11 };
203
// Decode an NBT string (2-byte big-endian length followed by the bytes)
// into 'buffer', truncating to at most bufsize-1 characters and always
// NUL-terminating. A bufsize of 0 now writes nothing (the original wrote
// buffer[0] out of bounds in that case).
static void nbt_get_string_data(unsigned char *data, char *buffer, size_t bufsize)
{
   int len = data[0]*256 + data[1];
   int i;
   if (bufsize == 0)
      return;   // no room even for the terminator
   for (i=0; i < len && i+1 < (int) bufsize; ++i)
      buffer[i] = (char) data[i+2];
   buffer[i] = 0;
}
212
213 static char *nbt_peek(nbt *n)
214 {
215 unsigned char type = *n->cur;
216 if (type == TAG_End)
217 return NULL;
218 nbt_get_string_data(n->cur+1, n->temp_buffer, sizeof(n->temp_buffer));
219 return n->temp_buffer;
220 }
221
222 static uint32 nbt_parse_uint32(unsigned char *buffer)
223 {
224 return (buffer[0] << 24) + (buffer[1]<<16) + (buffer[2]<<8) + buffer[3];
225 }
226
227 static void nbt_skip(nbt *n);
228
229 // skip an item that doesn't have an id or name prefix (usable in lists)
// Skip one NBT payload of tag 'type'. n->cur must point at the payload
// itself (no tag-id/name header), which makes this usable for list elements.
static void nbt_skip_raw(nbt *n, unsigned char type)
{
   switch (type) {
      case TAG_Byte : n->cur += 1; break;
      case TAG_Short : n->cur += 2; break;
      case TAG_Int : n->cur += 4; break;
      case TAG_Long : n->cur += 8; break;
      case TAG_Float : n->cur += 4; break;
      case TAG_Double: n->cur += 8; break;
      // arrays: 4-byte big-endian count, then count fixed-size elements
      case TAG_Byte_Array: n->cur += 4 + 1*nbt_parse_uint32(n->cur); break;
      case TAG_Int_Array : n->cur += 4 + 4*nbt_parse_uint32(n->cur); break;
      // string: 2-byte big-endian length, then the bytes
      case TAG_String : n->cur += 2 + (n->cur[0]*256 + n->cur[1]); break;
      case TAG_List : {
         // element-type byte, 4-byte count, then 'count' headerless payloads
         unsigned char list_type = *n->cur++;
         unsigned int list_len = nbt_parse_uint32(n->cur);
         unsigned int i;
         n->cur += 4; // list_len
         for (i=0; i < list_len; ++i)
            nbt_skip_raw(n, list_type);
         break;
      }
      case TAG_Compound : {
         // skip named children until the closing TAG_End
         while (*n->cur != TAG_End)
            nbt_skip(n);
         nbt_skip(n); // skip the TAG_End
         break;
      }
      // NOTE(review): an unknown tag type falls through without advancing
      // the cursor; callers assume well-formed input
   }
   assert(n->cur <= n->buffer_end);
}
260
261 static void nbt_skip(nbt *n)
262 {
263 unsigned char type = *n->cur++;
264 if (type == TAG_End)
265 return;
266 // skip name
267 n->cur += (n->cur[0]*256 + n->cur[1]) + 2;
268 nbt_skip_raw(n, type);
269 }
270
271 // byteswap
// Reverse 'len' bytes in place (big-endian <-> native byte order).
static void nbt_swap(unsigned char *ptr, int len)
{
   unsigned char *lo = ptr;
   unsigned char *hi = ptr + len - 1;
   while (lo < hi) {
      unsigned char tmp = *lo;
      *lo++ = *hi;
      *hi-- = tmp;
   }
}
281
282 // pass in the expected type, fail if doesn't match
283 // returns a pointer to the data, byteswapped if appropriate
// Return a pointer to the payload of a headerless (list-element) tag of the
// given primitive or array type, byteswapping it in place to native order,
// and advance the cursor past it. For array types *len receives the element
// count; otherwise it is set to 1. NOTE: this mutates the underlying buffer,
// so each datum must be fetched at most once.
static void *nbt_get_fromlist(nbt *n, unsigned char type, int *len)
{
   unsigned char *ptr;
   assert(type != TAG_Compound);
   assert(type != TAG_List); // we could support getting lists of primitives as if they were arrays, but eh
   if (len) *len = 1;
   ptr = n->cur;
   switch (type) {
      case TAG_Byte : break;   // single byte: nothing to swap

      case TAG_Short : nbt_swap(ptr, 2); break;
      case TAG_Int : nbt_swap(ptr, 4); break;
      case TAG_Long : nbt_swap(ptr, 8); break;
      case TAG_Float : nbt_swap(ptr, 4); break;
      case TAG_Double: nbt_swap(ptr, 8); break;

      case TAG_Byte_Array:
         // NOTE(review): a NULL 'len' would crash on array types
         *len = nbt_parse_uint32(ptr);
         ptr += 4;
         break;
      case TAG_Int_Array: {
         int i;
         *len = nbt_parse_uint32(ptr);
         ptr += 4;
         // swap each 4-byte element to native order
         for (i=0; i < *len; ++i)
            nbt_swap(ptr + 4*i, 4);
         break;
      }

      default: assert(0); // unhandled case
   }
   // advance the cursor past the payload (length fields were not swapped)
   nbt_skip_raw(n, type);
   return ptr;
}
318
319 static void *nbt_get(nbt *n, unsigned char type, int *len)
320 {
321 assert(n->cur[0] == type);
322 n->cur += 3 + (n->cur[1]*256+n->cur[2]);
323 return nbt_get_fromlist(n, type, len);
324 }
325
326 static void nbt_begin_compound(nbt *n) // start a compound
327 {
328 assert(*n->cur == TAG_Compound);
329 // skip header
330 n->cur += 3 + (n->cur[1]*256 + n->cur[2]);
331 ++n->nesting;
332 }
333
334 static void nbt_begin_compound_in_list(nbt *n) // start a compound
335 {
336 ++n->nesting;
337 }
338
339 static void nbt_end_compound(nbt *n) // end a compound
340 {
341 assert(*n->cur == TAG_End);
342 assert(n->nesting != 0);
343 ++n->cur;
344 --n->nesting;
345 }
346
347 // @TODO no interface to get lists from lists
// @TODO no interface to get lists from lists
// Enter a named list whose elements are of 'type'. Returns the element
// count, or -1 with the cursor UNMOVED when the list's element type
// doesn't match -- the caller is expected to nbt_skip() the tag instead.
static int nbt_begin_list(nbt *n, unsigned char type)
{
   uint32 len;
   unsigned char *ptr;

   // ptr = payload start: skip tag id + 2-byte name length + name bytes
   ptr = n->cur + 3 + (n->cur[1]*256 + n->cur[2]);
   if (ptr[0] != type)
      return -1;
   n->cur = ptr;
   len = nbt_parse_uint32(n->cur+1);
   assert(n->cur[0] == type);
   // @TODO keep a stack with the count to make sure they do it right
   ++n->nesting;
   n->cur += 5;   // element-type byte + 4-byte count
   return (int) len;
}
364
365 static void nbt_end_list(nbt *n)
366 {
367 --n->nesting;
368 }
369
370 // raw_block chunk is 16x256x16x4 = 2^(4+8+4+2) = 256KB
371 //
372 // if we want to process 64x64x256 at a time, that will be:
373 // 4*4*256KB => 4MB per area in raw_block
374 //
375 // (plus we maybe need to decode adjacent regions)
376
377
// Representation produced by minecraft_chunk_parse(): fast_chunk keeps
// pointers into the raw NBT buffer; chunk is fully deblocked (see cave_parse.h).
#ifdef FAST_CHUNK
typedef fast_chunk parse_chunk;
#else
typedef chunk parse_chunk;
#endif
383
// Parse one decompressed Minecraft chunk NBT record.
// 'data' is the raw NBT buffer of 'len' bytes. Under FAST_CHUNK the result
// keeps pointers INTO that buffer and takes ownership of it via
// c->pointer_to_free; otherwise the sections are deblocked into c->rb.
// Returns NULL if no "Level" compound was found (caller frees the buffer).
static parse_chunk *minecraft_chunk_parse(unsigned char *data, size_t len)
{
   char *s;
   parse_chunk *c = NULL;

   nbt n_store, *n = &n_store;
   n->buffer_start = data;
   n->buffer_end = data + len;
   n->cur = n->buffer_start;
   n->nesting = 0;

   nbt_begin_compound(n);   // root compound
   while ((s = nbt_peek(n)) != NULL) {
      if (!strcmp(s, "Level")) {
         int *height;   // NOTE(review): assigned below but never read afterwards
         c = malloc(sizeof(*c));   // NOTE(review): malloc result unchecked
#ifdef FAST_CHUNK
         memset(c, 0, sizeof(*c));
         c->pointer_to_free = data;   // fast_chunk aliases the NBT buffer, so it owns it
#else
         c->rb[15][15][255].block = 0;
#endif
         c->max_y = 0;

         nbt_begin_compound(n);
         while ((s = nbt_peek(n)) != NULL) {
            if (!strcmp(s, "xPos"))
               c->xpos = *(int *) nbt_get(n, TAG_Int, 0);
            else if (!strcmp(s, "zPos"))
               c->zpos = *(int *) nbt_get(n, TAG_Int, 0);
            else if (!strcmp(s, "Sections")) {
               int count = nbt_begin_list(n, TAG_Compound), i;
               if (count == -1) {
                  // this not-a-list case happens in The End and I'm not sure
                  // what it means... possibly one of those silly encodings
                  // where it's not encoded as a list if there's only one?
                  // not worth figuring out
                  nbt_skip(n);
                  count = -1;
               }
               // each section is one 16x16x16 sub-block of the column
               for (i=0; i < count; ++i) {
                  // NOTE(review): yi stays uninitialized if no "Y" tag occurs,
                  // yet assert(yi < 16) below reads it -- assumes valid input.
                  // Also note local 'data' shadows the function parameter.
                  int yi, len;
                  uint8 *light = NULL, *blocks = NULL, *data = NULL, *skylight = NULL;
                  nbt_begin_compound_in_list(n);
                  while ((s = nbt_peek(n)) != NULL) {
                     if (!strcmp(s, "Y"))
                        yi = * (uint8 *) nbt_get(n, TAG_Byte, 0);   // section index (y/16)
                     else if (!strcmp(s, "BlockLight")) {
                        light = nbt_get(n, TAG_Byte_Array, &len);   // 4 bits per block, packed
                        assert(len == 2048);
                     } else if (!strcmp(s, "Blocks")) {
                        blocks = nbt_get(n, TAG_Byte_Array, &len);  // 8 bits per block
                        assert(len == 4096);
                     } else if (!strcmp(s, "Data")) {
                        data = nbt_get(n, TAG_Byte_Array, &len);    // 4 bits per block, packed
                        assert(len == 2048);
                     } else if (!strcmp(s, "SkyLight")) {
                        skylight = nbt_get(n, TAG_Byte_Array, &len);
                        assert(len == 2048);
                     }
                  }
                  nbt_end_compound(n);

                  assert(yi < 16);

#ifndef FAST_CHUNK

                  // clear data below current max_y
                  {
                     int x,z;
                     while (c->max_y < yi*16) {
                        for (x=0; x < 16; ++x)
                           for (z=0; z < 16; ++z)
                              c->rb[z][x][c->max_y].block = 0;
                        ++c->max_y;
                     }
                  }

                  // now assemble the data
                  {
                     // o2 indexes the packed 4-bit arrays, o4 the 8-bit block array
                     int x,y,z, o2=0,o4=0;
                     for (y=0; y < 16; ++y) {
                        for (z=0; z < 16; ++z) {
                           for (x=0; x < 16; x += 2) {
                              raw_block *rb = &c->rb[15-z][x][y + yi*16]; // 15-z because switching to z-up will require flipping an axis
                              rb[0].block = blocks[o4];
                              rb[0].light = light[o2] & 15;
                              rb[0].data = data[o2] & 15;
                              rb[0].skylight = skylight[o2] & 15;

                              // x+1 neighbor is 256 raw_blocks away and uses
                              // the high nibble of each packed 4-bit array
                              rb[256].block = blocks[o4+1];
                              rb[256].light = light[o2] >> 4;
                              rb[256].data = data[o2] >> 4;
                              rb[256].skylight = skylight[o2] >> 4;

                              o2 += 1;
                              o4 += 2;
                           }
                        }
                     }
                     c->max_y += 16;
                  }
#else
                  // FAST_CHUNK: just keep pointers into the NBT buffer
                  c->blockdata[yi] = blocks;
                  c->data     [yi] = data;
                  c->light    [yi] = light;
                  c->skylight [yi] = skylight;
#endif
               }
               //nbt_end_list(n);
            } else if (!strcmp(s, "HeightMap")) {
               // NOTE(review): 'len' here is the size_t function PARAMETER, not
               // the section-local int; passing &len where int* is expected
               // looks like a type mismatch -- verify against cave_parse.h
               height = nbt_get(n, TAG_Int_Array, &len);
               assert(len == 256);
            } else
               nbt_skip(n);
         }
         nbt_end_compound(n);

      } else
         nbt_skip(n);
   }
   nbt_end_compound(n);
   assert(n->cur == n->buffer_end);   // whole record must be consumed
   return c;
}
509
#define MAX_DECODED_CHUNK_X 64
#define MAX_DECODED_CHUNK_Z 64

// One slot of the direct-mapped decoded-chunk cache.
typedef struct
{
   int cx,cz;        // chunk coordinates currently held in this slot
   fast_chunk *fc;   // decoded chunk; NULL when the chunk is absent on disk
   int valid;        // nonzero once the slot has been filled at least once
} decoded_buffer;

static decoded_buffer decoded_buffers[MAX_DECODED_CHUNK_Z][MAX_DECODED_CHUNK_X];
// defined elsewhere; serialize access to the compressed-chunk cache
void lock_chunk_get_mutex(void);
void unlock_chunk_get_mutex(void);
523
524 #ifdef FAST_CHUNK
// Decode chunk (chunk_x, chunk_z) from its region file, bypassing the
// decoded-chunk cache. Returns NULL when the chunk doesn't exist. The
// compressed-chunk cache is shared, so accesses to it are bracketed by the
// chunk-get mutex; a private refcount keeps cc alive during decompression.
fast_chunk *get_decoded_fastchunk_uncached(int chunk_x, int chunk_z)
{
   unsigned char *decoded;
   compressed_chunk *cc;
   int inlen;
   int len;
   fast_chunk *fc;

   lock_chunk_get_mutex();
   cc = get_compressed_chunk(chunk_x, chunk_z);
   if (cc->len != 0)
      ++cc->refcount;   // hold cc across the unlock below
   unlock_chunk_get_mutex();

   if (cc->len == 0)
      return NULL;   // chunk absent from region file

   assert(cc != NULL);   // NOTE(review): cc was already dereferenced above; this can never fire

   // chunk record layout: 4-byte big-endian length, 1-byte compression
   // type (2 == zlib), then the zlib stream
   assert(cc->data[4] == 2);

   inlen = nbt_parse_uint32(cc->data);
   decoded = stbi_zlib_decode_malloc_guesssize(cc->data+5, inlen, inlen*3, &len);
   assert(decoded != NULL);
   assert(len != 0);

   lock_chunk_get_mutex();
   deref_compressed_chunk(cc);
   unlock_chunk_get_mutex();

#ifdef FAST_CHUNK
   fc = minecraft_chunk_parse(decoded, len);
#else
   fc = NULL;   // NOTE(review): dead branch -- this function is only compiled under FAST_CHUNK
#endif
   if (fc == NULL)
      free(decoded);   // parse failed: drop the NBT buffer ourselves
   return fc;
}
564
565
566 decoded_buffer *get_decoded_buffer(int chunk_x, int chunk_z)
567 {
568 decoded_buffer *db = &decoded_buffers[chunk_z&(MAX_DECODED_CHUNK_Z-1)][chunk_x&(MAX_DECODED_CHUNK_X-1)];
569 if (db->valid) {
570 if (db->cx == chunk_x && db->cz == chunk_z)
571 return db;
572 if (db->fc) {
573 free(db->fc->pointer_to_free);
574 free(db->fc);
575 }
576 }
577
578 db->cx = chunk_x;
579 db->cz = chunk_z;
580 db->valid = 1;
581 db->fc = 0;
582
583 {
584 db->fc = get_decoded_fastchunk_uncached(chunk_x, chunk_z);
585 return db;
586 }
587 }
588
589 fast_chunk *get_decoded_fastchunk(int chunk_x, int chunk_z)
590 {
591 decoded_buffer *db = get_decoded_buffer(chunk_x, chunk_z);
592 return db->fc;
593 }
594 #endif
595
596 #ifndef FAST_CHUNK
// Decode chunk (chunk_x, chunk_z) into a fully deblocked 'chunk'.
// Compiled only when FAST_CHUNK is disabled. Returns NULL when the chunk
// is absent; otherwise the caller owns the returned chunk.
chunk *get_decoded_chunk_raw(int chunk_x, int chunk_z)
{
   unsigned char *decoded;
   compressed_chunk *cc = get_compressed_chunk(chunk_x, chunk_z);
   assert(cc != NULL);
   if (cc->len == 0)
      return NULL;   // chunk not present in its region file
   else {
      chunk *ch;
      // record = 4-byte big-endian length, 1 byte compression type (2 = zlib)
      int inlen = nbt_parse_uint32(cc->data);
      int len;
      assert(cc->data[4] == 2);
      decoded = stbi_zlib_decode_malloc_guesssize(cc->data+5, inlen, inlen*3, &len);
      assert(decoded != NULL);
#ifdef FAST_CHUNK
      ch = NULL;   // NOTE(review): dead branch -- this function is compiled only ifndef FAST_CHUNK
#else
      ch = minecraft_chunk_parse(decoded, len);
#endif
      // the deblocked parse copies everything out, so the NBT buffer can go
      free(decoded);
      return ch;
   }
}
620
621 static chunk *decoded_chunks[MAX_DECODED_CHUNK_Z][MAX_DECODED_CHUNK_X];
622 chunk *get_decoded_chunk(int chunk_x, int chunk_z)
623 {
624 chunk *c = decoded_chunks[chunk_z&(MAX_DECODED_CHUNK_Z-1)][chunk_x&(MAX_DECODED_CHUNK_X-1)];
625 if (c && c->xpos == chunk_x && c->zpos == chunk_z)
626 return c;
627 if (c) free(c);
628 c = get_decoded_chunk_raw(chunk_x, chunk_z);
629 decoded_chunks[chunk_z&(MAX_DECODED_CHUNK_Z-1)][chunk_x&(MAX_DECODED_CHUNK_X-1)] = c;
630 return c;
631 }
632 #endif