// stb_connected_components - v0.95 - public domain connected components on grids
// http://github.com/nothings/stb
//
// Finds connected components on 2D grids for testing reachability between
// two points, with fast updates when changing reachability (e.g. on one machine
// it was typically 0.2ms w/ 1024x1024 grid). Each grid square must be "open" or
// "closed" (traversable or untraversable), and grid squares are only connected
// to their orthogonal neighbors, not diagonally.
//
// In one source file, create the implementation by doing something like this:
//
//      #define STBCC_GRID_COUNT_X_LOG2    10
//      #define STBCC_GRID_COUNT_Y_LOG2    10
//      #define STB_CONNECTED_COMPONENTS_IMPLEMENTATION
//      #include "stb_connected_components.h"
//
// The above creates an implementation that can run on maps up to 1024x1024.
// Map sizes must be a multiple of (1<<(LOG2/2)) on each axis (e.g. 32 if LOG2=10,
// 16 if LOG2=8, etc.) (You can just pad your map with untraversable space.)
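//
// A minimal usage sketch (illustrative only; "my_map" and its 1024x1024 size
// are placeholders, and allocation error handling is omitted):
//
//      unsigned char my_map[1024*1024];   // 0 = traversable, non-0 = solid
//      stbcc_grid *grid = (stbcc_grid *) malloc(stbcc_grid_sizeof());
//      stbcc_init_grid(grid, my_map, 1024, 1024);
//      stbcc_update_grid(grid, 10, 20, 1);   // make square (10,20) solid
//      if (stbcc_query_grid_node_connection(grid, 0,0, 1023,1023))
//         ; // the two corners are mutually reachable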
//
// MEMORY USAGE
//
//   Uses about 6-7 bytes per grid square (e.g. 7MB for a 1024x1024 grid).
//   Uses a single worst-case allocation which you pass in.
//
// PERFORMANCE
//
//   On a core i7-2700K at 3.5 Ghz, for a particular 1024x1024 map (map_03.png):
//
//      Creating map                    : 44.85 ms
//      Making one square traversable   :  0.27 ms    (average over 29,448 calls)
//      Making one square untraversable :  0.23 ms    (average over 30,123 calls)
//      Reachability query              :  0.00001 ms (average over 4,000,000 calls)
//
//   On non-degenerate maps update time is O(N^0.5), but on degenerate maps like
//   checkerboards or 50% random, update time is O(N^0.75) (~2ms on above machine).
//
// CHANGELOG
//
//   0.95  (2016-10-16)  Bugfix if multiple clumps in one cluster connect to same clump in another
//   0.94  (2016-04-17)  Bugfix & optimize worst case (checkerboard & random)
//   0.93  (2016-04-16)  Reduce memory by 10x for 1Kx1K map; small speedup
//   0.92  (2016-04-16)  Compute sqrt(N) cluster size by default
//   0.91  (2016-04-15)  Initial release
//
// TODO:
//   - better API documentation
//   - more comments
//   - try re-integrating naive algorithm & compare performance
//   - more optimized batching (current approach still recomputes local clumps many times)
//   - function for setting a grid of squares at once (just use batching)
//
// LICENSE
//
//   This software is dual-licensed to the public domain and under the following
//   license: you are granted a perpetual, irrevocable license to copy, modify,
//   publish, and distribute this file as you see fit.
//
// ALGORITHM
//
//   The NxN grid map is split into sqrt(N) x sqrt(N) blocks called
//   "clusters". Each cluster independently computes a set of connected
//   components within that cluster (ignoring all connectivity out of
//   that cluster) using a union-find disjoint set forest. This produces a bunch
//   of locally connected components called "clumps". Each clump is (a) connected
//   within its cluster, (b) does not directly connect to any other clumps in the
//   cluster (though it may connect to them by paths that lead outside the cluster,
//   but those are ignored at this step), and (c) maintains an adjacency list of
//   all clumps in adjacent clusters that it _is_ connected to. Then a second
//   union-find disjoint set forest is used to compute connected clumps
//   globally, across the whole map. Reachability is then computed by
//   finding which clump each input point belongs to, and checking whether
//   those clumps are in the same "global" connected component.
//
//   The above data structure can be updated efficiently; on a change
//   of a single grid square on the map, only one cluster changes its
//   purely-local state, so only one cluster needs its clumps fully
//   recomputed. Clumps in adjacent clusters need their adjacency lists
//   updated: first to remove all references to the old clumps in the
//   rebuilt cluster, then to add new references to the new clumps. Both
//   of these operations can use the existing "find which clump each input
//   point belongs to" query to compute that adjacency information rapidly.
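//
//   Sketched in rough pseudocode (the actual implementation is
//   stbcc_query_grid_node_connection() below), a reachability query is:
//
//      clump a = clump_for_node(x1,y1), b = clump_for_node(x2,y2)
//      if either square is closed, the points are not connected
//      otherwise they are connected iff a.global_label == b.global_label
//      (global labels are flattened after every rebuild, so no union-find
//      traversal is needed at query time)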

#ifndef INCLUDE_STB_CONNECTED_COMPONENTS_H
#define INCLUDE_STB_CONNECTED_COMPONENTS_H

#include <stdlib.h>

typedef struct st_stbcc_grid stbcc_grid;

#ifdef __cplusplus
extern "C" {
#endif

//////////////////////////////////////////////////////////////////////////////////////////
//
// initialization
//

// you allocate the grid data structure to this size (note that it will be very big!!!)
extern size_t stbcc_grid_sizeof(void);

// initialize the grid, value of map[] is 0 = traversable, non-0 is solid
extern void stbcc_init_grid(stbcc_grid *g, unsigned char *map, int w, int h);


//////////////////////////////////////////////////////////////////////////////////////////
//
// main functionality
//

// update a grid square state, 0 = traversable, non-0 is solid
// i can add a batch-update if it's needed
extern void stbcc_update_grid(stbcc_grid *g, int x, int y, int solid);

// query if two grid squares are reachable from each other
extern int stbcc_query_grid_node_connection(stbcc_grid *g, int x1, int y1, int x2, int y2);


//////////////////////////////////////////////////////////////////////////////////////////
//
// bonus functions
//
// wrap multiple stbcc_update_grid calls in these functions to compute
// multiple updates more efficiently; cannot make queries inside batch
extern void stbcc_update_batch_begin(stbcc_grid *g);
extern void stbcc_update_batch_end(stbcc_grid *g);
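// e.g., a batched update might look like this (illustrative sketch; 'pts'
// and 'n' are placeholders for your own data):
//
//      stbcc_update_batch_begin(g);
//      for (i=0; i < n; ++i)
//         stbcc_update_grid(g, pts[i].x, pts[i].y, 1);
//      stbcc_update_batch_end(g);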

// query the grid data structure for whether a given square is open or not
extern int stbcc_query_grid_open(stbcc_grid *g, int x, int y);

// get a unique id for the connected component this is in; it's not necessarily
// small, you'll need a hash table or something to remap it (or just use it as an opaque id)
extern unsigned int stbcc_get_unique_id(stbcc_grid *g, int x, int y);
#define STBCC_NULL_UNIQUE_ID 0xffffffff // returned for closed map squares

#ifdef __cplusplus
}
#endif

#endif // INCLUDE_STB_CONNECTED_COMPONENTS_H

#ifdef STB_CONNECTED_COMPONENTS_IMPLEMENTATION

#include <assert.h>
#include <string.h> // memset

#if !defined(STBCC_GRID_COUNT_X_LOG2) || !defined(STBCC_GRID_COUNT_Y_LOG2)
   #error "You must define STBCC_GRID_COUNT_X_LOG2 and STBCC_GRID_COUNT_Y_LOG2 to define the max grid supported."
#endif

#define STBCC__GRID_COUNT_X   (1 << STBCC_GRID_COUNT_X_LOG2)
#define STBCC__GRID_COUNT_Y   (1 << STBCC_GRID_COUNT_Y_LOG2)

#define STBCC__MAP_STRIDE     (1 << (STBCC_GRID_COUNT_X_LOG2-3))

#ifndef STBCC_CLUSTER_SIZE_X_LOG2
   #define STBCC_CLUSTER_SIZE_X_LOG2   (STBCC_GRID_COUNT_X_LOG2/2) // log2(sqrt(2^N)) = 1/2 * log2(2^N) = 1/2 * N
   #if STBCC_CLUSTER_SIZE_X_LOG2 > 6
      #undef STBCC_CLUSTER_SIZE_X_LOG2
      #define STBCC_CLUSTER_SIZE_X_LOG2 6
   #endif
#endif

#ifndef STBCC_CLUSTER_SIZE_Y_LOG2
   #define STBCC_CLUSTER_SIZE_Y_LOG2   (STBCC_GRID_COUNT_Y_LOG2/2)
   #if STBCC_CLUSTER_SIZE_Y_LOG2 > 6
      #undef STBCC_CLUSTER_SIZE_Y_LOG2
      #define STBCC_CLUSTER_SIZE_Y_LOG2 6
   #endif
#endif

#define STBCC__CLUSTER_SIZE_X   (1 << STBCC_CLUSTER_SIZE_X_LOG2)
#define STBCC__CLUSTER_SIZE_Y   (1 << STBCC_CLUSTER_SIZE_Y_LOG2)

#define STBCC__CLUSTER_COUNT_X_LOG2   (STBCC_GRID_COUNT_X_LOG2 - STBCC_CLUSTER_SIZE_X_LOG2)
#define STBCC__CLUSTER_COUNT_Y_LOG2   (STBCC_GRID_COUNT_Y_LOG2 - STBCC_CLUSTER_SIZE_Y_LOG2)

#define STBCC__CLUSTER_COUNT_X   (1 << STBCC__CLUSTER_COUNT_X_LOG2)
#define STBCC__CLUSTER_COUNT_Y   (1 << STBCC__CLUSTER_COUNT_Y_LOG2)

#if STBCC__CLUSTER_SIZE_X >= STBCC__GRID_COUNT_X || STBCC__CLUSTER_SIZE_Y >= STBCC__GRID_COUNT_Y
   #error "STBCC_CLUSTER_SIZE_X/Y_LOG2 must be smaller than STBCC_GRID_COUNT_X/Y_LOG2"
#endif

// worst case # of clumps per cluster
#define STBCC__MAX_CLUMPS_PER_CLUSTER_LOG2   (STBCC_CLUSTER_SIZE_X_LOG2 + STBCC_CLUSTER_SIZE_Y_LOG2-1)
#define STBCC__MAX_CLUMPS_PER_CLUSTER        (1 << STBCC__MAX_CLUMPS_PER_CLUSTER_LOG2)
#define STBCC__MAX_CLUMPS                    (STBCC__MAX_CLUMPS_PER_CLUSTER * STBCC__CLUSTER_COUNT_X * STBCC__CLUSTER_COUNT_Y)
#define STBCC__NULL_CLUMPID                  STBCC__MAX_CLUMPS_PER_CLUSTER

#define STBCC__CLUSTER_X_FOR_COORD_X(x) ((x) >> STBCC_CLUSTER_SIZE_X_LOG2)
#define STBCC__CLUSTER_Y_FOR_COORD_Y(y) ((y) >> STBCC_CLUSTER_SIZE_Y_LOG2)

#define STBCC__MAP_BYTE_MASK(x,y)   (1 << ((x) & 7))
#define STBCC__MAP_BYTE(g,x,y)      ((g)->map[y][(x) >> 3])
#define STBCC__MAP_OPEN(g,x,y)      (STBCC__MAP_BYTE(g,x,y) & STBCC__MAP_BYTE_MASK(x,y))

typedef unsigned short stbcc__clumpid;
typedef unsigned char stbcc__verify_max_clumps[STBCC__MAX_CLUMPS_PER_CLUSTER < (1 << (8*sizeof(stbcc__clumpid))) ? 1 : -1];

#define STBCC__MAX_EXITS_PER_CLUSTER   (STBCC__CLUSTER_SIZE_X + STBCC__CLUSTER_SIZE_Y)   // 64 for 32x32
#define STBCC__MAX_EXITS_PER_CLUMP     (STBCC__CLUSTER_SIZE_X + STBCC__CLUSTER_SIZE_Y)   // 64 for 32x32
#define STBCC__MAX_EDGE_CLUMPS_PER_CLUSTER   (STBCC__MAX_EXITS_PER_CLUMP)

// 2^19 * 2^6 => 2^25 exits => 2^26 => 64MB for 1024x1024

// Logic for above on 4x4 grid:
//
//   Many clumps:     One clump:
//     + +              +  +
//    +X.X.            +XX.X+
//     .X.X+            .XXX
//    +X.X.             XXX.
//     .X.X+           +X.XX+
//     + +              +  +
//
//   8 exits either way

typedef unsigned char stbcc__verify_max_exits[STBCC__MAX_EXITS_PER_CLUMP <= 256 ? 1 : -1];

typedef struct
{
   unsigned short clump_index:12;
   signed   short cluster_dx:2;
   signed   short cluster_dy:2;
} stbcc__relative_clumpid;

typedef union
{
   struct {
      unsigned int clump_index:12;
      unsigned int cluster_x:10;
      unsigned int cluster_y:10;
   } f;
   unsigned int c;
} stbcc__global_clumpid;

// rebuilt cluster 3,4

// what changes in cluster 2,4

typedef struct
{
   stbcc__global_clumpid global_label;        // 4
   unsigned char num_adjacent;                // 1
   unsigned char max_adjacent;                // 1
   unsigned char adjacent_clump_list_index;   // 1
   unsigned char reserved;
} stbcc__clump; // 8

#define STBCC__CLUSTER_ADJACENCY_COUNT (STBCC__MAX_EXITS_PER_CLUSTER*2)
typedef struct
{
   short num_clumps;
   unsigned char num_edge_clumps;
   unsigned char rebuild_adjacency;
   stbcc__clump clump[STBCC__MAX_CLUMPS_PER_CLUSTER];   // 8 * 2^9 = 4KB
   stbcc__relative_clumpid adjacency_storage[STBCC__CLUSTER_ADJACENCY_COUNT]; // 256 bytes
} stbcc__cluster;

struct st_stbcc_grid
{
   int w,h,cw,ch;
   int in_batched_update;
   //unsigned char cluster_dirty[STBCC__CLUSTER_COUNT_Y][STBCC__CLUSTER_COUNT_X]; // could bitpack, but: 1K x 1K => 1KB
   unsigned char map[STBCC__GRID_COUNT_Y][STBCC__MAP_STRIDE]; // 1K x 1K => 1K x 128 => 128KB
   stbcc__clumpid clump_for_node[STBCC__GRID_COUNT_Y][STBCC__GRID_COUNT_X]; // 1K x 1K x 2 = 2MB
   stbcc__cluster cluster[STBCC__CLUSTER_COUNT_Y][STBCC__CLUSTER_COUNT_X];  // 1K x 4.5KB = 4.5MB
};

int stbcc_query_grid_node_connection(stbcc_grid *g, int x1, int y1, int x2, int y2)
{
   stbcc__global_clumpid label1, label2;
   stbcc__clumpid c1 = g->clump_for_node[y1][x1];
   stbcc__clumpid c2 = g->clump_for_node[y2][x2];
   int cx1 = STBCC__CLUSTER_X_FOR_COORD_X(x1);
   int cy1 = STBCC__CLUSTER_Y_FOR_COORD_Y(y1);
   int cx2 = STBCC__CLUSTER_X_FOR_COORD_X(x2);
   int cy2 = STBCC__CLUSTER_Y_FOR_COORD_Y(y2);
   assert(!g->in_batched_update);
   if (c1 == STBCC__NULL_CLUMPID || c2 == STBCC__NULL_CLUMPID)
      return 0;
   label1 = g->cluster[cy1][cx1].clump[c1].global_label;
   label2 = g->cluster[cy2][cx2].clump[c2].global_label;
   if (label1.c == label2.c)
      return 1;
   return 0;
}

int stbcc_query_grid_open(stbcc_grid *g, int x, int y)
{
   return STBCC__MAP_OPEN(g, x, y) != 0;
}

unsigned int stbcc_get_unique_id(stbcc_grid *g, int x, int y)
{
   stbcc__clumpid c = g->clump_for_node[y][x];
   int cx = STBCC__CLUSTER_X_FOR_COORD_X(x);
   int cy = STBCC__CLUSTER_Y_FOR_COORD_Y(y);
   assert(!g->in_batched_update);
   if (c == STBCC__NULL_CLUMPID) return STBCC_NULL_UNIQUE_ID;
   return g->cluster[cy][cx].clump[c].global_label.c;
}

typedef struct
{
   unsigned char x,y;
} stbcc__tinypoint;

typedef struct
{
   stbcc__tinypoint parent[STBCC__CLUSTER_SIZE_Y][STBCC__CLUSTER_SIZE_X]; // 32x32 => 2KB
   stbcc__clumpid   label[STBCC__CLUSTER_SIZE_Y][STBCC__CLUSTER_SIZE_X];
} stbcc__cluster_build_info;

static void stbcc__build_clumps_for_cluster(stbcc_grid *g, int cx, int cy);
static void stbcc__remove_connections_to_adjacent_cluster(stbcc_grid *g, int cx, int cy, int dx, int dy);
static void stbcc__add_connections_to_adjacent_cluster(stbcc_grid *g, int cx, int cy, int dx, int dy);

static stbcc__global_clumpid stbcc__clump_find(stbcc_grid *g, stbcc__global_clumpid n)
{
   stbcc__global_clumpid q;
   stbcc__clump *c = &g->cluster[n.f.cluster_y][n.f.cluster_x].clump[n.f.clump_index];

   if (c->global_label.c == n.c)
      return n;

   q = stbcc__clump_find(g, c->global_label);
   c->global_label = q;
   return q;
}

typedef struct
{
   unsigned int cluster_x;
   unsigned int cluster_y;
   unsigned int clump_index;
} stbcc__unpacked_clumpid;

static void stbcc__clump_union(stbcc_grid *g, stbcc__unpacked_clumpid m, int x, int y, int idx)
{
   stbcc__clump *mc = &g->cluster[m.cluster_y][m.cluster_x].clump[m.clump_index];
   stbcc__clump *nc = &g->cluster[y][x].clump[idx];
   stbcc__global_clumpid mp = stbcc__clump_find(g, mc->global_label);
   stbcc__global_clumpid np = stbcc__clump_find(g, nc->global_label);

   if (mp.c == np.c)
      return;

   g->cluster[mp.f.cluster_y][mp.f.cluster_x].clump[mp.f.clump_index].global_label = np;
}

static void stbcc__build_connected_components_for_clumps(stbcc_grid *g)
{
   int i,j,k,h;

   for (j=0; j < STBCC__CLUSTER_COUNT_Y; ++j) {
      for (i=0; i < STBCC__CLUSTER_COUNT_X; ++i) {
         stbcc__cluster *cluster = &g->cluster[j][i];
         for (k=0; k < (int) cluster->num_edge_clumps; ++k) {
            stbcc__global_clumpid m;
            m.f.clump_index = k;
            m.f.cluster_x = i;
            m.f.cluster_y = j;
            assert((int) m.f.clump_index == k && (int) m.f.cluster_x == i && (int) m.f.cluster_y == j);
            cluster->clump[k].global_label = m;
         }
      }
   }

   for (j=0; j < STBCC__CLUSTER_COUNT_Y; ++j) {
      for (i=0; i < STBCC__CLUSTER_COUNT_X; ++i) {
         stbcc__cluster *cluster = &g->cluster[j][i];
         for (k=0; k < (int) cluster->num_edge_clumps; ++k) {
            stbcc__clump *clump = &cluster->clump[k];
            stbcc__unpacked_clumpid m;
            stbcc__relative_clumpid *adj;
            m.clump_index = k;
            m.cluster_x = i;
            m.cluster_y = j;
            adj = &cluster->adjacency_storage[clump->adjacent_clump_list_index];
            for (h=0; h < clump->num_adjacent; ++h) {
               unsigned int clump_index = adj[h].clump_index;
               unsigned int x = adj[h].cluster_dx + i;
               unsigned int y = adj[h].cluster_dy + j;
               stbcc__clump_union(g, m, x, y, clump_index);
            }
         }
      }
   }

   for (j=0; j < STBCC__CLUSTER_COUNT_Y; ++j) {
      for (i=0; i < STBCC__CLUSTER_COUNT_X; ++i) {
         stbcc__cluster *cluster = &g->cluster[j][i];
         for (k=0; k < (int) cluster->num_edge_clumps; ++k) {
            stbcc__global_clumpid m;
            m.f.clump_index = k;
            m.f.cluster_x = i;
            m.f.cluster_y = j;
            stbcc__clump_find(g, m);
         }
      }
   }
}

static void stbcc__build_all_connections_for_cluster(stbcc_grid *g, int cx, int cy)
{
   // in this particular case, we are fully non-incremental. that means we
   // can discover the correct sizes for the arrays, but requires we build
   // the data into temporary data structures, or just count the sizes, so
   // for simplicity we do the latter
   stbcc__cluster *cluster = &g->cluster[cy][cx];
   unsigned char connected[STBCC__MAX_EDGE_CLUMPS_PER_CLUSTER][STBCC__MAX_EDGE_CLUMPS_PER_CLUSTER/8]; // 64 x 8 => 512 bytes
   unsigned char num_adj[STBCC__MAX_CLUMPS_PER_CLUSTER] = { 0 };
   int x = cx * STBCC__CLUSTER_SIZE_X;
   int y = cy * STBCC__CLUSTER_SIZE_Y;
   int step_x, step_y=0, i, j, k, n, m, dx, dy, total;
   int extra;

   g->cluster[cy][cx].rebuild_adjacency = 0;

   total = 0;
   for (m=0; m < 4; ++m) {
      switch (m) {
         case 0:
            dx = 1, dy = 0;
            step_x = 0, step_y = 1;
            i = STBCC__CLUSTER_SIZE_X-1;
            j = 0;
            n = STBCC__CLUSTER_SIZE_Y;
            break;
         case 1:
            dx = -1, dy = 0;
            i = 0;
            j = 0;
            step_x = 0;
            step_y = 1;
            n = STBCC__CLUSTER_SIZE_Y;
            break;
         case 2:
            dy = -1, dx = 0;
            i = 0;
            j = 0;
            step_x = 1;
            step_y = 0;
            n = STBCC__CLUSTER_SIZE_X;
            break;
         case 3:
            dy = 1, dx = 0;
            i = 0;
            j = STBCC__CLUSTER_SIZE_Y-1;
            step_x = 1;
            step_y = 0;
            n = STBCC__CLUSTER_SIZE_X;
            break;
      }

      if (cx+dx < 0 || cx+dx >= g->cw || cy+dy < 0 || cy+dy >= g->ch)
         continue;

      memset(connected, 0, sizeof(connected));
      for (k=0; k < n; ++k) {
         if (STBCC__MAP_OPEN(g, x+i, y+j) && STBCC__MAP_OPEN(g, x+i+dx, y+j+dy)) {
            stbcc__clumpid src = g->clump_for_node[y+j][x+i];
            stbcc__clumpid dest = g->clump_for_node[y+j+dy][x+i+dx];
            if (0 == (connected[src][dest>>3] & (1 << (dest & 7)))) {
               connected[src][dest>>3] |= 1 << (dest & 7);
               ++num_adj[src];
               ++total;
            }
         }
         i += step_x;
         j += step_y;
      }
   }

   assert(total <= STBCC__CLUSTER_ADJACENCY_COUNT);

   // decide how to apportion unused adjacency slots; only clumps that lie
   // on the edges of the cluster need adjacency slots, so divide them up
   // evenly between those clumps

   // we want:
   //    extra = (STBCC__CLUSTER_ADJACENCY_COUNT - total) / cluster->num_edge_clumps;
   // but we efficiently approximate this without a divide, because
   // ignoring edge-vs-non-edge with 'num_adj[i]*2' was faster than
   // 'num_adj[i]+extra' with the divide
   if (total + (cluster->num_edge_clumps<<2) <= STBCC__CLUSTER_ADJACENCY_COUNT)
      extra = 4;
   else if (total + (cluster->num_edge_clumps<<1) <= STBCC__CLUSTER_ADJACENCY_COUNT)
      extra = 2;
   else if (total + (cluster->num_edge_clumps<<0) <= STBCC__CLUSTER_ADJACENCY_COUNT)
      extra = 1;
   else
      extra = 0;
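
   // e.g. (illustrative numbers, not from a real run): for 32x32 clusters
   // STBCC__CLUSTER_ADJACENCY_COUNT is 128; with total==40 used slots and
   // 20 edge clumps, 40 + (20<<2) = 120 <= 128, so each edge clump gets
   // extra = 4 spare slots.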

   total = 0;
   for (i=0; i < (int) cluster->num_edge_clumps; ++i) {
      int alloc = num_adj[i]+extra;
      if (alloc > STBCC__MAX_EXITS_PER_CLUSTER)
         alloc = STBCC__MAX_EXITS_PER_CLUSTER;
      assert(total < 256); // must fit in byte
      cluster->clump[i].adjacent_clump_list_index = (unsigned char) total;
      cluster->clump[i].max_adjacent = alloc;
      cluster->clump[i].num_adjacent = 0;
      total += alloc;
   }
   assert(total <= STBCC__CLUSTER_ADJACENCY_COUNT);

   stbcc__add_connections_to_adjacent_cluster(g, cx, cy, -1, 0);
   stbcc__add_connections_to_adjacent_cluster(g, cx, cy,  1, 0);
   stbcc__add_connections_to_adjacent_cluster(g, cx, cy,  0,-1);
   stbcc__add_connections_to_adjacent_cluster(g, cx, cy,  0, 1);
   // make sure all of the above succeeded.
   assert(g->cluster[cy][cx].rebuild_adjacency == 0);
}

static void stbcc__add_connections_to_adjacent_cluster_with_rebuild(stbcc_grid *g, int cx, int cy, int dx, int dy)
{
   if (cx >= 0 && cx < g->cw && cy >= 0 && cy < g->ch) {
      stbcc__add_connections_to_adjacent_cluster(g, cx, cy, dx, dy);
      if (g->cluster[cy][cx].rebuild_adjacency)
         stbcc__build_all_connections_for_cluster(g, cx, cy);
   }
}

void stbcc_update_grid(stbcc_grid *g, int x, int y, int solid)
{
   int cx,cy;

   if (!solid) {
      if (STBCC__MAP_OPEN(g,x,y))
         return;
   } else {
      if (!STBCC__MAP_OPEN(g,x,y))
         return;
   }

   cx = STBCC__CLUSTER_X_FOR_COORD_X(x);
   cy = STBCC__CLUSTER_Y_FOR_COORD_Y(y);

   stbcc__remove_connections_to_adjacent_cluster(g, cx-1, cy,  1, 0);
   stbcc__remove_connections_to_adjacent_cluster(g, cx+1, cy, -1, 0);
   stbcc__remove_connections_to_adjacent_cluster(g, cx, cy-1,  0, 1);
   stbcc__remove_connections_to_adjacent_cluster(g, cx, cy+1,  0,-1);

   if (!solid)
      STBCC__MAP_BYTE(g,x,y) |= STBCC__MAP_BYTE_MASK(x,y);
   else
      STBCC__MAP_BYTE(g,x,y) &= ~STBCC__MAP_BYTE_MASK(x,y);

   stbcc__build_clumps_for_cluster(g, cx, cy);
   stbcc__build_all_connections_for_cluster(g, cx, cy);

   stbcc__add_connections_to_adjacent_cluster_with_rebuild(g, cx-1, cy,  1, 0);
   stbcc__add_connections_to_adjacent_cluster_with_rebuild(g, cx+1, cy, -1, 0);
   stbcc__add_connections_to_adjacent_cluster_with_rebuild(g, cx, cy-1,  0, 1);
   stbcc__add_connections_to_adjacent_cluster_with_rebuild(g, cx, cy+1,  0,-1);

   if (!g->in_batched_update)
      stbcc__build_connected_components_for_clumps(g);
   #if 0
   else
      g->cluster_dirty[cy][cx] = 1;
   #endif
}

void stbcc_update_batch_begin(stbcc_grid *g)
{
   assert(!g->in_batched_update);
   g->in_batched_update = 1;
}

void stbcc_update_batch_end(stbcc_grid *g)
{
   assert(g->in_batched_update);
   g->in_batched_update = 0;
   stbcc__build_connected_components_for_clumps(g); // @OPTIMIZE: only do this if update was non-empty
}

size_t stbcc_grid_sizeof(void)
{
   return sizeof(stbcc_grid);
}

void stbcc_init_grid(stbcc_grid *g, unsigned char *map, int w, int h)
{
   int i,j,k;
   assert(w % STBCC__CLUSTER_SIZE_X == 0);
   assert(h % STBCC__CLUSTER_SIZE_Y == 0);
   assert(w % 8 == 0);

   g->w = w;
   g->h = h;
   g->cw = w >> STBCC_CLUSTER_SIZE_X_LOG2;
   g->ch = h >> STBCC_CLUSTER_SIZE_Y_LOG2;
   g->in_batched_update = 0;

   #if 0
   for (j=0; j < STBCC__CLUSTER_COUNT_Y; ++j)
      for (i=0; i < STBCC__CLUSTER_COUNT_X; ++i)
         g->cluster_dirty[j][i] = 0;
   #endif

   for (j=0; j < h; ++j) {
      for (i=0; i < w; i += 8) {
         unsigned char c = 0;
         for (k=0; k < 8; ++k)
            if (map[j*w + (i+k)] == 0)
               c |= (1 << k);
         g->map[j][i>>3] = c;
      }
   }

   for (j=0; j < g->ch; ++j)
      for (i=0; i < g->cw; ++i)
         stbcc__build_clumps_for_cluster(g, i, j);

   for (j=0; j < g->ch; ++j)
      for (i=0; i < g->cw; ++i)
         stbcc__build_all_connections_for_cluster(g, i, j);

   stbcc__build_connected_components_for_clumps(g);

   for (j=0; j < g->h; ++j)
      for (i=0; i < g->w; ++i)
         assert(g->clump_for_node[j][i] <= STBCC__NULL_CLUMPID);
}


static void stbcc__add_clump_connection(stbcc_grid *g, int x1, int y1, int x2, int y2)
{
   stbcc__cluster *cluster;
   stbcc__clump *clump;

   int cx1 = STBCC__CLUSTER_X_FOR_COORD_X(x1);
   int cy1 = STBCC__CLUSTER_Y_FOR_COORD_Y(y1);
   int cx2 = STBCC__CLUSTER_X_FOR_COORD_X(x2);
   int cy2 = STBCC__CLUSTER_Y_FOR_COORD_Y(y2);

   stbcc__clumpid c1 = g->clump_for_node[y1][x1];
   stbcc__clumpid c2 = g->clump_for_node[y2][x2];

   stbcc__relative_clumpid rc;

   assert(cx1 != cx2 || cy1 != cy2);
   assert(abs(cx1-cx2) + abs(cy1-cy2) == 1);

   // add connection to c2 in c1

   rc.clump_index = c2;
   rc.cluster_dx = x2-x1;
   rc.cluster_dy = y2-y1;

   cluster = &g->cluster[cy1][cx1];
   clump = &cluster->clump[c1];
   assert(clump->num_adjacent <= clump->max_adjacent);
   if (clump->num_adjacent == clump->max_adjacent)
      g->cluster[cy1][cx1].rebuild_adjacency = 1;
   else {
      stbcc__relative_clumpid *adj = &cluster->adjacency_storage[clump->adjacent_clump_list_index];
      assert(clump->num_adjacent < STBCC__MAX_EXITS_PER_CLUMP);
      assert(clump->adjacent_clump_list_index + clump->num_adjacent <= STBCC__CLUSTER_ADJACENCY_COUNT);
      adj[clump->num_adjacent++] = rc;
   }
}

static void stbcc__remove_clump_connection(stbcc_grid *g, int x1, int y1, int x2, int y2)
{
   stbcc__cluster *cluster;
   stbcc__clump *clump;
   stbcc__relative_clumpid *adj;
   int i;

   int cx1 = STBCC__CLUSTER_X_FOR_COORD_X(x1);
   int cy1 = STBCC__CLUSTER_Y_FOR_COORD_Y(y1);
   int cx2 = STBCC__CLUSTER_X_FOR_COORD_X(x2);
   int cy2 = STBCC__CLUSTER_Y_FOR_COORD_Y(y2);

   stbcc__clumpid c1 = g->clump_for_node[y1][x1];
   stbcc__clumpid c2 = g->clump_for_node[y2][x2];

   stbcc__relative_clumpid rc;

   assert(cx1 != cx2 || cy1 != cy2);
   assert(abs(cx1-cx2) + abs(cy1-cy2) == 1);

   // remove connection to c2 in c1

   rc.clump_index = c2;
   rc.cluster_dx = x2-x1;
   rc.cluster_dy = y2-y1;

   cluster = &g->cluster[cy1][cx1];
   clump = &cluster->clump[c1];
   adj = &cluster->adjacency_storage[clump->adjacent_clump_list_index];

   for (i=0; i < clump->num_adjacent; ++i)
      if (rc.clump_index == adj[i].clump_index &&
          rc.cluster_dx  == adj[i].cluster_dx  &&
          rc.cluster_dy  == adj[i].cluster_dy)
         break;

   if (i < clump->num_adjacent)
      adj[i] = adj[--clump->num_adjacent];
   else
      assert(0);
}

static void stbcc__add_connections_to_adjacent_cluster(stbcc_grid *g, int cx, int cy, int dx, int dy)
{
   unsigned char connected[STBCC__MAX_EDGE_CLUMPS_PER_CLUSTER][STBCC__MAX_EDGE_CLUMPS_PER_CLUSTER/8] = { 0 };
   int x = cx * STBCC__CLUSTER_SIZE_X;
   int y = cy * STBCC__CLUSTER_SIZE_Y;
   int step_x, step_y=0, i, j, k, n;

   if (cx < 0 || cx >= g->cw || cy < 0 || cy >= g->ch)
      return;

   if (cx+dx < 0 || cx+dx >= g->cw || cy+dy < 0 || cy+dy >= g->ch)
      return;

   if (g->cluster[cy][cx].rebuild_adjacency)
      return;

   assert(abs(dx) + abs(dy) == 1);

   if (dx == 1) {
      i = STBCC__CLUSTER_SIZE_X-1;
      j = 0;
      step_x = 0;
      step_y = 1;
      n = STBCC__CLUSTER_SIZE_Y;
   } else if (dx == -1) {
      i = 0;
      j = 0;
      step_x = 0;
      step_y = 1;
      n = STBCC__CLUSTER_SIZE_Y;
   } else if (dy == -1) {
      i = 0;
      j = 0;
      step_x = 1;
      step_y = 0;
      n = STBCC__CLUSTER_SIZE_X;
   } else if (dy == 1) {
      i = 0;
      j = STBCC__CLUSTER_SIZE_Y-1;
      step_x = 1;
      step_y = 0;
      n = STBCC__CLUSTER_SIZE_X;
   } else {
      assert(0);
   }

   for (k=0; k < n; ++k) {
      if (STBCC__MAP_OPEN(g, x+i, y+j) && STBCC__MAP_OPEN(g, x+i+dx, y+j+dy)) {
         stbcc__clumpid src = g->clump_for_node[y+j][x+i];
         stbcc__clumpid dest = g->clump_for_node[y+j+dy][x+i+dx];
         if (0 == (connected[src][dest>>3] & (1 << (dest & 7)))) {
            assert((dest>>3) < sizeof(connected[0]));
            connected[src][dest>>3] |= 1 << (dest & 7);
            stbcc__add_clump_connection(g, x+i, y+j, x+i+dx, y+j+dy);
            if (g->cluster[cy][cx].rebuild_adjacency)
               break;
         }
      }
      i += step_x;
      j += step_y;
   }
}

static void stbcc__remove_connections_to_adjacent_cluster(stbcc_grid *g, int cx, int cy, int dx, int dy)
{
   unsigned char disconnected[STBCC__MAX_EDGE_CLUMPS_PER_CLUSTER][STBCC__MAX_EDGE_CLUMPS_PER_CLUSTER/8] = { 0 };
   int x = cx * STBCC__CLUSTER_SIZE_X;
   int y = cy * STBCC__CLUSTER_SIZE_Y;
   int step_x, step_y=0, i, j, k, n;

   if (cx < 0 || cx >= g->cw || cy < 0 || cy >= g->ch)
      return;

   if (cx+dx < 0 || cx+dx >= g->cw || cy+dy < 0 || cy+dy >= g->ch)
      return;

   assert(abs(dx) + abs(dy) == 1);

   if (dx == 1) {
      i = STBCC__CLUSTER_SIZE_X-1;
      j = 0;
      step_x = 0;
      step_y = 1;
      n = STBCC__CLUSTER_SIZE_Y;
   } else if (dx == -1) {
      i = 0;
      j = 0;
      step_x = 0;
      step_y = 1;
      n = STBCC__CLUSTER_SIZE_Y;
   } else if (dy == -1) {
      i = 0;
      j = 0;
      step_x = 1;
      step_y = 0;
      n = STBCC__CLUSTER_SIZE_X;
   } else if (dy == 1) {
      i = 0;
      j = STBCC__CLUSTER_SIZE_Y-1;
      step_x = 1;
      step_y = 0;
      n = STBCC__CLUSTER_SIZE_X;
   } else {
      assert(0);
   }

   for (k=0; k < n; ++k) {
      if (STBCC__MAP_OPEN(g, x+i, y+j) && STBCC__MAP_OPEN(g, x+i+dx, y+j+dy)) {
         stbcc__clumpid src = g->clump_for_node[y+j][x+i];
         stbcc__clumpid dest = g->clump_for_node[y+j+dy][x+i+dx];
         if (0 == (disconnected[src][dest>>3] & (1 << (dest & 7)))) {
            disconnected[src][dest>>3] |= 1 << (dest & 7);
            stbcc__remove_clump_connection(g, x+i, y+j, x+i+dx, y+j+dy);
         }
      }
      i += step_x;
      j += step_y;
   }
}

static stbcc__tinypoint stbcc__incluster_find(stbcc__cluster_build_info *cbi, int x, int y)
{
   stbcc__tinypoint p,q;
   p = cbi->parent[y][x];
   if (p.x == x && p.y == y)
      return p;
   q = stbcc__incluster_find(cbi, p.x, p.y);
   cbi->parent[y][x] = q;
   return q;
}

static void stbcc__incluster_union(stbcc__cluster_build_info *cbi, int x1, int y1, int x2, int y2)
{
   stbcc__tinypoint p = stbcc__incluster_find(cbi, x1,y1);
   stbcc__tinypoint q = stbcc__incluster_find(cbi, x2,y2);

   if (p.x == q.x && p.y == q.y)
      return;

   cbi->parent[p.y][p.x] = q;
}

static void stbcc__switch_root(stbcc__cluster_build_info *cbi, int x, int y, stbcc__tinypoint p)
{
   cbi->parent[p.y][p.x].x = x;
   cbi->parent[p.y][p.x].y = y;
   cbi->parent[y][x].x = x;
   cbi->parent[y][x].y = y;
}

static void stbcc__build_clumps_for_cluster(stbcc_grid *g, int cx, int cy)
{
   stbcc__cluster *c;
   stbcc__cluster_build_info cbi;
   int label=0;
   int i,j;
   int x = cx * STBCC__CLUSTER_SIZE_X;
   int y = cy * STBCC__CLUSTER_SIZE_Y;

   // set initial disjoint set forest state
   for (j=0; j < STBCC__CLUSTER_SIZE_Y; ++j) {
      for (i=0; i < STBCC__CLUSTER_SIZE_X; ++i) {
         cbi.parent[j][i].x = i;
         cbi.parent[j][i].y = j;
      }
   }

   // join all sets that are connected
   for (j=0; j < STBCC__CLUSTER_SIZE_Y; ++j) {
      // check down only if not on bottom row
      if (j < STBCC__CLUSTER_SIZE_Y-1)
         for (i=0; i < STBCC__CLUSTER_SIZE_X; ++i)
            if (STBCC__MAP_OPEN(g,x+i,y+j) && STBCC__MAP_OPEN(g,x+i ,y+j+1))
               stbcc__incluster_union(&cbi, i,j, i,j+1);
      // check right for everything but rightmost column
      for (i=0; i < STBCC__CLUSTER_SIZE_X-1; ++i)
         if (STBCC__MAP_OPEN(g,x+i,y+j) && STBCC__MAP_OPEN(g,x+i+1,y+j ))
            stbcc__incluster_union(&cbi, i,j, i+1,j);
   }

   // label all non-empty clumps along edges so that all edge clumps are first
   // in list; this means in degenerate case we can skip traversing non-edge clumps.
   // because in the first pass we only label leaders, we swap the leader to the
   // edge first

   // first put solid labels on all the edges; these will get overwritten if they're open
   for (j=0; j < STBCC__CLUSTER_SIZE_Y; ++j)
      cbi.label[j][0] = cbi.label[j][STBCC__CLUSTER_SIZE_X-1] = STBCC__NULL_CLUMPID;
   for (i=0; i < STBCC__CLUSTER_SIZE_X; ++i)
      cbi.label[0][i] = cbi.label[STBCC__CLUSTER_SIZE_Y-1][i] = STBCC__NULL_CLUMPID;

   for (j=0; j < STBCC__CLUSTER_SIZE_Y; ++j) {
      i = 0;
      if (STBCC__MAP_OPEN(g, x+i, y+j)) {
         stbcc__tinypoint p = stbcc__incluster_find(&cbi, i,j);
         if (p.x == i && p.y == j)
            // if this is the leader, give it a label
            cbi.label[j][i] = label++;
         else if (!(p.x == 0 || p.x == STBCC__CLUSTER_SIZE_X-1 || p.y == 0 || p.y == STBCC__CLUSTER_SIZE_Y-1)) {
            // if leader is in interior, promote this edge node to leader and label
            stbcc__switch_root(&cbi, i, j, p);
            cbi.label[j][i] = label++;
         }
         // else if leader is on edge, do nothing (it'll get labelled when we reach it)
      }
      i = STBCC__CLUSTER_SIZE_X-1;
      if (STBCC__MAP_OPEN(g, x+i, y+j)) {
         stbcc__tinypoint p = stbcc__incluster_find(&cbi, i,j);
         if (p.x == i && p.y == j)
            cbi.label[j][i] = label++;
         else if (!(p.x == 0 || p.x == STBCC__CLUSTER_SIZE_X-1 || p.y == 0 || p.y == STBCC__CLUSTER_SIZE_Y-1)) {
            stbcc__switch_root(&cbi, i, j, p);
            cbi.label[j][i] = label++;
         }
      }
   }

   for (i=1; i < STBCC__CLUSTER_SIZE_X-1; ++i) {
      j = 0;
      if (STBCC__MAP_OPEN(g, x+i, y+j)) {
         stbcc__tinypoint p = stbcc__incluster_find(&cbi, i,j);
         if (p.x == i && p.y == j)
            cbi.label[j][i] = label++;
         else if (!(p.x == 0 || p.x == STBCC__CLUSTER_SIZE_X-1 || p.y == 0 || p.y == STBCC__CLUSTER_SIZE_Y-1)) {
            stbcc__switch_root(&cbi, i, j, p);
            cbi.label[j][i] = label++;
         }
      }
      j = STBCC__CLUSTER_SIZE_Y-1;
      if (STBCC__MAP_OPEN(g, x+i, y+j)) {
         stbcc__tinypoint p = stbcc__incluster_find(&cbi, i,j);
         if (p.x == i && p.y == j)
            cbi.label[j][i] = label++;
         else if (!(p.x == 0 || p.x == STBCC__CLUSTER_SIZE_X-1 || p.y == 0 || p.y == STBCC__CLUSTER_SIZE_Y-1)) {
            stbcc__switch_root(&cbi, i, j, p);
            cbi.label[j][i] = label++;
         }
      }
   }

   c = &g->cluster[cy][cx];
   c->num_edge_clumps = label;

   // label any internal clumps
   for (j=1; j < STBCC__CLUSTER_SIZE_Y-1; ++j) {
      for (i=1; i < STBCC__CLUSTER_SIZE_X-1; ++i) {
         stbcc__tinypoint p = cbi.parent[j][i];
         if (p.x == i && p.y == j)
            if (STBCC__MAP_OPEN(g,x+i,y+j))
               cbi.label[j][i] = label++;
            else
               cbi.label[j][i] = STBCC__NULL_CLUMPID;
      }
   }

   // label all other nodes
   for (j=0; j < STBCC__CLUSTER_SIZE_Y; ++j) {
      for (i=0; i < STBCC__CLUSTER_SIZE_X; ++i) {
         stbcc__tinypoint p = stbcc__incluster_find(&cbi, i,j);
         if (p.x != i || p.y != j) {
            if (STBCC__MAP_OPEN(g,x+i,y+j))
               cbi.label[j][i] = cbi.label[p.y][p.x];
         }
         if (STBCC__MAP_OPEN(g,x+i,y+j))
            assert(cbi.label[j][i] != STBCC__NULL_CLUMPID);
      }
   }

   c->num_clumps = label;

   for (i=0; i < label; ++i) {
      c->clump[i].num_adjacent = 0;
      c->clump[i].max_adjacent = 0;
   }

   for (j=0; j < STBCC__CLUSTER_SIZE_Y; ++j)
      for (i=0; i < STBCC__CLUSTER_SIZE_X; ++i) {
         g->clump_for_node[y+j][x+i] = cbi.label[j][i]; // @OPTIMIZE: remove cbi.label entirely
         assert(g->clump_for_node[y+j][x+i] <= STBCC__NULL_CLUMPID);
      }

   // set the global label for all interior clumps since they can't have connections,
   // so we don't have to do this on the global pass (brings from O(N) to O(N^0.75))
   for (i=(int) c->num_edge_clumps; i < (int) c->num_clumps; ++i) {
      stbcc__global_clumpid gc;
      gc.f.cluster_x = cx;
      gc.f.cluster_y = cy;
      gc.f.clump_index = i;
      c->clump[i].global_label = gc;
   }

   c->rebuild_adjacency = 1; // flag that it has no valid adjacency data

}

#endif // STB_CONNECTED_COMPONENTS_IMPLEMENTATION