/* alloc-slab.c - Slab debugging allocator
 * Copyright 2005,2007 srvx Development Team
 *
 * This file is part of srvx.
 *
 * srvx is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "common.h"
#include "log.h"

#if defined(HAVE_SYS_MMAN_H)
# include <sys/mman.h>
#endif

#if !defined(HAVE_MMAP)
# error The slab allocator requires that your system have the mmap() system call.
#endif

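/* Debug feature bits for SLAB_DEBUG: record an allocation header on every
 * block, write a binary log of slab traffic, and/or mprotect() pages on the
 * free slab list.  SLAB_RESERVE keeps at least that many free slab pages
 * mapped; MAX_SLAB_FREE caps how many free pages are retained before the
 * oldest are unmapped.
 */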
#define SLAB_DEBUG_HEADER 1
#define SLAB_DEBUG_LOG 2
#define SLAB_DEBUG_PERMS 4

#if !defined(SLAB_DEBUG)
# define SLAB_DEBUG 0
#endif

#if !defined(SLAB_RESERVE)
# define SLAB_RESERVE 0
#endif

#if !defined(MAX_SLAB_FREE)
# define MAX_SLAB_FREE 1024
#endif

#if SLAB_DEBUG & SLAB_DEBUG_HEADER

#define ALLOC_MAGIC 0x1a
#define FREE_MAGIC 0xcf

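/* Debugging header prepended to every allocation when SLAB_DEBUG_HEADER is
 * enabled: the user-requested size, a magic byte to catch corruption and
 * double frees, and the file/line of the most recent allocation or free.
 */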
struct alloc_header {
    unsigned int size : 24;
    unsigned int magic : 8;
    unsigned int file_id : 8;
    unsigned int line : 16;
};

static const char *file_ids[256];
static struct file_id_entry {
    const char *name;
    unsigned int id : 8;
} file_id_map[256];
unsigned int file_ids_used;

static int
file_id_cmp(const void *a_, const void *b_)
{
    return strcmp(*(const char**)a_, *(const char**)b_);
}

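/* Map a file name string (the file argument to slab_malloc() and friends)
 * to a small integer that fits the 8-bit file_id field of struct
 * alloc_header.  New names are appended and the map is kept sorted so
 * bsearch() can find existing entries.
 */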
static unsigned int
get_file_id(const char *fname)
{
    struct file_id_entry *entry;

    entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    if (entry)
        return entry->id;
    entry = file_id_map + file_ids_used;
    file_ids[file_ids_used] = fname;
    entry->name = fname;
    entry->id = file_ids_used;
    qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    return file_ids_used - 1;
}

typedef struct alloc_header alloc_header_t;

#else

typedef size_t alloc_header_t;

#endif

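/* Each small-object slab occupies one page.  The objects (with the free
 * list threaded through the unused ones) start at base; the struct slab
 * header itself lives at the very end of the page, which is how
 * slab_unalloc() recovers it from an object pointer.
 */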
struct slab {
    struct slabset *parent;
    struct slab *prev;
    struct slab *next;
    void *base;
    void **free;
    unsigned int used;
};

struct slabset {
    struct slab *child;
    size_t nslabs;
    size_t nallocs;
    size_t size;
    size_t items_per_slab;
};

#define SLAB_MIN (2 * sizeof(void*))
#define SLAB_GRAIN sizeof(void*)
#define SLAB_ALIGN SLAB_GRAIN
#define SMALL_CUTOFF 576
/* Element size < SMALL_CUTOFF -> use small slabs.
 * Larger elements are allocated directly using mmap(). The largest
 * regularly allocated struct in srvx 1.x is smaller than
 * SMALL_CUTOFF, so there is not much point in coding support for
 * larger slabs.
 */

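/* One slabset per distinct (rounded) object size, plus the shared list of
 * completely free slab pages and global counters tracking slab and
 * allocation totals.
 */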
static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
static struct slab *free_slab_head;
static struct slab *free_slab_tail;
unsigned long free_slab_count;
unsigned long big_alloc_count;
unsigned long big_alloc_size;
unsigned long slab_count;
unsigned long slab_alloc_count;
unsigned long slab_alloc_size;

#if defined(MAP_ANON)
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#else
# define MAP_ANON 0
#endif

#if SLAB_DEBUG & SLAB_DEBUG_LOG

FILE *slab_log;

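/* Binary log record, written to $SLAB_LOG_FILE (default "slab.log"):
 * a positive size records a slab (or large block) being allocated for that
 * object size, a negative size records a slab becoming empty, and zero
 * records a page being unmapped.
 */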
struct slab_log_entry
{
    struct timeval tv;
    void *slab;
    ssize_t size;
};

static void
close_slab_log(void)
{
    fclose(slab_log);
}

static void
slab_log_alloc(void *slab, size_t size)
{
    struct slab_log_entry sle;

    gettimeofday(&sle.tv, NULL);
    sle.slab = slab;
    sle.size = (ssize_t)size;

    if (!slab_log)
    {
        const char *fname;
        fname = getenv("SLAB_LOG_FILE");
        if (!fname)
            fname = "slab.log";
        slab_log = fopen(fname, "w");
        atexit(close_slab_log);
    }

    fwrite(&sle, sizeof(sle), 1, slab_log);
}

static void
slab_log_free(void *slab, size_t size)
{
    struct slab_log_entry sle;

    gettimeofday(&sle.tv, NULL);
    sle.slab = slab;
    sle.size = -(ssize_t)size;
    fwrite(&sle, sizeof(sle), 1, slab_log);
}

static void
slab_log_unmap(void *slab)
{
    struct slab_log_entry sle;

    gettimeofday(&sle.tv, NULL);
    sle.slab = slab;
    sle.size = 0;
    fwrite(&sle, sizeof(sle), 1, slab_log);
}

#else
# define slab_log_alloc(SLAB, SIZE)
# define slab_log_free(SLAB, SIZE)
# define slab_log_unmap(SLAB)
#endif

#if (SLAB_DEBUG & SLAB_DEBUG_PERMS) && defined(HAVE_MPROTECT)

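/* With SLAB_DEBUG_PERMS, pages on the free slab list are mprotect()ed to
 * PROT_NONE so stale pointers into them fault immediately; a page is made
 * readable and writable again before it is reused.
 */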
static void
slab_protect(struct slab *slab)
{
    /* mprotect() requires a page-aligned address, so protect from the
     * start of the slab's page rather than from the header itself. */
    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_NONE);
}

static void
slab_unprotect(struct slab *slab)
{
    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_READ | PROT_WRITE);
}

#else
# define slab_protect(SLAB) (void)(SLAB)
# define slab_unprotect(SLAB) (void)(SLAB)
#endif

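/* Return the system page size, caching it after the first call. */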
static size_t
slab_pagesize(void)
{
    static size_t pagesize;
    if (pagesize
#if defined(HAVE_GETPAGESIZE)
        || (pagesize = getpagesize())
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
        || (pagesize = sysconf(_SC_PAGESIZE))
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
        || (pagesize = sysconf(_SC_PAGE_SIZE))
#endif
        ) return pagesize;
    assert(0 && "unable to find system page size");
    return pagesize = 4096;
}

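/* Round a size up to a whole number of pages. */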
static size_t
slab_round_up(size_t size)
{
    return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
}

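/* Map fresh zero-filled pages, using anonymous mmap() where available and
 * falling back to /dev/zero otherwise.  Failure is fatal.
 */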
static void *
slab_map(size_t length)
{
    static int mmap_fd = -1;
    void *res;

#if ! MAP_ANON
    if (mmap_fd < 0) {
        mmap_fd = open("/dev/zero", 0);
        if (mmap_fd < 0)
            log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno));
    }
#endif
    res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
    if (res == MAP_FAILED)
        log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
    return res;
}

static void *slab_alloc(struct slabset *sset);
static void slab_unalloc(void *ptr, size_t size);

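/* Return the slabset responsible for objects of the given (header-inclusive)
 * size, initializing its element size and per-page item count on first use.
 */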
static struct slabset *
slabset_create(size_t size)
{
    unsigned int idx;

    size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    idx = size / SLAB_GRAIN;
    assert(idx < ArrayLength(little_slabs));
    if (!little_slabs[idx].size) {
        little_slabs[idx].size = size;
        little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
    }
    return &little_slabs[idx];
}

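/* Allocate one zero-filled object from the slabset, grabbing a page from the
 * free slab list (or mapping a new one) when no current slab has a free
 * object.
 */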
static void *
slab_alloc(struct slabset *sset)
{
    struct slab *slab;
    void **item;

    if (!sset->child || !sset->child->free) {
        unsigned int ii, step;

        /* Allocate new slab. */
        if (free_slab_head) {
            slab = free_slab_head;
            slab_unprotect(slab);
            if (!(free_slab_head = slab->next))
                free_slab_tail = NULL;
        } else {
            item = slab_map(slab_pagesize());
            slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
            slab->base = item;
            slab_count++;
        }
        slab_log_alloc(slab, sset->size);

        /* Populate free list. */
        step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
        for (ii = 1, item = slab->free = slab->base;
             ii < sset->items_per_slab;
             ++ii, item = (*item = (char*)item + step));
        *item = NULL;

        /* Link to parent slabset. */
        slab->parent = sset;
        slab->prev = sset->child;
        if (slab->prev) {
            slab->next = slab->prev->next;
            slab->prev->next = slab;
            if (slab->next)
                slab->next->prev = slab;
        } else
            slab->next = NULL;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);
        sset->child = slab;
        sset->nslabs++;
    }

    slab = sset->child;
    item = slab->free;
    assert(((unsigned long)item & (slab_pagesize() - 1))
           <= (slab_pagesize() - sizeof(*slab) - sset->size));
    slab->free = *item;
    if (++slab->used == sset->items_per_slab) {
        if (sset->child != slab) {
            /* Unlink slab and reinsert before sset->child. */
            if (slab->prev)
                slab->prev->next = slab->next;
            if (slab->next)
                slab->next->prev = slab->prev;
            if ((slab->prev = sset->child->prev))
                slab->prev->next = slab;
            if ((slab->next = sset->child))
                slab->next->prev = slab;
            assert(!slab->next || slab == slab->next->prev);
            assert(!slab->prev || slab == slab->prev->next);
        } else if (slab->next) {
            /* Advance sset->child to next pointer. */
            sset->child = slab->next;
        }
    }
    sset->nallocs++;
    memset(item, 0, sset->size);
    return item;
}

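/* Return an object to its slab.  The slab header is found from the object
 * address alone (it sits at the end of the page).  Slabs that gain a free
 * object move to the front of their set; slabs that become completely empty
 * move to the free slab list and, beyond MAX_SLAB_FREE of them, are unmapped.
 */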
static void
slab_unalloc(void *ptr, size_t size)
{
    struct slab *slab, *new_next;

    assert(size < SMALL_CUTOFF);
    slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
    *(void**)ptr = slab->free;
    slab->free = ptr;
    slab->parent->nallocs--;

    if (slab->used-- == slab->parent->items_per_slab
        && slab->parent->child != slab) {
        /* Unlink from current position, relink as parent's first child. */
        new_next = slab->parent->child;
        assert(new_next != NULL);
        if (slab->prev)
            slab->prev->next = slab->next;
        if (slab->next)
            slab->next->prev = slab->prev;
        if ((slab->prev = new_next->prev))
            slab->prev->next = slab;
        slab->next = new_next;
        new_next->prev = slab;
        slab->parent->child = slab;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);
    } else if (!slab->used) {
        slab_log_free(slab, size);

        /* Unlink slab from its parent. */
        slab->parent->nslabs--;
        if (slab->prev)
            slab->prev->next = slab->next;
        if (slab->next)
            slab->next->prev = slab->prev;
        new_next = slab->next ? slab->next : slab->prev;
        if (slab == slab->parent->child)
            slab->parent->child = new_next;
        if (new_next) {
            assert(!new_next->next || new_next == new_next->next->prev);
            assert(!new_next->prev || new_next == new_next->prev->next);
        }

#if SLAB_RESERVE
        /* Make sure we have enough free slab pages. */
        while (free_slab_count < SLAB_RESERVE) {
            struct slab *tslab;
            void *item;

            item = slab_map(slab_pagesize());
            tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
            tslab->base = item;
            tslab->prev = free_slab_tail;
            free_slab_tail = tslab;
            if (!free_slab_head)
                free_slab_head = tslab;
            else {
                slab_unprotect(tslab->prev);
                tslab->prev->next = tslab;
                slab_protect(tslab->prev);
            }
            free_slab_count++;
            slab_count++;
        }
#endif

        /* Link to list of free slabs. */
        slab->parent = NULL;
        slab->next = NULL;
        slab->prev = free_slab_tail;
        if (slab->prev) {
            slab_unprotect(slab->prev);
            slab->prev->next = slab;
            slab_protect(slab->prev);
        } else
            free_slab_head = slab;
        slab_protect(slab);
        free_slab_tail = slab;
        free_slab_count++;

#if MAX_SLAB_FREE >= 0
        /* Unlink and unmap old slabs, so accesses to stale-enough
         * pointers will fault. */
        while (free_slab_count > MAX_SLAB_FREE) {
            struct slab *tslab;

            tslab = free_slab_tail;
            slab_unprotect(tslab);
            free_slab_tail = tslab->prev;
            if (tslab->prev) {
                slab_unprotect(tslab->prev);
                tslab->prev->next = NULL;
                slab_protect(tslab->prev);
            } else
                free_slab_head = NULL;
            free_slab_count--;
            slab_count--;
            slab_log_unmap(tslab);
            munmap(tslab->base, slab_pagesize());
        }
#endif
    }
    (void)size;
}

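/* Allocate size bytes on behalf of file:line.  Small requests come from a
 * slab; larger ones get their own mmap()ed pages.  Either way the returned
 * block is preceded by an alloc_header_t recording (at least) its size.
 */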
void *
slab_malloc(const char *file, unsigned int line, size_t size)
{
    alloc_header_t *res;
    size_t real;

    assert(size < 1 << 24);
    real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        res = slab_alloc(slabset_create(real));
        slab_alloc_count++;
        slab_alloc_size += size;
    } else {
        res = slab_map(slab_round_up(real));
        big_alloc_count++;
        big_alloc_size += size;
        slab_log_alloc(res, size);
    }
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    res->file_id = get_file_id(file);
    res->size = size;
    res->line = line;
    res->magic = ALLOC_MAGIC;
#else
    *res = size;
    (void)file; (void)line;
#endif
    return res + 1;
}

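/* Grow (never shrink) an allocation, copying the old contents into a fresh
 * block when the current one is too small.
 */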
void *
slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
{
    alloc_header_t *orig;
    void *newblock;
    size_t osize;

    if (!ptr)
        return slab_malloc(file, line, size);

    verify(ptr);
    orig = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    osize = orig->size;
#else
    osize = *orig;
#endif
    if (osize >= size)
        return ptr;
    newblock = slab_malloc(file, line, size);
    memcpy(newblock, ptr, osize);
    slab_free(file, line, ptr);
    return newblock;
}

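/* Duplicate a string into slab-allocated memory. */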
char *
slab_strdup(const char *file, unsigned int line, const char *src)
{
    char *target;
    size_t len;

    len = strlen(src) + 1;
    target = slab_malloc(file, line, len);
    memcpy(target, src, len);
    return target;
}

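/* Release an allocation made by slab_malloc().  Small blocks are filled with
 * 0xde before going back to their slab, to make use-after-free bugs more
 * obvious; large blocks are unmapped.
 */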
void
slab_free(const char *file, unsigned int line, void *ptr)
{
    alloc_header_t *hdr;
    size_t user, real;

    if (!ptr)
        return;
    verify(ptr);
    hdr = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    hdr->file_id = get_file_id(file);
    hdr->line = line;
    hdr->magic = FREE_MAGIC;
    user = hdr->size;
#else
    user = *hdr;
    (void)file; (void)line;
#endif
    real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        memset(hdr + 1, 0xde, real - sizeof(*hdr));
        slab_unalloc(hdr, real);
        slab_alloc_count--;
        slab_alloc_size -= user;
    } else {
        munmap(hdr, slab_round_up(real));
        big_alloc_count--;
        big_alloc_size -= user;
        slab_log_unmap(hdr);
    }
}

/* Undefine the verify macro in case we're not debugging. */
#undef verify
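/* Sanity-check a pointer previously returned by slab_malloc(): with
 * SLAB_DEBUG_HEADER this validates the magic byte and file id, and in all
 * builds it checks that the pointer sits where a block of its recorded size
 * should: just past the header of a page-aligned mapping for large blocks,
 * or inside a slab of the matching element size for small ones.
 */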
void
verify(const void *ptr)
{
    alloc_header_t *hdr;
    size_t real;

    if (!ptr)
        return;

    hdr = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    real = hdr->size + sizeof(*hdr);
    assert(hdr->file_id < file_ids_used);
    assert(hdr->magic == ALLOC_MAGIC);
#else
    real = *hdr + sizeof(*hdr);
#endif
    real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);

    if (real >= SMALL_CUTOFF)
        assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
    else {
        struct slab *slab;
        size_t expected;

        expected = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
        slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
        assert(slab->parent->size == expected);
    }
}