/* alloc-slab.c - Slab debugging allocator
 * Copyright 2005,2007 srvx Development Team
 *
 * This file is part of srvx.
 *
 * srvx is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "common.h"
#include "log.h"

#if defined(HAVE_SYS_MMAN_H)
# include <sys/mman.h>
#endif

#if !defined(HAVE_MMAP)
# error The slab allocator requires that your system have the mmap() system call.
#endif

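/* SLAB_DEBUG is a bitmask selecting extra debugging machinery:
 *   SLAB_DEBUG_HEADER - prefix each allocation with a header that
 *     records its size, allocation site and a magic byte;
 *   SLAB_DEBUG_LOG    - append a binary record of every slab
 *     allocation, free and unmap to a log file;
 *   SLAB_DEBUG_PERMS  - mprotect() unused slab pages so stale
 *     accesses fault immediately.
 * For example, building with -DSLAB_DEBUG=3 enables the header and
 * the log.
 */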
#define SLAB_DEBUG_HEADER 1
#define SLAB_DEBUG_LOG 2
#define SLAB_DEBUG_PERMS 4

#if !defined(SLAB_DEBUG)
# define SLAB_DEBUG 0
#endif

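/* SLAB_RESERVE, when non-zero, keeps at least that many unused pages
 * mapped on the free-slab list.  MAX_SLAB_FREE caps that list: once
 * it grows past the cap, surplus pages are returned to the kernel so
 * that accesses through stale pointers fault.
 */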
#if !defined(SLAB_RESERVE)
# define SLAB_RESERVE 0
#endif

#if !defined(MAX_SLAB_FREE)
# define MAX_SLAB_FREE 1024
#endif

#if SLAB_DEBUG & SLAB_DEBUG_HEADER

#define ALLOC_MAGIC 0x1a
#define FREE_MAGIC 0xcf

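/* When SLAB_DEBUG_HEADER is enabled, every allocation is preceded by
 * one of these.  The 24-bit size field is what limits a single
 * allocation to 16 MB (see the assert in slab_malloc()); file_id and
 * line record the allocation site.
 */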
struct alloc_header {
    unsigned int size : 24;
    unsigned int magic : 8;
    unsigned int file_id : 8;
    unsigned int line : 16;
};

static const char *file_ids[256];
static struct file_id_entry {
    const char *name;
    unsigned int id : 8;
} file_id_map[256];
unsigned int file_ids_used;

static int
file_id_cmp(const void *a_, const void *b_)
{
    return strcmp(*(const char**)a_, *(const char**)b_);
}

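/* Return the small integer id for fname, assigning a new id the
 * first time a file is seen.  file_id_map stays sorted by name so
 * bsearch() can find known entries; file_ids[] maps ids back to
 * names.  The 8-bit id field allows at most 256 distinct files.
 */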
static unsigned int
get_file_id(const char *fname)
{
    struct file_id_entry *entry;

    entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    if (entry)
        return entry->id;
    entry = file_id_map + file_ids_used;
    file_ids[file_ids_used] = fname;
    entry->name = fname;
    entry->id = file_ids_used;
    qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    return file_ids_used - 1;
}

typedef struct alloc_header alloc_header_t;

#else

typedef size_t alloc_header_t;

#endif

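/* Each slab is one page of memory.  Items start at slab->base, and
 * the struct slab itself sits in the last bytes of the same page, so
 * the slab header for any small allocation can be recovered by
 * rounding the pointer up to the end of its page.
 */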
struct slab {
    struct slabset *parent;
    struct slab *prev;
    struct slab *next;
    void *base;
    void **free;
    unsigned int used;
};

struct slabset {
    struct slab *child;
    size_t nslabs;
    size_t nallocs;
    size_t size;
    size_t items_per_slab;
};

#define SLAB_MIN (2 * sizeof(void*))
#define SLAB_GRAIN sizeof(void*)
#define SLAB_ALIGN SLAB_GRAIN
#define SMALL_CUTOFF 576
/* Element size < SMALL_CUTOFF -> use small slabs.
 * Larger elements are allocated directly using mmap().  The largest
 * regularly allocated struct in srvx 1.x is smaller than
 * SMALL_CUTOFF, so there is not much point in coding support for
 * larger slabs.
 */
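/* For example, with 8-byte pointers and 4 KB pages, a 20-byte
 * element rounds up to 24 bytes and is served from little_slabs[3];
 * each of its slabs holds (4096 - sizeof(struct slab)) / 24 = 168
 * items, assuming a 48-byte struct slab.
 */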

static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
static struct slab *free_slab_head;
static struct slab *free_slab_tail;
unsigned long free_slab_count;
unsigned long big_alloc_count;
unsigned long big_alloc_size;
unsigned long slab_count;
unsigned long slab_alloc_count;
unsigned long slab_alloc_size;

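/* Pick up anonymous-mmap support under whichever name this platform
 * spells it; if neither MAP_ANON nor MAP_ANONYMOUS exists, fall back
 * to mapping /dev/zero (see slab_map() below).
 */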
#if defined(MAP_ANON)
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#else
# define MAP_ANON 0
#endif

#if SLAB_DEBUG & SLAB_DEBUG_LOG

FILE *slab_log;

struct slab_log_entry
{
    struct timeval tv;
    void *slab;
    ssize_t size;
};

static void
close_slab_log(void)
{
    fclose(slab_log);
}

static void
slab_log_alloc(void *slab, size_t size)
{
    struct slab_log_entry sle;

    gettimeofday(&sle.tv, NULL);
    sle.slab = slab;
    sle.size = (ssize_t)size;

    if (!slab_log)
    {
        const char *fname;
        fname = getenv("SLAB_LOG_FILE");
        if (!fname)
            fname = "slab.log";
        slab_log = fopen(fname, "w");
        atexit(close_slab_log);
    }

    fwrite(&sle, sizeof(sle), 1, slab_log);
}

static void
slab_log_free(void *slab, size_t size)
{
    struct slab_log_entry sle;

    gettimeofday(&sle.tv, NULL);
    sle.slab = slab;
    sle.size = -(ssize_t)size;
    fwrite(&sle, sizeof(sle), 1, slab_log);
}

static void
slab_log_unmap(void *slab)
{
    struct slab_log_entry sle;

    gettimeofday(&sle.tv, NULL);
    sle.slab = slab;
    sle.size = 0;
    fwrite(&sle, sizeof(sle), 1, slab_log);
}

#else
# define slab_log_alloc(SLAB, SIZE)
# define slab_log_free(SLAB, SIZE)
# define slab_log_unmap(SLAB)
#endif

#if (SLAB_DEBUG & SLAB_DEBUG_PERMS) && defined(HAVE_MPROTECT)

static void
slab_protect(struct slab *slab)
{
    /* mprotect() needs a page-aligned address, so protect from the
     * page base; the length covers up to the end of struct slab. */
    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_NONE);
}

static void
slab_unprotect(struct slab *slab)
{
    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_READ | PROT_WRITE);
}

#else
# define slab_protect(SLAB) (void)(SLAB)
# define slab_unprotect(SLAB) (void)(SLAB)
#endif

static size_t
slab_pagesize(void)
{
    static size_t pagesize;
    if (pagesize
#if defined(HAVE_GETPAGESIZE)
        || (pagesize = getpagesize())
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
        || (pagesize = sysconf(_SC_PAGESIZE))
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
        || (pagesize = sysconf(_SC_PAGE_SIZE))
#endif
        ) return pagesize;
    assert(0 && "unable to find system page size");
    return pagesize = 4096;
}

static size_t
slab_round_up(size_t size)
{
    return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
}

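/* Map length bytes of zero-filled, read-write memory.  Where
 * anonymous mappings are unavailable, a shared /dev/zero descriptor
 * provides the same effect.  Failure is fatal.
 */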
static void *
slab_map(size_t length)
{
    static int mmap_fd = -1;
    void *res;

#if ! MAP_ANON
    if (mmap_fd < 0) {
        mmap_fd = open("/dev/zero", 0);
        if (mmap_fd < 0)
            log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno));
    }
#endif
    res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
    if (res == MAP_FAILED)
        log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
    return res;
}

static void *slab_alloc(struct slabset *sset);
static void slab_unalloc(void *ptr, size_t size);

static struct slabset *
slabset_create(size_t size)
{
    unsigned int idx;

    size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    idx = size / SLAB_GRAIN;
    assert(idx < ArrayLength(little_slabs));
    if (!little_slabs[idx].size) {
        little_slabs[idx].size = size;
        little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
    }
    return &little_slabs[idx];
}

static void *
slab_alloc(struct slabset *sset)
{
    struct slab *slab;
    void **item;

    if (!sset->child || !sset->child->free) {
        unsigned int ii, step;

        /* Allocate new slab. */
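        /* Reuse a page from the free-slab list when one is
         * available; otherwise map a fresh page and place the
         * struct slab in its last bytes. */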
        if (free_slab_head) {
            slab = free_slab_head;
            slab_unprotect(slab);
            if (!(free_slab_head = slab->next))
                free_slab_tail = NULL;
        } else {
            item = slab_map(slab_pagesize());
            slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
            slab->base = item;
            slab_count++;
        }
        slab_log_alloc(slab, sset->size);

        /* Populate free list. */
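        /* The list is threaded through the items themselves: the
         * first word of each free item points at the next one, and
         * the last item points at NULL. */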
        step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
        for (ii = 1, item = slab->free = slab->base;
             ii < sset->items_per_slab;
             ++ii, item = (*item = (char*)item + step));
        *item = NULL;

        /* Link to parent slabset. */
        slab->parent = sset;
        slab->prev = sset->child;
        if (slab->prev) {
            slab->next = slab->prev->next;
            slab->prev->next = slab;
            if (slab->next)
                slab->next->prev = slab;
        } else
            slab->next = NULL;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);
        sset->child = slab;
        sset->nslabs++;
    }

    slab = sset->child;
    item = slab->free;
    assert(((unsigned long)item & (slab_pagesize() - 1))
           <= (slab_pagesize() - sizeof(*slab) - sset->size));
    slab->free = *item;
    if (++slab->used == sset->items_per_slab) {
        if (sset->child != slab) {
            /* Unlink slab and reinsert before sset->child. */
            if (slab->prev)
                slab->prev->next = slab->next;
            if (slab->next)
                slab->next->prev = slab->prev;
            if ((slab->prev = sset->child->prev))
                slab->prev->next = slab;
            if ((slab->next = sset->child))
                slab->next->prev = slab;
            assert(!slab->next || slab == slab->next->prev);
            assert(!slab->prev || slab == slab->prev->next);
        } else if (slab->next) {
            /* Advance sset->child to the next slab. */
            sset->child = slab->next;
        }
    }
    sset->nallocs++;
    memset(item, 0, sset->size);
    return item;
}


static void
slab_unalloc(void *ptr, size_t size)
{
    struct slab *slab, *new_next;

    assert(size < SMALL_CUTOFF);
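    /* The struct slab lives at the end of ptr's page: round ptr up
     * to the next page boundary and step back sizeof(*slab) bytes. */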
    slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
    *(void**)ptr = slab->free;
    slab->free = ptr;
    slab->parent->nallocs--;

    if (slab->used-- == slab->parent->items_per_slab
        && slab->parent->child != slab) {
        /* Unlink from current position, relink as parent's first child. */
        new_next = slab->parent->child;
        assert(new_next != NULL);
        if (slab->prev)
            slab->prev->next = slab->next;
        if (slab->next)
            slab->next->prev = slab->prev;
        if ((slab->prev = new_next->prev))
            slab->prev->next = slab;
        slab->next = new_next;
        new_next->prev = slab;
        slab->parent->child = slab;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);
    } else if (!slab->used) {
        slab_log_free(slab, size);

        /* Unlink slab from its parent. */
        slab->parent->nslabs--;
        if (slab->prev)
            slab->prev->next = slab->next;
        if (slab->next)
            slab->next->prev = slab->prev;
        new_next = slab->next ? slab->next : slab->prev;
        if (slab == slab->parent->child)
            slab->parent->child = new_next;
        if (new_next) {
            assert(!new_next->next || new_next == new_next->next->prev);
            assert(!new_next->prev || new_next == new_next->prev->next);
        }

#if SLAB_RESERVE
        /* Make sure we have enough free slab pages. */
        while (free_slab_count < SLAB_RESERVE) {
            struct slab *tslab;
            void *item;

            item = slab_map(slab_pagesize());
            tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
            tslab->base = item;
            tslab->prev = free_slab_tail;
            free_slab_tail = tslab;
            if (!free_slab_head)
                free_slab_head = tslab;
            else {
                slab_unprotect(tslab->prev);
                tslab->prev->next = tslab;
                slab_protect(tslab->prev);
            }
            free_slab_count++;
            slab_count++;
        }
#endif

        /* Link to list of free slabs. */
        slab->parent = NULL;
        slab->next = NULL;
        slab->prev = free_slab_tail;
        if (slab->prev) {
            slab_unprotect(slab->prev);
            slab->prev->next = slab;
            slab_protect(slab->prev);
        } else
            free_slab_head = slab;
        slab_protect(slab);
        free_slab_tail = slab;
        free_slab_count++;

#if MAX_SLAB_FREE >= 0
        /* Unlink and unmap old slabs, so accesses to stale-enough
         * pointers will fault. */
        while (free_slab_count > MAX_SLAB_FREE) {
            struct slab *tslab;

            tslab = free_slab_tail;
            slab_unprotect(tslab);
            free_slab_tail = tslab->prev;
            if (tslab->prev) {
                slab_unprotect(tslab->prev);
                tslab->prev->next = NULL;
                slab_protect(tslab->prev);
            } else
                free_slab_head = NULL;
            free_slab_count--;
            slab_count--;
            slab_log_unmap(tslab);
            munmap(tslab->base, slab_pagesize());
        }
#endif
    }
    (void)size;
}

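/* Allocate size zeroed bytes on behalf of file:line.  (Callers
 * normally reach this through malloc-style wrapper macros that
 * supply __FILE__ and __LINE__.)  Requests whose header-inclusive
 * size is below SMALL_CUTOFF are served from a slab; larger ones get
 * their own mmap()ed pages.
 */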
void *
slab_malloc(const char *file, unsigned int line, size_t size)
{
    alloc_header_t *res;
    size_t real;

    assert(size < 1 << 24);
    real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        res = slab_alloc(slabset_create(real));
        slab_alloc_count++;
        slab_alloc_size += size;
    } else {
        res = slab_map(slab_round_up(real));
        big_alloc_count++;
        big_alloc_size += size;
        slab_log_alloc(res, size);
    }
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    res->file_id = get_file_id(file);
    res->size = size;
    res->line = line;
    res->magic = ALLOC_MAGIC;
#else
    *res = size;
    (void)file; (void)line;
#endif
    return res + 1;
}

void *
slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
{
    alloc_header_t *orig;
    void *newblock;
    size_t osize;

    if (!ptr)
        return slab_malloc(file, line, size);

    verify(ptr);
    orig = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    osize = orig->size;
#else
    osize = *orig;
#endif
    if (osize >= size)
        return ptr;
    newblock = slab_malloc(file, line, size);
    memcpy(newblock, ptr, osize);
    slab_free(file, line, ptr);
    return newblock;
}

char *
slab_strdup(const char *file, unsigned int line, const char *src)
{
    char *target;
    size_t len;

    len = strlen(src) + 1;
    target = slab_malloc(file, line, len);
    memcpy(target, src, len);
    return target;
}

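/* Release a block obtained from slab_malloc().  Small blocks are
 * filled with 0xde bytes before returning to their slab's free list,
 * so use-after-free reads stand out; large blocks are simply
 * unmapped.
 */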
void
slab_free(const char *file, unsigned int line, void *ptr)
{
    alloc_header_t *hdr;
    size_t user, real;

    if (!ptr)
        return;
    verify(ptr);
    hdr = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    hdr->file_id = get_file_id(file);
    hdr->line = line;
    hdr->magic = FREE_MAGIC;
    user = hdr->size;
#else
    user = *hdr;
    (void)file; (void)line;
#endif
    real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        memset(hdr + 1, 0xde, real - sizeof(*hdr));
        slab_unalloc(hdr, real);
        slab_alloc_count--;
        slab_alloc_size -= user;
    } else {
        munmap(hdr, slab_round_up(real));
        big_alloc_count--;
        big_alloc_size -= user;
        slab_log_unmap(hdr);
    }
}

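/* Sanity-check a pointer that should have come from slab_malloc().
 * With SLAB_DEBUG_HEADER this verifies the magic byte and file id;
 * in either mode it checks that the recorded size is consistent with
 * where the block lives.
 */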
/* Undefine the verify macro in case we're not debugging. */
#undef verify
void
verify(const void *ptr)
{
    alloc_header_t *hdr;
    size_t real;

    if (!ptr)
        return;

    hdr = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    real = hdr->size + sizeof(*hdr);
    assert(hdr->file_id < file_ids_used);
    assert(hdr->magic == ALLOC_MAGIC);
#else
    real = *hdr + sizeof(*hdr);
#endif
    real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);

    if (real >= SMALL_CUTOFF)
        assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
    else {
        struct slab *slab;
        size_t expected;

        expected = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
        slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
        assert(slab->parent->size == expected);
    }
}