/* alloc-slab.c - Slab debugging allocator
 * Copyright 2005 srvx Development Team
 *
 * This file is part of x3.
 *
 * x3 is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "common.h"
#include "log.h"

#if defined(HAVE_SYS_MMAN_H)
# include <sys/mman.h>
#endif

#if !defined(HAVE_MMAP)
# error The slab allocator requires that your system have the mmap() system call.
#endif

/* SLAB_DEBUG adds a per-allocation header recording the allocation
 * site plus a magic byte used to detect bad frees.  SLAB_RESERVE keeps
 * that many pages pre-mapped on the free list so that emptied slabs
 * can be unmapped, making stale pointers fault. */
#define SLAB_DEBUG 1
#define SLAB_RESERVE 1024

#if SLAB_DEBUG

#define ALLOC_MAGIC 0x1a
#define FREE_MAGIC 0xcf

struct alloc_header {
    unsigned int size : 24;
    unsigned int magic : 8;
    unsigned int file_id : 8;
    unsigned int line : 16;
};

static const char *file_ids[256];
static struct file_id_entry {
    const char *name;
    unsigned int id : 8;
} file_id_map[256];
unsigned int file_ids_used;

static int
file_id_cmp(const void *a_, const void *b_)
{
    return strcmp(*(const char**)a_, *(const char**)b_);
}

/* Map a file name to a small id, assigning a new id (and re-sorting
 * the lookup table) the first time a name is seen. */
static unsigned int
get_file_id(const char *fname)
{
    struct file_id_entry *entry;

    entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    if (entry)
        return entry->id;
    entry = file_id_map + file_ids_used;
    file_ids[file_ids_used] = fname;
    entry->name = fname;
    entry->id = file_ids_used;
    qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    return file_ids_used - 1;
}
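
/* Illustrative sketch of the id mapping above (values depend on call
 * order; the file names are hypothetical):
 *
 *   get_file_id("chanserv.c")  -> 0   first call inserts, then sorts
 *   get_file_id("nickserv.c")  -> 1   second new name gets the next id
 *   get_file_id("chanserv.c")  -> 0   bsearch() hit, no new entry
 *
 * Since callers pass __FILE__, each source file is inserted once and
 * every later lookup is a plain binary search. */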

typedef struct alloc_header alloc_header_t;

#else

typedef size_t alloc_header_t;

#endif

struct slab {
    struct slabset *parent; /* slabset this page belongs to */
    struct slab *prev;      /* links to sibling slabs in the set */
    struct slab *next;
    void *base;             /* start of the mmap()ed page */
    void **free;            /* head of this slab's free-item list */
    unsigned int used;      /* items currently allocated from it */
};

struct slabset {
    struct slab *child;     /* first slab with a free item */
    size_t nslabs;          /* total slabs in this set */
    size_t nallocs;         /* live allocations in this set */
    size_t size;            /* item size served by this set */
    size_t items_per_slab;  /* item capacity of one slab */
};

#define SLAB_MIN (2 * sizeof(void*))
#define SLAB_GRAIN sizeof(void*)
#define SLAB_ALIGN SLAB_GRAIN
#define SMALL_CUTOFF 576
/* Element size < SMALL_CUTOFF -> use small slabs.
 * Larger elements are allocated directly using mmap().  The largest
 * regularly allocated struct in srvx 1.x is smaller than
 * SMALL_CUTOFF, so there is not much point in coding support for
 * larger slabs.
 */
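
/* Worked example (illustrative, assuming 8-byte pointers and an 8-byte
 * alloc_header_t): a 40-byte request grows to 48 bytes after the
 * header and SLAB_GRAIN rounding in slab_malloc(), so it is served
 * from little_slabs[48 / SLAB_GRAIN] == little_slabs[6], whose slabs
 * hand out 48-byte items. */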

static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
static struct slab *free_slab_head;
static struct slab *free_slab_tail;
unsigned long free_slab_count;
unsigned long big_alloc_count;
unsigned long big_alloc_size;
unsigned long slab_count;
unsigned long slab_alloc_count;
unsigned long slab_alloc_size;

#if defined(MAP_ANON)
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#else
# define MAP_ANON 0
#endif

static size_t
slab_pagesize(void)
{
    static size_t pagesize;
    if (pagesize
#if defined(HAVE_GETPAGESIZE)
        || (pagesize = getpagesize())
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
        || (pagesize = sysconf(_SC_PAGESIZE))
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
        || (pagesize = sysconf(_SC_PAGE_SIZE))
#endif
        ) return pagesize;
    assert(0 && "unable to find system page size");
    return pagesize = 4096;
}

static size_t
slab_round_up(size_t size)
{
    return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
}
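
/* Example: with 4096-byte pages, slab_round_up(5000) returns
 * (5000 + 4095) & ~4095 == 8192, the next page boundary. */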

static void *
slab_map(size_t length)
{
    static int mmap_fd = -1;
    void *res;

#if ! MAP_ANON
    if (mmap_fd < 0) {
        mmap_fd = open("/dev/zero", 0);
        if (mmap_fd < 0)
            log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno));
    }
#endif
    res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
    if (res == MAP_FAILED)
        log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
    return res;
}

static void *slab_alloc(struct slabset *sset);
static void slab_unalloc(void *ptr, size_t size);

static struct slabset *
slabset_create(size_t size)
{
    unsigned int idx;

    size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    idx = size / SLAB_GRAIN;
    assert(idx < ArrayLength(little_slabs));
    if (!little_slabs[idx].size) {
        little_slabs[idx].size = size;
        little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
    }
    return &little_slabs[idx];
}
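
/* Worked example (illustrative, 64-bit host, 4096-byte pages): struct
 * slab occupies 48 bytes at the top of each page, so the 48-byte
 * slabset gets items_per_slab == (4096 - 48) / 48 == 84. */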

static void *
slab_alloc(struct slabset *sset)
{
    struct slab *slab;
    void **item;

    if (!sset->child || !sset->child->free) {
        unsigned int ii, step;

        /* Allocate new slab. */
        if (free_slab_head) {
            slab = free_slab_head;
            if (!(free_slab_head = slab->next))
                free_slab_tail = NULL;
            free_slab_count--; /* keep the count in step with the list */
        } else {
            item = slab_map(slab_pagesize());
            slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
            slab->base = item;
            slab_count++;
        }

        /* Populate free list. */
        step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
        for (ii = 1, item = slab->free = slab->base;
             ii < sset->items_per_slab;
             ++ii, item = (*item = (char*)item + step));
        *item = NULL;

        /* Link to parent slabset. */
        slab->parent = sset;
        slab->prev = sset->child;
        if (slab->prev) {
            slab->next = slab->prev->next;
            slab->prev->next = slab;
            if (slab->next)
                slab->next->prev = slab;
        } else
            slab->next = NULL;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);
        sset->child = slab;
        sset->nslabs++;
    }

    slab = sset->child;
    item = slab->free;
    assert(((unsigned long)item & (slab_pagesize() - 1))
           <= (slab_pagesize() - sizeof(*slab) - sset->size));
    slab->free = *item;
    if (++slab->used == sset->items_per_slab) {
        if (sset->child != slab) {
            /* Unlink slab and reinsert before sset->child. */
            if (slab->prev)
                slab->prev->next = slab->next;
            if (slab->next)
                slab->next->prev = slab->prev;
            if ((slab->prev = sset->child->prev))
                slab->prev->next = slab;
            if ((slab->next = sset->child))
                slab->next->prev = slab;
            assert(!slab->next || slab == slab->next->prev);
            assert(!slab->prev || slab == slab->prev->next);
        } else if (slab->next) {
            /* Advance sset->child to the next slab. */
            sset->child = slab->next;
        }
    }
    sset->nallocs++;
    memset(item, 0, sset->size);
    return item;
}
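
/* How the free list above is threaded (illustrative, step == 48): the
 * first word of each free item holds the address of the next one, so
 * a freshly populated page reads
 *
 *   base+0 -> base+48 -> base+96 -> ... -> base+83*48 -> NULL
 *
 * and an allocation is a single pointer pop from slab->free. */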

static void
slab_unalloc(void *ptr, size_t size)
{
    struct slab *slab, *new_next;

    assert(size < SMALL_CUTOFF);
    slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
    *(void**)ptr = slab->free;
    slab->free = ptr;
    slab->parent->nallocs--;

    if (slab->used-- == slab->parent->items_per_slab
        && slab->parent->child != slab) {
        /* Unlink from current position, relink as parent's first child. */
        new_next = slab->parent->child;
        assert(new_next != NULL);
        if (slab->prev)
            slab->prev->next = slab->next;
        if (slab->next)
            slab->next->prev = slab->prev;
        if ((slab->prev = new_next->prev))
            slab->prev->next = slab;
        slab->next = new_next;
        new_next->prev = slab;
        slab->parent->child = slab;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);
    } else if (!slab->used) {
        /* Unlink slab from its parent. */
        slab->parent->nslabs--;
        if (slab->prev)
            slab->prev->next = slab->next;
        if (slab->next)
            slab->next->prev = slab->prev;
        new_next = slab->next ? slab->next : slab->prev;
        if (slab == slab->parent->child)
            slab->parent->child = new_next;
        if (new_next) {
            assert(!new_next->next || new_next == new_next->next->prev);
            assert(!new_next->prev || new_next == new_next->prev->next);
        }

#if SLAB_RESERVE
        if (!free_slab_count) {
            /* Make sure we have enough free slab pages. */
            while (free_slab_count < SLAB_RESERVE) {
                struct slab *tslab;
                void *item;

                item = slab_map(slab_pagesize());
                tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
                tslab->base = item;
                tslab->next = NULL; /* fresh mappings are zero-filled, but be explicit */
                tslab->prev = free_slab_tail;
                if (tslab->prev)
                    tslab->prev->next = tslab; /* link the old tail forward */
                free_slab_tail = tslab;
                if (!free_slab_head)
                    free_slab_head = tslab;
                free_slab_count++;
                slab_count++;
            }
        }

        /* Unmap old slab, so accesses to stale pointers will fault. */
        munmap(slab->base, slab_pagesize());
        slab_count--;
#else
        /* Link to list of free slabs. */
        slab->parent = NULL;
        slab->prev = free_slab_tail;
        slab->next = NULL;
        if (slab->prev)
            slab->prev->next = slab; /* link the old tail forward */
        free_slab_tail = slab;
        if (!free_slab_head)
            free_slab_head = slab;
        free_slab_count++;
#endif
    }
    (void)size; /* only used by the assert above */
}
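
/* Pointer math sketch for recovering the slab header (illustrative,
 * 4096-byte pages): for ptr == 0x7f1234007010,
 * (ptr | 0xfff) + 1 == 0x7f1234008000 is the end of ptr's page, and
 * subtracting sizeof(struct slab) lands on the struct slab stored at
 * the top of that same page. */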

void *
slab_malloc(const char *file, unsigned int line, size_t size)
{
    alloc_header_t *res;
    size_t real;

    assert(size < 1 << 24);
    real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        res = slab_alloc(slabset_create(real));
        slab_alloc_count++;
        slab_alloc_size += size;
    } else {
        res = slab_map(slab_round_up(real));
        big_alloc_count++;
        big_alloc_size += size;
    }
#if SLAB_DEBUG
    res->file_id = get_file_id(file);
    res->size = size;
    res->line = line;
    res->magic = ALLOC_MAGIC;
#else
    *res = size;
    (void)file; (void)line;
#endif
    return res + 1;
}

void *
slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
{
    alloc_header_t *orig;
    void *newblock;
    size_t osize;

    if (!ptr)
        return slab_malloc(file, line, size);

    verify(ptr);
    orig = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG
    osize = orig->size;
#else
    osize = *orig;
#endif
    if (osize >= size)
        return ptr;
    newblock = slab_malloc(file, line, size);
    memcpy(newblock, ptr, osize);
    slab_free(file, line, ptr);
    return newblock;
}

char *
slab_strdup(const char *file, unsigned int line, const char *src)
{
    char *target;
    size_t len;

    len = strlen(src) + 1;
    target = slab_malloc(file, line, len);
    memcpy(target, src, len);
    return target;
}
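
#if 0
/* Minimal usage sketch (illustrative only, excluded from the build):
 * callers normally reach this allocator through the project's malloc
 * wrapper macros, but the raw entry points can be exercised directly. */
static void
slab_example(void)
{
    char *buf = slab_malloc(__FILE__, __LINE__, 64);  /* returned zero-filled */
    char *dup = slab_strdup(__FILE__, __LINE__, "x3");
    buf = slab_realloc(__FILE__, __LINE__, buf, 128); /* grows, copies, frees old */
    slab_free(__FILE__, __LINE__, buf);
    slab_free(__FILE__, __LINE__, dup);
}
#endif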

void
slab_free(const char *file, unsigned int line, void *ptr)
{
    alloc_header_t *hdr;
    size_t user, real;

    if (!ptr)
        return;
    verify(ptr);
    hdr = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG
    hdr->file_id = get_file_id(file);
    hdr->line = line;
    hdr->magic = FREE_MAGIC;
    user = hdr->size;
#else
    user = *hdr;
    (void)file; (void)line;
#endif
    real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        memset(hdr + 1, 0xde, real - sizeof(*hdr));
        slab_unalloc(hdr, real);
        slab_alloc_count--;
        slab_alloc_size -= user;
    } else {
        munmap(hdr, slab_round_up(real));
        big_alloc_count--;
        big_alloc_size -= user;
    }
}
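
/* Note: the 0xde fill above poisons freed small blocks, so a
 * use-after-free read tends to surface as values like
 * 0xdededededededede, which stand out in a debugger (illustrative). */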

/* Undefine the verify macro in case we're not debugging. */
#undef verify
void
verify(const void *ptr)
{
    alloc_header_t *hdr;
    size_t real;

    if (!ptr)
        return;

    hdr = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG
    real = hdr->size + sizeof(*hdr);
    assert(hdr->file_id < file_ids_used);
    assert(hdr->magic == ALLOC_MAGIC);
#else
    real = *hdr + sizeof(*hdr);
#endif
    real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);

    if (real >= SMALL_CUTOFF)
        assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
    else {
        struct slab *slab;
        size_t expected;

        /* Mirror the SLAB_MIN clamp in slabset_create(); real is
         * already rounded to SLAB_GRAIN above. */
        expected = (real < SLAB_MIN) ? SLAB_MIN : real;
        slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
        assert(slab->parent->size == expected);
    }
}
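
#if 0
/* Debugging sketch (illustrative only, excluded from the build): with
 * SLAB_DEBUG enabled, a double free is typically caught either by the
 * page being unmapped out from under the stale pointer (SLAB_RESERVE)
 * or by verify() seeing that hdr->magic is no longer ALLOC_MAGIC,
 * since slab_free() rewrites it to FREE_MAGIC. */
static void
slab_double_free_example(void)
{
    void *p = slab_malloc(__FILE__, __LINE__, 32);
    slab_free(__FILE__, __LINE__, p);
    slab_free(__FILE__, __LINE__, p); /* faults or fails the magic assert */
}
#endif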