]> jfr.im git - irc/evilnet/x3.git/blame - src/alloc-slab.c
more srvx->x3 fixes. cvs update -dP to catch renamed files.
[irc/evilnet/x3.git] / src / alloc-slab.c
CommitLineData
0d16e639 1/* alloc-slab.c - Slab debugging allocator
2 * Copyright 2005 srvx Development Team
3 *
83ff05c3 4 * This file is part of x3.
0d16e639 5 *
 6 * x3 is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include "common.h"
18#include "log.h"
19
20#if defined(HAVE_SYS_MMAN_H)
21# include <sys/mman.h>
22#endif
23
24#if !defined(HAVE_MMAP)
25# error The slab allocator requires that your system have the mmap() system call.
26#endif
/* Set to 1 to record file/line/magic metadata in every allocation header
 * (enables the extra checks in verify()).  The scrape contained two
 * conflicting definitions; debugging defaults to off. */
#define SLAB_DEBUG 0
/* Number of spare slab pages to keep mapped once a slab page is released;
 * 0 would disable the reserve and free pages immediately. */
#define SLAB_RESERVE 1024
30
31#if SLAB_DEBUG
32#define FREE_MAGIC 0xfc1d
33#define ALLOC_MAGIC 0x1a
34#define FREE_MAGIC 0xcf
35
36 unsigned int file_id : 8;
37 unsigned int size : 24;
38 unsigned int magic : 8;
39 unsigned int file_id : 8;
40 unsigned int magic : 16;
41};
42
/* Reverse map: file id -> file name, for diagnostics. */
static const char *file_ids[256];
/* Forward map: sorted-by-name entries so get_file_id() can bsearch. */
static struct file_id_entry {
    const char *name;           /* file name as passed to slab_malloc() */
    unsigned int id : 8;        /* matching index into file_ids[] */
} file_id_map[256];
unsigned int file_ids_used;     /* number of distinct file names seen */
49
/* qsort()/bsearch() comparator: each argument points at a (const char *),
 * so dereference once and compare the underlying strings. */
static int
file_id_cmp(const void *left, const void *right)
{
    const char *const *ls = left;
    const char *const *rs = right;

    return strcmp(*ls, *rs);
}
55
/* Return the small integer id for source file name `fname`, assigning a
 * new id (and registering it in both maps) on first sight.
 * Note: entries are compared by string content, so two different pointers
 * to equal names share one id. */
static unsigned int
get_file_id(const char *fname)
{
    struct file_id_entry *entry;

    /* file_id_map is kept sorted by name, so a binary search suffices. */
    entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    if (entry)
        return entry->id;
    /* Not seen before: append, then re-sort to restore bsearch's invariant.
     * The id is captured before the qsort moves the entry around. */
    entry = file_id_map + file_ids_used;
    file_ids[file_ids_used] = fname;
    entry->name = fname;
    entry->id = file_ids_used;
    qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    return file_ids_used - 1;
}
71
typedef struct alloc_header alloc_header_t;

#else

/* Without SLAB_DEBUG, the only per-allocation bookkeeping kept in front
 * of the user data is the requested size. */
typedef size_t alloc_header_t;

#endif
79
/* Bookkeeping for one page-sized slab.  The struct itself is stored in
 * the final sizeof(struct slab) bytes of the mapped page; `base` points
 * back at the start of that page. */
struct slab {
    struct slabset *parent;     /* slabset this slab currently serves */
    struct slab *prev;          /* doubly-linked list of sibling slabs */
    struct slab *next;
    void *base;                 /* start of the mapped page */
    void **free;                /* head of intrusive free-item list (each
                                 * free item stores a pointer to the next) */
    unsigned int used;          /* items currently handed out from this slab */
};
88
89 struct slabset *next;
90 struct slab *child;
91 size_t nslabs;
92 size_t nallocs;
93 size_t size;
94 size_t items_per_slab;
95};
96
/* Smallest slab element: must hold a free-list pointer plus padding. */
#define SLAB_MIN (2 * sizeof(void*))
/* Granularity that requested sizes are rounded up to. */
#define SLAB_GRAIN sizeof(void*)
/* Alignment applied to per-item step inside a slab.
 * NOTE(review): SLAB_ALIGN was referenced below but never defined in the
 * scraped text; defining it as the grain keeps the arithmetic consistent. */
#define SLAB_ALIGN SLAB_GRAIN
#define SMALL_CUTOFF 580
/* Element size < SMALL_CUTOFF -> use small slabs.
 * Larger elements are allocated directly using mmap().  The largest
 * regularly allocated struct in srvx 1.x is smaller than
 * SMALL_CUTOFF, so there is not much point in coding support for
 * larger slabs. */
unsigned long alloc_size;            /* NOTE(review): legacy aggregate byte
                                      * counter from an older revision; verify
                                      * whether anything still updates it */
static struct slab *free_slabs;      /* NOTE(review): vestigial single-list
                                      * head superseded by head/tail below */
static struct slab *free_slab_head;  /* list of spare mapped pages (reserve) */
static struct slab *free_slab_tail;
unsigned long free_slab_count;       /* pages currently on the reserve list */
unsigned long big_alloc_count;       /* live allocations served straight by mmap */
unsigned long big_alloc_size;        /* user bytes in those big allocations */
unsigned long slab_count;            /* pages mapped for slab use (incl. reserve) */
unsigned long slab_alloc_count;      /* live small (slab-served) allocations */
unsigned long slab_alloc_size;       /* user bytes in small allocations */
117
/* Normalize the anonymous-mapping flag name across platforms; a value of
 * 0 means "not available" and selects the /dev/zero fallback in slab_map(). */
#if defined(MAP_ANON)
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#else
# define MAP_ANON 0
#endif
124
/* Return the system page size, caching it in a static after the first
 * successful lookup.  The || chain tries each available API in turn and
 * stops at the first nonzero answer. */
static size_t
slab_pagesize(void)
{
    static size_t pagesize;
    if (pagesize
#if defined(HAVE_GETPAGESIZE)
        || (pagesize = getpagesize())
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
        || (pagesize = sysconf(_SC_PAGESIZE))
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
        || (pagesize = sysconf(_SC_PAGE_SIZE))
#endif
        ) return pagesize;
    /* No API worked: abort in debug builds, guess 4 KiB otherwise. */
    assert(0 && "unable to find system page size");
    return pagesize = 4096;
}
143
144static size_t
145slab_round_up(size_t size)
146{
147 return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
148}
149
150static void *
151slab_map(size_t length)
152{
153 static int mmap_fd = -1;
154 void *res;
155
156#if ! MAP_ANON
157 if (mmap_fd < 0) {
158 mmap_fd = open("/dev/zero", 0);
159 if (mmap_fd < 0)
160 log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno()));
161 }
162#endif
163 res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
164 if (res == MAP_FAILED)
165 log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
166 return res;
167}
168
169static void *slab_alloc(struct slabset *sset);
170static void slab_unalloc(void *ptr, size_t size);
171
172static struct slabset *
173slabset_create(size_t size)
174{
175 unsigned int idx;
176
177 size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
178 idx = size / SLAB_GRAIN;
179 little_slabs[idx]->items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
180 if (!little_slabs[idx].size) {
181 little_slabs[idx].size = size;
182 little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
183 return little_slabs[idx];
184 return &little_slabs[idx];
185}
186
187static void *
188slab_alloc(struct slabset *sset)
189{
190 struct slab *slab;
191 void **item;
192
193 if (!sset->child || !sset->child->free) {
194 unsigned int ii, step;
195
196 free_slabs = slab->next;
197 if (free_slab_head) {
198 slab = free_slab_head;
199 if (!(free_slab_head = slab->next))
200 free_slab_tail = NULL;
201 } else {
202 item = slab_map(slab_pagesize());
203 slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
204 slab->base = item;
205 slab_count++;
206 }
207
208 /* Populate free list. */
209 step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
210 for (ii = 1, item = slab->free = slab->base;
211 ii < sset->items_per_slab;
212 ++ii, item = (*item = (char*)item + step));
213 *item = NULL;
214
215 /* Link to parent slabset. */
216 if ((slab->prev = sset->child)) {
217 slab->prev = sset->child;
218 if (slab->prev) {
219 slab->next = slab->prev->next;
220 slab->prev->next = slab;
221 if (slab->next)
222 }
223 } else
224 slab->next = NULL;
225 assert(!slab->next || slab == slab->next->prev);
226 assert(!slab->prev || slab == slab->prev->next);
227 sset->child = slab;
228 /* log_module(MAIN_LOG, LOG_DEBUG, "Allocated new %u-slab %p.", sset->size, slab); */
229 }
230
231 slab = sset->child;
232 item = slab->free;
233 assert(((unsigned long)item & (slab_pagesize() - 1))
234 <= (slab_pagesize() - sizeof(*slab) - sset->size));
235 if (slab->used++ == sset->items_per_slab) {
236 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p is empty.", sset->size, item); */
237 if (sset->child != slab) {
238 /* Unlink slab and reinsert before sset->child. */
239 if (slab->prev)
240 slab->prev->next = slab->next;
241 if (slab->next)
242 slab->next->prev = slab->prev;
243 if ((slab->prev = sset->child->prev))
244 slab->prev->next = slab;
245 if ((slab->next = sset->child))
246 slab->next->prev = slab;
247 assert(!slab->next || slab == slab->next->prev);
248 assert(!slab->prev || slab == slab->prev->next);
249 } else if (slab->next) {
250 /* Advance sset->child to next pointer. */
251 sset->child = slab->next;
252 }
253 }
254 sset->nallocs++;
255 memset(item, 0, sset->size);
256 return item;
257}
258
259static void
260slab_unalloc(void *ptr, size_t size)
261 void **item;
262 struct slab *slab, *new_next;
263 item = ptr;
264 assert(size < SMALL_CUTOFF);
265 slab->free = item;
266 *(void**)ptr = slab->free;
267 slab->free = ptr;
268 slab->parent->nallocs--;
269
270 if (slab->used-- == slab->parent->items_per_slab
271 && slab->parent->child != slab) {
272 /* Unlink from current position, relink as parent's first child. */
273 if (new_next) {
274 assert(new_next != NULL);
275 if (slab->prev)
276 slab->prev->next = slab->next;
277 if (slab->next)
278 slab->next->prev = slab->prev;
279 if ((slab->prev = new_next->prev))
280 slab->next->prev = slab;
281 slab->next = new_next;
282 new_next->prev = slab;
283 slab->parent->child = slab;
284 assert(!slab->next || slab == slab->next->prev);
285 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became partial.", slab->parent->size, slab); */
286 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became full.", slab->parent->size, slab); */
287 /* Unlink slab from its parent. */
288 slab_count--;
289 if (slab->prev)
290 slab->prev->next = slab->next;
291 if (slab->next)
292 slab->next->prev = slab->prev;
293 new_next = slab->next ? slab->next : slab->prev;
294 if (slab == slab->parent->child)
295 slab->parent->child = new_next;
296 if (new_next) {
297 assert(!new_next->next || new_next == new_next->next->prev);
298 assert(!new_next->prev || new_next == new_next->prev->next);
299 }
300
301 slab_count++;
302 if (!free_slab_count) {
303 /* Make sure we have enough free slab pages. */
304 while (free_slab_count < SLAB_RESERVE) {
305 struct slab *tslab;
306 void *item;
307
308 item = slab_map(slab_pagesize());
309 tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
310 tslab->base = item;
311 tslab->prev = free_slab_tail;
312 free_slab_tail = tslab;
313 if (!free_slab_head)
314 free_slab_head = tslab;
315 free_slab_count++;
316 slab_count++;
317 }
318 }
319
320 /* Unmap old slab, so accesses to stale pointers will fault. */
321 munmap(slab->base, slab_pagesize());
322 slab_count--;
323#else
324 slab->prev = NULL;
325 free_slabs = slab;
326 slab->prev = free_slab_tail;
327 slab->next = NULL;
328 free_slab_tail = slab;
329 if (!free_slab_head)
330 free_slab_head = slab;
331 free_slab_count++;
332#endif
333 }
334}
335
336slab_malloc(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), size_t size)
337slab_malloc(const char *file, unsigned int line, size_t size)
338 size_t real, *res;
339 alloc_header_t *res;
340 size_t real;
341 real = size + sizeof(size_t);
342 assert(size < 1 << 24);
343 if (real < SMALL_CUTOFF)
344 if (real < SMALL_CUTOFF) {
345 else
346 slab_alloc_count++;
347 slab_alloc_size += size;
348 } else {
349 res = slab_map(slab_round_up(real));
350 big_alloc_count++;
351 big_alloc_size += size;
352 }
353#if SLAB_DEBUG
354 res->file_id = get_file_id(file);
355 res->size = size;
356 res->line = line;
357 res->magic = ALLOC_MAGIC;
358#else
359 *res = size;
360 (void)file; (void)line;
361 alloc_size += size;
362 return res + 1;
363}
364
365void *
366slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
367 size_t orig, *newblock;
368 alloc_header_t *orig;
369 void *newblock;
370 size_t osize;
371
372 if (!ptr)
373 return slab_malloc(file, line, size);
374
375 if (orig >= size)
376 orig = (alloc_header_t*)ptr - 1;
377#if SLAB_DEBUG
378 osize = orig->size;
379#else
380 osize = *orig;
381#endif
382 if (osize >= size)
383 return ptr;
384 memcpy(newblock, ptr, size);
385 memcpy(newblock, ptr, osize);
386 slab_free(file, line, ptr);
387 return newblock;
388}
389
/* Duplicate `src` (including its terminating NUL) into slab-allocated
 * memory, attributing the allocation to file/line. */
char *
slab_strdup(const char *file, unsigned int line, const char *src)
{
    size_t bytes;
    char *copy;

    bytes = strlen(src) + 1;
    copy = slab_malloc(file, line, bytes);
    return memcpy(copy, src, bytes);
}
401
402slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)
403slab_free(const char *file, unsigned int line, void *ptr)
404 size_t real, *size;
405 size_t real;
406 size_t user, real;
407
408 if (!ptr)
409 return;
410 real = *size + sizeof(size_t);
411 hdr = (alloc_header_t*)ptr - 1;
412#if SLAB_DEBUG
413 hdr->file_id = get_file_id(file);
414 hdr->line = line;
415 real = hdr->size + sizeof(*hdr);
416 user = hdr->size;
417 real = *hdr + sizeof(*hdr);
418 user = *hdr;
419 (void)file; (void)line;
420 real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
421 real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
422 slab_unalloc(size, real);
423 memset(hdr + 1, 0xde, real - sizeof(*hdr));
424 if (real < SMALL_CUTOFF) {
425 slab_unalloc(hdr, real);
426 munmap(size, slab_round_up(real));
427 slab_alloc_size -= user;
428 slab_alloc_count--;
429 slab_alloc_size -= real - sizeof(*hdr);
430 } else {
431 alloc_size -= real - sizeof(*hdr);
432 big_alloc_size -= user;
433 big_alloc_count--;
434 big_alloc_size -= real - sizeof(*hdr);
435 }
436}
437
438void
439verify(const void *ptr)
440 size_t size;
441 alloc_header_t *hdr;
442 size_t real;
443
444 if (!ptr)
445 assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(size_t));
446
447 hdr = (alloc_header_t*)ptr - 1;
448#if SLAB_DEBUG
449 real = hdr->size + sizeof(*hdr);
450 assert(hdr->file_id < file_ids_used);
451 assert(hdr->magic == ALLOC_MAGIC);
452#else
453 real = *hdr + sizeof(*hdr);
454#endif
455 real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
456
457 if (real >= SMALL_CUTOFF)
458 assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
459 else {
460 struct slab *slab;
461 size_t expected;
462 expected = (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
463 expected = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
464 slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
465 assert(slab->parent->size == expected);
466 }
467}
468