/* alloc-slab.c - Slab debugging allocator
 * Copyright 2005 srvx Development Team
 *
 * This file is part of x3.
 *
 * srvx is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
20 #if defined(HAVE_SYS_MMAN_H)
21 # include <sys/mman.h>
24 #if !defined(HAVE_MMAP)
25 # error The slab allocator requires that your system have the mmap() system call.
/* Number of pages to keep mapped in reserve so freed slab pages can be
 * unmapped (making stale pointers fault) without immediately re-mapping. */
#define SLAB_RESERVE 1024

/* Sentinels stored in alloc_header.magic to distinguish live from freed
 * blocks.  The damaged source defined FREE_MAGIC twice (0xcf and 0xfc1d);
 * 0xfc1d is kept because it matches the 16-bit magic field below. */
#define ALLOC_MAGIC 0x1a
#define FREE_MAGIC  0xfc1d

/* Debug header prepended to every allocation.  The source showed two
 * interleaved field layouts; this is the 16-bit-magic variant.
 * NOTE(review): the `line` field was not visible in the damaged source
 * but is written by slab_malloc()'s debug path — confirm against VCS. */
struct alloc_header {
    unsigned int file_id : 8;   /* id from get_file_id(__FILE__) */
    unsigned int size : 24;     /* user-requested size (hence the 1<<24 cap) */
    unsigned int line : 16;     /* source line of the allocation/free */
    unsigned int magic : 16;    /* ALLOC_MAGIC or FREE_MAGIC */
};
/* Debug bookkeeping: maps source file names (the __FILE__ strings passed
 * to the allocation wrappers) to small integers that fit the 8-bit
 * file_id field of struct alloc_header. */
static const char *file_ids[256];           /* names in registration order */
static struct file_id_entry {
    const char *name;        /* file name; first member so file_id_cmp()
                              * can compare entries as (const char **) */
    unsigned int id : 8;     /* index into file_ids[] */
} file_id_map[256];                          /* kept sorted by name */
static unsigned int file_ids_used;           /* count of registered names */
/* qsort()/bsearch() comparator for file_id_map entries: each argument
 * points to a struct whose first member is the file name, so it may be
 * treated as a (const char **). */
static int
file_id_cmp(const void *a_, const void *b_)
{
    return strcmp(*(const char**)a_, *(const char**)b_);
}
57 get_file_id(const char *fname
)
59 struct file_id_entry
*entry
;
61 entry
= bsearch(&fname
, file_id_map
, file_ids_used
, sizeof(file_id_map
[0]), file_id_cmp
);
64 entry
= file_id_map
+ file_ids_used
;
65 file_ids
[file_ids_used
] = fname
;
67 entry
->id
= file_ids_used
;
68 qsort(file_id_map
, ++file_ids_used
, sizeof(file_id_map
[0]), file_id_cmp
);
69 return file_ids_used
- 1;
/* The damaged source carried both typedefs back to back; they are the
 * two arms of the debug/non-debug build.  With debugging, each block
 * carries a full header; without, only its size (a size_t). */
#if SLAB_DEBUG
typedef struct alloc_header alloc_header_t;
#else
typedef size_t alloc_header_t;
#endif
/* One slab: a single mmap()ed page holding an array of equal-sized
 * items, with this header at the *end* of the page (the allocator
 * recovers it by rounding a user pointer up to the page boundary).
 * NOTE(review): most fields were missing from the damaged source and
 * are reconstructed from their uses in slab_alloc()/slab_unalloc() —
 * confirm against VCS. */
struct slab {
    struct slabset *parent;  /* slabset this page belongs to */
    struct slab *prev;       /* doubly linked list of sibling slabs */
    struct slab *next;
    void *base;              /* start of the page (first item) */
    void **free;             /* head of intra-slab free list */
    unsigned int used;       /* items currently handed out */
};

/* All slabs serving one rounded allocation size. */
struct slabset {
    struct slab *child;        /* preferred slab to allocate from */
    size_t nallocs;            /* live allocations of this size */
    size_t size;               /* rounded item size in bytes */
    size_t items_per_slab;     /* items that fit one page */
};
/* Minimum item size: an item must at least hold a free-list pointer
 * plus one more word. */
#define SLAB_MIN     (2 * sizeof(void*))
/* Item sizes are rounded up to this granularity. */
#define SLAB_GRAIN   sizeof(void*)
/* Alignment of items within a slab.  Not visible in the damaged
 * source but used throughout; defined as the grain. */
#define SLAB_ALIGN   SLAB_GRAIN
/* The damaged source defined SMALL_CUTOFF twice (582 then 580);
 * 580 is kept as the later definition. */
#define SMALL_CUTOFF 580
/* Element size < SMALL_CUTOFF -> use small slabs.
 * Larger elements are allocated directly using mmap().  The largest
 * regularly allocated struct in srvx 1.x is smaller than
 * SMALL_CUTOFF, so there is not much point in coding support for
 * larger slabs. */
107 unsigned long alloc_size
;
108 static struct slab
*free_slabs
;
109 static struct slab
*free_slab_head
;
110 static struct slab
*free_slab_tail
;
111 unsigned long free_slab_count
;
112 unsigned long big_alloc_count
;
113 unsigned long big_alloc_size
;
114 unsigned long slab_count
;
115 unsigned long slab_alloc_count
;
116 unsigned long slab_alloc_size
;
/* Portability shim: BSD spells anonymous mappings MAP_ANON, Linux
 * spells it MAP_ANONYMOUS; fall back to 0 (use /dev/zero in
 * slab_map() instead).  The #else/#endif were lost in the damage. */
#if defined(MAP_ANON)
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#else
# define MAP_ANON 0
#endif
/* Return the system page size, caching it in a static after the first
 * call.  Tries getpagesize() then the sysconf() spellings, whichever
 * configure detected; falls back to 4096 with an assertion failure in
 * debug builds. */
static size_t
slab_pagesize(void)
{
    static size_t pagesize;

    if (pagesize
#if defined(HAVE_GETPAGESIZE)
        || (pagesize = getpagesize())
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
        || (pagesize = sysconf(_SC_PAGESIZE))
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
        || (pagesize = sysconf(_SC_PAGE_SIZE))
#endif
        )
        return pagesize;
    assert(0 && "unable to find system page size");
    return pagesize = 4096;
}
/* Round `size` up to a whole number of pages.  Relies on the page size
 * being a power of two. */
static size_t
slab_round_up(size_t size)
{
    return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
}
151 slab_map(size_t length
)
153 static int mmap_fd
= -1;
158 mmap_fd
= open("/dev/zero", 0);
160 log_module(MAIN_LOG
, LOG_FATAL
, "Unable to open /dev/zero for mmap: %s", strerror(errno()));
163 res
= mmap(0, length
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANON
, mmap_fd
, 0);
164 if (res
== MAP_FAILED
)
165 log_module(MAIN_LOG
, LOG_FATAL
, "Unable to mmap %lu bytes (%s).", (unsigned long)length
, strerror(errno
));
/* Forward declarations: slabset_create() needs slab_alloc(), and
 * slab_free() needs slab_unalloc(), both defined below. */
static void *slab_alloc(struct slabset *sset);
static void slab_unalloc(void *ptr, size_t size);
172 static struct slabset
*
173 slabset_create(size_t size
)
177 size
= (size
< SLAB_MIN
) ? SLAB_MIN
: (size
+ SLAB_GRAIN
- 1) & ~(SLAB_GRAIN
- 1);
178 idx
= size
/ SLAB_GRAIN
;
179 little_slabs
[idx
]->items_per_slab
= (slab_pagesize() - sizeof(struct slab
)) / ((size
+ SLAB_ALIGN
- 1) & ~(SLAB_ALIGN
- 1));
180 if (!little_slabs
[idx
].size
) {
181 little_slabs
[idx
].size
= size
;
182 little_slabs
[idx
].items_per_slab
= (slab_pagesize() - sizeof(struct slab
)) / ((size
+ SLAB_ALIGN
- 1) & ~(SLAB_ALIGN
- 1));
183 return little_slabs
[idx
];
184 return &little_slabs
[idx
];
188 slab_alloc(struct slabset
*sset
)
193 if (!sset
->child
|| !sset
->child
->free
) {
194 unsigned int ii
, step
;
196 free_slabs
= slab
->next
;
197 if (free_slab_head
) {
198 slab
= free_slab_head
;
199 if (!(free_slab_head
= slab
->next
))
200 free_slab_tail
= NULL
;
202 item
= slab_map(slab_pagesize());
203 slab
= (struct slab
*)((char*)item
+ slab_pagesize() - sizeof(*slab
));
208 /* Populate free list. */
209 step
= (sset
->size
+ SLAB_ALIGN
- 1) & ~(SLAB_ALIGN
- 1);
210 for (ii
= 1, item
= slab
->free
= slab
->base
;
211 ii
< sset
->items_per_slab
;
212 ++ii
, item
= (*item
= (char*)item
+ step
));
215 /* Link to parent slabset. */
216 if ((slab
->prev
= sset
->child
)) {
217 slab
->prev
= sset
->child
;
219 slab
->next
= slab
->prev
->next
;
220 slab
->prev
->next
= slab
;
225 assert(!slab
->next
|| slab
== slab
->next
->prev
);
226 assert(!slab
->prev
|| slab
== slab
->prev
->next
);
228 /* log_module(MAIN_LOG, LOG_DEBUG, "Allocated new %u-slab %p.", sset->size, slab); */
233 assert(((unsigned long)item
& (slab_pagesize() - 1))
234 <= (slab_pagesize() - sizeof(*slab
) - sset
->size
));
235 if (slab
->used
++ == sset
->items_per_slab
) {
236 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p is empty.", sset->size, item); */
237 if (sset
->child
!= slab
) {
238 /* Unlink slab and reinsert before sset->child. */
240 slab
->prev
->next
= slab
->next
;
242 slab
->next
->prev
= slab
->prev
;
243 if ((slab
->prev
= sset
->child
->prev
))
244 slab
->prev
->next
= slab
;
245 if ((slab
->next
= sset
->child
))
246 slab
->next
->prev
= slab
;
247 assert(!slab
->next
|| slab
== slab
->next
->prev
);
248 assert(!slab
->prev
|| slab
== slab
->prev
->next
);
249 } else if (slab
->next
) {
250 /* Advance sset->child to next pointer. */
251 sset
->child
= slab
->next
;
255 memset(item
, 0, sset
->size
);
260 slab_unalloc(void *ptr
, size_t size
)
262 struct slab
*slab
, *new_next
;
264 assert(size
< SMALL_CUTOFF
);
266 *(void**)ptr
= slab
->free
;
268 slab
->parent
->nallocs
--;
270 if (slab
->used
-- == slab
->parent
->items_per_slab
271 && slab
->parent
->child
!= slab
) {
272 /* Unlink from current position, relink as parent's first child. */
274 assert(new_next
!= NULL
);
276 slab
->prev
->next
= slab
->next
;
278 slab
->next
->prev
= slab
->prev
;
279 if ((slab
->prev
= new_next
->prev
))
280 slab
->next
->prev
= slab
;
281 slab
->next
= new_next
;
282 new_next
->prev
= slab
;
283 slab
->parent
->child
= slab
;
284 assert(!slab
->next
|| slab
== slab
->next
->prev
);
285 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became partial.", slab->parent->size, slab); */
286 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became full.", slab->parent->size, slab); */
287 /* Unlink slab from its parent. */
290 slab
->prev
->next
= slab
->next
;
292 slab
->next
->prev
= slab
->prev
;
293 new_next
= slab
->next
? slab
->next
: slab
->prev
;
294 if (slab
== slab
->parent
->child
)
295 slab
->parent
->child
= new_next
;
297 assert(!new_next
->next
|| new_next
== new_next
->next
->prev
);
298 assert(!new_next
->prev
|| new_next
== new_next
->prev
->next
);
302 if (!free_slab_count
) {
303 /* Make sure we have enough free slab pages. */
304 while (free_slab_count
< SLAB_RESERVE
) {
308 item
= slab_map(slab_pagesize());
309 tslab
= (struct slab
*)((char*)item
+ slab_pagesize() - sizeof(*slab
));
311 tslab
->prev
= free_slab_tail
;
312 free_slab_tail
= tslab
;
314 free_slab_head
= tslab
;
320 /* Unmap old slab, so accesses to stale pointers will fault. */
321 munmap(slab
->base
, slab_pagesize());
326 slab
->prev
= free_slab_tail
;
328 free_slab_tail
= slab
;
330 free_slab_head
= slab
;
336 slab_malloc(UNUSED_ARG(const char *file
), UNUSED_ARG(unsigned int line
), size_t size
)
337 slab_malloc(const char *file
, unsigned int line
, size_t size
)
341 real
= size
+ sizeof(size_t);
342 assert(size
< 1 << 24);
343 if (real
< SMALL_CUTOFF
)
344 if (real
< SMALL_CUTOFF
) {
347 slab_alloc_size
+= size
;
349 res
= slab_map(slab_round_up(real
));
351 big_alloc_size
+= size
;
354 res
->file_id
= get_file_id(file
);
357 res
->magic
= ALLOC_MAGIC
;
360 (void)file
; (void)line
;
366 slab_realloc(const char *file
, unsigned int line
, void *ptr
, size_t size
)
367 size_t orig
, *newblock
;
368 alloc_header_t
*orig
;
373 return slab_malloc(file
, line
, size
);
376 orig
= (alloc_header_t
*)ptr
- 1;
384 memcpy(newblock
, ptr
, size
);
385 memcpy(newblock
, ptr
, osize
);
386 slab_free(file
, line
, ptr
);
/* strdup() on top of slab_malloc(): copy `src` (including its NUL
 * terminator) into a freshly allocated block attributed to file:line. */
char *
slab_strdup(const char *file, unsigned int line, const char *src)
{
    char *target;
    size_t len;

    len = strlen(src) + 1;
    target = slab_malloc(file, line, len);
    memcpy(target, src, len);
    return target;
}
402 slab_free(UNUSED_ARG(const char *file
), UNUSED_ARG(unsigned int line
), void *ptr
)
403 slab_free(const char *file
, unsigned int line
, void *ptr
)
410 real
= *size
+ sizeof(size_t);
411 hdr
= (alloc_header_t
*)ptr
- 1;
413 hdr
->file_id
= get_file_id(file
);
415 real
= hdr
->size
+ sizeof(*hdr
);
417 real
= *hdr
+ sizeof(*hdr
);
419 (void)file
; (void)line
;
420 real
= (real
+ SLAB_GRAIN
- 1) & ~(SLAB_GRAIN
- 1);
421 real
= (user
+ sizeof(*hdr
) + SLAB_GRAIN
- 1) & ~(SLAB_GRAIN
- 1);
422 slab_unalloc(size
, real
);
423 memset(hdr
+ 1, 0xde, real
- sizeof(*hdr
));
424 if (real
< SMALL_CUTOFF
) {
425 slab_unalloc(hdr
, real
);
426 munmap(size
, slab_round_up(real
));
427 slab_alloc_size
-= user
;
429 slab_alloc_size
-= real
- sizeof(*hdr
);
431 alloc_size
-= real
- sizeof(*hdr
);
432 big_alloc_size
-= user
;
434 big_alloc_size
-= real
- sizeof(*hdr
);
439 verify(const void *ptr
)
445 assert(((unsigned long)ptr
& (slab_pagesize() - 1)) == sizeof(size_t));
447 hdr
= (alloc_header_t
*)ptr
- 1;
449 real
= hdr
->size
+ sizeof(*hdr
);
450 assert(hdr
->file_id
< file_ids_used
);
451 assert(hdr
->magic
== ALLOC_MAGIC
);
453 real
= *hdr
+ sizeof(*hdr
);
455 real
= (real
+ SLAB_GRAIN
- 1) & ~(SLAB_GRAIN
- 1);
457 if (real
>= SMALL_CUTOFF
)
458 assert(((unsigned long)ptr
& (slab_pagesize() - 1)) == sizeof(*hdr
));
462 expected
= (size
+ SLAB_GRAIN
- 1) & ~(SLAB_GRAIN
- 1);
463 expected
= (real
+ SLAB_GRAIN
- 1) & ~(SLAB_GRAIN
- 1);
464 slab
= (struct slab
*)((((unsigned long)ptr
| (slab_pagesize() - 1)) + 1) - sizeof(*slab
));
465 assert(slab
->parent
->size
== expected
);