author     Nick Piggin <npiggin@suse.de>                          2007-07-16 02:38:09 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-16 12:05:35 -0400
commit     553948491c18413928b85a9025b92af80e7d61d6 (patch)
tree       fcb6adcb743533c7d6818bbd13511062f6fda737
parent     d87a133fc21d842e3cc285e6bbff727181abec81 (diff)
slob: improved alignment handling
Remove the core slob allocator's minimum alignment restrictions, and instead
introduce the alignment restrictions at the slab API layer. This lets us heed
the ARCH_KMALLOC_MINALIGN/ARCH_SLAB_MINALIGN directives, and also use
__alignof__(unsigned long) for the default alignment (which should allow
relaxed-alignment architectures to take better advantage of SLOB's small
minimum alignment).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
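
The default the patch settles on is GCC's __alignof__(unsigned long). On most
32-bit targets that evaluates to 4 and on 64-bit targets to 8, but
relaxed-alignment architectures such as m68k can report as little as 2, which
is what lets SLOB's 2-byte granularity pay off there. A quick userspace probe
of that value (an illustrative sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* The patch's default minimum alignment for kmalloc/slab objects. */
	printf("__alignof__(unsigned long) = %zu\n",
	       (size_t)__alignof__(unsigned long));
	return 0;
}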
-rw-r--r--  mm/slob.c | 49
1 file changed, 26 insertions(+), 23 deletions(-)
@@ -7,8 +7,8 @@
  *
  * The core of SLOB is a traditional K&R style heap allocator, with
  * support for returning aligned objects. The granularity of this
- * allocator is 4 bytes on 32-bit and 8 bytes on 64-bit, though it
- * could be as low as 2 if the compiler alignment requirements allow.
+ * allocator is as little as 2 bytes, however typically most architectures
+ * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
  *
  * The slob heap is a linked list of pages from __get_free_page, and
  * within each page, there is a singly-linked list of free blocks (slob_t).
@@ -16,7 +16,7 @@
  * first-fit.
  *
  * Above this is an implementation of kmalloc/kfree. Blocks returned
- * from kmalloc are 4-byte aligned and prepended with a 4-byte header.
+ * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * __get_free_pages directly, allocating compound pages so the page order
  * does not have to be separately tracked, and also stores the exact
@@ -45,13 +45,6 @@
 #include <linux/list.h>
 #include <asm/atomic.h>
 
-/* SLOB_MIN_ALIGN == sizeof(long) */
-#if BITS_PER_BYTE == 32
-#define SLOB_MIN_ALIGN 4
-#else
-#define SLOB_MIN_ALIGN 8
-#endif
-
 /*
  * slob_block has a field 'units', which indicates size of block if +ve,
  * or offset of next block if -ve (in SLOB_UNITs).
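
One thing the deletion above quietly fixes: BITS_PER_BYTE is 8, so the test
'#if BITS_PER_BYTE == 32' could never be true and SLOB_MIN_ALIGN always came
out as 8 (BITS_PER_LONG was presumably intended). A userspace sketch of the
dead branch, with BITS_PER_BYTE defined locally for illustration:

#include <stdio.h>

#define BITS_PER_BYTE 8			/* as in the kernel headers */

int main(void)
{
#if BITS_PER_BYTE == 32
	puts("SLOB_MIN_ALIGN would be 4");	/* dead branch, never taken */
#else
	puts("SLOB_MIN_ALIGN was always 8");	/* always taken */
#endif
	return 0;
}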
@@ -60,19 +53,15 @@
  * Those with larger size contain their size in the first SLOB_UNIT of
  * memory, and the offset of the next free block in the second SLOB_UNIT.
  */
-#if PAGE_SIZE <= (32767 * SLOB_MIN_ALIGN)
+#if PAGE_SIZE <= (32767 * 2)
 typedef s16 slobidx_t;
 #else
 typedef s32 slobidx_t;
 #endif
 
-/*
- * Align struct slob_block to long for now, but can some embedded
- * architectures get away with less?
- */
 struct slob_block {
 	slobidx_t units;
-} __attribute__((aligned(SLOB_MIN_ALIGN)));
+};
 typedef struct slob_block slob_t;
 
 /*
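
The slobidx_t sizing rule above can be sanity-checked with a little
arithmetic: once the aligned attribute is gone, a slob_t is just an s16, so a
SLOB unit is 2 bytes and a page of PAGE_SIZE bytes holds PAGE_SIZE / 2 units,
which must fit in s16's positive range. A minimal userspace check, assuming a
hypothetical 4096-byte page (DEMO_PAGE_SIZE is not from the patch):

#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096	/* assumed; the real value is per-arch */
#define DEMO_SLOB_UNIT 2	/* sizeof(slob_t) once it is a bare s16 */

int main(void)
{
	/* 4096 / 2 = 2048 units, comfortably inside s16's 32767 limit,
	 * so slobidx_t can stay 16-bit on common page sizes. */
	assert(DEMO_PAGE_SIZE / DEMO_SLOB_UNIT <= INT16_MAX);
	return 0;
}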
@@ -384,14 +373,25 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
+#endif
+
+
 void *__kmalloc(size_t size, gfp_t gfp)
 {
-	if (size < PAGE_SIZE - SLOB_UNIT) {
-		slob_t *m;
-		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
+	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+
+	if (size < PAGE_SIZE - align) {
+		unsigned int *m;
+		m = slob_alloc(size + align, gfp, align);
 		if (m)
-			m->units = size;
-		return m+1;
+			*m = size;
+		return (void *)m + align;
 	} else {
 		void *ret;
 
@@ -449,8 +449,9 @@ void kfree(const void *block)
 
 	sp = (struct slob_page *)virt_to_page(block);
 	if (slob_page(sp)) {
-		slob_t *m = (slob_t *)block - 1;
-		slob_free(m, m->units + SLOB_UNIT);
+		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int *m = (unsigned int *)(block - align);
+		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
 }
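
The two hunks above have to agree on the header offset: __kmalloc() reserves
one 'align'-sized slot in front of the object for the size header, and
kfree() steps back over it. A minimal userspace sketch of that round trip,
with malloc() standing in for slob_alloc() and invented demo_* names (this is
not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Mirrors the patch's default: align to unsigned long. */
#define DEMO_MINALIGN __alignof__(unsigned long)

static void *demo_kmalloc(size_t size)
{
	/* One aligned slot is reserved in front for the size header. */
	unsigned int *m = malloc(size + DEMO_MINALIGN);

	if (!m)
		return NULL;
	*m = (unsigned int)size;		/* header: the kmalloc size */
	return (char *)m + DEMO_MINALIGN;	/* object begins past it */
}

static void demo_kfree(void *block)
{
	/* Step back over the header, as kfree() does with 'align'. */
	unsigned int *m = (unsigned int *)((char *)block - DEMO_MINALIGN);

	printf("object size was %u bytes\n", *m);
	free(m);	/* frees the whole block, header included */
}

int main(void)
{
	void *p = demo_kmalloc(100);

	demo_kfree(p);	/* prints: object size was 100 bytes */
	return 0;
}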
@@ -499,6 +500,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		/* ignore alignment unless it's forced */
 		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		if (c->align < ARCH_SLAB_MINALIGN)
+			c->align = ARCH_SLAB_MINALIGN;
 		if (c->align < align)
 			c->align = align;
 	} else if (flags & SLAB_PANIC)
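
Finally, the clamp added to kmem_cache_create() is a two-step max: raise the
cache's alignment to the arch floor first, then to any larger caller request.
A small model of that ordering, where DEMO_SLAB_MINALIGN, 'hwcache' and
'requested' are hypothetical stand-ins for ARCH_SLAB_MINALIGN, the
SLAB_HWCACHE_ALIGN-derived value, and the function's align parameter:

#include <stdio.h>

#define DEMO_SLAB_MINALIGN 4	/* assumed arch floor for the example */

/* First raise the alignment to the arch floor, then honor any larger
 * caller-requested alignment, matching the order in the hunk above. */
static int effective_align(int hwcache, int requested)
{
	int a = hwcache;

	if (a < DEMO_SLAB_MINALIGN)
		a = DEMO_SLAB_MINALIGN;
	if (a < requested)
		a = requested;
	return a;
}

int main(void)
{
	printf("%d\n", effective_align(0, 0));	/* -> 4: floor applies  */
	printf("%d\n", effective_align(0, 16));	/* -> 16: request wins  */
	return 0;
}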