author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-24 10:33:43 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-24 10:33:43 -0400
commit    cedfb2db7b2d6b2c780999536aa1e2650fadee36
tree      d36479ce5997bd6b0d66764620d9139eda263c61 /include/linux
parent    85f9642e3199271614210b8feebe18b7652894b6
parent    bb4f6b0cd7524ad7d56709723eaf8a7bf5a87b57
Merge branch 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
slub: Use alloc_pages_exact_node() for page allocation
slub: __kmalloc_node_track_caller should trace kmalloc_large_node case
slub: Potential stack overflow
crypto: Use ARCH_KMALLOC_MINALIGN for CRYPTO_MINALIGN now that it's exposed
mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slub_def.h>
mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slob_def.h>
mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slab_def.h>
slab: Fix missing DEBUG_SLAB last user
slab: add memory hotplug support
slab: Fix continuation lines
Diffstat (limited to 'include/linux')

-rw-r--r--  include/linux/crypto.h   |  6 ------
-rw-r--r--  include/linux/slab_def.h | 24 ++++++++++++++++++++++++
-rw-r--r--  include/linux/slob_def.h |  8 ++++++++
-rw-r--r--  include/linux/slub_def.h |  8 ++++++++

4 files changed, 40 insertions(+), 6 deletions(-)
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 24d2e30f1b46..a6a7a1c83f54 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -99,13 +99,7 @@
  * as arm where pointers are 32-bit aligned but there are data types such as
  * u64 which require 64-bit alignment.
  */
-#if defined(ARCH_KMALLOC_MINALIGN)
 #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
-#elif defined(ARCH_SLAB_MINALIGN)
-#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
-#else
-#define CRYPTO_MINALIGN __alignof__(unsigned long long)
-#endif
 
 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
 
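The consumer of this macro is CRYPTO_MINALIGN_ATTR, which crypto code attaches to context buffers so hardware can rely on kmalloc-grade alignment. A minimal sketch of that usage; the context struct below is hypothetical, only the attribute comes from crypto.h:

#include <linux/crypto.h>

/*
 * Hypothetical cipher context. CRYPTO_MINALIGN_ATTR expands to
 * __attribute__((__aligned__(CRYPTO_MINALIGN))), which after this merge
 * is __aligned__(ARCH_KMALLOC_MINALIGN) - the same guarantee kmalloc()
 * itself gives - so a DMA engine may consume the key schedule in place.
 */
struct my_cipher_ctx {
	u32 rounds;
	u64 key_schedule[16] CRYPTO_MINALIGN_ATTR;
};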
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index ca6b2b317991..1812dac8c496 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,30 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+#ifndef ARCH_KMALLOC_MINALIGN
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
+ */
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
+ * some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif
+
 /*
  * struct kmem_cache
  *
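These #ifndef fallbacks only fire when an architecture has not already set the macros before <linux/slab_def.h> is included. An arch that does DMA into kmalloc() buffers overrides the default in its own headers; a sketch of such an override, where the arch fragment and its values are hypothetical (only the ARCH_KMALLOC_MINALIGN name comes from the patch):

/*
 * Hypothetical <asm/cache.h> fragment. Because it is seen before
 * <linux/slab_def.h>, the #ifndef default above never fires and every
 * kmalloc() object ends up cacheline-aligned, keeping DMA buffers from
 * sharing a cacheline with unrelated data.
 */
#define L1_CACHE_SHIFT		5
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES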
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d006..62667f72c2ef 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,6 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
+#endif
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
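Note that SLOB's default is weaker than SLAB's (__alignof__(unsigned long) rather than unsigned long long), but whichever allocator is configured, ARCH_KMALLOC_MINALIGN is the floor every returned pointer must meet. A minimal sanity-check sketch; the function is illustrative and not part of the patch:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative only: verify a kmalloc() result honours the minimum. */
static void check_kmalloc_minalign(void)
{
	void *p = kmalloc(24, GFP_KERNEL);

	if (!p)
		return;
	/* Mask test works because ARCH_KMALLOC_MINALIGN is a power of two. */
	WARN_ON((unsigned long)p & (ARCH_KMALLOC_MINALIGN - 1));
	kfree(p);
}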
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d4175bac..55695c8d2f8a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
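Why SLAB and SLUB default to __alignof__(unsigned long long) while SLOB settles for unsigned long comes down to ABI alignment rules, which are easy to inspect from userspace. A throwaway gcc sketch (not kernel code):

#include <stdio.h>

int main(void)
{
	/*
	 * On 32-bit arm (EABI) this typically prints 4 then 8: pointers
	 * are 32-bit aligned, yet u64 requires 64-bit alignment - exactly
	 * the situation the crypto.h comment above describes.
	 */
	printf("alignof(unsigned long)      = %zu\n",
	       __alignof__(unsigned long));
	printf("alignof(unsigned long long) = %zu\n",
	       __alignof__(unsigned long long));
	return 0;
}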
