author		Christoph Lameter <clameter@sgi.com>			2007-05-17 01:11:01 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-17 08:23:04 -0400
commit		0aa817f078b655d0ae36669169d73a5c8a388016 (patch)
tree		140acc4d0dc992b4d20394f6a6412a7c1bb3a306 /include
parent		3ec0974210fe1b7c0618ad6e39a882a4237d7de2 (diff)
Slab allocators: define common size limitations
Currently we have a maze of configuration variables that determine the
maximum slab size. Worst of all, it seems to vary between SLAB and SLUB.
So define a common maximum size for kmalloc. For convenience's sake we use
the maximum size ever supported, which is 32 MB. We reduce that limit where
MAX_ORDER does not allow such large allocations.
For many architectures this patch will have the effect of adding large
kmalloc sizes. x86_64 adds 5 new kmalloc sizes, so a small amount of
memory will be needed for these caches (contemporary SLAB has dynamically
sizeable node and cpu structures, so the waste is less than in the past).
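As an illustration of where those five new sizes come from (a user-space sketch, not part of the patch; PAGE_SHIFT = 12 and MAX_ORDER = 11 are assumed as typical x86_64 values of that era, and the macro body mirrors what the patch adds to include/linux/slab.h):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KB pages */
#define MAX_ORDER  11	/* assumed: default page allocator limit */

/* Same derivation the patch introduces in include/linux/slab.h. */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT) : 25)
#define KMALLOC_MAX_SIZE   (1UL << KMALLOC_SHIFT_HIGH)

int main(void)
{
	/* 11 + 12 = 23, which is <= 25, so the shift is 23 and the limit
	 * becomes 8 MB.  The old SLUB limit for such a configuration was
	 * 2^18 = 256 KB (see the removed slub_def.h code below), so the
	 * caches for 2^19..2^23 bytes are the five new sizes. */
	printf("KMALLOC_SHIFT_HIGH = %d\n", KMALLOC_SHIFT_HIGH);
	printf("KMALLOC_MAX_SIZE   = %lu bytes\n", KMALLOC_MAX_SIZE);
	return 0;
}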
Most architectures will then be able to allocate objects with sizes up to
the MAX_ORDER limit. We have had repeated breakage (in fact whenever we
doubled the number of supported processors) on IA64 because one struct or
another grew beyond what the slab allocators supported. This will avoid
future issues and, e.g., spare us further fixes for 2k and 4k CPU support.
CONFIG_LARGE_ALLOCS is no longer necessary, so drop it.
It fixes sparc64 with SLAB.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/kmalloc_sizes.h	20
-rw-r--r--	include/linux/slab.h		15
-rw-r--r--	include/linux/slub_def.h	19
3 files changed, 32 insertions, 22 deletions
diff --git a/include/linux/kmalloc_sizes.h b/include/linux/kmalloc_sizes.h
index bda23e00ed71..e576b848ce10 100644
--- a/include/linux/kmalloc_sizes.h
+++ b/include/linux/kmalloc_sizes.h
@@ -19,17 +19,27 @@
 	CACHE(32768)
 	CACHE(65536)
 	CACHE(131072)
-#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
+#if KMALLOC_MAX_SIZE >= 262144
 	CACHE(262144)
 #endif
-#ifndef CONFIG_MMU
+#if KMALLOC_MAX_SIZE >= 524288
 	CACHE(524288)
+#endif
+#if KMALLOC_MAX_SIZE >= 1048576
 	CACHE(1048576)
-#ifdef CONFIG_LARGE_ALLOCS
+#endif
+#if KMALLOC_MAX_SIZE >= 2097152
 	CACHE(2097152)
+#endif
+#if KMALLOC_MAX_SIZE >= 4194304
 	CACHE(4194304)
+#endif
+#if KMALLOC_MAX_SIZE >= 8388608
 	CACHE(8388608)
+#endif
+#if KMALLOC_MAX_SIZE >= 16777216
 	CACHE(16777216)
+#endif
+#if KMALLOC_MAX_SIZE >= 33554432
 	CACHE(33554432)
-#endif /* CONFIG_LARGE_ALLOCS */
-#endif /* CONFIG_MMU */
+#endif
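For context (an illustrative sketch, not part of the patch): kmalloc_sizes.h is an x-macro list, so the header compiles to nothing on its own; each consumer defines CACHE() and then includes it, roughly the way mm/slab.c of that era builds its size table:

/* Illustrative sketch of the CACHE() x-macro pattern; the real table and
 * struct live in mm/slab.c / include/linux/slab_def.h and may differ in
 * detail. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
	struct kmem_cache	*cs_dmacachep;
};

struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)	/* sentinel terminating the table */
#undef CACHE
};

With the new #if KMALLOC_MAX_SIZE >= ... guards, entries above the per-architecture limit drop out of every such table at preprocessing time, instead of being gated by an ad-hoc mix of NR_CPUS, MAX_NUMNODES, CONFIG_MMU and CONFIG_LARGE_ALLOCS.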
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6fb2ae214152..a015236cc572 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -74,6 +74,21 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 #endif
 
 /*
+ * The largest kmalloc size supported by the slab allocators is
+ * 32 megabyte (2^25) or the maximum allocatable page order if that is
+ * less than 32 MB.
+ *
+ * WARNING: Its not easy to increase this value since the allocators have
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+				(MAX_ORDER + PAGE_SHIFT) : 25)
+
+#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+
+/*
  * Common kmalloc functions provided by all allocators
  */
 void *__kmalloc(size_t, gfp_t);
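One thing the shared constant makes possible (a hypothetical sketch, not from the tree; the struct and function names are made up): an allocator-independent compile-time check on structures that grow with configuration options, which is exactly the IA64 breakage mode described in the commit message:

/* Hypothetical example: guard a config-dependent structure against the
 * common kmalloc limit.  Names are invented for illustration only. */
#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/slab.h>		/* kmalloc(), KMALLOC_MAX_SIZE */
#include <linux/threads.h>	/* NR_CPUS */

struct hypothetical_cpu_table {
	unsigned long counters[NR_CPUS];	/* grows with CONFIG_NR_CPUS */
};

static struct hypothetical_cpu_table *alloc_cpu_table(gfp_t gfp)
{
	/* Break the build, rather than the boot, if a future bump to 2k
	 * or 4k CPUs pushes the structure past what kmalloc() can serve. */
	BUILD_BUG_ON(sizeof(struct hypothetical_cpu_table) > KMALLOC_MAX_SIZE);
	return kmalloc(sizeof(struct hypothetical_cpu_table), gfp);
}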
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a9fb92862aaa..0764c829d967 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -58,17 +58,6 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_LOW 3
 
-#ifdef CONFIG_LARGE_ALLOCS
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \
-				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#else
-#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
-#define KMALLOC_SHIFT_HIGH 20
-#else
-#define KMALLOC_SHIFT_HIGH 18
-#endif
-#endif
-
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
@@ -79,7 +68,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(int size)
+static inline int kmalloc_index(size_t size)
 {
 	/*
 	 * We should return 0 if size == 0 but we use the smallest object
@@ -87,7 +76,7 @@ static inline int kmalloc_index(int size)
 	 */
 	WARN_ON_ONCE(size == 0);
 
-	if (size > (1 << KMALLOC_SHIFT_HIGH))
+	if (size > KMALLOC_MAX_SIZE)
 		return -1;
 
 	if (size > 64 && size <= 96)
@@ -110,17 +99,13 @@ static inline int kmalloc_index(int size)
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
-#if KMALLOC_SHIFT_HIGH > 18
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
-#endif
-#if KMALLOC_SHIFT_HIGH > 20
 	if (size <= 2 * 1024 * 1024) return 21;
 	if (size <= 4 * 1024 * 1024) return 22;
 	if (size <= 8 * 1024 * 1024) return 23;
 	if (size <= 16 * 1024 * 1024) return 24;
 	if (size <= 32 * 1024 * 1024) return 25;
-#endif
 	return -1;
 
 	/*