path: root/include/linux/slub_def.h
author     Christoph Lameter <clameter@sgi.com>                   2007-10-16 04:24:38 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-16 12:42:53 -0400
commit     aadb4bc4a1f9108c1d0fbd121827c936c2ed4217 (patch)
tree       879b7c9ba11a65958e4477c563602e08d9e6635f /include/linux/slub_def.h
parent     57f6b96c09c30e444e0d3fc3080feba037657a7b (diff)
SLUB: direct pass through of page size or higher kmalloc requests
This gets rid of all kmalloc caches larger than page size. A kmalloc request
larger than PAGE_SIZE / 2 is passed straight through to the page allocator.
This works both inline, where we call __get_free_pages() instead of
kmem_cache_alloc(), and in __kmalloc().

kfree() is modified to check whether the object is in a slab page. If not,
the page is freed via the page allocator instead. This is roughly similar to
what SLOB does.

Advantages:

- Reduces memory overhead of the kmalloc array.
- Large kmalloc operations are faster since they no longer need to pass
  through the slab allocator to reach the page allocator.
- Performance increase of 10%-20% on alloc and 50% on free for PAGE_SIZE-sized
  allocations. SLUB must call the page allocator for each allocation anyway,
  since the higher-order pages that used to avoid those calls are no longer
  reliably available, so we are basically removing useless slab allocator
  overhead.
- Large kmallocs yield page-aligned objects, which is what SLAB did. Bad
  habits like using page-sized kmalloc allocations to stand in for page
  allocator allocations are handled transparently and are indistinguishable
  from page allocator uses.
- Checking for too-large objects can be removed, since that is done by the
  page allocator.

Drawbacks:

- No accounting for large kmalloc slab allocations anymore.
- No debugging of large kmalloc slab allocations.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
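[Editor's sketch] The kfree() change described above lands in mm/slub.c, which is outside this header's diff. The following is a rough reconstruction of the check the message describes, assuming the usual virt_to_head_page()/PageSlab() helpers; slab_free() and page->slab stand in for SLUB's internal free path and may not match the patched file exactly.

/*
 * Sketch only, not the literal mm/slub.c hunk from this commit: kfree()
 * must now recognize objects that were handed out by the page allocator.
 * A page that is not a slab page is released with put_page(); slab
 * objects keep taking the normal SLUB free path.
 */
void kfree(const void *x)
{
        struct page *page;

        if (unlikely(x == ZERO_SIZE_PTR || !x))
                return;

        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                put_page(page);         /* large kmalloc: back to the page allocator */
                return;
        }
        slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
}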
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--   include/linux/slub_def.h   57
1 file changed, 24 insertions(+), 33 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 74962077f632..3b361b2906bd 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -72,7 +72,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -83,9 +83,6 @@ static __always_inline int kmalloc_index(size_t size)
         if (!size)
                 return 0;
 
-        if (size > KMALLOC_MAX_SIZE)
-                return -1;
-
         if (size <= KMALLOC_MIN_SIZE)
                 return KMALLOC_SHIFT_LOW;
 
@@ -102,6 +99,10 @@ static __always_inline int kmalloc_index(size_t size)
         if (size <= 512) return 9;
         if (size <= 1024) return 10;
         if (size <= 2 * 1024) return 11;
+/*
+ * The following is only needed to support architectures with a larger page
+ * size than 4k.
+ */
         if (size <= 4 * 1024) return 12;
         if (size <= 8 * 1024) return 13;
         if (size <= 16 * 1024) return 14;
@@ -109,13 +110,9 @@ static __always_inline int kmalloc_index(size_t size)
         if (size <= 64 * 1024) return 16;
         if (size <= 128 * 1024) return 17;
         if (size <= 256 * 1024) return 18;
         if (size <= 512 * 1024) return 19;
         if (size <= 1024 * 1024) return 20;
         if (size <= 2 * 1024 * 1024) return 21;
-        if (size <= 4 * 1024 * 1024) return 22;
-        if (size <= 8 * 1024 * 1024) return 23;
-        if (size <= 16 * 1024 * 1024) return 24;
-        if (size <= 32 * 1024 * 1024) return 25;
         return -1;
 
 /*
@@ -140,19 +137,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
         if (index == 0)
                 return NULL;
 
-        /*
-         * This function only gets expanded if __builtin_constant_p(size), so
-         * testing it here shouldn't be needed. But some versions of gcc need
-         * help.
-         */
-        if (__builtin_constant_p(size) && index < 0) {
-                /*
-                 * Generate a link failure. Would be great if we could
-                 * do something to stop the compile here.
-                 */
-                extern void __kmalloc_size_too_large(void);
-                __kmalloc_size_too_large();
-        }
         return &kmalloc_caches[index];
 }
 
@@ -168,15 +152,21 @@ void *__kmalloc(size_t size, gfp_t flags);
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-        if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-                struct kmem_cache *s = kmalloc_slab(size);
+        if (__builtin_constant_p(size)) {
+                if (size > PAGE_SIZE / 2)
+                        return (void *)__get_free_pages(flags | __GFP_COMP,
+                                                        get_order(size));
 
-                if (!s)
-                        return ZERO_SIZE_PTR;
+                if (!(flags & SLUB_DMA)) {
+                        struct kmem_cache *s = kmalloc_slab(size);
+
+                        if (!s)
+                                return ZERO_SIZE_PTR;
 
-                return kmem_cache_alloc(s, flags);
-        } else
-                return __kmalloc(size, flags);
+                        return kmem_cache_alloc(s, flags);
+                }
+        }
+        return __kmalloc(size, flags);
 }
 
 #ifdef CONFIG_NUMA
@@ -185,15 +175,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-        if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-                struct kmem_cache *s = kmalloc_slab(size);
+        if (__builtin_constant_p(size) &&
+                size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+                        struct kmem_cache *s = kmalloc_slab(size);
 
                 if (!s)
                         return ZERO_SIZE_PTR;
 
                 return kmem_cache_alloc_node(s, flags, node);
-        } else
-                return __kmalloc_node(size, flags, node);
+        }
+        return __kmalloc_node(size, flags, node);
 }
 #endif
 
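[Editor's sketch] As a caller-visible illustration of the pass-through shown in the kmalloc() hunk above, a minimal sketch follows; the helper names grab_page_buffer/drop_page_buffer are made up for the example, while kmalloc(), kfree(), PAGE_SIZE, and gfp_t are the real kernel interfaces.

/*
 * Sketch: with this change a constant-size request above PAGE_SIZE / 2
 * expands at compile time to __get_free_pages(), so the object is page
 * aligned and kfree() releases it through the page allocator.
 */
static void *grab_page_buffer(gfp_t gfp)        /* hypothetical helper */
{
        return kmalloc(PAGE_SIZE, gfp);         /* compiles to __get_free_pages() */
}

static void drop_page_buffer(void *buf)
{
        kfree(buf);                             /* !PageSlab: freed via put_page() */
}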