author    Christoph Lameter <cl@linux.com>    2013-01-10 14:14:19 -0500
committer Pekka Enberg <penberg@kernel.org>  2013-02-01 05:32:05 -0500
commit    ce6a50263d4ddeba1f0d08f16716a82770c03690 (patch)
tree      099024fa474177d2e26709de76a211050ee9a4a1
parent    345046673449b5c35840e5cc34a60059cbec9305 (diff)
slab: Common kmalloc slab index determination
Extract the function that determines the index of a slab within the array of kmalloc caches, as well as a function that determines the maximum object size from the number of a kmalloc slab. This is used here only to simplify SLUB bootstrap, but will later be used for SLAB as well.

Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--  include/linux/slab.h      | 172
-rw-r--r--  include/linux/slub_def.h  |  63
2 files changed, 122 insertions(+), 113 deletions(-)
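The mapping these two helpers implement is easy to exercise outside the kernel. The following is a minimal user-space model of kmalloc_index() and kmalloc_size() as introduced by this patch, assuming KMALLOC_MIN_SIZE is 8 (the SLUB default when ARCH_DMA_MINALIGN is not set). It substitutes a plain loop for the kernel's hand-unrolled chain and a -1 return for BUG(), so it illustrates the contract rather than reproducing the kernel code:

/*
 * User-space model of the kmalloc index <-> size mapping.
 * Assumes KMALLOC_MIN_SIZE == 8; not kernel code.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define KMALLOC_MIN_SIZE	8
#define KMALLOC_SHIFT_LOW	3	/* ilog2(KMALLOC_MIN_SIZE) */

static int kmalloc_index(size_t size)
{
	if (!size)
		return 0;			/* zero-size allocation */
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;	/* smallest cache */
	if (size > 64 && size <= 96)
		return 1;			/* dedicated 96-byte cache */
	if (size > 128 && size <= 192)
		return 2;			/* dedicated 192-byte cache */
	for (int i = KMALLOC_SHIFT_LOW; i <= 26; i++)
		if (size <= (1UL << i))
			return i;		/* power-of-two caches */
	return -1;				/* kernel would BUG() here */
}

static int kmalloc_size(int n)
{
	if (n > 2)
		return 1 << n;	/* power-of-two cache sizes */
	if (n == 1)
		return 96;
	if (n == 2)
		return 192;
	return 0;		/* no cache for this index */
}

int main(void)
{
	/* Each size must map to a cache at least as large as the request. */
	for (size_t size = 1; size <= 1024 * 1024; size++)
		assert((size_t)kmalloc_size(kmalloc_index(size)) >= size);
	printf("kmalloc_index(100) = %d -> cache size %d\n",
	       kmalloc_index(100), kmalloc_size(kmalloc_index(100)));
	return 0;
}

The invariant the assertion checks, kmalloc_size(kmalloc_index(size)) >= size, is the property the bootstrap code relies on when it sizes the array of kmalloc caches from these two functions alone.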
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ccbb37685c6c..c97fe92532d1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -94,29 +94,6 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-#ifdef CONFIG_SLOB
-struct kmem_cache {
-	unsigned int object_size;/* The original size of the object */
-	unsigned int size;	/* The aligned/padded/added on size */
-	unsigned int align;	/* Alignment as calculated */
-	unsigned long flags;	/* Active flags on the slab */
-	const char *name;	/* Slab name for sysfs */
-	int refcount;		/* Use counter */
-	void (*ctor)(void *);	/* Called on object slot creation */
-	struct list_head list;	/* List of all slab caches on the system */
-};
-#endif
 
 struct mem_cgroup;
 /*
@@ -156,6 +133,35 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_SLOB
+/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put a
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+
+#define KMALLOC_MAX_SIZE (1UL << 30)
+
+#include <linux/slob_def.h>
+
+#else /* CONFIG_SLOB */
+
 /*
  * The largest kmalloc size supported by the slab allocators is
  * 32 megabyte (2^25) or the maximum allocatable page order if that is
@@ -172,6 +178,99 @@ size_t ksize(const void *);
 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
 
 /*
+ * Kmalloc subsystem.
+ */
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
+#else
+#ifdef CONFIG_SLAB
+#define KMALLOC_MIN_SIZE 32
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
+
+/*
+ * Figure out which kmalloc slab an allocation of a certain size
+ * belongs to.
+ * 0 = zero alloc
+ * 1 =  65 .. 96 bytes
+ * 2 = 120 .. 192 bytes
+ * n = 2^(n-1) .. 2^n -1
+ */
+static __always_inline int kmalloc_index(size_t size)
+{
+	if (!size)
+		return 0;
+
+	if (size <= KMALLOC_MIN_SIZE)
+		return KMALLOC_SHIFT_LOW;
+
+	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
+		return 1;
+	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
+		return 2;
+	if (size <=          8) return 3;
+	if (size <=         16) return 4;
+	if (size <=         32) return 5;
+	if (size <=         64) return 6;
+	if (size <=        128) return 7;
+	if (size <=        256) return 8;
+	if (size <=        512) return 9;
+	if (size <=       1024) return 10;
+	if (size <=   2 * 1024) return 11;
+	if (size <=   4 * 1024) return 12;
+	if (size <=   8 * 1024) return 13;
+	if (size <=  16 * 1024) return 14;
+	if (size <=  32 * 1024) return 15;
+	if (size <=  64 * 1024) return 16;
+	if (size <= 128 * 1024) return 17;
+	if (size <= 256 * 1024) return 18;
+	if (size <= 512 * 1024) return 19;
+	if (size <= 1024 * 1024) return 20;
+	if (size <=  2 * 1024 * 1024) return 21;
+	if (size <=  4 * 1024 * 1024) return 22;
+	if (size <=  8 * 1024 * 1024) return 23;
+	if (size <= 16 * 1024 * 1024) return 24;
+	if (size <= 32 * 1024 * 1024) return 25;
+	if (size <= 64 * 1024 * 1024) return 26;
+	BUG();
+
+	/* Will never be reached. Needed because the compiler may complain */
+	return -1;
+}
+
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#elif defined(CONFIG_SLUB)
+#include <linux/slub_def.h>
+#else
+#error "Unknown slab allocator"
+#endif
+
+/*
+ * Determine size used for the nth kmalloc cache.
+ * return size or 0 if a kmalloc cache for that
+ * size does not exist
+ */
+static __always_inline int kmalloc_size(int n)
+{
+	if (n > 2)
+		return 1 << n;
+
+	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
+		return 96;
+
+	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
+		return 192;
+
+	return 0;
+}
+#endif /* !CONFIG_SLOB */
+
+/*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
@@ -233,33 +332,6 @@ struct seq_file;
 int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
 /**
  * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
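A note on the two non-power-of-two indices: the 96- and 192-byte caches (indices 1 and 2) exist to cut internal fragmentation in the gaps between power-of-two sizes. The saving is plain arithmetic; the snippet below is a user-space illustration, not kernel code:

/*
 * Per-object waste for a 70-byte allocation: next power of two vs.
 * the dedicated 96-byte cache. Illustration only, not kernel code.
 */
#include <stdio.h>

int main(void)
{
	unsigned int request = 70;
	unsigned int pow2_cache = 128;	/* next power of two >= 70 */
	unsigned int odd_cache = 96;	/* the index-1 kmalloc cache */

	printf("128-byte cache wastes %u bytes per object\n",
	       pow2_cache - request);	/* 58 bytes, ~45% of the slot */
	printf("96-byte cache wastes %u bytes per object\n",
	       odd_cache - request);	/* 26 bytes, ~27% of the slot */
	return 0;
}

For a 70-byte object the dedicated cache cuts the per-object waste by more than half compared to rounding up to 128 bytes.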
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9db4825cd393..99c3e05ff1f0 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,17 +116,6 @@ struct kmem_cache {
 };
 
 /*
- * Kmalloc subsystem.
- */
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#else
-#define KMALLOC_MIN_SIZE 8
-#endif
-
-#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-
-/*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
  * is relatively slow so we need this value sufficiently high so that
@@ -153,58 +142,6 @@ struct kmem_cache {
 extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
- * Sorry that the following has to be that ugly but some versions of GCC
- * have trouble with constant propagation and loops.
- */
-static __always_inline int kmalloc_index(size_t size)
-{
-	if (!size)
-		return 0;
-
-	if (size <= KMALLOC_MIN_SIZE)
-		return KMALLOC_SHIFT_LOW;
-
-	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
-		return 1;
-	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
-		return 2;
-	if (size <=          8) return 3;
-	if (size <=         16) return 4;
-	if (size <=         32) return 5;
-	if (size <=         64) return 6;
-	if (size <=        128) return 7;
-	if (size <=        256) return 8;
-	if (size <=        512) return 9;
-	if (size <=       1024) return 10;
-	if (size <=   2 * 1024) return 11;
-	if (size <=   4 * 1024) return 12;
-/*
- * The following is only needed to support architectures with a larger page
- * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page
- * size we would have to go up to 128k.
- */
-	if (size <=   8 * 1024) return 13;
-	if (size <=  16 * 1024) return 14;
-	if (size <=  32 * 1024) return 15;
-	if (size <=  64 * 1024) return 16;
-	if (size <= 128 * 1024) return 17;
-	if (size <= 256 * 1024) return 18;
-	if (size <= 512 * 1024) return 19;
-	if (size <= 1024 * 1024) return 20;
-	if (size <=  2 * 1024 * 1024) return 21;
-	BUG();
-	return -1; /* Will never be reached */
-
-/*
- * What we really wanted to do and cannot do because of compiler issues is:
- *	int i;
- *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
- *		if (size <= (1 << i))
- *			return i;
- */
-}
-
-/*
  * Find the slab cache for a given combination of allocation flags and size.
  *
  * This ought to end up with a global pointer to the right cache
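The "ugly" hand-unrolled chain removed here (and re-created in slab.h by this patch) exists so that kmalloc_index() folds to a compile-time constant whenever its argument is constant, which in turn lets a fixed-size kmalloc() compile down to a direct lookup in the kmalloc cache array. A user-space sketch of that pattern, with hypothetical names (my_caches, index_of), assuming GCC-style always_inline:

/*
 * Sketch of the constant-folding pattern the if-chain enables.
 * Names are hypothetical; user-space illustration, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

static const char *my_caches[] = {
	NULL, NULL, NULL,
	"kmalloc-8", "kmalloc-16", "kmalloc-32", "kmalloc-64",
};

static inline __attribute__((always_inline)) int index_of(size_t size)
{
	if (size <= 8)  return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	return -1;	/* larger sizes elided in this sketch */
}

int main(void)
{
	/*
	 * After inlining, index_of(24) is a constant expression: the
	 * compiler folds the chain to 5 and emits a direct load of
	 * my_caches[5], with no runtime comparisons.
	 */
	printf("24-byte allocations come from %s\n",
	       my_caches[index_of(24)]);
	return 0;
}

The loop in the removed comment is what the authors would have preferred; the unrolled form was kept because, as that comment notes, some GCC versions failed to propagate constants through the loop.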