path: root/include/linux/slub_def.h
author    Christoph Lameter <cl@linux.com>    2013-01-10 14:14:19 -0500
committer Pekka Enberg <penberg@kernel.org>  2013-02-01 05:32:05 -0500
commit    ce6a50263d4ddeba1f0d08f16716a82770c03690 (patch)
tree      099024fa474177d2e26709de76a211050ee9a4a1 /include/linux/slub_def.h
parent    345046673449b5c35840e5cc34a60059cbec9305 (diff)
slab: Common kmalloc slab index determination
Extract the function that determines the index of a slab within the array of kmalloc caches, as well as a function that determines the maximum object size from the number of the kmalloc slab. This is used here only to simplify SLUB bootstrap, but it will later be used for SLAB as well.

Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
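The size-from-index helper the message mentions is not visible in this file's diff (it lands in common code). Below is a minimal sketch of what such a helper looks like, inferred from the kmalloc_index() cascade removed further down; the name kmalloc_size() and its exact form are assumptions, not shown in this diff:

/*
 * Sketch (assumed name: kmalloc_size): return the maximum object size
 * served by kmalloc cache number n. Indices 1 and 2 are the odd
 * 96- and 192-byte caches; every index n >= 3 holds objects of up to
 * 2^n bytes, mirroring the kmalloc_index() cascade removed below.
 */
static __always_inline int kmalloc_size(int n)
{
	if (n > 2)
		return 1 << n;		/* regular power-of-two cache */

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;		/* special 192-byte cache */

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;		/* special 96-byte cache */

	return 0;			/* index 0: zero-size allocation */
}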
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h  63
1 file changed, 0 insertions, 63 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9db4825cd393..99c3e05ff1f0 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,17 +116,6 @@ struct kmem_cache {
 };
 
 /*
- * Kmalloc subsystem.
- */
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#else
-#define KMALLOC_MIN_SIZE 8
-#endif
-
-#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-
-/*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
  * is relatively slow so we need this value sufficiently high so that
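To make the removed constants concrete: on an architecture that defines ARCH_DMA_MINALIGN as 64 (a common cache-line size), the block above yields KMALLOC_MIN_SIZE == 64 and KMALLOC_SHIFT_LOW == ilog2(64) == 6, so kmalloc_index() in the next hunk never returns an index below 6 (nor the 96-byte cache at index 1); without that define, KMALLOC_MIN_SIZE defaults to 8 and KMALLOC_SHIFT_LOW to 3.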
@@ -153,58 +142,6 @@ struct kmem_cache {
 extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
- * Sorry that the following has to be that ugly but some versions of GCC
- * have trouble with constant propagation and loops.
- */
-static __always_inline int kmalloc_index(size_t size)
-{
-	if (!size)
-		return 0;
-
-	if (size <= KMALLOC_MIN_SIZE)
-		return KMALLOC_SHIFT_LOW;
-
-	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
-		return 1;
-	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
-		return 2;
-	if (size <= 8) return 3;
-	if (size <= 16) return 4;
-	if (size <= 32) return 5;
-	if (size <= 64) return 6;
-	if (size <= 128) return 7;
-	if (size <= 256) return 8;
-	if (size <= 512) return 9;
-	if (size <= 1024) return 10;
-	if (size <= 2 * 1024) return 11;
-	if (size <= 4 * 1024) return 12;
-/*
- * The following is only needed to support architectures with a larger page
- * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page
- * size we would have to go up to 128k.
- */
-	if (size <= 8 * 1024) return 13;
-	if (size <= 16 * 1024) return 14;
-	if (size <= 32 * 1024) return 15;
-	if (size <= 64 * 1024) return 16;
-	if (size <= 128 * 1024) return 17;
-	if (size <= 256 * 1024) return 18;
-	if (size <= 512 * 1024) return 19;
-	if (size <= 1024 * 1024) return 20;
-	if (size <= 2 * 1024 * 1024) return 21;
-	BUG();
-	return -1; /* Will never be reached */
-
-/*
- * What we really wanted to do and cannot do because of compiler issues is:
- *	int i;
- *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
- *		if (size <= (1 << i))
- *			return i;
- */
-}
-
-/*
  * Find the slab cache for a given combination of allocation flags and size.
  *
  * This ought to end up with a global pointer to the right cache
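The loop that the removed closing comment says the authors "really wanted" would look like the sketch below. It is a minimal illustration under the comment's own assumptions (KMALLOC_SHIFT_HIGH as the largest supported index, as the comment references it); it omits the zero-size check and the 96/192-byte special cases that the unrolled cascade handles, and the cascade exists precisely because some GCC versions fail to constant-propagate through such a loop:

static __always_inline int kmalloc_index_loop(size_t size)
{
	int i;

	/* Walk the power-of-two cache sizes from the smallest up. */
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		if (size <= (1 << i))
			return i;

	return -1;	/* larger than any kmalloc cache */
}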