author     Christoph Lameter <cl@linux.com>                2015-06-29 10:28:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-29 13:49:51 -0400
commit     a9730fca9946f3697410479e0ef1bd759ba00a77 (patch)
tree       0708dfc873d6df1e7df1ce7cd6575f19cd8b5ed0 /include
parent     88793e5c774ec69351ef6b5200bb59f532e41bca (diff)
Fix kmalloc slab creation sequence
This patch restores the slab creation sequence that was broken by commit
4066c33d0308f8 and also reverts the portions that introduced the
KMALLOC_LOOP_XXX macros. Those can never really work since the slab creation
is much more complex than just going from a minimum to a maximum number.

The latest upstream kernel boots cleanly on my machine with a 64 bit x86
configuration under KVM using either SLAB or SLUB.

Fixes: 4066c33d0308f8 ("support the slub_debug boot option")
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
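[Editor's note] The point that slab creation is "much more complex than just going from a minimum to a maximum number" is easiest to see in code. The fragment below is a simplified paraphrase, not the verbatim kernel source, of the create_kmalloc_caches() logic this commit restores in mm/slab_common.c (names such as new_kmalloc_cache() and kmalloc_caches[] follow that file around this commit, but treat the body as illustrative): the kmalloc-96 and kmalloc-192 caches sit at indices 1 and 2 and must be created right after the 64-byte and 128-byte power-of-two caches, something a flat loop from KMALLOC_LOOP_LOW to KMALLOC_SHIFT_HIGH cannot express.

/*
 * Simplified sketch of why a plain min-to-max index loop does not work:
 * the non-power-of-two caches have to be interleaved with the
 * power-of-two ones, not handled by widening the loop's start index.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);	/* 2^i byte cache */

		/* kmalloc-96 must follow the 64-byte cache (i == 6) */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);

		/* kmalloc-192 must follow the 128-byte cache (i == 7) */
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}
}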
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab.h  22
1 file changed, 0 insertions(+), 22 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9de2fdc8b5e4..a99f0e5243e1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -153,30 +153,8 @@ size_t ksize(const void *);
 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
-/*
- * The KMALLOC_LOOP_LOW is the definition for the for loop index start number
- * to create the kmalloc_caches object in create_kmalloc_caches(). The first
- * and the second are 96 and 192. You can see that in the kmalloc_index(), if
- * the KMALLOC_MIN_SIZE <= 32, then return 1 (96). If KMALLOC_MIN_SIZE <= 64,
- * then return 2 (192). If the KMALLOC_MIN_SIZE is bigger than 64, we don't
- * need to initialize 96 and 192. Go directly to start the KMALLOC_SHIFT_LOW.
- */
-#if KMALLOC_MIN_SIZE <= 32
-#define KMALLOC_LOOP_LOW 1
-#elif KMALLOC_MIN_SIZE <= 64
-#define KMALLOC_LOOP_LOW 2
-#else
-#define KMALLOC_LOOP_LOW KMALLOC_SHIFT_LOW
-#endif
-
 #else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-/*
- * The KMALLOC_MIN_SIZE of slub/slab/slob is 2^3/2^5/2^3. So, even slab is used.
- * The KMALLOC_MIN_SIZE <= 32. The kmalloc-96 and kmalloc-192 should also be
- * initialized.
- */
-#define KMALLOC_LOOP_LOW 1
 #endif
 
 /*
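[Editor's note] The removed comments refer to the kmalloc_index() size-to-slot mapping: indices 1 and 2 of kmalloc_caches[] are reserved for the non-power-of-two sizes 96 and 192, while the remaining slots hold power-of-two sizes at their log2 index. The fragment below is an abbreviated paraphrase of that mapping, shown only to make the 96/192 special cases concrete; the real kmalloc_index() in include/linux/slab.h covers the full size range.

/*
 * Abbreviated paraphrase of the kmalloc_index() size-to-slot mapping;
 * the real function in include/linux/slab.h handles many more sizes.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;		/* kmalloc-96 */
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;		/* kmalloc-192 */
	if (size <= 8)   return 3;	/* power-of-two slots: index = log2(size) */
	if (size <= 16)  return 4;
	if (size <= 32)  return 5;
	if (size <= 64)  return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	/* ... continues up to KMALLOC_SHIFT_HIGH in the real function ... */
	BUG();
	return -1;
}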