Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	14
1 file changed, 11 insertions, 3 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1cd943c..1a427c0ae83b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -2995,8 +2995,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3026,6 +3024,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
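For context on the size_index[] change in the last hunk: for requests of 192 bytes and below, SLUB selects the kmalloc cache by indexing this table with (size - 1) / 8, so the added loop makes every size from 136 to 192 resolve to slot 8, i.e. the 256 byte (2^8) cache, since no 192 byte cache exists when the minimum alignment is 128 bytes. Below is a minimal, compilable sketch of that effect; the table contents, helper name and main() scaffolding are illustrative assumptions, and only the redirect loop is taken from the patch.

/* Illustrative sketch only -- not kernel code. */
#include <stdio.h>
#include <stddef.h>

static unsigned char size_index[24];	/* one slot per 8 bytes, sizes 1..192 */

/* For sizes <= 192, SLUB-style lookup of the kmalloc cache slot. */
static int cache_index(size_t size)
{
	return size_index[(size - 1) / 8];
}

int main(void)
{
	size_t i;

	/* Usual case: 136..192 byte requests map to the 192 byte cache (slot 2). */
	for (i = 128 + 8; i <= 192; i += 8)
		size_index[(i - 1) / 8] = 2;
	printf("160 bytes -> kmalloc_caches[%d]\n", cache_index(160));	/* prints 2 */

	/* With KMALLOC_MIN_SIZE == 128 the patch redirects the same range to
	 * the 256 byte cache, which lives at slot 8 because 2^8 == 256. */
	for (i = 128 + 8; i <= 192; i += 8)
		size_index[(i - 1) / 8] = 8;
	printf("160 bytes -> kmalloc_caches[%d]\n", cache_index(160));	/* prints 8 */

	return 0;
}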