Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  19
1 file changed, 14 insertions, 5 deletions
diff --git a/mm/slub.c b/mm/slub.c
index a505a828ef41..1a427c0ae83b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -2726,9 +2726,10 @@ size_t ksize(const void *object)
 
 	page = virt_to_head_page(object);
 
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!PageSlab(page))) {
+		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
-
+	}
 	s = page->slab;
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -2994,8 +2995,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3025,6 +3024,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
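
A stand-alone sketch of what the size_index[] hunk above does, for context. Only what the diff itself shows is taken as given: kmalloc_caches[2] is "kmalloc-192", cache index 8 is the 256 byte cache, and with a 128 byte minimum alignment the 136..192 byte steps get pointed at index 8. The table size, the assumed default value of 2 for those slots, and the setup_size_index()/main() scaffolding are illustrative assumptions, not kernel source.

/*
 * Stand-alone user-space sketch (illustration only, not the kernel
 * implementation) of the size_index[] redirection added above.
 */
#include <stdio.h>

#define KMALLOC_MIN_SIZE 128	/* assume a 128 byte minimum alignment */

/* size_index[(size - 1) / 8] maps small kmalloc sizes to a cache index */
static unsigned char size_index[24];

static void setup_size_index(void)
{
	int i;

	/* assumed default: sizes 129..192 use the 192 byte cache, index 2 */
	for (i = 128 + 8; i <= 192; i += 8)
		size_index[(i - 1) / 8] = 2;

	if (KMALLOC_MIN_SIZE == 128) {
		/*
		 * Mirror of the patch: the 192 byte cache is not usable
		 * with 128 byte alignment, so redirect these sizes to the
		 * 256 byte cache (index 8).
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[(i - 1) / 8] = 8;
	}
}

int main(void)
{
	setup_size_index();
	/* a 160 byte request now resolves to cache index 8 (kmalloc-256) */
	printf("size 160 -> cache index %d\n", size_index[(160 - 1) / 8]);
	return 0;
}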