path: root/mm/slub.c
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2008-07-14 00:29:49 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>    2008-07-14 00:29:49 -0400
commit    11c2d8174ed3dc4f1971564732689b4a39129702 (patch)
tree      ac00daa548ea8ac24ae7a5c8062312e335ab9858 /mm/slub.c
parent    cde274c0c789404df8ece3f9e7d6506caf0127e2 (diff)
parent    bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
Merge commit 'origin/HEAD' into test-merge
Manual fixup of include/asm-powerpc/pgtable-ppc64.h
Diffstat (limited to 'mm/slub.c')
-rw-r--r--    mm/slub.c    18
1 file changed, 14 insertions, 4 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1cd943c..315c392253c7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -1628,9 +1628,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
+	unsigned int objsize;
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	objsize = c->objsize;
 	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1643,7 +1645,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, c->objsize);
+		memset(object, 0, objsize);
 
 	return object;
 }
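The two slab_alloc() hunks above read c->objsize into a local objsize right after c is fetched under local_irq_save(), and use that copy for the __GFP_ZERO memset instead of dereferencing c again after local_irq_restore(), where a per-cpu pointer obtained earlier is no longer guaranteed to describe the running CPU. Below is a minimal userspace sketch of that snapshot pattern; the names cpu_state and refill are hypothetical stand-ins for the kernel structures, not kernel code.

/* Hypothetical model of the pattern in the slab_alloc() hunks: copy the
 * field you will need later while the descriptor is known to be current,
 * because the slow path may invalidate what the pointer describes.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cpu_state {
	unsigned int objsize;	/* size to zero for this descriptor */
};

static struct cpu_state state = { .objsize = 64 };

/* Stand-in for the slow path: it may reconfigure the descriptor before
 * returning (in the kernel, interrupts can be re-enabled around here). */
static void *refill(struct cpu_state *c, unsigned int size)
{
	c->objsize = 128;	/* descriptor no longer describes our object */
	return malloc(size);
}

int main(void)
{
	struct cpu_state *c = &state;
	unsigned int objsize = c->objsize;	/* snapshot, as the patch does */
	void *object = refill(c, objsize);

	if (object)
		memset(object, 0, objsize);	/* zeroes 64 bytes; c->objsize is now 128 */

	printf("zeroed %u bytes (c->objsize is now %u)\n", objsize, c->objsize);
	free(object);
	return 0;
}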
@@ -2995,8 +2997,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3026,6 +3026,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
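The comment added in the last hunk gives the reasoning; the table arithmetic is easier to see with a concrete size. Below is a small userspace sketch of the size_index update, assuming, as the hunk's comment implies, that slot 8 of kmalloc_caches[] is the 256-byte (2^8) cache, and that KMALLOC_SHIFT_LOW is log2(KMALLOC_MIN_SIZE); everything outside the two loops is a simplified stand-in, not kernel code.

/* Simplified model of the size_index update in the final hunk.
 * size_index[(size - 1) / 8] names the kmalloc_caches[] slot used for a
 * request of 'size' bytes (sizes up to 192). With a 128-byte minimum
 * alignment the 192-byte cache is unusable, so 136..192 byte requests
 * are pointed at slot 8, the 256-byte cache.
 */
#include <stdio.h>

#define KMALLOC_MIN_SIZE	128
#define KMALLOC_SHIFT_LOW	7	/* log2(KMALLOC_MIN_SIZE): smallest cache is 128 bytes */

static unsigned char size_index[24];	/* the kernel fills this with a static initializer; omitted here */

int main(void)
{
	int i;

	/* Existing code just above the hunk: every size below the minimum
	 * alignment collapses onto the smallest cache. */
	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

	/* The added hunk: skip the unusable 192-byte cache. */
	if (KMALLOC_MIN_SIZE == 128)
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[(i - 1) / 8] = 8;

	/* Worked example: (192 - 1) / 8 == 23, and entry 23 now holds 8. */
	printf("kmalloc(192) -> kmalloc_caches[%d]\n", size_index[(192 - 1) / 8]);
	return 0;
}

So a 192-byte request indexes entry (192 - 1) / 8 = 23, which now holds 8, and is served from the 256-byte cache rather than the 192-byte one that cannot satisfy the alignment.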