Diffstat (limited to 'mm/slub.c')
 -rw-r--r-- mm/slub.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 44715eb70c06..35ab38a94b46 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -411,7 +411,7 @@ static void set_track(struct kmem_cache *s, void *object,
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
-		p->pid = current ? current->pid : -1;
+		p->pid = current->pid;
 		p->when = jiffies;
 	} else
 		memset(p, 0, sizeof(struct track));
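
Note on the set_track() change above: in the kernel, current always points at a valid task_struct once the boot idle task is set up (in interrupt context it refers to the interrupted task), so the current ? current->pid : -1 guard was dead code. For reference, the record being filled in looks roughly like this (a sketch of struct track as used in slub.c of this era; field comments are mine):

	struct track {
		void *addr;		/* address the alloc/free was called from */
		int cpu;		/* CPU the operation ran on */
		int pid;		/* task that performed the operation */
		unsigned long when;	/* jiffies timestamp of the operation */
	};
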
@@ -431,9 +431,8 @@ static void print_track(const char *s, struct track *t)
 	if (!t->addr)
 		return;
 
-	printk(KERN_ERR "INFO: %s in ", s);
-	__print_symbol("%s", (unsigned long)t->addr);
-	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+		s, t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
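
The print_track() rewrite folds the old printk() plus __print_symbol() pair into a single call using the %pS printk extension, which formats a kernel text address as symbol+offset/size. A minimal illustration of the same specifier (hypothetical message, any kernel context):

	/* prints e.g. "called from my_driver_probe+0x1c/0x90" */
	printk(KERN_INFO "called from %pS\n", __builtin_return_address(0));

Besides dropping a call, this keeps the whole record on one printk() line, so it can no longer be interleaved with concurrent console output.
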
@@ -1628,9 +1627,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
+	unsigned int objsize;
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	objsize = c->objsize;
 	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1643,7 +1644,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, c->objsize);
+		memset(object, 0, objsize);
 
 	return object;
 }
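
The two slab_alloc() hunks belong together: once local_irq_restore() runs, the task can be preempted and migrated to another CPU, after which c may point at a kmem_cache_cpu that no longer belongs to the running CPU. Reading c->objsize in the old __GFP_ZERO path therefore raced with migration; the fix snapshots the value while interrupts are still off. The general per-CPU pattern, as a hedged sketch using the names from the patch:

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	objsize = c->objsize;		/* copy out while pinned: IRQs off */
	...
	local_irq_restore(flags);
	/* 'c' may be stale from here on; only the snapshot is safe */
	memset(object, 0, objsize);
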
@@ -2765,6 +2766,7 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
+		BUG_ON(!PageCompound(page));
 		put_page(page);
 		return;
 	}
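
The new BUG_ON() documents an invariant of the large-allocation path: kmalloc() requests too big for the slab caches go straight to the page allocator as compound pages, roughly like this sketch of the era's kmalloc_large() helper (simplified; comments mine):

	static inline void *kmalloc_large(size_t size, gfp_t flags)
	{
		/* __GFP_COMP makes the allocation a compound page, so
		 * kfree() can recover the head page and its order */
		return (void *)__get_free_pages(flags | __GFP_COMP,
						get_order(size));
	}

So any pointer reaching kfree() that is not on a slab page must sit on a compound page; tripping the BUG_ON means the caller passed a bogus pointer, and an immediate oops beats silently dropping a reference on an arbitrary page.
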
@@ -2995,8 +2997,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
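
With the separate if (KMALLOC_MIN_SIZE <= 128) guard gone, kmalloc-96 and kmalloc-192 are now created under the same enclosing condition, and the configuration where a 192-byte cache is useless is handled by the size_index redirect in the next hunk instead. The arithmetic behind "useless", as a hedged aside (macro name mine):

	#define SKETCH_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	/* with 128-byte minimum alignment, a 192-byte object is padded
	 * out to SKETCH_ALIGN(192, 128) == 256 bytes anyway */
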
@@ -3026,6 +3026,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
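
For context on the size_index fixup: in this version of slub.c, kmalloc() resolves requests up to 192 bytes through the size_index[] table, one entry per 8-byte bucket, and falls back to power-of-two caches above that; index 8 corresponds to the 2^8 = 256-byte cache. A hedged sketch of the lookup (simplified from the cache-selection logic of the era, function name mine):

	static int sketch_kmalloc_index(size_t size)
	{
		if (size <= 192)
			return size_index[(size - 1) / 8];	/* table-driven */
		return fls(size - 1);	/* 2^n caches above 192 bytes */
	}

The new loop therefore rewrites the buckets covering 129..192-byte requests to index 8, steering them into kmalloc-256 when KMALLOC_MIN_SIZE is 128 and no 192-byte cache exists.
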