Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig         4
-rw-r--r--  mm/mmap.c          6
-rw-r--r--  mm/page_alloc.c    8
-rw-r--r--  mm/slab.c        314
-rw-r--r--  mm/slob.c         10
-rw-r--r--  mm/util.c         10
-rw-r--r--  mm/vmscan.c        2
7 files changed, 296 insertions, 58 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index bd80460360db..332f5c29b53a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -138,8 +138,8 @@ config SPLIT_PTLOCK_CPUS
 #
 config MIGRATION
 	bool "Page migration"
-	def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM
-	depends on SWAP
+	def_bool y if NUMA
+	depends on SWAP && NUMA
 	help
 	  Allows the migration of the physical location of pages of processes
 	  while the virtual addresses are not changed. This is useful for
diff --git a/mm/mmap.c b/mm/mmap.c
index 0eb9894db6de..4f5b5709136a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1040,12 +1040,11 @@ munmap_back:
 	 * specific mapper. the address has already been validated, but
 	 * not unmapped, but the maps are removed from the list.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		error = -ENOMEM;
 		goto unacct_error;
 	}
-	memset(vma, 0, sizeof(*vma));
 
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
@@ -1896,12 +1895,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	/*
 	 * create a vma struct for an anonymous mapping
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		vm_unacct_memory(len >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
-	memset(vma, 0, sizeof(*vma));
 
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
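
Note: both mm/mmap.c hunks collapse the alloc-then-memset idiom into the new kmem_cache_zalloc() helper (added to mm/slab.c further down), and at the same time drop SLAB_KERNEL, the legacy alias for GFP_KERNEL. A minimal sketch of the pattern being factored out, reusing vm_area_cachep from these hunks; illustrative only:

    /* Before: allocate from the cache, then zero the object by hand. */
    vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
    if (!vma)
            return -ENOMEM;
    memset(vma, 0, sizeof(*vma));

    /* After: one call that hands back an already-zeroed object. */
    vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
    if (!vma)
            return -ENOMEM;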
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a5c3f8bd98ae..338a02bb004d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2029,8 +2029,9 @@ static __meminit void zone_pcp_init(struct zone *zone)
 		setup_pageset(zone_pcp(zone,cpu), batch);
 #endif
 	}
-	printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
-		zone->name, zone->present_pages, batch);
+	if (zone->present_pages)
+		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
+			zone->name, zone->present_pages, batch);
 }
 
 static __meminit void init_currently_empty_zone(struct zone *zone,
@@ -2701,8 +2702,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		else
 			numentries <<= (PAGE_SHIFT - scale);
 	}
-	/* rounded up to nearest power of 2 in size */
-	numentries = 1UL << (long_log2(numentries) + 1);
+	numentries = roundup_pow_of_two(numentries);
 
 	/* limit allocation size to 1/16 total memory by default */
 	if (max == 0) {
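
Note: the alloc_large_system_hash() hunk is a behavior fix as much as a cleanup. Assuming long_log2() is floor(log2(n)), the old expression 1UL << (long_log2(n) + 1) overshoots when n is already a power of two, while roundup_pow_of_two() leaves exact powers unchanged. A small userspace check of the difference (floor_log2() stands in for the old long_log2()):

    #include <assert.h>

    static unsigned long floor_log2(unsigned long n)
    {
            unsigned long r = 0;

            while (n >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            /* Old rounding: an exact power of two doubles (1024 -> 2048). */
            assert((1UL << (floor_log2(1024) + 1)) == 2048);
            /* roundup_pow_of_two(): 1024 stays 1024, 1025 becomes 2048. */
            return 0;
    }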
diff --git a/mm/slab.c b/mm/slab.c
index 26138c9f8f00..681837499d7d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -204,7 +204,8 @@
 typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
-#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
+#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
+#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
 /* Max number of objs-per-slab for caches which use off-slab slabs.
  * Needed to avoid a possible looping condition in cache_grow().
@@ -897,6 +898,30 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 	return nc;
 }
 
+/*
+ * Transfer objects in one arraycache to another.
+ * Locking must be handled by the caller.
+ *
+ * Return the number of entries transferred.
+ */
+static int transfer_objects(struct array_cache *to,
+		struct array_cache *from, unsigned int max)
+{
+	/* Figure out how many entries to transfer */
+	int nr = min(min(from->avail, max), to->limit - to->avail);
+
+	if (!nr)
+		return 0;
+
+	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
+			sizeof(void *) * nr);
+
+	from->avail -= nr;
+	to->avail += nr;
+	to->touched = 1;
+	return nr;
+}
+
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
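
Note: transfer_objects() moves up to max pointers from the tail of one array_cache to the tail of another, clamped by the destination's remaining capacity. A worked example of the arithmetic, assuming only avail, limit, and entry[] matter here:

    /*
     * from: avail = 5, entries A B C D E      to: avail = 2, limit = 4
     *
     * transfer_objects(to, from, 9):
     *   nr = min(min(5, 9), 4 - 2) = 2
     *   copies D and E (the last nr entries of 'from') to the tail of
     *   'to', leaving from->avail = 3, to->avail = 4, to->touched = 1.
     */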
@@ -946,6 +971,13 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 
 	if (ac->avail) {
 		spin_lock(&rl3->list_lock);
+		/*
+		 * Stuff objects into the remote node's shared array first.
+		 * That way we could avoid the overhead of putting the objects
+		 * into the free lists and getting them back later.
+		 */
+		transfer_objects(rl3->shared, ac, ac->limit);
+
 		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
 		spin_unlock(&rl3->list_lock);
@@ -961,8 +993,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
-		if (ac && ac->avail) {
-			spin_lock_irq(&ac->lock);
+
+		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
 			__drain_alien_cache(cachep, ac, node);
 			spin_unlock_irq(&ac->lock);
 		}
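
Note: the reap_alien() hunk switches the drain to spin_trylock_irq(). Cache reaping is periodic best-effort work, so skipping a contended alien cache is cheaper than stalling until it frees up; the next reap interval retries. The idiom in isolation (hypothetical lock and work names):

    /* Best-effort maintenance: if someone else holds the lock, skip
     * this round instead of spinning; a later timer tick retries. */
    if (spin_trylock_irq(&ac_lock)) {       /* hypothetical lock */
            drain_some_objects();           /* hypothetical work */
            spin_unlock_irq(&ac_lock);
    }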
@@ -1989,10 +2021,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	align = ralign;
 
 	/* Get cache's description obj. */
-	cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
 	if (!cachep)
 		goto oops;
-	memset(cachep, 0, sizeof(struct kmem_cache));
 
 #if DEBUG
 	cachep->obj_size = size;
@@ -2399,7 +2430,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 	/* Verify that the slab belongs to the intended node */
 	WARN_ON(slabp->nodeid != nodeid);
 
-	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
 		printk(KERN_ERR "slab: double free detected in cache "
 				"'%s', objp %p\n", cachep->name, objp);
 		BUG();
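
Note: the rewritten double-free test leans on unsigned wraparound. In a slab's bufctl array, a free object holds either the index of the next free object (at most SLAB_LIMIT) or BUFCTL_END; with CONFIG_DEBUG_SLAB_LEAK, a live object now holds BUFCTL_ACTIVE. Since BUFCTL_END is ~0U, adding 1 wraps it to 0, so `v + 1 <= SLAB_LIMIT + 1` is true exactly for free-list values, catching a second free. A self-contained check of the constants from the first mm/slab.c hunk:

    #include <assert.h>

    typedef unsigned int kmem_bufctl_t;
    #define BUFCTL_END    (((kmem_bufctl_t)(~0U))-0)
    #define BUFCTL_FREE   (((kmem_bufctl_t)(~0U))-1)
    #define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
    #define SLAB_LIMIT    (((kmem_bufctl_t)(~0U))-3)

    static int on_free_list(kmem_bufctl_t v)
    {
            return v + 1 <= SLAB_LIMIT + 1;
    }

    int main(void)
    {
            assert(on_free_list(0));              /* free-list index */
            assert(on_free_list(SLAB_LIMIT));     /* largest valid index */
            assert(on_free_list(BUFCTL_END));     /* ~0U + 1 wraps to 0 */
            assert(!on_free_list(BUFCTL_FREE));
            assert(!on_free_list(BUFCTL_ACTIVE)); /* live object */
            return 0;
    }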
@@ -2605,6 +2636,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		 */
 		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+#endif
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2677,20 +2711,10 @@ retry:
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);
 
-	if (l3->shared) {
-		struct array_cache *shared_array = l3->shared;
-		if (shared_array->avail) {
-			if (batchcount > shared_array->avail)
-				batchcount = shared_array->avail;
-			shared_array->avail -= batchcount;
-			ac->avail = batchcount;
-			memcpy(ac->entry,
-			       &(shared_array->entry[shared_array->avail]),
-			       sizeof(void *) * batchcount);
-			shared_array->touched = 1;
-			goto alloc_done;
-		}
-	}
+	/* See if we can refill from the shared array */
+	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+		goto alloc_done;
+
 	while (batchcount > 0) {
 		struct list_head *entry;
 		struct slab *slabp;
@@ -2788,6 +2812,16 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+	{
+		struct slab *slabp;
+		unsigned objnr;
+
+		slabp = page_get_slab(virt_to_page(objp));
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+	}
+#endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -3094,6 +3128,23 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
+ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
+ * @cache: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache and set the allocated memory to zero.
+ * The flags are only relevant if the cache has no available objects.
+ */
+void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
+{
+	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
+	if (ret)
+		memset(ret, 0, obj_size(cache));
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_zalloc);
+
+/**
  * kmem_ptr_validate - check if an untrusted pointer might
  *	be a slab entry.
  * @cachep: the cache we're checking against
@@ -3220,22 +3271,23 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	return __cache_alloc(cachep, flags, caller);
 }
 
-#ifndef CONFIG_DEBUG_SLAB
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_SLAB
 	return __do_kmalloc(size, flags, NULL);
+#else
+	return __do_kmalloc(size, flags, __builtin_return_address(0));
+#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#else
-
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
 	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
-
 #endif
 
 #ifdef CONFIG_SMP
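
Note: the __kmalloc() restructuring exists so the CONFIG_DEBUG_SLAB build records the real allocation site: __builtin_return_address(0) evaluates to the address the current function returns to, i.e. its caller. A trivial userspace illustration of the primitive (GCC-specific):

    #include <stdio.h>

    /* Must not be inlined, or "the caller" loses its meaning. */
    __attribute__((noinline)) static void *who_called_me(void)
    {
            return __builtin_return_address(0);
    }

    int main(void)
    {
            printf("called from near %p\n", who_called_me());
            return 0;
    }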
@@ -3366,63 +3418,86 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
 */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
 	int node;
 	struct kmem_list3 *l3;
-	int err = 0;
+	struct array_cache *new_shared;
+	struct array_cache **new_alien;
 
 	for_each_online_node(node) {
-		struct array_cache *nc = NULL, *new;
-		struct array_cache **new_alien = NULL;
-#ifdef CONFIG_NUMA
+
 		new_alien = alloc_alien_cache(node, cachep->limit);
 		if (!new_alien)
 			goto fail;
-#endif
-		new = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+
+		new_shared = alloc_arraycache(node,
+				cachep->shared*cachep->batchcount,
 					0xbaadf00d);
-		if (!new)
+		if (!new_shared) {
+			free_alien_cache(new_alien);
 			goto fail;
+		}
+
 		l3 = cachep->nodelists[node];
 		if (l3) {
+			struct array_cache *shared = l3->shared;
+
 			spin_lock_irq(&l3->list_lock);
 
-			nc = cachep->nodelists[node]->shared;
-			if (nc)
-				free_block(cachep, nc->entry, nc->avail, node);
+			if (shared)
+				free_block(cachep, shared->entry,
+						shared->avail, node);
 
-			l3->shared = new;
-			if (!cachep->nodelists[node]->alien) {
+			l3->shared = new_shared;
+			if (!l3->alien) {
 				l3->alien = new_alien;
 				new_alien = NULL;
 			}
 			l3->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
 			spin_unlock_irq(&l3->list_lock);
-			kfree(nc);
+			kfree(shared);
 			free_alien_cache(new_alien);
 			continue;
 		}
 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
-		if (!l3)
+		if (!l3) {
+			free_alien_cache(new_alien);
+			kfree(new_shared);
 			goto fail;
+		}
 
 		kmem_list3_init(l3);
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-		l3->shared = new;
+		l3->shared = new_shared;
 		l3->alien = new_alien;
 		l3->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
 		cachep->nodelists[node] = l3;
 	}
-	return err;
+	return 0;
+
 fail:
-	err = -ENOMEM;
-	return err;
+	if (!cachep->next.next) {
+		/* Cache is not active yet. Roll back what we did */
+		node--;
+		while (node >= 0) {
+			if (cachep->nodelists[node]) {
+				l3 = cachep->nodelists[node];
+
+				kfree(l3->shared);
+				free_alien_cache(l3->alien);
+				kfree(l3);
+				cachep->nodelists[node] = NULL;
+			}
+			node--;
+		}
+	}
+	return -ENOMEM;
 }
 
 struct ccupdate_struct {
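
Note: the new failure path keys its rollback on cachep->next.next being NULL. That works because the cache descriptor now comes from kmem_cache_zalloc() (see the kmem_cache_create() hunk above), so the next list_head stays zeroed until the cache is linked onto the global cache chain; a non-NULL pointer therefore means the cache is already live and must not be torn down. A minimal model of that invariant:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    struct list_head { struct list_head *next, *prev; };
    struct cache_sketch { struct list_head next; /* ... */ };

    int main(void)
    {
            struct cache_sketch c;

            memset(&c, 0, sizeof(c));    /* as kmem_cache_zalloc() does */
            assert(c.next.next == NULL); /* not on the chain yet */
            return 0;
    }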
@@ -3899,6 +3974,159 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 		res = count;
 	return res;
 }
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+	struct list_head *p;
+
+	mutex_lock(&cache_chain_mutex);
+	p = cache_chain.next;
+	while (n--) {
+		p = p->next;
+		if (p == &cache_chain)
+			return NULL;
+	}
+	return list_entry(p, struct kmem_cache, next);
+}
+
+static inline int add_caller(unsigned long *n, unsigned long v)
+{
+	unsigned long *p;
+	int l;
+	if (!v)
+		return 1;
+	l = n[1];
+	p = n + 2;
+	while (l) {
+		int i = l/2;
+		unsigned long *q = p + 2 * i;
+		if (*q == v) {
+			q[1]++;
+			return 1;
+		}
+		if (*q > v) {
+			l = i;
+		} else {
+			p = q + 2;
+			l -= i + 1;
+		}
+	}
+	if (++n[1] == n[0])
+		return 0;
+	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
+	p[0] = v;
+	p[1] = 1;
+	return 1;
+}
+
+static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+	void *p;
+	int i;
+	if (n[0] == n[1])
+		return;
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+			continue;
+		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+			return;
+	}
+}
+
+static void show_symbol(struct seq_file *m, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+	char *modname;
+	const char *name;
+	unsigned long offset, size;
+	char namebuf[KSYM_NAME_LEN+1];
+
+	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+
+	if (name) {
+		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+		if (modname)
+			seq_printf(m, " [%s]", modname);
+		return;
+	}
+#endif
+	seq_printf(m, "%p", (void *)address);
+}
+
+static int leaks_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *cachep = p;
+	struct list_head *q;
+	struct slab *slabp;
+	struct kmem_list3 *l3;
+	const char *name;
+	unsigned long *n = m->private;
+	int node;
+	int i;
+
+	if (!(cachep->flags & SLAB_STORE_USER))
+		return 0;
+	if (!(cachep->flags & SLAB_RED_ZONE))
+		return 0;
+
+	/* OK, we can do it */
+
+	n[1] = 0;
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
+
+		list_for_each(q, &l3->slabs_full) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		list_for_each(q, &l3->slabs_partial) {
+			slabp = list_entry(q, struct slab, list);
+			handle_slab(n, cachep, slabp);
+		}
+		spin_unlock_irq(&l3->list_lock);
+	}
+	name = cachep->name;
+	if (n[0] == n[1]) {
+		/* Increase the buffer size */
+		mutex_unlock(&cache_chain_mutex);
+		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		if (!m->private) {
+			/* Too bad, we are really out */
+			m->private = n;
+			mutex_lock(&cache_chain_mutex);
+			return -ENOMEM;
+		}
+		*(unsigned long *)m->private = n[0] * 2;
+		kfree(n);
+		mutex_lock(&cache_chain_mutex);
+		/* Now make sure this entry will be retried */
+		m->count = m->size;
+		return 0;
+	}
+	for (i = 0; i < n[1]; i++) {
+		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+		show_symbol(m, n[2*i+2]);
+		seq_putc(m, '\n');
+	}
+	return 0;
+}
+
+struct seq_operations slabstats_op = {
+	.start = leaks_start,
+	.next = s_next,
+	.stop = s_stop,
+	.show = leaks_show,
+};
+#endif
 #endif
 
 /**
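
Note: reading the new leak-detection code as a whole, leaks_show() walks every full and partial slab, handle_slab() tallies the recorded allocation caller (*dbg_userword) of each BUFCTL_ACTIVE object via add_caller()'s binary-searched table of <address, count> pairs, and one line is printed per unique call site. Based on the seq_printf() formats above, output would look roughly like the following (the counts, symbols, and offsets are made-up placeholders; the hookup of slabstats_op to a /proc file is outside this mm-only diffstat):

    vm_area_struct: 184 do_brk+0x4d/0x140
    size-64: 12 some_caller+0x24/0x3e0 [some_module]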
diff --git a/mm/slob.c b/mm/slob.c
index a1f42bdc0245..9bcc7e2cabfd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -294,6 +294,16 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
+{
+	void *ret = kmem_cache_alloc(c, flags);
+	if (ret)
+		memset(ret, 0, c->size);
+
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_zalloc);
+
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	if (c->dtor)
diff --git a/mm/util.c b/mm/util.c
index 49e29f751b50..7368479220b3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -5,18 +5,18 @@
 #include <asm/uaccess.h>
 
 /**
- * kzalloc - allocate memory. The memory is set to zero.
+ * __kzalloc - allocate memory. The memory is set to zero.
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
 */
-void *kzalloc(size_t size, gfp_t flags)
+void *__kzalloc(size_t size, gfp_t flags)
 {
-	void *ret = kmalloc(size, flags);
+	void *ret = ____kmalloc(size, flags);
 	if (ret)
 		memset(ret, 0, size);
 	return ret;
 }
-EXPORT_SYMBOL(kzalloc);
+EXPORT_SYMBOL(__kzalloc);
 
 /*
  * kstrdup - allocate space for and copy an existing string
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
 		return NULL;
 
 	len = strlen(s) + 1;
-	buf = kmalloc(len, gfp);
+	buf = ____kmalloc(len, gfp);
 	if (buf)
 		memcpy(buf, s, len);
 	return buf;
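
Note: renaming kzalloc() to __kzalloc() and routing it through ____kmalloc() only makes sense alongside header changes this mm-only diffstat does not show; presumably kzalloc() becomes an inline wrapper with __kzalloc() as the exported out-of-line fallback. A plausible shape for such a wrapper (an assumption, not the actual header):

    /* Sketch only: kmalloc() inlines and can constant-fold the size. */
    static inline void *kzalloc_sketch(size_t size, gfp_t flags)
    {
            void *p = kmalloc(size, flags);

            if (p)
                    memset(p, 0, size);
            return p;
    }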
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fd572bbdc9f5..78865c849f8f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1356,7 +1356,9 @@ static int __init kswapd_init(void)
 
 		pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
 		BUG_ON(pid < 0);
+		read_lock(&tasklist_lock);
 		pgdat->kswapd = find_task_by_pid(pid);
+		read_unlock(&tasklist_lock);
 	}
 	total_memory = nr_free_pagecache_pages();
 	hotcpu_notifier(cpu_callback, 0);
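
Note: the mm/vmscan.c fix brackets find_task_by_pid() with tasklist_lock, since without the read lock the looked-up task could in principle exit and be freed between lookup and use. The general pattern when the task pointer must outlive the lock (sketch):

    struct task_struct *p;

    read_lock(&tasklist_lock);
    p = find_task_by_pid(pid);
    if (p)
            get_task_struct(p);     /* pin it past the unlock */
    read_unlock(&tasklist_lock);

    /* ... use p ... */
    if (p)
            put_task_struct(p);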