Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig        |    2
-rw-r--r--  mm/filemap.c      |    3
-rw-r--r--  mm/filemap_xip.c  |    1
-rw-r--r--  mm/madvise.c      |    1
-rw-r--r--  mm/memory.c       |    2
-rw-r--r--  mm/mlock.c        |   11
-rw-r--r--  mm/msync.c        |    1
-rw-r--r--  mm/page_alloc.c   |    4
-rw-r--r--  mm/rmap.c         |   66
-rw-r--r--  mm/shmem.c        |    8
-rw-r--r--  mm/slab.c         |   59
-rw-r--r--  mm/slob.c         |   53
-rw-r--r--  mm/slub.c         |  234
-rw-r--r--  mm/sparse.c       |    2
-rw-r--r--  mm/vmalloc.c      |    2
-rw-r--r--  mm/vmstat.c       |    1
16 files changed, 257 insertions(+), 193 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index a17da8bafe62..8ac412b45f18 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -166,5 +166,5 @@ config ZONE_DMA_FLAG
 config NR_QUICK
 	int
 	depends on QUICKLIST
-	default "2" if SUPERH
+	default "2" if (SUPERH && !SUPERH64)
 	default "1"
diff --git a/mm/filemap.c b/mm/filemap.c
index 7b48b2ad00e7..edb1b0b5cc8d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -670,7 +670,8 @@ repeat:
 	page = find_lock_page(mapping, index);
 	if (!page) {
 		if (!cached_page) {
-			cached_page = alloc_page(gfp_mask);
+			cached_page =
+				__page_cache_alloc(gfp_mask);
 			if (!cached_page)
 				return NULL;
 		}
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 1b49dab9b25d..fa360e566d88 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/uio.h>
 #include <linux/rmap.h>
+#include <linux/sched.h>
 #include <asm/tlbflush.h>
 #include "filemap.h"
 
diff --git a/mm/madvise.c b/mm/madvise.c
index e75096b5a6d3..60542d006ec1 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -10,6 +10,7 @@
 #include <linux/syscalls.h>
 #include <linux/mempolicy.h>
 #include <linux/hugetlb.h>
+#include <linux/sched.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
diff --git a/mm/memory.c b/mm/memory.c
index 1d647ab0ee72..cb94488ab96d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	page = vm_normal_page(vma, addr, pte);
 	if (page) {
 		get_page(page);
-		page_dup_rmap(page);
+		page_dup_rmap(page, vma, addr);
 		rss[!!PageAnon(page)]++;
 	}
 
diff --git a/mm/mlock.c b/mm/mlock.c
index 3446b7ef731e..4d3fea267e0d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -10,7 +10,18 @@
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>
+#include <linux/sched.h>
+#include <linux/module.h>
 
+int can_do_mlock(void)
+{
+	if (capable(CAP_IPC_LOCK))
+		return 1;
+	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(can_do_mlock);
 
 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	unsigned long start, unsigned long end, unsigned int newflags)
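
For context, the can_do_mlock() helper added above passes privileged callers unconditionally and otherwise requires a non-zero RLIMIT_MEMLOCK soft limit. A rough userspace sketch of the same policy (not kernel code; geteuid() == 0 stands in for the capable(CAP_IPC_LOCK) test, which has no direct userspace equivalent here):

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static int can_do_mlock_demo(void)
{
	struct rlimit rl;

	/* stand-in for capable(CAP_IPC_LOCK) */
	if (geteuid() == 0)
		return 1;
	/* mirrors the RLIMIT_MEMLOCK soft-limit test in the hunk above */
	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0 && rl.rlim_cur != 0)
		return 1;
	return 0;
}

int main(void)
{
	printf("can_do_mlock: %d\n", can_do_mlock_demo());
	return 0;
}
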
diff --git a/mm/msync.c b/mm/msync.c
index 358d73cf7b78..144a7570535d 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -12,6 +12,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/syscalls.h>
+#include <linux/sched.h>
 
 /*
  * MS_SYNC syncs the entire file - including mappings.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae96dd844432..8b000d6803c2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2165,7 +2165,7 @@ void __init setup_per_cpu_pageset(void)
 
 #endif
 
-static __meminit noinline
+static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
@@ -2678,7 +2678,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 	}
 }
 
-static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
+static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 {
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
diff --git a/mm/rmap.c b/mm/rmap.c
index 304f51985c78..850165d32b7a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;
 
-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }
 
 void __init anon_vma_init(void)
@@ -532,19 +530,51 @@ static void __page_set_anon_rmap(struct page *page,
 }
 
 /**
+ * page_set_anon_rmap - sanity check anonymous rmap addition
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
@@ -555,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
@@ -575,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
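
The rmap hunks above rely on the _mapcount convention: the counter starts at -1, so the increment that reaches zero marks the first mapping (only that path calls __page_set_anon_rmap(); later mappings now go through the new __page_check_anon_rmap() debug check instead). A minimal userspace sketch of that inc-and-test convention, using C11 atomics rather than the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* true only when the increment brings the counter to zero,
 * i.e. the transition that marks the first mapping */
static bool inc_and_test(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1 == 0;
}

int main(void)
{
	atomic_int mapcount;

	atomic_init(&mapcount, -1);	/* "no mappings yet" */

	for (int i = 1; i <= 3; i++)
		printf("mapping %d: first mapping? %s\n", i,
		       inc_and_test(&mapcount) ? "yes" : "no");
	return 0;
}
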
diff --git a/mm/shmem.c b/mm/shmem.c
index f01e8deed645..e537317bec4d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2358,13 +2358,11 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&p->vfs_inode);
+	inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	p->i_acl = NULL;
 	p->i_default_acl = NULL;
 #endif
-	}
 }
 
 static int init_inodecache(void)
diff --git a/mm/slab.c b/mm/slab.c
index 944b20581f8c..2e71a328aa09 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -409,9 +409,6 @@ struct kmem_cache {
 	/* constructor func */
 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
-	/* de-constructor func */
-	void (*dtor) (void *, struct kmem_cache *, unsigned long);
-
 /* 5) cache creation/removal */
 	const char *name;
 	struct list_head next;
@@ -572,21 +569,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #endif
 
 /*
- * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
- * order.
- */
-#if defined(CONFIG_LARGE_ALLOCS)
-#define MAX_OBJ_ORDER	13	/* up to 32Mb */
-#define MAX_GFP_ORDER	13	/* up to 32Mb */
-#elif defined(CONFIG_MMU)
-#define MAX_OBJ_ORDER	5	/* 32 pages */
-#define MAX_GFP_ORDER	5	/* 32 pages */
-#else
-#define MAX_OBJ_ORDER	8	/* up to 1Mb */
-#define MAX_GFP_ORDER	8	/* up to 1Mb */
-#endif
-
-/*
  * Do not go above this order unless 0 objects fit into the slab.
  */
 #define BREAK_GFP_ORDER_HI	1
@@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	WARN_ON_ONCE(size == 0);
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -1911,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 			slab_error(cachep, "end of a freed object "
 				   "was overwritten");
 		}
-		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
 }
 #else
 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-	if (cachep->dtor) {
-		int i;
-		for (i = 0; i < cachep->num; i++) {
-			void *objp = index_to_obj(cachep, slabp, i);
-			(cachep->dtor) (objp, cachep, 0);
-		}
-	}
 }
 #endif
 
@@ -2013,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	size_t left_over = 0;
 	int gfporder;
 
-	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
 		unsigned int num;
 		size_t remainder;
 
@@ -2063,7 +2037,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 {
 	if (g_cpucache_up == FULL)
 		return enable_cpucache(cachep);
@@ -2124,7 +2098,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep)
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @dtor: A destructor for the objects (not implemented anymore).
  *
  * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
@@ -2159,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+	    size > KMALLOC_MAX_SIZE || dtor) {
 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
 				name);
 		BUG();
@@ -2213,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	if (flags & SLAB_DESTROY_BY_RCU)
-		BUG_ON(dtor);
-
 	/*
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
@@ -2370,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG_ON(!cachep->slabp_cache);
 	}
 	cachep->ctor = ctor;
-	cachep->dtor = dtor;
 	cachep->name = name;
 
 	if (setup_cpu_cache(cachep)) {
@@ -2625,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2649,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-					ctor_flags);
+					0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2665,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -2754,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2763,7 +2732,6 @@
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2808,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -2835,7 +2803,6 @@ failed:
  * Perform extra freeing checks:
  * - detect bad pointers.
  * - POISON/RED_ZONE checking
- * - destructor calls, for caches with POISON+dtor
  */
 static void kfree_debugcheck(const void *objp)
 {
@@ -2894,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_POISON && cachep->dtor) {
-		/* we want to cache poison the object,
-		 * call the destruction callback
-		 */
-		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
-	}
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
 #endif
@@ -3099,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
diff --git a/mm/slob.c b/mm/slob.c
index c6933bc19bcd..71976c5d40d3 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/timer.h>
+#include <linux/rcupdate.h>
 
 struct slob_block {
 	int units;
@@ -53,6 +54,16 @@ struct bigblock {
 };
 typedef struct bigblock bigblock_t;
 
+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+	struct rcu_head head;
+	int size;
+};
+
 static slob_t arena = { .next = &arena, .units = 1 };
 static slob_t *slobfree = &arena;
 static bigblock_t *bigblocks;
@@ -266,9 +277,9 @@ size_t ksize(const void *block)
 
 struct kmem_cache {
 	unsigned int size, align;
+	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 };
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -283,8 +294,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	if (c) {
 		c->name = name;
 		c->size = size;
+		if (flags & SLAB_DESTROY_BY_RCU) {
+			/* leave room for rcu footer at the end of object */
+			c->size += sizeof(struct slob_rcu);
+		}
+		c->flags = flags;
 		c->ctor = ctor;
-		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
 		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
@@ -312,7 +327,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 		b = (void *)__get_free_pages(flags, get_order(c->size));
 
 	if (c->ctor)
-		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
+		c->ctor(b, c, 0);
 
 	return b;
 }
@@ -328,15 +343,33 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
-void kmem_cache_free(struct kmem_cache *c, void *b)
+static void __kmem_cache_free(void *b, int size)
 {
-	if (c->dtor)
-		c->dtor(b, c, 0);
-
-	if (c->size < PAGE_SIZE)
-		slob_free(b, c->size);
+	if (size < PAGE_SIZE)
+		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(c->size));
+		free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+	__kmem_cache_free(b, slob_rcu->size);
+}
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+		struct slob_rcu *slob_rcu;
+		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+		INIT_RCU_HEAD(&slob_rcu->head);
+		slob_rcu->size = c->size;
+		call_rcu(&slob_rcu->head, kmem_rcu_free);
+	} else {
+		__kmem_cache_free(b, c->size);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
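
The slob changes above reserve a struct slob_rcu footer at the tail of every object in a SLAB_DESTROY_BY_RCU cache; kmem_cache_free() fills it in and the RCU callback walks back from the footer to the start of the block. A small userspace sketch of that pointer arithmetic, with an illustrative footer struct standing in for the kernel's rcu_head:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the rcu footer: a callback slot instead of struct rcu_head. */
struct demo_rcu_footer {
	void (*func)(void *);
	int size;		/* total object size, footer included */
};

int main(void)
{
	int payload = 32;
	int size = payload + sizeof(struct demo_rcu_footer);	/* c->size += sizeof(...) */
	char *b = malloc(size);

	if (!b)
		return 1;

	/* free path: the footer lives at b + (size - sizeof(footer)) */
	struct demo_rcu_footer *footer =
		(struct demo_rcu_footer *)(b + size - sizeof(*footer));
	footer->size = size;

	/* callback path: walk back from the footer to the block start */
	char *start = (char *)footer - (footer->size - sizeof(*footer));
	printf("recovered start == b? %s\n", start == b ? "yes" : "no");

	free(b);
	return 0;
}
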
diff --git a/mm/slub.c b/mm/slub.c
index b39c8a69a4ff..98801d404d69 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -78,10 +78,18 @@
  *
  * Overloading of page flags that are otherwise used for LRU management.
  *
- * PageActive		The slab is used as a cpu cache. Allocations
- *			may be performed from the slab. The slab is not
- *			on any slab list and cannot be moved onto one.
- *			The cpu slab may be equipped with an additioanl
+ * PageActive		The slab is frozen and exempt from list processing.
+ *			This means that the slab is dedicated to a purpose
+ *			such as satisfying allocations for a specific
+ *			processor. Objects may be freed in the slab while
+ *			it is frozen but slab_free will then skip the usual
+ *			list operations. It is up to the processor holding
+ *			the slab to integrate the slab into the slab lists
+ *			when the slab is no longer needed.
+ *
+ *			One use of this flag is to mark slabs that are
+ *			used for allocations. Then such a slab becomes a cpu
+ *			slab. The cpu slab may be equipped with an additional
  *			lockless_freelist that allows lockless access to
  *			free objects in addition to the regular freelist
  *			that requires the slab lock.
@@ -91,27 +99,42 @@
  *			the fast path and disables lockless freelists.
  */
 
-static inline int SlabDebug(struct page *page)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	return PageError(page);
-#else
-	return 0;
-#endif
+#define FROZEN (1 << PG_active)
+
+#ifdef CONFIG_SLUB_DEBUG
+#define SLABDEBUG (1 << PG_error)
+#else
+#define SLABDEBUG 0
+#endif
+
+static inline int SlabFrozen(struct page *page)
+{
+	return page->flags & FROZEN;
+}
+
+static inline void SetSlabFrozen(struct page *page)
+{
+	page->flags |= FROZEN;
+}
+
+static inline void ClearSlabFrozen(struct page *page)
+{
+	page->flags &= ~FROZEN;
+}
+
+static inline int SlabDebug(struct page *page)
+{
+	return page->flags & SLABDEBUG;
 }
 
 static inline void SetSlabDebug(struct page *page)
 {
-#ifdef CONFIG_SLUB_DEBUG
-	SetPageError(page);
-#endif
+	page->flags |= SLABDEBUG;
 }
 
 static inline void ClearSlabDebug(struct page *page)
 {
-#ifdef CONFIG_SLUB_DEBUG
-	ClearPageError(page);
-#endif
+	page->flags &= ~SLABDEBUG;
 }
 
 /*
@@ -719,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+{
+	if (s->flags & SLAB_TRACE) {
+		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+			s->name,
+			alloc ? "alloc" : "free",
+			object, page->inuse,
+			page->freelist);
+
+		if (!alloc)
+			print_section("Object", (void *)object, s->objsize);
+
+		dump_stack();
+	}
+}
+
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
@@ -743,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page)
 	spin_unlock(&n->list_lock);
 }
 
-static int alloc_object_checks(struct kmem_cache *s, struct page *page,
+static void setup_object_debug(struct kmem_cache *s, struct page *page,
 							void *object)
+{
+	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
+		return;
+
+	init_object(s, object, 0);
+	init_tracking(s, object);
+}
+
+static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+						void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -759,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 		goto bad;
 	}
 
-	if (!object)
-		return 1;
-
-	if (!check_object(s, page, object, 0))
+	if (object && !check_object(s, page, object, 0))
 		goto bad;
 
+	/* Success perform special debug activities for allocs */
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, object, TRACK_ALLOC, addr);
+	trace(s, page, object, 1);
+	init_object(s, object, 1);
 	return 1;
+
 bad:
 	if (PageSlab(page)) {
 		/*
@@ -783,8 +835,8 @@ bad:
 	return 0;
 }
 
-static int free_object_checks(struct kmem_cache *s, struct page *page,
-							void *object)
+static int free_debug_processing(struct kmem_cache *s, struct page *page,
+						void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -818,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
 			"to slab %s", object, page->slab->name);
 		goto fail;
 	}
+
+	/* Special debug activities for freeing objects */
+	if (!SlabFrozen(page) && !page->freelist)
+		remove_full(s, page);
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, object, TRACK_FREE, addr);
+	trace(s, page, object, 0);
+	init_object(s, object, 0);
 	return 1;
+
 fail:
 	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
 		s->name, page, object);
 	return 0;
 }
 
-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
-{
-	if (s->flags & SLAB_TRACE) {
-		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
-			s->name,
-			alloc ? "alloc" : "free",
-			object, page->inuse,
-			page->freelist);
-
-		if (!alloc)
-			print_section("Object", (void *)object, s->objsize);
-
-		dump_stack();
-	}
-}
-
 static int __init setup_slub_debug(char *str)
 {
 	if (!str || *str != '=')
@@ -891,13 +936,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	 * On 32 bit platforms the limit is 256k. On 64bit platforms
 	 * the limit is 512k.
 	 *
-	 * Debugging or ctor/dtors may create a need to move the free
+	 * Debugging or ctor may create a need to move the free
 	 * pointer. Fail if this happens.
 	 */
 	if (s->size >= 65535 * sizeof(void *)) {
 		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
 				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(s->ctor || s->dtor);
+		BUG_ON(s->ctor);
 	}
 	else
 		/*
@@ -909,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	s->flags |= slub_debug;
 }
 #else
+static inline void setup_object_debug(struct kmem_cache *s,
+			struct page *page, void *object) {}
 
-static inline int alloc_object_checks(struct kmem_cache *s,
-		struct page *page, void *object) { return 0; }
+static inline int alloc_debug_processing(struct kmem_cache *s,
+	struct page *page, void *object, void *addr) { return 0; }
 
-static inline int free_object_checks(struct kmem_cache *s,
-		struct page *page, void *object) { return 0; }
+static inline int free_debug_processing(struct kmem_cache *s,
+	struct page *page, void *object, void *addr) { return 0; }
 
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline void trace(struct kmem_cache *s, struct page *page,
-			void *object, int alloc) {}
-static inline void init_object(struct kmem_cache *s,
-			void *object, int active) {}
-static inline void init_tracking(struct kmem_cache *s, void *object) {}
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, int active) { return 1; }
-static inline void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr) {}
+static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
 #define slub_debug 0
 #endif
@@ -965,13 +1004,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
-	if (SlabDebug(page)) {
-		init_object(s, object, 0);
-		init_tracking(s, object);
-	}
-
+	setup_object_debug(s, page, object);
 	if (unlikely(s->ctor))
-		s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+		s->ctor(object, s, 0);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1030,15 +1065,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int pages = 1 << s->order;
 
-	if (unlikely(SlabDebug(page) || s->dtor)) {
+	if (unlikely(SlabDebug(page))) {
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page)) {
-			if (s->dtor)
-				s->dtor(p, s, 0);
+		for_each_object(p, s, page_address(page))
 			check_object(s, page, p, 0);
-		}
 	}
 
 	mod_zone_page_state(page_zone(page),
@@ -1138,11 +1170,12 @@ static void remove_partial(struct kmem_cache *s,
 *
 * Must hold list_lock.
 */
-static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
+		SetSlabFrozen(page);
 		return 1;
 	}
 	return 0;
@@ -1166,7 +1199,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_del_slab(n, page))
+		if (lock_and_freeze_slab(n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1245,10 +1278,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 *
 * On exit the slab lock will have been dropped.
 */
-static void putback_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
+	ClearSlabFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist)
@@ -1299,9 +1333,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
 		page->inuse--;
 	}
 	s->cpu_slab[cpu] = NULL;
-	ClearPageActive(page);
-
-	putback_slab(s, page);
+	unfreeze_slab(s, page);
 }
 
 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1392,9 +1424,7 @@ another_slab:
 new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
-have_slab:
 		s->cpu_slab[cpu] = page;
-		SetPageActive(page);
 		goto load_freelist;
 	}
 
@@ -1424,17 +1454,15 @@ have_slab:
 			flush_slab(s, s->cpu_slab[cpu], cpu);
 		}
 		slab_lock(page);
-		goto have_slab;
+		SetSlabFrozen(page);
+		s->cpu_slab[cpu] = page;
+		goto load_freelist;
 	}
 	return NULL;
 debug:
 	object = page->freelist;
-	if (!alloc_object_checks(s, page, object))
+	if (!alloc_debug_processing(s, page, object, addr))
 		goto another_slab;
-	if (s->flags & SLAB_STORE_USER)
-		set_track(s, object, TRACK_ALLOC, addr);
-	trace(s, page, object, 1);
-	init_object(s, object, 1);
 
 	page->inuse++;
 	page->freelist = object[page->offset];
@@ -1511,11 +1539,7 @@ checks_ok:
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(PageActive(page)))
-		/*
-		 * Cpu slabs are never on partial lists and are
-		 * never freed.
-		 */
+	if (unlikely(SlabFrozen(page)))
 		goto out_unlock;
 
 	if (unlikely(!page->inuse))
@@ -1545,14 +1569,8 @@ slab_empty:
 	return;
 
 debug:
-	if (!free_object_checks(s, page, x))
+	if (!free_debug_processing(s, page, x, addr))
 		goto out_unlock;
-	if (!PageActive(page) && !page->freelist)
-		remove_full(s, page);
-	if (s->flags & SLAB_STORE_USER)
-		set_track(s, x, TRACK_FREE, addr);
-	trace(s, page, object, 0);
-	init_object(s, object, 0);
 	goto checks_ok;
 }
 
@@ -1789,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
 	page->freelist = get_freepointer(kmalloc_caches, n);
 	page->inuse++;
 	kmalloc_caches->node[node] = n;
-	init_object(kmalloc_caches, n, 1);
+	setup_object_debug(kmalloc_caches, page, n);
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);
@@ -1871,7 +1889,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * then we should never poison the object itself.
 	 */
 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
-			!s->ctor && !s->dtor)
+			!s->ctor)
 		s->flags |= __OBJECT_POISON;
 	else
 		s->flags &= ~__OBJECT_POISON;
@@ -1901,7 +1919,7 @@ static int calculate_sizes(struct kmem_cache *s)
 
 #ifdef CONFIG_SLUB_DEBUG
 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-			s->ctor || s->dtor)) {
+			s->ctor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
 		 * permitted to overwrite the first word of the object on
@@ -1970,13 +1988,11 @@ static int calculate_sizes(struct kmem_cache *s)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
-	s->dtor = dtor;
 	s->objsize = size;
 	s->flags = flags;
 	s->align = align;
@@ -2161,7 +2177,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL, NULL))
+			flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2463,7 +2479,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if (s->ctor || s->dtor)
+	if (s->ctor)
 		return 1;
 
 	return 0;
@@ -2471,15 +2487,14 @@ static int slab_unmergeable(struct kmem_cache *s)
 
 static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	struct list_head *h;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
 
-	if (ctor || dtor)
+	if (ctor)
 		return NULL;
 
 	size = ALIGN(size, sizeof(void *));
@@ -2521,8 +2536,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
 	struct kmem_cache *s;
 
+	BUG_ON(dtor);
 	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, dtor, ctor);
+	s = find_mergeable(size, align, flags, ctor);
 	if (s) {
 		s->refcount++;
 		/*
@@ -2536,7 +2552,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	} else {
 		s = kmalloc(kmem_size, GFP_KERNEL);
 		if (s && kmem_cache_open(s, GFP_KERNEL, name,
-				size, align, flags, ctor, dtor)) {
+				size, align, flags, ctor)) {
 			if (sysfs_slab_add(s)) {
 				kfree(s);
 				goto err;
@@ -3177,17 +3193,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(ctor);
 
-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
-	if (s->dtor) {
-		int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
-}
-SLAB_ATTR_RO(dtor);
-
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3419,7 +3424,6 @@ static struct attribute * slab_attrs[] = {
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
 	&ctor_attr.attr,
-	&dtor_attr.attr,
 	&aliases_attr.attr,
 	&align_attr.attr,
 	&sanity_checks_attr.attr,
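
The FROZEN and SLABDEBUG definitions near the top of the mm/slub.c diff keep per-slab state as bits in page->flags, with trivial test/set/clear helpers. A standalone sketch of that accessor pattern, using made-up bit positions and a dummy page structure rather than the kernel's PG_active/PG_error bits:

#include <stdio.h>

/* Illustrative only: mirrors the SlabFrozen/SetSlabFrozen/ClearSlabFrozen
 * helpers with invented bit positions in a plain flags word. */
#define DEMO_FROZEN    (1UL << 0)
#define DEMO_SLABDEBUG (1UL << 1)

struct demo_page { unsigned long flags; };

static int slab_frozen(struct demo_page *p)   { return (p->flags & DEMO_FROZEN) != 0; }
static void set_frozen(struct demo_page *p)   { p->flags |= DEMO_FROZEN; }
static void clear_frozen(struct demo_page *p) { p->flags &= ~DEMO_FROZEN; }

int main(void)
{
	struct demo_page page = { .flags = 0 };

	set_frozen(&page);
	printf("frozen=%d\n", slab_frozen(&page));
	clear_frozen(&page);
	printf("frozen=%d\n", slab_frozen(&page));
	return 0;
}
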
diff --git a/mm/sparse.c b/mm/sparse.c
index 6f3fff907bc2..1302f8348d51 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(page_to_nid);
 #endif
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section noinline *sparse_index_alloc(int nid)
+static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 {
 	struct mem_section *section = NULL;
 	unsigned long array_size = SECTIONS_PER_ROOT *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index faa2a521dea3..d3a9c5368257 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -311,7 +311,7 @@ struct vm_struct *remove_vm_area(void *addr)
 	return v;
 }
 
-void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8faf27e5aa98..38254297a494 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};