author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:09:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:09:51 -0400
commit	b3fec0fe35a4ff048484f1408385a27695d4273b (patch)
tree	088c23f098421ea681d9976a83aad73d15be1027 /mm/slub.c
parent	e1f5b94fd0c93c3e27ede88b7ab652d086dc960f (diff)
parent	722f2a6c87f34ee0fd0130a8cf45f81e0705594a (diff)
Merge branch 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck
* 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck: (39 commits)
  signal: fix __send_signal() false positive kmemcheck warning
  fs: fix do_mount_root() false positive kmemcheck warning
  fs: introduce __getname_gfp()
  trace: annotate bitfields in struct ring_buffer_event
  net: annotate struct sock bitfield
  c2port: annotate bitfield for kmemcheck
  net: annotate inet_timewait_sock bitfields
  ieee1394/csr1212: fix false positive kmemcheck report
  ieee1394: annotate bitfield
  net: annotate bitfields in struct inet_sock
  net: use kmemcheck bitfields API for skbuff
  kmemcheck: introduce bitfield API
  kmemcheck: add opcode self-testing at boot
  x86: unify pte_hidden
  x86: make _PAGE_HIDDEN conditional
  kmemcheck: make kconfig accessible for other architectures
  kmemcheck: enable in the x86 Kconfig
  kmemcheck: add hooks for the page allocator
  kmemcheck: add hooks for page- and sg-dma-mappings
  kmemcheck: don't track page tables
  ...
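Several commits in this series annotate struct bitfields so kmemcheck does not report false positives: adjacent bits share one memory word, so setting a single bit performs a read-modify-write of bytes the tracker may still consider uninitialized. A minimal sketch of the bitfield API the series introduces (the struct and field names below are illustrative, not taken from the merged code):

#include <linux/kmemcheck.h>

struct example_state {
	kmemcheck_bitfield_begin(flags);	/* start of the annotated group */
	unsigned int ready:1;
	unsigned int pinned:1;
	kmemcheck_bitfield_end(flags);		/* end of the annotated group */
};

static void example_init(struct example_state *st)
{
	/*
	 * Mark the whole group initialized before touching any single
	 * bit; otherwise the read-modify-write of one bit would read
	 * bytes kmemcheck still considers uninitialized.
	 */
	kmemcheck_annotate_bitfield(st, flags);
	st->ready = 1;
}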
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	38
1 files changed, 34 insertions, 4 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 30354bfeb43d..15960a09abb1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA)
+		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -1071,6 +1072,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
 	int order = oo_order(oo);
 
+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
@@ -1098,6 +1101,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
 	}
+
+	if (kmemcheck_enabled
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+	{
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
+	}
+
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
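For context: kmemcheck keeps shadow storage for each tracked slab page and records a per-byte state, which is why the hunk above allocates a shadow and then marks the pages. Caches with a constructor hand out objects whose contents the ctor defines, so their pages are marked uninitialized as a whole; plain caches start out unallocated until objects are handed out. A toy model of the per-byte bookkeeping, purely illustrative and not the kernel's implementation:

#include <stddef.h>

/* Illustrative per-byte states; they mirror kmemcheck's distinction
 * between unallocated and uninitialized memory, but nothing below is
 * kernel code. */
enum shadow { UNALLOCATED, UNINITIALIZED, INITIALIZED };

static enum shadow shadow_of[4096];	/* one state per tracked byte */

static void track_write(size_t addr, size_t len)
{
	while (len--)
		shadow_of[addr++] = INITIALIZED;	/* a write defines the byte */
}

static int track_read(size_t addr, size_t len)
{
	for (; len--; addr++) {
		if (shadow_of[addr] == UNALLOCATED)
			return -1;	/* access outside any live object */
		if (shadow_of[addr] == UNINITIALIZED)
			return -2;	/* use of uninitialized memory */
	}
	return 0;
}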
@@ -1171,6 +1192,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}
 
+	kmemcheck_free_shadow(page, compound_order(page));
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1626,7 +1649,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
 
+	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
 	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
 	return object;
 }
 
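Note the ordering: kmemcheck_slab_alloc() runs after the __GFP_ZERO memset, so a zeroed object need not be reported as uninitialized. A simplified reconstruction of the per-object policy (the real hook lives elsewhere in the tree; the checks below are an assumption about its shape, not a copy of it):

#include <linux/kmemcheck.h>
#include <linux/slab.h>

/* Simplified reconstruction of the per-object policy, not kernel source. */
static void slab_alloc_hook_sketch(struct kmem_cache *s, gfp_t gfpflags,
				   void *object, size_t size)
{
	if (!object)
		return;
	if (gfpflags & __GFP_ZERO)
		return;		/* the memset above already defined every byte */
	if (s->flags & SLAB_NOTRACK)
		return;		/* the cache opted out of tracking */
	kmemcheck_mark_uninitialized(object, size);
}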
@@ -1759,6 +1784,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	kmemcheck_slab_free(s, object, c->objsize);
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, c->objsize);
@@ -2633,7 +2659,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+			SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+			NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
@@ -2727,9 +2754,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-						get_order(size));
+	struct page *page;
 
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
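Large kmalloc() requests bypass the slab layer entirely and go to the page allocator, so tracking is suppressed per call site with __GFP_NOTRACK rather than via a cache flag. The same pattern works for any caller that wants kmemcheck to ignore an allocation; a hedged sketch (the helper name is made up for illustration):

#include <linux/gfp.h>

/* Illustrative helper, not from the tree: allocate pages that kmemcheck
 * should ignore, mirroring what kmalloc_large_node() now does. */
static struct page *alloc_untracked_pages(gfp_t flags, unsigned int order)
{
	return alloc_pages(flags | __GFP_NOTRACK, order);
}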
@@ -4412,6 +4440,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'a';
 	if (s->flags & SLAB_DEBUG_FREE)
 		*p++ = 'F';
+	if (!(s->flags & SLAB_NOTRACK))
+		*p++ = 't';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);
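The new 't' character keeps kmemcheck-tracked caches in a separate sysfs alias namespace from SLAB_NOTRACK caches, so the two can never be merged under one id. A toy userspace encoder in the spirit of create_unique_id(), with illustrative flag values:

#include <stdio.h>

#define F_CACHE_DMA	0x1	/* illustrative values, not the kernel's */
#define F_NOTRACK	0x2

static void encode_id(char *buf, unsigned int flags, unsigned int size)
{
	char *p = buf;

	*p++ = ':';
	if (flags & F_CACHE_DMA)
		*p++ = 'd';
	if (!(flags & F_NOTRACK))
		*p++ = 't';	/* tracked caches never alias untracked ones */
	if (p != buf + 1)
		*p++ = '-';	/* separator only if any flag char was emitted */
	p += sprintf(p, "%07u", size);
}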