path: root/mm
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:09:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:09:51 -0400
commit	b3fec0fe35a4ff048484f1408385a27695d4273b (patch)
tree	088c23f098421ea681d9976a83aad73d15be1027 /mm
parent	e1f5b94fd0c93c3e27ede88b7ab652d086dc960f (diff)
parent	722f2a6c87f34ee0fd0130a8cf45f81e0705594a (diff)
Merge branch 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck
* 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck: (39 commits)
  signal: fix __send_signal() false positive kmemcheck warning
  fs: fix do_mount_root() false positive kmemcheck warning
  fs: introduce __getname_gfp()
  trace: annotate bitfields in struct ring_buffer_event
  net: annotate struct sock bitfield
  c2port: annotate bitfield for kmemcheck
  net: annotate inet_timewait_sock bitfields
  ieee1394/csr1212: fix false positive kmemcheck report
  ieee1394: annotate bitfield
  net: annotate bitfields in struct inet_sock
  net: use kmemcheck bitfields API for skbuff
  kmemcheck: introduce bitfield API
  kmemcheck: add opcode self-testing at boot
  x86: unify pte_hidden
  x86: make _PAGE_HIDDEN conditional
  kmemcheck: make kconfig accessible for other architectures
  kmemcheck: enable in the x86 Kconfig
  kmemcheck: add hooks for the page allocator
  kmemcheck: add hooks for page- and sg-dma-mappings
  kmemcheck: don't track page tables
  ...
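Many of the commits above convert bitfield users to the kmemcheck bitfield annotation API introduced by "kmemcheck: introduce bitfield API". A bitfield store is a read-modify-write of the containing word, so it would otherwise look like a read of uninitialized memory. As a rough illustration only -- struct foo, its fields, and foo_alloc() are made up for this sketch and are not code from this merge -- the annotation pattern looks roughly like this:

#include <linux/kmemcheck.h>
#include <linux/slab.h>

struct foo {
	/* Group adjacent bitfields so kmemcheck treats them as one unit. */
	kmemcheck_bitfield_begin(flags);
	unsigned int a:1;
	unsigned int b:1;
	kmemcheck_bitfield_end(flags);
	int value;
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kmalloc(sizeof(*f), gfp);

	if (!f)
		return NULL;
	/*
	 * Mark the whole bitfield group initialized so that the partial-word
	 * writes below are not flagged as reads of uninitialized memory.
	 */
	kmemcheck_annotate_bitfield(f, flags);
	f->a = 1;
	return f;
}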
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig.debug	1
-rw-r--r--	mm/Makefile	1
-rw-r--r--	mm/kmemcheck.c	122
-rw-r--r--	mm/page_alloc.c	18
-rw-r--r--	mm/slab.c	108
-rw-r--r--	mm/slub.c	38
6 files changed, 200 insertions, 88 deletions
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index bb01e298f26..aa99fd1f710 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -2,6 +2,7 @@ config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL && ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	depends on !HIBERNATION || !PPC && !SPARC
+	depends on !KMEMCHECK
 	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
diff --git a/mm/Makefile b/mm/Makefile
index e89acb090b4..c379ce08354 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
new file mode 100644
index 00000000000..fd814fd6131
--- /dev/null
+++ b/mm/kmemcheck.c
@@ -0,0 +1,122 @@
+#include <linux/gfp.h>
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kmemcheck.h>
+
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	pages = 1 << order;
+
+	/*
+	 * With kmemcheck enabled, we need to allocate a memory area for the
+	 * shadow bits as well.
+	 */
+	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
+	if (!shadow) {
+		if (printk_ratelimit())
+			printk(KERN_ERR "kmemcheck: failed to allocate "
+				"shadow bitmap\n");
+		return;
+	}
+
+	for(i = 0; i < pages; ++i)
+		page[i].shadow = page_address(&shadow[i]);
+
+	/*
+	 * Mark it as non-present for the MMU so that our accesses to
+	 * this memory will trigger a page fault and let us analyze
+	 * the memory accesses.
+	 */
+	kmemcheck_hide_pages(page, pages);
+}
+
+void kmemcheck_free_shadow(struct page *page, int order)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	if (!kmemcheck_page_is_tracked(page))
+		return;
+
+	pages = 1 << order;
+
+	kmemcheck_show_pages(page, pages);
+
+	shadow = virt_to_page(page[0].shadow);
+
+	for(i = 0; i < pages; ++i)
+		page[i].shadow = NULL;
+
+	__free_pages(shadow, order);
+}
+
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size)
+{
+	/*
+	 * Has already been memset(), which initializes the shadow for us
+	 * as well.
+	 */
+	if (gfpflags & __GFP_ZERO)
+		return;
+
+	/* No need to initialize the shadow of a non-tracked slab. */
+	if (s->flags & SLAB_NOTRACK)
+		return;
+
+	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
+		/*
+		 * Allow notracked objects to be allocated from
+		 * tracked caches. Note however that these objects
+		 * will still get page faults on access, they just
+		 * won't ever be flagged as uninitialized. If page
+		 * faults are not acceptable, the slab cache itself
+		 * should be marked NOTRACK.
+		 */
+		kmemcheck_mark_initialized(object, size);
+	} else if (!s->ctor) {
+		/*
+		 * New objects should be marked uninitialized before
+		 * they're returned to the caller.
+		 */
+		kmemcheck_mark_uninitialized(object, size);
+	}
+}
+
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
+{
+	/* TODO: RCU freeing is unsupported for now; hide false positives. */
+	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+		kmemcheck_mark_freed(object, size);
+}
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+			       gfp_t gfpflags)
+{
+	int pages;
+
+	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+		return;
+
+	pages = 1 << order;
+
+	/*
+	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+	 * can become uninitialized by copying uninitialized memory
+	 * into them.
+	 */
+
+	/* XXX: Can use zone->node for node? */
+	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+	if (gfpflags & __GFP_ZERO)
+		kmemcheck_mark_initialized_pages(page, pages);
+	else
+		kmemcheck_mark_uninitialized_pages(page, pages);
+}
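[Editorial note, not part of the patch: kmemcheck_slab_alloc() above honours both the per-cache SLAB_NOTRACK flag and the per-allocation __GFP_NOTRACK flag. A minimal sketch of how a caller could opt out of tracking, assuming these two flags as introduced by this series -- the cache name and init function are hypothetical:]

static struct kmem_cache *my_cache;	/* hypothetical cache, for illustration */

static int __init my_init(void)
{
	void *obj;

	/* A whole cache can opt out: kmemcheck allocates no shadow for it. */
	my_cache = kmem_cache_create("my_cache", 128, 0, SLAB_NOTRACK, NULL);
	if (!my_cache)
		return -ENOMEM;

	/*
	 * A single allocation from an otherwise tracked cache can also opt
	 * out; it is simply marked initialized instead of uninitialized.
	 */
	obj = kmalloc(64, GFP_KERNEL | __GFP_NOTRACK);
	kfree(obj);

	return 0;
}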
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17d5f539a9a..0727896a88a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -546,6 +547,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	int i;
 	int bad = 0;
 
+	kmemcheck_free_shadow(page, order);
+
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
 	if (bad)
@@ -994,6 +997,8 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 
+	kmemcheck_free_shadow(page, 0);
+
 	if (PageAnon(page))
 		page->mapping = NULL;
 	if (free_pages_check(page))
@@ -1047,6 +1052,16 @@ void split_page(struct page *page, unsigned int order)
 
 	VM_BUG_ON(PageCompound(page));
 	VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * Split shadow pages too, because free(page[0]) would
+	 * otherwise free the whole shadow.
+	 */
+	if (kmemcheck_page_is_tracked(page))
+		split_page(virt_to_page(page[0].shadow), order);
+#endif
+
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }
@@ -1667,7 +1682,10 @@ nopage:
 		dump_stack();
 		show_mem();
 	}
+	return page;
 got_pg:
+	if (kmemcheck_enabled)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_internal);
diff --git a/mm/slab.c b/mm/slab.c
index 18e3164de09..af3376d0a83 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -380,87 +381,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
@@ -1707,7 +1627,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -1720,6 +1640,16 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}
+
 	return page_address(page);
 }
 
@@ -1732,6 +1662,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	kmemcheck_free_shadow(page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3407,6 +3339,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3467,6 +3402,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
@@ -3583,6 +3521,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
diff --git a/mm/slub.c b/mm/slub.c
index 30354bfeb43..15960a09abb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA)
+		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -1071,6 +1072,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
 	int order = oo_order(oo);
 
+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
@@ -1098,6 +1101,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
 	}
+
+	if (kmemcheck_enabled
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+	{
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
+	}
+
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -1171,6 +1192,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}
 
+	kmemcheck_free_shadow(page, compound_order(page));
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1626,7 +1649,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
 
+	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
 	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
 	return object;
 }
 
@@ -1759,6 +1784,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	kmemcheck_slab_free(s, object, c->objsize);
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, c->objsize);
@@ -2633,7 +2659,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+			SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+			NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
@@ -2727,9 +2754,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-						get_order(size));
+	struct page *page;
 
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
@@ -4412,6 +4440,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'a';
 	if (s->flags & SLAB_DEBUG_FREE)
 		*p++ = 'F';
+	if (!(s->flags & SLAB_NOTRACK))
+		*p++ = 't';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);