author     Vegard Nossum <vegard.nossum@gmail.com>   2008-04-03 18:54:48 -0400
committer  Vegard Nossum <vegard.nossum@gmail.com>   2009-06-15 06:40:07 -0400
commit     5a896d9e7c921742d0437a452f991288f4dc2c42 (patch)
tree       ea21c8fcfb4524ae5bfc68681920dd9a15b30795 /mm/slub.c
parent     d7002857dee6e9a3ce1f78d23f37caba106b29c5 (diff)
slub: add hooks for kmemcheck
Parts of this patch were contributed by Pekka Enberg but merged into a
single patch for atomicity.
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  21
1 file changed, 19 insertions, 2 deletions
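
For readers new to kmemcheck: it detects reads of uninitialized kernel
memory by pairing each tracked slab page with shadow storage that records,
byte by byte, what has been initialized. That is what the hooks below wire
up: kmemcheck_alloc_shadow()/kmemcheck_free_shadow() manage the shadow
alongside each slab page, and kmemcheck_slab_alloc()/kmemcheck_slab_free()
update per-object state on the alloc/free fast paths. The standalone C
sketch below illustrates the shadow-tracking idea only; it is a userspace
analog, and every name in it (tracked_alloc, shadow_alloc(), and so on) is
invented for illustration, not a kernel API.

    #include <stdio.h>
    #include <stdlib.h>

    enum { SHADOW_UNINIT = 0, SHADOW_INIT = 1 };

    struct tracked_alloc {
            void *object;          /* the buffer handed to the caller */
            unsigned char *shadow; /* one state byte per object byte  */
            size_t size;
    };

    static struct tracked_alloc *shadow_alloc(size_t size)
    {
            struct tracked_alloc *t = malloc(sizeof(*t));

            if (!t)
                    return NULL;
            t->object = malloc(size);
            t->shadow = calloc(size, 1); /* every byte starts uninitialized */
            t->size = size;
            if (!t->object || !t->shadow) {
                    free(t->object);
                    free(t->shadow);
                    free(t);
                    return NULL;
            }
            return t;
    }

    /* A store initializes the byte: update object and shadow together. */
    static void shadow_store(struct tracked_alloc *t, size_t off,
                             unsigned char val)
    {
            ((unsigned char *)t->object)[off] = val;
            t->shadow[off] = SHADOW_INIT;
    }

    /* A load is only safe if the shadow says the byte was initialized. */
    static int shadow_load_ok(const struct tracked_alloc *t, size_t off)
    {
            return t->shadow[off] == SHADOW_INIT;
    }

    int main(void)
    {
            struct tracked_alloc *t = shadow_alloc(8);

            if (!t)
                    return 1;
            shadow_store(t, 0, 0xab);
            printf("byte 0 initialized: %d\n", shadow_load_ok(t, 0)); /* 1 */
            printf("byte 1 initialized: %d\n", shadow_load_ok(t, 1)); /* 0 */
            free(t->object);
            free(t->shadow);
            free(t);
            return 0;
    }

The real tracker does not require callers to report loads and stores
explicitly as this sketch does; kmemcheck hides tracked pages from the MMU
and classifies each access from the resulting page fault.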
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-               SLAB_CACHE_DMA)
+               SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -1092,6 +1093,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
                stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
        }
+
+       if (kmemcheck_enabled
+               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+       {
+               kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+       }
+
        page->objects = oo_objects(oo);
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -1165,6 +1173,9 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                __ClearPageSlubDebug(page);
        }
 
+       if (kmemcheck_page_is_tracked(page))
+               kmemcheck_free_shadow(s, page, compound_order(page));
+
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1618,7 +1629,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, objsize);
 
+       kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
        kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
        return object;
 }
 
@@ -1751,6 +1764,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
        kmemleak_free_recursive(x, s->flags);
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
+       kmemcheck_slab_free(s, object, c->objsize);
        debug_check_no_locks_freed(object, c->objsize);
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(object, c->objsize);
@@ -2625,7 +2639,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
        if (!s || !text || !kmem_cache_open(s, flags, text,
                        realsize, ARCH_KMALLOC_MINALIGN,
-                       SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+                       SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+                       NULL)) {
                kfree(s);
                kfree(text);
                goto unlock_out;
@@ -4396,6 +4411,8 @@ static char *create_unique_id(struct kmem_cache *s)
                *p++ = 'a';
        if (s->flags & SLAB_DEBUG_FREE)
                *p++ = 'F';
+       if (!(s->flags & SLAB_NOTRACK))
+               *p++ = 't';
        if (p != name + 1)
                *p++ = '-';
        p += sprintf(p, "%07d", s->size);
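
The effect of this last hunk, read from create_unique_id() rather than
stated anywhere in the commit message: the id string names the sysfs alias
under which mergeable caches are registered, so adding a 't' for tracked
caches keeps their names distinct from otherwise identical SLAB_NOTRACK
caches, consistent with SLAB_NOTRACK joining SLUB_MERGE_SAME in the second
hunk. A minimal userspace rendering of the resulting string; the leading
':' and the "%07d" size suffix come from the surrounding function in
mm/slub.c, while the buffer size and the example size of 128 are my own:

    #include <stdio.h>

    int main(void)
    {
            char name[16], *p = name;

            *p++ = ':';
            *p++ = 't';           /* the character this hunk adds */
            *p++ = '-';           /* at least one flag char precedes */
            sprintf(p, "%07d", 128);
            printf("%s\n", name); /* prints ":t-0000128" */
            return 0;
    }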