diff options
author | Vegard Nossum <vegard.nossum@gmail.com> | 2008-11-25 10:55:53 -0500 |
---|---|---|
committer | Vegard Nossum <vegard.nossum@gmail.com> | 2009-06-15 09:48:33 -0400 |
commit | b1eeab67682a5e397aecf172046b3a8bd4808ae4 (patch) | |
tree | c357b6ac1945dc8beecc2f8c4d84660ad8d35aae /mm/kmemcheck.c | |
parent | 9b5cab31897e9e89e36c0c2a89b16b93ff1a971a (diff) |
kmemcheck: add hooks for the page allocator
This adds support for tracking the initialization status of memory that
was allocated with the page allocator. Highmem requests are not
tracked.
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Diffstat (limited to 'mm/kmemcheck.c')
-rw-r--r-- | mm/kmemcheck.c | 45 |
1 file changed, 32 insertions(+), 13 deletions(-)
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index eaa41b802611..fd814fd61319 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c | |||
@@ -1,10 +1,10 @@ | |||
1 | #include <linux/gfp.h> | ||
1 | #include <linux/mm_types.h> | 2 | #include <linux/mm_types.h> |
2 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
3 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
4 | #include <linux/kmemcheck.h> | 5 | #include <linux/kmemcheck.h> |
5 | 6 | ||
6 | void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, | 7 | void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) |
7 | struct page *page, int order) | ||
8 | { | 8 | { |
9 | struct page *shadow; | 9 | struct page *shadow; |
10 | int pages; | 10 | int pages; |
@@ -16,7 +16,7 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, | |||
16 | * With kmemcheck enabled, we need to allocate a memory area for the | 16 | * With kmemcheck enabled, we need to allocate a memory area for the |
17 | * shadow bits as well. | 17 | * shadow bits as well. |
18 | */ | 18 | */ |
19 | shadow = alloc_pages_node(node, flags, order); | 19 | shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); |
20 | if (!shadow) { | 20 | if (!shadow) { |
21 | if (printk_ratelimit()) | 21 | if (printk_ratelimit()) |
22 | printk(KERN_ERR "kmemcheck: failed to allocate " | 22 | printk(KERN_ERR "kmemcheck: failed to allocate " |
@@ -33,23 +33,17 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, | |||
33 | * the memory accesses. | 33 | * the memory accesses. |
34 | */ | 34 | */ |
35 | kmemcheck_hide_pages(page, pages); | 35 | kmemcheck_hide_pages(page, pages); |
36 | |||
37 | /* | ||
38 | * Objects from caches that have a constructor don't get | ||
39 | * cleared when they're allocated, so we need to do it here. | ||
40 | */ | ||
41 | if (s->ctor) | ||
42 | kmemcheck_mark_uninitialized_pages(page, pages); | ||
43 | else | ||
44 | kmemcheck_mark_unallocated_pages(page, pages); | ||
45 | } | 36 | } |
46 | 37 | ||
47 | void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order) | 38 | void kmemcheck_free_shadow(struct page *page, int order) |
48 | { | 39 | { |
49 | struct page *shadow; | 40 | struct page *shadow; |
50 | int pages; | 41 | int pages; |
51 | int i; | 42 | int i; |
52 | 43 | ||
44 | if (!kmemcheck_page_is_tracked(page)) | ||
45 | return; | ||
46 | |||
53 | pages = 1 << order; | 47 | pages = 1 << order; |
54 | 48 | ||
55 | kmemcheck_show_pages(page, pages); | 49 | kmemcheck_show_pages(page, pages); |
@@ -101,3 +95,28 @@ void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) | |||
101 | if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) | 95 | if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) |
102 | kmemcheck_mark_freed(object, size); | 96 | kmemcheck_mark_freed(object, size); |
103 | } | 97 | } |
98 | |||
99 | void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order, | ||
100 | gfp_t gfpflags) | ||
101 | { | ||
102 | int pages; | ||
103 | |||
104 | if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK)) | ||
105 | return; | ||
106 | |||
107 | pages = 1 << order; | ||
108 | |||
109 | /* | ||
110 | * NOTE: We choose to track GFP_ZERO pages too; in fact, they | ||
111 | * can become uninitialized by copying uninitialized memory | ||
112 | * into them. | ||
113 | */ | ||
114 | |||
115 | /* XXX: Can use zone->node for node? */ | ||
116 | kmemcheck_alloc_shadow(page, order, gfpflags, -1); | ||
117 | |||
118 | if (gfpflags & __GFP_ZERO) | ||
119 | kmemcheck_mark_initialized_pages(page, pages); | ||
120 | else | ||
121 | kmemcheck_mark_uninitialized_pages(page, pages); | ||
122 | } | ||