author    Vegard Nossum <vegard.nossum@gmail.com>    2008-05-31 09:56:17 -0400
committer Vegard Nossum <vegard.nossum@gmail.com>    2009-06-15 06:40:03 -0400
commit    2dff440525f8faba8836e9f05297b76f23b4af30 (patch)
tree      9f15e1dc2da06dba97cd939e41f34342caf05097 /mm/kmemcheck.c
parent    f85612967c93b67b10dd240e3e8bf8a0eee9def7 (diff)
kmemcheck: add mm functions
With kmemcheck enabled, the slab allocator needs to do two things:

1. Tell kmemcheck to allocate the shadow memory which stores the status of
   each byte in the allocation proper, e.g. whether it is initialized or
   uninitialized.

2. Tell kmemcheck which parts of memory should be marked uninitialized.

There are actually a few more states, such as "not yet allocated" and
"recently freed".

If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
memory that can take page faults because of kmemcheck.

If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
request memory with the __GFP_NOTRACK flag. This does not prevent the page
faults from occurring, but it marks the object in question as being
initialized so that no warnings will ever be produced for this object.

In addition to (and in contrast to) __GFP_NOTRACK, the
__GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should not
be tracked _because_ it would produce a false positive. Their values are
identical, but need not be so in the future (for example, we could now
enable/disable false positives with a config option).

Parts of this patch were contributed by Pekka Enberg but merged for
atomicity.

Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
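[editor's sketch] As a caller-side illustration only, the opt-out mechanisms and
per-byte states described above would be used roughly as follows. None of this
code is in the patch: "struct example", the cache name, the init function, and
the enum names are hypothetical; only SLAB_NOTRACK and __GFP_NOTRACK come from
this patch series.

    #include <linux/slab.h>
    #include <linux/gfp.h>

    /* The per-byte shadow states hinted at above, with illustrative names: */
    enum shadow_state {
            SHADOW_UNALLOCATED,     /* not yet handed out by the allocator */
            SHADOW_UNINITIALIZED,   /* allocated, but never written to */
            SHADOW_INITIALIZED,     /* written to; safe to read */
            SHADOW_FREED,           /* recently freed */
    };

    struct example { int value; };

    static int __init example_init(void)
    {
            struct kmem_cache *cache;
            void *obj;

            /*
             * A cache set up with SLAB_NOTRACK never returns memory that
             * can take kmemcheck page faults.
             */
            cache = kmem_cache_create("example_cache", sizeof(struct example),
                                      0, SLAB_NOTRACK, NULL);

            /*
             * A single untracked allocation from an ordinary, tracked cache:
             * page faults still occur, but the object is marked initialized,
             * so kmemcheck never warns about it.
             */
            obj = kmalloc(sizeof(struct example), GFP_KERNEL | __GFP_NOTRACK);
            kfree(obj);

            return cache ? 0 : -ENOMEM;
    }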
Diffstat (limited to 'mm/kmemcheck.c')
-rw-r--r--  mm/kmemcheck.c  103
1 file changed, 103 insertions, 0 deletions
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
new file mode 100644
index 000000000000..eaa41b802611
--- /dev/null
+++ b/mm/kmemcheck.c
@@ -0,0 +1,103 @@
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kmemcheck.h>
+
+void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
+                            struct page *page, int order)
+{
+        struct page *shadow;
+        int pages;
+        int i;
+
+        pages = 1 << order;
+
+        /*
+         * With kmemcheck enabled, we need to allocate a memory area for the
+         * shadow bits as well.
+         */
+        shadow = alloc_pages_node(node, flags, order);
+        if (!shadow) {
+                if (printk_ratelimit())
+                        printk(KERN_ERR "kmemcheck: failed to allocate "
+                                "shadow bitmap\n");
+                return;
+        }
+
+        for (i = 0; i < pages; ++i)
+                page[i].shadow = page_address(&shadow[i]);
+
+        /*
+         * Mark it as non-present for the MMU so that our accesses to
+         * this memory will trigger a page fault and let us analyze
+         * the memory accesses.
+         */
+        kmemcheck_hide_pages(page, pages);
+
+        /*
+         * Objects from caches that have a constructor don't get
+         * cleared when they're allocated, so we need to do it here.
+         */
+        if (s->ctor)
+                kmemcheck_mark_uninitialized_pages(page, pages);
+        else
+                kmemcheck_mark_unallocated_pages(page, pages);
+}
+
+void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+{
+        struct page *shadow;
+        int pages;
+        int i;
+
+        pages = 1 << order;
+
+        kmemcheck_show_pages(page, pages);
+
+        shadow = virt_to_page(page[0].shadow);
+
+        for (i = 0; i < pages; ++i)
+                page[i].shadow = NULL;
+
+        __free_pages(shadow, order);
+}
+
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+                          size_t size)
+{
+        /*
+         * Has already been memset(), which initializes the shadow for us
+         * as well.
+         */
+        if (gfpflags & __GFP_ZERO)
+                return;
+
+        /* No need to initialize the shadow of a non-tracked slab. */
+        if (s->flags & SLAB_NOTRACK)
+                return;
+
+        if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
+                /*
+                 * Allow notracked objects to be allocated from
+                 * tracked caches. Note however that these objects
+                 * will still get page faults on access, they just
+                 * won't ever be flagged as uninitialized. If page
+                 * faults are not acceptable, the slab cache itself
+                 * should be marked NOTRACK.
+                 */
+                kmemcheck_mark_initialized(object, size);
+        } else if (!s->ctor) {
+                /*
+                 * New objects should be marked uninitialized before
+                 * they're returned to the caller.
+                 */
+                kmemcheck_mark_uninitialized(object, size);
+        }
+}
+
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
+{
+        /* TODO: RCU freeing is unsupported for now; hide false positives. */
+        if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+                kmemcheck_mark_freed(object, size);
+}
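[editor's sketch] For context, not part of this commit: the slab-allocator side
of the series invokes these hooks roughly as sketched below. The surrounding
function names and the exact conditions are simplified placeholders;
kmemcheck_enabled, SLAB_NOTRACK, __GFP_NOTRACK, and the page->shadow field all
appear in the patch or its description above.

    /*
     * Illustrative sketch only: roughly where a slab allocator would call
     * the hooks above. Function names and conditions are simplified.
     */
    static struct page *allocate_slab_pages(struct kmem_cache *s, gfp_t flags,
                                            int node, int order)
    {
            struct page *page = alloc_pages_node(node, flags, order);

            /* Only tracked caches and tracked allocations get shadow memory. */
            if (page && kmemcheck_enabled &&
                !(s->flags & SLAB_NOTRACK) && !(flags & __GFP_NOTRACK))
                    kmemcheck_alloc_shadow(s, flags, node, page, order);

            return page;
    }

    static void free_slab_pages(struct kmem_cache *s, struct page *page,
                                int order)
    {
            /* A non-NULL shadow pointer means the pages are tracked. */
            if (page->shadow)
                    kmemcheck_free_shadow(s, page, order);

            __free_pages(page, order);
    }

    /*
     * Per object: kmemcheck_slab_alloc() runs when an object is handed to a
     * caller, kmemcheck_slab_free() when it is returned to the cache.
     */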