Diffstat (limited to 'mm/kmemcheck.c')
-rw-r--r--	mm/kmemcheck.c	103
1 file changed, 103 insertions, 0 deletions
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
new file mode 100644
index 000000000000..eaa41b802611
--- /dev/null
+++ b/mm/kmemcheck.c
@@ -0,0 +1,103 @@
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemcheck.h>

void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
			    struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags, order);
	if (!shadow) {
		if (printk_ratelimit())
			printk(KERN_ERR "kmemcheck: failed to allocate "
				"shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);

	/*
	 * Objects from caches that have a constructor don't get
	 * cleared when they're allocated, so we need to do it here.
	 */
	if (s->ctor)
		kmemcheck_mark_uninitialized_pages(page, pages);
	else
		kmemcheck_mark_unallocated_pages(page, pages);
}

void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}

void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}
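
For context, a minimal sketch of how a slab allocator might drive the page-level and object-level hooks above. Everything named my_* is hypothetical glue invented for this illustration, and using s->size as the per-object size is an assumption; only the kmemcheck_* functions, kmemcheck_enabled, the SLAB_NOTRACK flag, and the page->shadow field come from the patch itself.

/*
 * Illustrative sketch only: the my_* functions are hypothetical call
 * sites, not the real SLUB/SLAB integration.
 */
#include <linux/gfp.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct page *my_new_slab(struct kmem_cache *s, gfp_t flags,
				int node, int order)
{
	struct page *page;

	page = alloc_pages_node(node, flags, order);
	if (!page)
		return NULL;

	/* Only tracked caches pay for shadow pages. */
	if (kmemcheck_enabled && !(s->flags & SLAB_NOTRACK))
		kmemcheck_alloc_shadow(s, flags, node, page, order);

	return page;
}

static void my_free_slab(struct kmem_cache *s, struct page *page, int order)
{
	/* Assumes page->shadow was left NULL when no shadow was set up. */
	if (page->shadow)
		kmemcheck_free_shadow(s, page, order);

	__free_pages(page, order);
}

static void *my_object_alloc(struct kmem_cache *s, gfp_t flags, void *object)
{
	/* s->size as the per-object size is an assumption of this sketch. */
	if (object)
		kmemcheck_slab_alloc(s, flags, object, s->size);
	return object;
}

static void my_object_free(struct kmem_cache *s, void *object)
{
	kmemcheck_slab_free(s, object, s->size);
}

Note how the patch handles shadow allocation failure: kmemcheck_alloc_shadow() simply returns without setting page->shadow, which is why the free path in this sketch checks the field before tearing the shadow down.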