author     Vegard Nossum <vegard.nossum@gmail.com>    2008-05-31 09:56:17 -0400
committer  Vegard Nossum <vegard.nossum@gmail.com>    2009-06-15 06:40:03 -0400
commit     2dff440525f8faba8836e9f05297b76f23b4af30 (patch)
tree       9f15e1dc2da06dba97cd939e41f34342caf05097
parent     f85612967c93b67b10dd240e3e8bf8a0eee9def7 (diff)
kmemcheck: add mm functions
With kmemcheck enabled, the slab allocator needs to do this:

 1. Tell kmemcheck to allocate the shadow memory which stores the status of
    each byte in the allocation proper, e.g. whether it is initialized or
    uninitialized.
 2. Tell kmemcheck which parts of memory should be marked uninitialized.
    There are actually a few more states, such as "not yet allocated" and
    "recently freed".

If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
memory that can take page faults because of kmemcheck.

If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
request memory with the __GFP_NOTRACK flag. This does not prevent the page
faults from occurring, but it marks the object in question as initialized so
that no warnings will ever be produced for this object.

In addition to (and in contrast to) __GFP_NOTRACK, the
__GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should not be
tracked _because_ it would produce a false positive. Their values are
identical, but need not be so in the future (for example, we could now
enable/disable false positives with a config option).

Parts of this patch were contributed by Pekka Enberg but merged for
atomicity.

Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
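(Editorial illustration, not part of the patch; the cache name, struct and
cache pointer below are invented. A cache that must never trip kmemcheck is
created with SLAB_NOTRACK, while an individual allocation from a tracked
cache can opt out with __GFP_NOTRACK:

	/* hypothetical cache that kmemcheck will never track */
	struct kmem_cache *example_cachep =
		kmem_cache_create("example_cache", sizeof(struct example),
				  0, SLAB_PANIC | SLAB_NOTRACK, NULL);

	/* single allocation from a tracked cache, pre-marked initialized */
	void *obj = kmem_cache_alloc(some_tracked_cachep,
				     GFP_KERNEL | __GFP_NOTRACK);

The first form avoids kmemcheck page faults entirely; the second still takes
the faults but suppresses "uninitialized" warnings for that object.)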
-rw-r--r--   arch/x86/kernel/process.c   |   2
-rw-r--r--   include/linux/gfp.h         |   9
-rw-r--r--   include/linux/kmemcheck.h   |  47
-rw-r--r--   include/linux/slab.h        |   7
-rw-r--r--   kernel/fork.c               |  14
-rw-r--r--   mm/Makefile                 |   1
-rw-r--r--   mm/kmemcheck.c              | 103
7 files changed, 174 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3bb2be1649bd..994dd6a4a2a0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -63,7 +63,7 @@ void arch_task_cache_init(void)
 	task_xstate_cachep =
 		kmem_cache_create("task_xstate", xstate_size,
 				  __alignof__(union thread_xstate),
-				  SLAB_PANIC, NULL);
+				  SLAB_PANIC | SLAB_NOTRACK, NULL);
 }
 
 /*
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0bbc15f54536..daeaa8fe1bbd 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -51,8 +51,15 @@ struct vm_area_struct;
 #define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
 #define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
+#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
 
-#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
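(A hedged sketch of how the two spellings of the flag might be used; the call
sites below are invented for illustration and are not part of this patch:

	/* page tables simply cannot be tracked by kmemcheck */
	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOTRACK);

	/*
	 * an allocation kmemcheck could track, but would warn about
	 * spuriously, is annotated as a known false positive instead
	 */
	buf = kmalloc(len, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);

Both requests behave identically today; the second spelling only records the
reason for opting out.)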
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 39480c91b2f9..5b65f4ebeadf 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -7,11 +7,58 @@
 #ifdef CONFIG_KMEMCHECK
 extern int kmemcheck_enabled;
 
+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
+			    struct page *page, int order);
+void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+
 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
 #else
 #define kmemcheck_enabled 0
 
+static inline void
+kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
+		       struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+		     size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+				       size_t size)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+	return false;
+}
 #endif /* CONFIG_KMEMCHECK */
 
 #endif /* LINUX_KMEMCHECK_H */
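(For orientation only: the hooks declared above are intended to be called
from the slab allocators, and the actual wiring lands in separate patches.
The fragment below is a simplified, assumed sketch of where those calls would
sit in an allocator's page and object paths; variable names and guards are
not from this patch:

	/* when the allocator grabs fresh pages for a slab */
	page = alloc_pages_node(node, flags, order);
	if (kmemcheck_enabled && page && !(s->flags & SLAB_NOTRACK))
		kmemcheck_alloc_shadow(s, flags, node, page, order);

	/* per-object fast paths, after an object is handed out / taken back */
	kmemcheck_slab_alloc(s, gfpflags, object, size);
	kmemcheck_slab_free(s, object, size);

	/* when the slab's pages are released back to the page allocator */
	if (kmemcheck_page_is_tracked(page))
		kmemcheck_free_shadow(s, page, order);
)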
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 48803064cedf..e339fcf17cd3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,13 @@
 
 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
 
+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK		0x01000000UL
+#else
+# define SLAB_NOTRACK		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
diff --git a/kernel/fork.c b/kernel/fork.c
index 4430eb1376f2..be022c200da6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -178,7 +178,7 @@ void __init fork_init(unsigned long mempages)
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
 		kmem_cache_create("task_struct", sizeof(struct task_struct),
-			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -1470,20 +1470,20 @@ void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-			sighand_ctor);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+			SLAB_NOTRACK, sighand_ctor);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	files_cachep = kmem_cache_create("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
diff --git a/mm/Makefile b/mm/Makefile
index e89acb090b4d..c379ce08354a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
new file mode 100644
index 000000000000..eaa41b802611
--- /dev/null
+++ b/mm/kmemcheck.c
@@ -0,0 +1,103 @@
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kmemcheck.h>
+
+void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
+			    struct page *page, int order)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	pages = 1 << order;
+
+	/*
+	 * With kmemcheck enabled, we need to allocate a memory area for the
+	 * shadow bits as well.
+	 */
+	shadow = alloc_pages_node(node, flags, order);
+	if (!shadow) {
+		if (printk_ratelimit())
+			printk(KERN_ERR "kmemcheck: failed to allocate "
+				"shadow bitmap\n");
+		return;
+	}
+
+	for(i = 0; i < pages; ++i)
+		page[i].shadow = page_address(&shadow[i]);
+
+	/*
+	 * Mark it as non-present for the MMU so that our accesses to
+	 * this memory will trigger a page fault and let us analyze
+	 * the memory accesses.
+	 */
+	kmemcheck_hide_pages(page, pages);
+
+	/*
+	 * Objects from caches that have a constructor don't get
+	 * cleared when they're allocated, so we need to do it here.
+	 */
+	if (s->ctor)
+		kmemcheck_mark_uninitialized_pages(page, pages);
+	else
+		kmemcheck_mark_unallocated_pages(page, pages);
+}
+
+void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	pages = 1 << order;
+
+	kmemcheck_show_pages(page, pages);
+
+	shadow = virt_to_page(page[0].shadow);
+
+	for(i = 0; i < pages; ++i)
+		page[i].shadow = NULL;
+
+	__free_pages(shadow, order);
+}
+
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size)
+{
+	/*
+	 * Has already been memset(), which initializes the shadow for us
+	 * as well.
+	 */
+	if (gfpflags & __GFP_ZERO)
+		return;
+
+	/* No need to initialize the shadow of a non-tracked slab. */
+	if (s->flags & SLAB_NOTRACK)
+		return;
+
+	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
+		/*
+		 * Allow notracked objects to be allocated from
+		 * tracked caches. Note however that these objects
+		 * will still get page faults on access, they just
+		 * won't ever be flagged as uninitialized. If page
+		 * faults are not acceptable, the slab cache itself
+		 * should be marked NOTRACK.
+		 */
+		kmemcheck_mark_initialized(object, size);
+	} else if (!s->ctor) {
+		/*
+		 * New objects should be marked uninitialized before
+		 * they're returned to the caller.
+		 */
+		kmemcheck_mark_uninitialized(object, size);
+	}
+}
+
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
+{
+	/* TODO: RCU freeing is unsupported for now; hide false positives. */
+	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+		kmemcheck_mark_freed(object, size);
+}
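(The per-byte marking helpers added here are exported for callers outside the
slab layer as well. A hypothetical use, with the buffer and lengths invented
for illustration, is marking only the portion of an object that has actually
been written, so reads of the rest still warn:

	/* suppose only the first hdr_len bytes of buf have been filled in */
	u8 *buf;
	kmemcheck_mark_initialized(buf, hdr_len);
	kmemcheck_mark_uninitialized(buf + hdr_len, buf_len - hdr_len);
)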