author		Vegard Nossum <vegard.nossum@gmail.com>	2008-05-31 09:56:17 -0400
committer	Vegard Nossum <vegard.nossum@gmail.com>	2009-06-15 06:40:03 -0400
commit		2dff440525f8faba8836e9f05297b76f23b4af30 (patch)
tree		9f15e1dc2da06dba97cd939e41f34342caf05097 /kernel
parent		f85612967c93b67b10dd240e3e8bf8a0eee9def7 (diff)
kmemcheck: add mm functions
With kmemcheck enabled, the slab allocator needs to do this:
1. Tell kmemcheck to allocate the shadow memory which stores the status of
each byte in the allocation proper, e.g. whether it is initialized or
uninitialized.
2. Tell kmemcheck which parts of memory should be marked uninitialized.
There are actually a few more states, such as "not yet allocated" and
"recently freed".
If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
memory that can take page faults because of kmemcheck.
If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
request memory with the __GFP_NOTRACK flag. This does not prevent the page
faults from occurring, but it marks the object in question as initialized, so
no warnings will ever be produced for this object.
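For example, the two flags are used like this (a minimal illustration; the
cache name and the sizes are made up):

	#include <linux/slab.h>

	static struct kmem_cache *example_cachep;

	static int __init example_init(void)
	{
		void *obj;

		/* A cache created with SLAB_NOTRACK: kmemcheck never tracks
		 * its objects, so they never take kmemcheck page faults. */
		example_cachep = kmem_cache_create("example_cache", 128, 0,
						   SLAB_PANIC | SLAB_NOTRACK,
						   NULL);

		/* An allocation with __GFP_NOTRACK from an ordinary (tracked)
		 * path: the page faults still occur, but the object is marked
		 * initialized, so no warnings are ever produced for it. */
		obj = kmalloc(64, GFP_KERNEL | __GFP_NOTRACK);
		kfree(obj);

		return 0;
	}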
In addition to (and in contrast to) __GFP_NOTRACK, the
__GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should
not be tracked _because_ it would produce a false positive. Their values
are identical, but need not be so in the future (for example, we could now
enable/disable false positives with a config option).
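In other words, the relationship described above boils down to something like
the following in the gfp flag definitions (reconstructed from the description
here, not copied from the header):

	/* Same value today, but the distinction lets the two cases be
	 * separated later, e.g. behind a config option. */
	#define __GFP_NOTRACK_FALSE_POSITIVE	(__GFP_NOTRACK)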
Parts of this patch were contributed by Pekka Enberg but merged for
atomicity.
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 4430eb1376f2..be022c200da6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -178,7 +178,7 @@ void __init fork_init(unsigned long mempages)
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
 		kmem_cache_create("task_struct", sizeof(struct task_struct),
-			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -1470,20 +1470,20 @@ void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-			sighand_ctor);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+			SLAB_NOTRACK, sighand_ctor);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	files_cachep = kmem_cache_create("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }