diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2008-04-30 03:55:01 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-30 11:29:53 -0400 |
commit | 3ac7fe5a4aab409bd5674d0b070bce97f9d20872 (patch) | |
tree | 5e12e8864bb8737695e4eb9c63970602d5f69e73 /mm | |
parent | 30327acf7846c5eb97c8e31c78317a2918d3e515 (diff) |
infrastructure to debug (dynamic) objects
We can see an ever repeating problem pattern with objects of any kind in the
kernel:
1) freeing of active objects
2) reinitialization of active objects
Both problems can be hard to debug because the crash happens at a point where
we have no chance to decode the root cause anymore. One problem spot is
kernel timers, where the detection of the problem often happens in interrupt
context and usually causes the machine to panic.
While working on a timer related bug report I had to hack specialized code
into the timer subsystem to get a reasonable hint for the root cause. This
debug hack was fine for temporary use, but far from a mergeable solution due
to the intrusiveness into the timer code.
The code further lacked the ability to detect and report the root cause
instantly and keep the system operational.
Keeping the system operational is important to get hold of the debug
information without special debugging aids like serial consoles and special
knowledge of the bug reporter.
The problems described above are not restricted to timers, but timers tend to
expose them, usually in a full system crash. Other objects are less explosive,
but the symptoms caused by such mistakes can be even harder to debug.
Instead of creating specialized debugging code for the timer subsystem a
generic infrastructure is created which allows developers to verify their code
and provides an easy to enable debug facility for users in case of trouble.
The debugobjects core code keeps track of operations on static and dynamic
objects by inserting them into a hashed list and sanity checking them on
object operations and provides additional checks whenever kernel memory is
freed.
The tracked object operations are:
- initializing an object
- adding an object to a subsystem list
- deleting an object from a subsystem list
Each operation is sanity checked before the operation is executed and the
subsystem specific code can provide a fixup function which makes it possible
to prevent the damage caused by the operation. When the sanity check triggers,
a warning message and a stack trace are printed.
The list of operations can be extended if the need arises. For now it's
limited to the requirements of the first user (timers).
The core code enqueues the objects into hash buckets. The hash index is
generated from the address of the object to simplify the lookup for the check
on kfree/vfree. Each bucket has its own spinlock to avoid contention on a
global lock.
The debug code can be compiled in without being active. The runtime overhead
is minimal and could be optimized by asm alternatives. A kernel command line
option enables the debugging code.
Thanks to Ingo Molnar for review, suggestions and cleanup patches.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Greg KH <greg@kroah.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 10 | ||||
-rw-r--r-- | mm/slab.c | 10 | ||||
-rw-r--r-- | mm/slub.c | 3 | ||||
-rw-r--r-- | mm/vmalloc.c | 2 |
4 files changed, 21 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0a502e99ee22..bdd5c432c426 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/fault-inject.h> | 45 | #include <linux/fault-inject.h> |
46 | #include <linux/page-isolation.h> | 46 | #include <linux/page-isolation.h> |
47 | #include <linux/memcontrol.h> | 47 | #include <linux/memcontrol.h> |
48 | #include <linux/debugobjects.h> | ||
48 | 49 | ||
49 | #include <asm/tlbflush.h> | 50 | #include <asm/tlbflush.h> |
50 | #include <asm/div64.h> | 51 | #include <asm/div64.h> |
@@ -532,8 +533,11 @@ static void __free_pages_ok(struct page *page, unsigned int order) | |||
532 | if (reserved) | 533 | if (reserved) |
533 | return; | 534 | return; |
534 | 535 | ||
535 | if (!PageHighMem(page)) | 536 | if (!PageHighMem(page)) { |
536 | debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); | 537 | debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); |
538 | debug_check_no_obj_freed(page_address(page), | ||
539 | PAGE_SIZE << order); | ||
540 | } | ||
537 | arch_free_page(page, order); | 541 | arch_free_page(page, order); |
538 | kernel_map_pages(page, 1 << order, 0); | 542 | kernel_map_pages(page, 1 << order, 0); |
539 | 543 | ||
@@ -995,8 +999,10 @@ static void free_hot_cold_page(struct page *page, int cold) | |||
995 | if (free_pages_check(page)) | 999 | if (free_pages_check(page)) |
996 | return; | 1000 | return; |
997 | 1001 | ||
998 | if (!PageHighMem(page)) | 1002 | if (!PageHighMem(page)) { |
999 | debug_check_no_locks_freed(page_address(page), PAGE_SIZE); | 1003 | debug_check_no_locks_freed(page_address(page), PAGE_SIZE); |
1004 | debug_check_no_obj_freed(page_address(page), PAGE_SIZE); | ||
1005 | } | ||
1000 | arch_free_page(page, 0); | 1006 | arch_free_page(page, 0); |
1001 | kernel_map_pages(page, 1, 0); | 1007 | kernel_map_pages(page, 1, 0); |
1002 | 1008 | ||
@@ -110,6 +110,7 @@ | |||
110 | #include <linux/fault-inject.h> | 110 | #include <linux/fault-inject.h> |
111 | #include <linux/rtmutex.h> | 111 | #include <linux/rtmutex.h> |
112 | #include <linux/reciprocal_div.h> | 112 | #include <linux/reciprocal_div.h> |
113 | #include <linux/debugobjects.h> | ||
113 | 114 | ||
114 | #include <asm/cacheflush.h> | 115 | #include <asm/cacheflush.h> |
115 | #include <asm/tlbflush.h> | 116 | #include <asm/tlbflush.h> |
@@ -174,12 +175,14 @@ | |||
174 | SLAB_CACHE_DMA | \ | 175 | SLAB_CACHE_DMA | \ |
175 | SLAB_STORE_USER | \ | 176 | SLAB_STORE_USER | \ |
176 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ | 177 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ |
177 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) | 178 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ |
179 | SLAB_DEBUG_OBJECTS) | ||
178 | #else | 180 | #else |
179 | # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ | 181 | # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ |
180 | SLAB_CACHE_DMA | \ | 182 | SLAB_CACHE_DMA | \ |
181 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ | 183 | SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ |
182 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) | 184 | SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ |
185 | SLAB_DEBUG_OBJECTS) | ||
183 | #endif | 186 | #endif |
184 | 187 | ||
185 | /* | 188 | /* |
@@ -3760,6 +3763,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |||
3760 | 3763 | ||
3761 | local_irq_save(flags); | 3764 | local_irq_save(flags); |
3762 | debug_check_no_locks_freed(objp, obj_size(cachep)); | 3765 | debug_check_no_locks_freed(objp, obj_size(cachep)); |
3766 | if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) | ||
3767 | debug_check_no_obj_freed(objp, obj_size(cachep)); | ||
3763 | __cache_free(cachep, objp); | 3768 | __cache_free(cachep, objp); |
3764 | local_irq_restore(flags); | 3769 | local_irq_restore(flags); |
3765 | } | 3770 | } |
@@ -3785,6 +3790,7 @@ void kfree(const void *objp) | |||
3785 | kfree_debugcheck(objp); | 3790 | kfree_debugcheck(objp); |
3786 | c = virt_to_cache(objp); | 3791 | c = virt_to_cache(objp); |
3787 | debug_check_no_locks_freed(objp, obj_size(c)); | 3792 | debug_check_no_locks_freed(objp, obj_size(c)); |
3793 | debug_check_no_obj_freed(objp, obj_size(c)); | ||
3788 | __cache_free(c, (void *)objp); | 3794 | __cache_free(c, (void *)objp); |
3789 | local_irq_restore(flags); | 3795 | local_irq_restore(flags); |
3790 | } | 3796 | } |
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/cpuset.h> | 19 | #include <linux/cpuset.h> |
20 | #include <linux/mempolicy.h> | 20 | #include <linux/mempolicy.h> |
21 | #include <linux/ctype.h> | 21 | #include <linux/ctype.h> |
22 | #include <linux/debugobjects.h> | ||
22 | #include <linux/kallsyms.h> | 23 | #include <linux/kallsyms.h> |
23 | #include <linux/memory.h> | 24 | #include <linux/memory.h> |
24 | 25 | ||
@@ -1747,6 +1748,8 @@ static __always_inline void slab_free(struct kmem_cache *s, | |||
1747 | local_irq_save(flags); | 1748 | local_irq_save(flags); |
1748 | c = get_cpu_slab(s, smp_processor_id()); | 1749 | c = get_cpu_slab(s, smp_processor_id()); |
1749 | debug_check_no_locks_freed(object, c->objsize); | 1750 | debug_check_no_locks_freed(object, c->objsize); |
1751 | if (!(s->flags & SLAB_DEBUG_OBJECTS)) | ||
1752 | debug_check_no_obj_freed(object, s->objsize); | ||
1750 | if (likely(page == c->page && c->node >= 0)) { | 1753 | if (likely(page == c->page && c->node >= 0)) { |
1751 | object[c->offset] = c->freelist; | 1754 | object[c->offset] = c->freelist; |
1752 | c->freelist = object; | 1755 | c->freelist = object; |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index e33e0ae69ad1..2a39cf128aba 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/seq_file.h> | 17 | #include <linux/seq_file.h> |
18 | #include <linux/debugobjects.h> | ||
18 | #include <linux/vmalloc.h> | 19 | #include <linux/vmalloc.h> |
19 | #include <linux/kallsyms.h> | 20 | #include <linux/kallsyms.h> |
20 | 21 | ||
@@ -394,6 +395,7 @@ static void __vunmap(const void *addr, int deallocate_pages) | |||
394 | } | 395 | } |
395 | 396 | ||
396 | debug_check_no_locks_freed(addr, area->size); | 397 | debug_check_no_locks_freed(addr, area->size); |
398 | debug_check_no_obj_freed(addr, area->size); | ||
397 | 399 | ||
398 | if (deallocate_pages) { | 400 | if (deallocate_pages) { |
399 | int i; | 401 | int i; |