author     Vegard Nossum <vegard.nossum@gmail.com>   2008-08-31 17:39:21 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2008-09-01 03:47:16 -0400
commit     673d62cc5ea6fca046650f17f77985b112c62322 (patch)
tree       7cde0d756ffa71ad732cc079d28254b256c14e68
parent     bef69ea0dcce574a425feb0a5aa4c63dd108b9a6 (diff)
debugobjects: fix lockdep warning
Daniel J. Blueman reported:
> =======================================================
> [ INFO: possible circular locking dependency detected ]
> 2.6.27-rc4-224c #1
> -------------------------------------------------------
> hald/4680 is trying to acquire lock:
> (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
>
> but task is already holding lock:
> (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
> debug_object_free+0x5c/0x120
We fix it by moving the actual freeing outside the lock (the lock now
only protects the list).

The pool lock is also promoted to irq-safe (suggested by Dan). This is
necessary because free_object() is now called outside the irq-disabled
region, so we need to protect against an interrupt handler which calls
debug_object_init().
[tglx@linutronix.de: added hlist_move_list helper to avoid looping
through the list twice]
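
For illustration, the shape of the fix (a minimal sketch based on the
debug_objects_oom() hunk below, with the surrounding function elided):

        spin_lock_irqsave(&db->lock, flags);
        /* O(1) detach: only list manipulation happens under db->lock */
        hlist_move_list(&db->list, &freelist);
        spin_unlock_irqrestore(&db->lock, flags);

        /* free outside db->lock, so free_object() can take pool_lock
         * without nesting it inside the bucket lock */
        hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                hlist_del(&obj->node);
                free_object(obj);
        }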
Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 include/linux/list.h |   13 +++++++++++++
 lib/debugobjects.c   |   31 +++++++++++++++++++++----------
 2 files changed, 36 insertions(+), 8 deletions(-)
diff --git a/include/linux/list.h b/include/linux/list.h
index db35ef02e745..969f6e92d089 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -619,6 +619,19 @@ static inline void hlist_add_after(struct hlist_node *n,
                 next->next->pprev = &next->next;
 }
 
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+                                   struct hlist_head *new)
+{
+        new->first = old->first;
+        if (new->first)
+                new->first->pprev = &new->first;
+        old->first = NULL;
+}
+
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
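
The tglx note above refers to what this helper avoids: without it, the
detach step would have to walk the list once just to move the entries
(and then a second time to free them), along these lines (illustration
only):

        hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &freelist);
        }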
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 45a6bde762d1..e3ab374e1334 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 
 /*
  * Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 static void free_object(struct debug_obj *obj)
 {
         unsigned long idx = (unsigned long)(obj - obj_static_pool);
+        unsigned long flags;
 
         if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 hlist_add_head(&obj->node, &obj_pool);
                 obj_pool_free++;
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
         } else {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
                 kmem_cache_free(obj_cache, obj);
         }
 }
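
The irqsave conversion above is what makes free_object() safe to call
with interrupts enabled; without it, a hypothetical self-deadlock on a
single CPU would look like:

        free_object()
          spin_lock(&pool_lock)            <- irqs still enabled
            <interrupt>
              debug_object_init()
                alloc_object()
                  spin_lock(&pool_lock)    <- spins on the lock we hold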
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
 {
         struct debug_bucket *db = obj_hash;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj *obj;
         unsigned long flags;
         int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
 
         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                 spin_lock_irqsave(&db->lock, flags);
-                hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+                hlist_move_list(&db->list, &freelist);
+                spin_unlock_irqrestore(&db->lock, flags);
+
+                /* Now free them */
+                hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                         hlist_del(&obj->node);
                         free_object(obj);
                 }
-                spin_unlock_irqrestore(&db->lock, flags);
         }
 }
 
@@ -498,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
                 return;
         default:
                 hlist_del(&obj->node);
+                spin_unlock_irqrestore(&db->lock, flags);
                 free_object(obj);
-                break;
+                return;
         }
 out_unlock:
         spin_unlock_irqrestore(&db->lock, flags);
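
Note the break becoming a return: once db->lock has been dropped before
free_object(), falling through to out_unlock would unlock db->lock a
second time. Annotated (sketch of the new default case only):

        default:
                hlist_del(&obj->node);
                spin_unlock_irqrestore(&db->lock, flags); /* drop bucket lock first */
                free_object(obj);   /* takes pool_lock without db->lock held */
                return;             /* already unlocked; must not hit out_unlock */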
@@ -510,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj_descr *descr;
         enum debug_obj_state state;
         struct debug_bucket *db;
@@ -545,11 +553,18 @@ repeat:
                         goto repeat;
                 default:
                         hlist_del(&obj->node);
-                        free_object(obj);
+                        hlist_add_head(&obj->node, &freelist);
                         break;
                 }
         }
         spin_unlock_irqrestore(&db->lock, flags);
+
+        /* Now free them */
+        hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+                hlist_del(&obj->node);
+                free_object(obj);
+        }
+
         if (cnt > debug_objects_maxchain)
                 debug_objects_maxchain = cnt;
 }