author     Vegard Nossum <vegard.nossum@gmail.com>       2008-06-14 18:47:36 -0400
committer  Thomas Gleixner <tglx@linutronix.de>          2008-06-18 05:09:54 -0400
commit     50db04dd9c74178e68a981a7127c37252ffb3242 (patch)
tree       ac961b1ccc6f5073537be2b4b731bc79ec5448c4 /lib
parent     952f4a0a9b27e6dbd5d32e330b3f609ebfa0b061 (diff)
debugobjects: fix lockdep warning
Daniel J Blueman reported:
| =======================================================
| [ INFO: possible circular locking dependency detected ]
| 2.6.26-rc5-201c #1
| -------------------------------------------------------
| nscd/3669 is trying to acquire lock:
| (&n->list_lock){.+..}, at: [<ffffffff802bab03>] deactivate_slab+0x173/0x1e0
|
| but task is already holding lock:
| (&obj_hash[i].lock){++..}, at: [<ffffffff803fa56f>]
| __debug_object_init+0x2f/0x350
|
| which lock already depends on the new lock.
There are two locks involved here; the first is a SLUB-local lock, and
the second is a debugobjects-local lock. They are basically taken in two
different orders:
1. SLUB { debugobjects { ... } }
2. debugobjects { SLUB { ... } }
This patch changes pattern #2 by filling the memory pool (i.e. making
the call into SLUB/kmalloc()) outside the debugobjects lock, so now the
two patterns look like this:
1. SLUB { debugobjects { ... } }
2. SLUB { } debugobjects { ... }
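
Editor's illustration (not part of the commit message): the report above is
lockdep's classic AB-BA inversion. A minimal userspace analogue of the two
original lock orders, using POSIX mutexes, with the hypothetical names
slub_lock and debugobjects_lock standing in for n->list_lock and
obj_hash[i].lock:

/* Userspace analogue of the AB-BA inversion lockdep reports above.
 * slub_lock / debugobjects_lock are hypothetical stand-ins for the kernel's
 * n->list_lock and obj_hash[i].lock. Build with: cc -pthread abba.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slub_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t debugobjects_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pattern #1: SLUB { debugobjects { ... } } */
static void *pattern_one(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&slub_lock);
	pthread_mutex_lock(&debugobjects_lock);
	pthread_mutex_unlock(&debugobjects_lock);
	pthread_mutex_unlock(&slub_lock);
	return NULL;
}

/* Pattern #2: debugobjects { SLUB { ... } } -- the opposite order */
static void *pattern_two(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&debugobjects_lock);
	pthread_mutex_lock(&slub_lock);
	pthread_mutex_unlock(&slub_lock);
	pthread_mutex_unlock(&debugobjects_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* If each thread wins its first lock before the other releases,
	 * neither can take its second lock: this circular dependency is
	 * what lockdep flags, even when the deadlock never actually fires.
	 */
	pthread_create(&a, NULL, pattern_one, NULL);
	pthread_create(&b, NULL, pattern_two, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock this run, but the ordering is still unsafe");
	return 0;
}

The patch removes order #2 by making debugobjects call into the allocator
before it takes any of its own locks.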
[ daniel.blueman@gmail.com: pool_lock needs to be taken irq safe in fill_pool ]
Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'lib')
-rw-r--r--  lib/debugobjects.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..85b18d79be89 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 	struct debug_obj *new;
+	unsigned long flags;
 
 	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
 		return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
 		if (!new)
 			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 	}
 	return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
 	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
 	spin_lock(&pool_lock);
 	if (obj_pool.first) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
 	}
 	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
 	return obj;
 }
 
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
+	fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	spin_lock_irqsave(&db->lock, flags);
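
Editor's sketch (not from the patch itself): the technique applied above is
"allocate outside the lock, link inside the lock", plus calling fill_pool()
before any debugobjects lock is held. A minimal userspace analogue of that
refill pattern, where struct obj, pool_lock, obj_pool and POOL_MIN_LEVEL are
illustrative stand-ins rather than the kernel's definitions:

/* Userspace sketch of the "allocate outside the lock, insert under the lock"
 * refill pattern. All names here are illustrative, not the kernel's.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
};

#define POOL_MIN_LEVEL 16

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *obj_pool;
static int obj_pool_free;

static int fill_pool(void)
{
	while (obj_pool_free < POOL_MIN_LEVEL) {
		/* The allocation happens with no pool lock held, so the
		 * allocator's internal locks can never nest inside pool_lock.
		 */
		struct obj *new = calloc(1, sizeof(*new));

		if (!new)
			break;

		pthread_mutex_lock(&pool_lock);
		new->next = obj_pool;		/* push onto the free list */
		obj_pool = new;
		obj_pool_free++;
		pthread_mutex_unlock(&pool_lock);
	}
	return obj_pool_free;
}

int main(void)
{
	printf("pool filled to %d objects\n", fill_pool());
	return 0;
}

Because fill_pool() can now also run in contexts where interrupts are still
enabled, while other debugobjects paths take pool_lock with interrupts
disabled, the real code switches pool_lock to spin_lock_irqsave(), which is
the follow-up noted in the changelog.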