path: root/block/blk-ioc.c
author    Tejun Heo <tj@kernel.org>       2012-02-11 06:37:25 -0500
committer Jens Axboe <axboe@kernel.dk>    2012-02-11 06:37:25 -0500
commit    d8c66c5d59247e25a69428aced0b79d33b9c66d6 (patch)
tree      1aed7dc560df36157d8cde6fdebc1d86f4ed61ad /block/blk-ioc.c
parent    f6302f1bcd75a042df69866d98b8d775a668f8f1 (diff)
block: fix lockdep warning on io_context release put_io_context()
11a3122f6c "block: strip out locking optimization in put_io_context()" removed the ioc_lock depth lockdep annotation along with the locking optimization; however, while recursing from put_io_context() is no longer possible, ioc_release_fn() may still end up putting the last reference of another ioc through the elevator, which will grab that ioc->lock and trigger a spurious (as the ioc is always a different one) A-A deadlock warning.

As this nesting can only happen once from ioc_release_fn(), using a non-zero subclass from ioc_release_fn() is enough. Use subclass 1.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
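For context, lockdep tracks locks by class rather than by instance, so taking a second io_context's ioc->lock while another one is already held looks like recursive (A-A) locking even though the two spinlocks are distinct objects; a non-zero subclass tells lockdep to treat the nested acquisition as a separate level. Below is a minimal sketch of that pattern outside the block layer, using a hypothetical struct my_obj and release_nested() rather than the real io_context code:

#include <linux/spinlock.h>

struct my_obj {
        spinlock_t lock;        /* every instance shares one lockdep class */
};

/* Release @inner while the caller already holds another my_obj's ->lock. */
static void release_nested(struct my_obj *inner)
{
        unsigned long flags;

        /*
         * A plain spin_lock_irqsave() here would trip lockdep's A-A check
         * because both locks belong to the same class.  Subclass 1 marks
         * this acquisition as a distinct nesting level; the irqsave
         * variant is used since there is no spin_lock_irq_nested().
         */
        spin_lock_irqsave_nested(&inner->lock, flags, 1);
        /* ... tear down the object's resources ... */
        spin_unlock_irqrestore(&inner->lock, flags);
}

The same reasoning applies where the patch re-acquires the lock inside the loop with spin_lock_nested(&ioc->lock, 1).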
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--   block/blk-ioc.c   21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 9884fd7427fe..8b782a63c297 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -80,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
         struct io_context *ioc = container_of(work, struct io_context,
                                               release_work);
         struct request_queue *last_q = NULL;
+        unsigned long flags;
 
-        spin_lock_irq(&ioc->lock);
+        /*
+         * Exiting icq may call into put_io_context() through elevator
+         * which will trigger lockdep warning.  The ioc's are guaranteed to
+         * be different, use a different locking subclass here.  Use
+         * irqsave variant as there's no spin_lock_irq_nested().
+         */
+        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
         while (!hlist_empty(&ioc->icq_list)) {
                 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -103,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
                          */
                         if (last_q) {
                                 spin_unlock(last_q->queue_lock);
-                                spin_unlock_irq(&ioc->lock);
+                                spin_unlock_irqrestore(&ioc->lock, flags);
                                 blk_put_queue(last_q);
                         } else {
-                                spin_unlock_irq(&ioc->lock);
+                                spin_unlock_irqrestore(&ioc->lock, flags);
                         }
 
                         last_q = this_q;
-                        spin_lock_irq(this_q->queue_lock);
-                        spin_lock(&ioc->lock);
+                        spin_lock_irqsave(this_q->queue_lock, flags);
+                        spin_lock_nested(&ioc->lock, 1);
                         continue;
                 }
                 ioc_exit_icq(icq);
@@ -119,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
 
         if (last_q) {
                 spin_unlock(last_q->queue_lock);
-                spin_unlock_irq(&ioc->lock);
+                spin_unlock_irqrestore(&ioc->lock, flags);
                 blk_put_queue(last_q);
         } else {
-                spin_unlock_irq(&ioc->lock);
+                spin_unlock_irqrestore(&ioc->lock, flags);
         }
 
         kmem_cache_free(iocontext_cachep, ioc);