 block/blk-ioc.c | 21 ++++++++++++++-------
 1 files changed, 14 insertions, 7 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 9884fd7427fe..8b782a63c297 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -80,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
         struct io_context *ioc = container_of(work, struct io_context,
                                               release_work);
         struct request_queue *last_q = NULL;
+        unsigned long flags;
 
-        spin_lock_irq(&ioc->lock);
+        /*
+         * Exiting icq may call into put_io_context() through elevator
+         * which will trigger lockdep warning. The ioc's are guaranteed to
+         * be different, use a different locking subclass here. Use
+         * irqsave variant as there's no spin_lock_irq_nested().
+         */
+        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
         while (!hlist_empty(&ioc->icq_list)) {
                 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -103,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
                          */
                         if (last_q) {
                                 spin_unlock(last_q->queue_lock);
-                                spin_unlock_irq(&ioc->lock);
+                                spin_unlock_irqrestore(&ioc->lock, flags);
                                 blk_put_queue(last_q);
                         } else {
-                                spin_unlock_irq(&ioc->lock);
+                                spin_unlock_irqrestore(&ioc->lock, flags);
                         }
 
                         last_q = this_q;
-                        spin_lock_irq(this_q->queue_lock);
-                        spin_lock(&ioc->lock);
+                        spin_lock_irqsave(this_q->queue_lock, flags);
+                        spin_lock_nested(&ioc->lock, 1);
                         continue;
                 }
                 ioc_exit_icq(icq);
@@ -119,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
 
         if (last_q) {
                 spin_unlock(last_q->queue_lock);
-                spin_unlock_irq(&ioc->lock);
+                spin_unlock_irqrestore(&ioc->lock, flags);
                 blk_put_queue(last_q);
         } else {
-                spin_unlock_irq(&ioc->lock);
+                spin_unlock_irqrestore(&ioc->lock, flags);
         }
 
         kmem_cache_free(iocontext_cachep, ioc);
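Below is a minimal, self-contained sketch (not part of the patch) of the lockdep annotation the hunks above rely on: two spinlocks that share a lock class are held at the same time, so the outer acquisition is marked with subclass 1 via spin_lock_irqsave_nested() to keep lockdep from reporting false recursive locking. The names struct demo_ctx, demo_put() and demo_release() are hypothetical; only the *_nested() calls and the subclass argument mirror what the patch does.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical object standing in for struct io_context. */
struct demo_ctx {
        spinlock_t lock;
        int refcount;
};

/* Inner acquisition: default lockdep subclass 0, like put_io_context(). */
static void demo_put(struct demo_ctx *ctx)
{
        unsigned long flags;
        bool free_it;

        spin_lock_irqsave(&ctx->lock, flags);
        free_it = (--ctx->refcount == 0);
        spin_unlock_irqrestore(&ctx->lock, flags);

        if (free_it)
                kfree(ctx);
}

/*
 * Outer acquisition: while this lock is held the tear-down work may
 * reach demo_put() on a *different* demo_ctx. Both locks share one
 * lock class, so lockdep would flag recursive locking unless the
 * outer acquisition carries a distinct subclass. There is no
 * spin_lock_irq_nested(), hence the irqsave variant.
 */
static void demo_release(struct demo_ctx *outer, struct demo_ctx *nested)
{
        unsigned long flags;

        spin_lock_irqsave_nested(&outer->lock, flags, 1);
        demo_put(nested);               /* same lock class, different instance */
        spin_unlock_irqrestore(&outer->lock, flags);
}

The patch applies the same idea in two places: the first acquisition uses spin_lock_irqsave_nested(&ioc->lock, flags, 1), and the re-acquisition inside the loop uses spin_lock_nested(&ioc->lock, 1) because interrupts are already saved and disabled by spin_lock_irqsave() on the queue lock.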