Diffstat (limited to 'block')

 block/blk-cgroup.c  |  2 +-
 block/blk-core.c    |  2 +-
 block/blk-ioc.c     | 92 ++++++-----------------------------------------
 block/cfq-iosched.c |  2 +-
 4 files changed, 14 insertions(+), 84 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fa8f26309444..75642a352a8f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1659,7 +1659,7 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 		if (ioc) {
 			ioc_cgroup_changed(ioc);
-			put_io_context(ioc, NULL);
+			put_io_context(ioc);
 		}
 	}
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 636702575118..532b3a21b383 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
 		if (rq->elv.icq)
-			put_io_context(rq->elv.icq->ioc, q);
+			put_io_context(rq->elv.icq->ioc);
 	}
 
 	mempool_free(rq, q->rq.rq_pool);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 7490b6da2453..9884fd7427fe 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release. As the ioc's can't be the same, this is okay but
- * makes lockdep whine. Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)		0
-#define ioc_release_depth_inc(q)	do { } while (0)
-#define ioc_release_depth_dec(q)	do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
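For context on the deletion above: the ioc_release_depth*() macros fed a nesting depth into spin_lock_irqsave_nested() so that lockdep would treat a nested acquisition of another ioc's lock as a distinct subclass rather than a recursive self-deadlock. A minimal sketch of that idiom follows; "struct twin" is made up, while spin_lock_nested() and SINGLE_DEPTH_NESTING are the real kernel primitives:

/*
 * Illustrative sketch of the lockdep-subclass idiom the removed
 * ioc_release_depth*() macros implemented.
 */
struct twin {
	spinlock_t lock;	/* both instances share one lock class */
};

static void couple(struct twin *a, struct twin *b)
{
	spin_lock(&a->lock);
	/*
	 * Taking the same lock class twice would normally make lockdep
	 * report a recursive deadlock; a non-zero subclass tells it this
	 * is a different instance, one level deeper.
	 */
	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	/* ... work on both objects ... */
	spin_unlock(&b->lock);
	spin_unlock(&a->lock);
}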
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn) {
-		ioc_release_depth_inc(q);
+	if (et->ops.elevator_exit_icq_fn)
 		et->ops.elevator_exit_icq_fn(icq);
-		ioc_release_depth_dec(q);
-	}
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -149,81 +131,29 @@ static void ioc_release_fn(struct work_struct *work)
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero. If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q. This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-	struct request_queue *last_q = locked_q;
 	unsigned long flags;
 
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-	if (locked_q)
-		lockdep_assert_held(locked_q->queue_lock);
-
-	if (!atomic_long_dec_and_test(&ioc->refcount))
-		return;
 
 	/*
-	 * Destroy @ioc. This is a bit messy because icq's are chained
-	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our icq_list and then
-	 * for each icq the outer matching queue_lock should be grabbed.
-	 * ie. We need to do reverse-order double lock dancing.
-	 *
-	 * Another twist is that we are often called with one of the
-	 * matching queue_locks held as indicated by @locked_q, which
-	 * prevents performing double-lock dance for other queues.
-	 *
-	 * So, we do it in two stages. The fast path uses the queue_lock
-	 * the caller is holding and, if other queues need to be accessed,
-	 * uses trylock to avoid introducing locking dependency. This can
-	 * handle most cases, especially if @ioc was performing IO on only
-	 * single device.
-	 *
-	 * If trylock doesn't cut it, we defer to @ioc->release_work which
-	 * can do all the double-locking dancing.
+	 * Releasing ioc requires reverse order double locking and we may
+	 * already be holding a queue_lock. Do it asynchronously from wq.
 	 */
-	spin_lock_irqsave_nested(&ioc->lock, flags,
-				 ioc_release_depth(locked_q));
-
-	while (!hlist_empty(&ioc->icq_list)) {
-		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
-
-		if (this_q != last_q) {
-			if (last_q && last_q != locked_q)
-				spin_unlock(last_q->queue_lock);
-			last_q = NULL;
-
-			/* spin_trylock() always successes in UP case */
-			if (this_q != locked_q &&
-			    !spin_trylock(this_q->queue_lock))
-				break;
-			last_q = this_q;
-			continue;
-		}
-		ioc_exit_icq(icq);
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
+		spin_lock_irqsave(&ioc->lock, flags);
+		if (!hlist_empty(&ioc->icq_list))
+			schedule_work(&ioc->release_work);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
-
-	if (last_q && last_q != locked_q)
-		spin_unlock(last_q->queue_lock);
-
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	/* if no icq is left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->icq_list))
-		kmem_cache_free(iocontext_cachep, ioc);
-	else
-		schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
 
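The rewritten put_io_context() never touches a queue_lock on the put path: the final put takes only ioc->lock and, if any icqs remain, punts the reverse-order double locking to release_work, which runs ioc_release_fn() from process context. A minimal sketch of that defer-teardown-to-a-workqueue pattern; every "gizmo" name is illustrative, not block-layer API:

/*
 * Sketch: the final put may run with locks held that the teardown
 * itself needs, so the expensive part is always pushed to a workqueue.
 */
struct gizmo {
	atomic_long_t refcount;
	spinlock_t lock;
	struct hlist_head children;		/* like ioc->icq_list */
	struct work_struct release_work;	/* runs the teardown fn */
};

static void gizmo_put(struct gizmo *g)
{
	unsigned long flags;
	bool defer;

	if (!atomic_long_dec_and_test(&g->refcount))
		return;				/* not the last reference */

	spin_lock_irqsave(&g->lock, flags);
	defer = !hlist_empty(&g->children);
	if (defer)
		schedule_work(&g->release_work);	/* locking dance in wq */
	spin_unlock_irqrestore(&g->lock, flags);

	if (!defer)
		kfree(g);	/* no children: nothing needs the dance */
}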
@@ -238,7 +168,7 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	atomic_dec(&ioc->nr_tasks);
-	put_io_context(ioc, NULL);
+	put_io_context(ioc);
 }
 
 /**
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index da21c24dbed3..5684df6848bc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1794,7 +1794,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqd->active_queue = NULL;
 
 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+		put_io_context(cfqd->active_cic->icq.ioc);
 		cfqd->active_cic = NULL;
 	}
 }