author		Tejun Heo <tj@kernel.org>	2012-02-07 01:51:30 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-02-07 01:51:30 -0500
commit		11a3122f6cf2d988a77eb8883d0fc49cd013a6d5 (patch)
tree		ded8ea8a2982754ff0c58448a7ed2e59487104cb /block/blk-ioc.c
parent		822bfa51ce44f2c63c300fdb76dc99c4d5a5ca9f (diff)
block: strip out locking optimization in put_io_context()
put_io_context() performed a complex trylock dance to avoid deferring
ioc release to the workqueue. It was also broken on UP because trylock
was always assumed to succeed, which resulted in an unbalanced
preemption count.
While there are ways to fix the UP breakage, even the most pathological
microbenchmark (forced ioc allocation and a tight fork/exit loop) fails
to show any appreciable performance benefit from the optimization.
Strip it out. If workloads turn out to be affected by this change, the
simpler optimization from the discussion thread can be applied later.
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <1328514611.21268.66.camel@sli10-conroe>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
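
[Editor's note] As a side note on the UP breakage mentioned above, a minimal illustrative sketch follows. It is not code from this commit; the helper name and the "busy" condition are made up. The point it shows: with CONFIG_SMP=n, spin_trylock() reduces to preempt_disable() plus a constant-true result, so any branch written for trylock failure is dead code on UP, and every successful trylock must be matched by an unlock on every exit path or preempt_count() ends up unbalanced.

#include <linux/spinlock.h>
#include <linux/blkdev.h>

/*
 * Illustrative sketch only -- not code from this commit; the function
 * name and "busy" flag are hypothetical.  On UP, spin_trylock() always
 * "succeeds" (it just disables preemption), so the failure branch is
 * never taken and any early return that skips the unlock leaves
 * preemption disabled.
 */
static void icq_walk_sketch(struct request_queue *q, bool busy)
{
	if (!spin_trylock(q->queue_lock))
		return;				/* never taken on UP */

	if (busy)
		return;				/* bug on UP: preempt stays disabled */

	spin_unlock(q->queue_lock);		/* balances the trylock */
}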
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--	block/blk-ioc.c	92
1 file changed, 11 insertions, 81 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 7490b6da2453..9884fd7427fe 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release. As the ioc's can't be the same, this is okay but
- * makes lockdep whine. Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)		0
-#define ioc_release_depth_inc(q)	do { } while (0)
-#define ioc_release_depth_dec(q)	do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn) {
-		ioc_release_depth_inc(q);
+	if (et->ops.elevator_exit_icq_fn)
 		et->ops.elevator_exit_icq_fn(icq);
-		ioc_release_depth_dec(q);
-	}
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -149,81 +131,29 @@ static void ioc_release_fn(struct work_struct *work)
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero. If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q. This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-	struct request_queue *last_q = locked_q;
 	unsigned long flags;
 
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-	if (locked_q)
-		lockdep_assert_held(locked_q->queue_lock);
-
-	if (!atomic_long_dec_and_test(&ioc->refcount))
-		return;
 
 	/*
-	 * Destroy @ioc. This is a bit messy because icq's are chained
-	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our icq_list and then
-	 * for each icq the outer matching queue_lock should be grabbed.
-	 * ie. We need to do reverse-order double lock dancing.
-	 *
-	 * Another twist is that we are often called with one of the
-	 * matching queue_locks held as indicated by @locked_q, which
-	 * prevents performing double-lock dance for other queues.
-	 *
-	 * So, we do it in two stages. The fast path uses the queue_lock
-	 * the caller is holding and, if other queues need to be accessed,
-	 * uses trylock to avoid introducing locking dependency. This can
-	 * handle most cases, especially if @ioc was performing IO on only
-	 * single device.
-	 *
-	 * If trylock doesn't cut it, we defer to @ioc->release_work which
-	 * can do all the double-locking dancing.
+	 * Releasing ioc requires reverse order double locking and we may
+	 * already be holding a queue_lock. Do it asynchronously from wq.
 	 */
-	spin_lock_irqsave_nested(&ioc->lock, flags,
-				 ioc_release_depth(locked_q));
-
-	while (!hlist_empty(&ioc->icq_list)) {
-		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
-
-		if (this_q != last_q) {
-			if (last_q && last_q != locked_q)
-				spin_unlock(last_q->queue_lock);
-			last_q = NULL;
-
-			/* spin_trylock() always successes in UP case */
-			if (this_q != locked_q &&
-			    !spin_trylock(this_q->queue_lock))
-				break;
-			last_q = this_q;
-			continue;
-		}
-		ioc_exit_icq(icq);
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
+		spin_lock_irqsave(&ioc->lock, flags);
+		if (!hlist_empty(&ioc->icq_list))
+			schedule_work(&ioc->release_work);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
-
-	if (last_q && last_q != locked_q)
-		spin_unlock(last_q->queue_lock);
-
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	/* if no icq is left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->icq_list))
-		kmem_cache_free(iocontext_cachep, ioc);
-	else
-		schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -238,7 +168,7 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	atomic_dec(&ioc->nr_tasks);
-	put_io_context(ioc, NULL);
+	put_io_context(ioc);
 }
 
 /**
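
[Editor's note] Purely as an illustration of what the simplification means for callers, a hedged sketch follows. The function and its bookkeeping are hypothetical, not taken from the kernel tree. The point it shows: with the @locked_q hint gone, the final put only takes ioc->lock and, when icqs remain, punts the reverse-order double locking to release_work, so calling it with a queue_lock held is fine and needs no hint.

#include <linux/blkdev.h>
#include <linux/iocontext.h>

/*
 * Hypothetical caller, not part of this patch.  It drops an io_context
 * reference while q->queue_lock is held.  Before this change the caller
 * could pass the held queue as a hint -- put_io_context(ioc, q) -- or
 * NULL, and the final put might walk other queues' locks inline.  After
 * it, the final put only takes ioc->lock and defers any icq teardown to
 * release_work, so no hint exists and no queue_lock is touched here.
 */
static void finish_request_sketch(struct request_queue *q,
				  struct io_context *ioc)
{
	spin_lock_irq(q->queue_lock);
	/* ... per-queue bookkeeping under queue_lock ... */
	put_io_context(ioc);		/* was: put_io_context(ioc, q) */
	spin_unlock_irq(q->queue_lock);
}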