path: root/block/blk-ioc.c
author     Tejun Heo <tj@kernel.org>        2011-12-13 18:33:39 -0500
committer  Jens Axboe <axboe@kernel.dk>     2011-12-13 18:33:39 -0500
commit     b2efa05265d62bc29f3a64400fad4b44340eedb8 (patch)
tree       4555f0b5f0025c099997b81f7b3f8bc48b44220d /block/blk-ioc.c
parent     f1a4f4d35ff30a328d5ea28f6cc826b2083111d2 (diff)
block, cfq: unlink cfq_io_context's immediately
A cic is the association between an io_context and a request_queue. A cic is linked from both the ioc and the q and should be destroyed when either one goes away. As ioc and q each have their own lock, locking becomes a bit complex - both lock orders work for removal from one side but not from the other.

Currently, cfq tries to circumvent this locking order issue with RCU. ioc->lock nests inside queue_lock, but the radix tree and the cic's are also protected by RCU, allowing either side to walk its list without grabbing a lock. This rather unconventional use of RCU quickly devolves into extremely fragile convolution. e.g. The following is from cfqd going away too soon after ioc and q exits raced.

 general protection fault: 0000 [#1] PREEMPT SMP
 CPU 2
 Modules linked in:
 [   88.503444]
 Pid: 599, comm: hexdump Not tainted 3.1.0-rc10-work+ #158 Bochs Bochs
 RIP: 0010:[<ffffffff81397628>] [<ffffffff81397628>] cfq_exit_single_io_context+0x58/0xf0
 ...
 Call Trace:
  [<ffffffff81395a4a>] call_for_each_cic+0x5a/0x90
  [<ffffffff81395ab5>] cfq_exit_io_context+0x15/0x20
  [<ffffffff81389130>] exit_io_context+0x100/0x140
  [<ffffffff81098a29>] do_exit+0x579/0x850
  [<ffffffff81098d5b>] do_group_exit+0x5b/0xd0
  [<ffffffff81098de7>] sys_exit_group+0x17/0x20
  [<ffffffff81b02f2b>] system_call_fastpath+0x16/0x1b

The only real hot path here is cic lookup during request initialization, and avoiding extra locking there requires very confined use of RCU. This patch makes cic removal from both ioc and request_queue perform double-locking and unlink immediately.

* From the q side, the change is almost trivial as ioc->lock nests inside queue_lock. It just needs to grab each ioc->lock as it walks cic_list and unlink it.

* From the ioc side, it's a bit more difficult because of the inverted lock order. The ioc needs its lock to walk its cic_list but can't then grab the matching queue_lock, so it needs to perform unlock-relock dancing.

  Unlinking is now wholly done from put_io_context() and the fast path is optimized by using the queue_lock the caller already holds, which is by far the most common case. If the ioc accessed multiple devices, it tries with trylock. In the unlikely case of fast path failure, it falls back to the full double-locking dance from a workqueue.

Double-locking isn't the prettiest thing in the world, but it's *far* simpler and more understandable than the RCU trick, without adding any meaningful overhead. This still leaves a lot of now-unnecessary RCU logic; future patches will trim it.

-v2: Vivek pointed out that cic->q was being dereferenced after cic->release() was called. Updated to use the local variable @this_q instead.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
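For reference, the trylock-or-defer idea described above can be sketched in plain userspace C. Every name below (inner_lock, outer_lock, unlink_one, release_fast_path, release_slow_path) is made up for illustration, and the workqueue deferral is collapsed into a direct call; this is not the kernel implementation, only the locking pattern.

/*
 * Sketch of "reverse-order lock acquisition via trylock, with a slow-path
 * fallback that takes the locks in the correct order".
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of ioc->lock */
static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of queue_lock */

/* Stand-in for unlinking one cic; requires both locks to be held. */
static void unlink_one(void)
{
	printf("unlinked while holding both locks\n");
}

/* Slow path: free to block, so take the locks in the correct nesting order. */
static void release_slow_path(void)
{
	pthread_mutex_lock(&outer_lock);	/* outer first ... */
	pthread_mutex_lock(&inner_lock);	/* ... then inner */
	unlink_one();
	pthread_mutex_unlock(&inner_lock);
	pthread_mutex_unlock(&outer_lock);
}

/*
 * Fast path: the inner lock is already held and the outer one is wanted,
 * i.e. the reverse of the normal nesting order.  Blocking here could
 * deadlock, so only trylock is allowed; on failure, defer the work.
 */
static void release_fast_path(void)
{
	pthread_mutex_lock(&inner_lock);

	if (pthread_mutex_trylock(&outer_lock) == 0) {
		unlink_one();
		pthread_mutex_unlock(&outer_lock);
		pthread_mutex_unlock(&inner_lock);
		return;
	}

	/* Wrong-order acquisition failed; drop our lock and defer. */
	pthread_mutex_unlock(&inner_lock);
	release_slow_path();
}

int main(void)
{
	release_fast_path();
	return 0;
}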
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--  block/blk-ioc.c | 166
1 file changed, 137 insertions(+), 29 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 6f59fbad93d9..fb23965595da 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,55 +29,164 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-static void cfq_dtor(struct io_context *ioc)
+/*
+ * Releasing ioc may nest into another put_io_context() leading to nested
+ * fast path release. As the ioc's can't be the same, this is okay but
+ * makes lockdep whine. Keep track of nesting and use it as subclass.
+ */
+#ifdef CONFIG_LOCKDEP
+#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
+#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
+#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
+#else
+#define ioc_release_depth(q)		0
+#define ioc_release_depth_inc(q)	do { } while (0)
+#define ioc_release_depth_dec(q)	do { } while (0)
+#endif
+
+/*
+ * Slow path for ioc release in put_io_context(). Performs double-lock
+ * dancing to unlink all cic's and then frees ioc.
+ */
+static void ioc_release_fn(struct work_struct *work)
 {
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	struct io_context *ioc = container_of(work, struct io_context,
+					      release_work);
+	struct request_queue *last_q = NULL;
+
+	spin_lock_irq(&ioc->lock);
+
+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			/*
+			 * Need to switch to @this_q. Once we release
+			 * @ioc->lock, it can go away along with @cic.
+			 * Hold on to it.
+			 */
+			__blk_get_queue(this_q);
+
+			/*
+			 * blk_put_queue() might sleep thanks to kobject
+			 * idiocy. Always release both locks, put and
+			 * restart.
+			 */
+			if (last_q) {
+				spin_unlock(last_q->queue_lock);
+				spin_unlock_irq(&ioc->lock);
+				blk_put_queue(last_q);
+			} else {
+				spin_unlock_irq(&ioc->lock);
+			}
+
+			last_q = this_q;
+			spin_lock_irq(this_q->queue_lock);
+			spin_lock(&ioc->lock);
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}
 
-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->dtor(ioc);
+	if (last_q) {
+		spin_unlock(last_q->queue_lock);
+		spin_unlock_irq(&ioc->lock);
+		blk_put_queue(last_q);
+	} else {
+		spin_unlock_irq(&ioc->lock);
 	}
+
+	kmem_cache_free(iocontext_cachep, ioc);
 }
 
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
+ * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero.
+ * zero. If the caller is holding queue_lock of a queue, it can indicate
+ * that with @locked_q. This is an optimization hint and the caller is
+ * allowed to pass in %NULL even when it's holding a queue_lock.
  */
-void put_io_context(struct io_context *ioc)
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 {
+	struct request_queue *last_q = locked_q;
+	unsigned long flags;
+
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	if (locked_q)
+		lockdep_assert_held(locked_q->queue_lock);
 
 	if (!atomic_long_dec_and_test(&ioc->refcount))
 		return;
 
-	rcu_read_lock();
-	cfq_dtor(ioc);
-	rcu_read_unlock();
-
-	kmem_cache_free(iocontext_cachep, ioc);
-}
-EXPORT_SYMBOL(put_io_context);
+	/*
+	 * Destroy @ioc. This is a bit messy because cic's are chained
+	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
+	 * The inner ioc->lock should be held to walk our cic_list and then
+	 * for each cic the outer matching queue_lock should be grabbed.
+	 * ie. We need to do reverse-order double lock dancing.
+	 *
+	 * Another twist is that we are often called with one of the
+	 * matching queue_locks held as indicated by @locked_q, which
+	 * prevents performing double-lock dance for other queues.
+	 *
+	 * So, we do it in two stages. The fast path uses the queue_lock
+	 * the caller is holding and, if other queues need to be accessed,
+	 * uses trylock to avoid introducing locking dependency. This can
+	 * handle most cases, especially if @ioc was performing IO on only
+	 * single device.
+	 *
+	 * If trylock doesn't cut it, we defer to @ioc->release_work which
+	 * can do all the double-locking dancing.
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags,
+				 ioc_release_depth(locked_q));
+
+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			if (last_q && last_q != locked_q)
+				spin_unlock(last_q->queue_lock);
+			last_q = NULL;
+
+			if (!spin_trylock(this_q->queue_lock))
+				break;
+			last_q = this_q;
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}
 
-static void cfq_exit(struct io_context *ioc)
-{
-	rcu_read_lock();
+	if (last_q && last_q != locked_q)
+		spin_unlock(last_q->queue_lock);
 
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-							cic_list);
-		cic->exit(ioc);
-	}
-	rcu_read_unlock();
+	/* if no cic's left, we're done; otherwise, kick release_work */
+	if (hlist_empty(&ioc->cic_list))
+		kmem_cache_free(iocontext_cachep, ioc);
+	else
+		schedule_work(&ioc->release_work);
 }
+EXPORT_SYMBOL(put_io_context);
 
 /* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
@@ -92,10 +201,8 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks))
-		cfq_exit(ioc);
-
-	put_io_context(ioc);
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context(ioc, NULL);
 }
 
 static struct io_context *create_task_io_context(struct task_struct *task,
@@ -115,6 +222,7 @@ static struct io_context *create_task_io_context(struct task_struct *task,
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->cic_list);
+	INIT_WORK(&ioc->release_work, ioc_release_fn);
 
 	/* try to install, somebody might already have beaten us to it */
 	task_lock(task);