path: root/block/cfq-iosched.c
author		Tejun Heo <tj@kernel.org>	2011-12-13 18:33:39 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:39 -0500
commit		b2efa05265d62bc29f3a64400fad4b44340eedb8 (patch)
tree		4555f0b5f0025c099997b81f7b3f8bc48b44220d /block/cfq-iosched.c
parent		f1a4f4d35ff30a328d5ea28f6cc826b2083111d2 (diff)
block, cfq: unlink cfq_io_context's immediately
A cic is the association between an io_context and a request_queue.  A
cic is linked from both ioc and q and should be destroyed when either
one goes away.  As ioc and q both have their own locks, locking becomes
a bit complex - both orders work for removal from one side but not from
the other.

Currently, cfq tries to circumvent this locking order issue with RCU.
ioc->lock nests inside queue_lock, but the radix tree and cic's are
also protected by RCU, allowing either side to walk its list without
grabbing the lock.

This rather unconventional use of RCU quickly devolves into extremely
fragile convolution.  e.g. the following oops is from cfqd going away
too soon after ioc and q exits raced.

 general protection fault: 0000 [#1] PREEMPT SMP
 CPU 2
 Modules linked in:
 [   88.503444]
 Pid: 599, comm: hexdump Not tainted 3.1.0-rc10-work+ #158 Bochs Bochs
 RIP: 0010:[<ffffffff81397628>]  [<ffffffff81397628>] cfq_exit_single_io_context+0x58/0xf0
 ...
 Call Trace:
  [<ffffffff81395a4a>] call_for_each_cic+0x5a/0x90
  [<ffffffff81395ab5>] cfq_exit_io_context+0x15/0x20
  [<ffffffff81389130>] exit_io_context+0x100/0x140
  [<ffffffff81098a29>] do_exit+0x579/0x850
  [<ffffffff81098d5b>] do_group_exit+0x5b/0xd0
  [<ffffffff81098de7>] sys_exit_group+0x17/0x20
  [<ffffffff81b02f2b>] system_call_fastpath+0x16/0x1b

The only real hot path here is cic lookup during request
initialization, and avoiding extra locking requires very confined use
of RCU.  This patch makes cic removal from both ioc and request_queue
perform double-locking and unlink immediately.

* From the q side, the change is almost trivial as ioc->lock nests
  inside queue_lock.  It just needs to grab each ioc->lock as it walks
  cic_list and unlink it.

* From the ioc side, it's a bit more difficult because of the inverted
  lock order.  ioc needs its lock to walk its cic_list but can't grab
  the matching queue_lock and needs to perform unlock-relock dancing.
  Unlinking is now wholly done from put_io_context() and the fast path
  is optimized by using the queue_lock the caller already holds, which
  is by far the most common case.  If the ioc accessed multiple
  devices, it tries with trylock.  In the unlikely case of fast path
  failure, it falls back to the full double-locking dance from a
  workqueue (a simplified sketch of this flow follows this message).

Double-locking isn't the prettiest thing in the world, but it's *far*
simpler and more understandable than the RCU trick, without adding any
meaningful overhead.

This still leaves a lot of now-unnecessary RCU logic.  Future patches
will trim it.

-v2: Vivek pointed out that cic->q was being dereferenced after
     cic->release() was called.  Updated to use local variable @this_q
     instead.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
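To make the ioc-side flow concrete, here is a minimal sketch of the
unlink loop described in the second bullet above.  Several names are
assumptions for illustration only: the helper name ioc_release_cics()
is hypothetical, the release_work item on the io_context stands in for
the workqueue fallback, and the cic->q, cic->exit and cic->release
hooks are the ones this series introduces.  The caller-held queue_lock
fast path is omitted for brevity; this is a sketch of the scheme, not
the actual put_io_context() in block/blk-ioc.c.

/*
 * Sketch only: walk ioc->cic_list holding ioc->lock and unlink each
 * cic.  Because ioc->lock nests *inside* queue_lock, the matching
 * queue_lock may only be taken opportunistically from here.
 */
static void ioc_release_cics(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	while (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic =
			hlist_entry(ioc->cic_list.first,
				    struct cfq_io_context, cic_list);
		/*
		 * Per the -v2 note: cic may be freed by ->release(),
		 * so remember the queue in a local variable.
		 */
		struct request_queue *this_q = cic->q;

		if (!spin_trylock(this_q->queue_lock)) {
			/*
			 * Inverted lock order unavailable - defer the
			 * full double-locking dance to a workqueue.
			 */
			schedule_work(&ioc->release_work);
			break;
		}
		cic->exit(cic);
		cic->release(cic);
		spin_unlock(this_q->queue_lock);
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
}

The trylock is what keeps the inverted order deadlock-free: this path
never blocks on a queue_lock while holding ioc->lock, and the rare
contended case is deferred to process context, where both locks can be
taken in the proper nesting order.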
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	44
1 file changed, 8 insertions(+), 36 deletions(-)
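The q-side counterpart is the natural nesting case; the cfq_exit_queue()
hunk at the bottom of this diff implements it, extracted here as a
standalone sketch under the same assumptions as above (the helper name
queue_release_cics() is hypothetical, and the caller is assumed to hold
queue_lock):

/*
 * Sketch only: with queue_lock already held by the caller, each cic's
 * ioc->lock can simply be taken inside it - the correct nesting
 * order - and the cic unlinked from both sides.
 */
static void queue_release_cics(struct cfq_data *cfqd)
{
	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic =
			list_first_entry(&cfqd->cic_list,
					 struct cfq_io_context, queue_list);
		struct io_context *ioc = cic->ioc;

		spin_lock(&ioc->lock);	/* nests inside queue_lock */
		cic->exit(cic);		/* detach from the queue side */
		cic->release(cic);	/* detach from the ioc side */
		spin_unlock(&ioc->lock);
	}
}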
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e617b088c59b..6cc606560402 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1778,7 +1778,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqd->active_queue = NULL;
 
 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->ioc);
+		put_io_context(cfqd->active_cic->ioc, cfqd->queue);
 		cfqd->active_cic = NULL;
 	}
 }
@@ -2812,38 +2812,6 @@ static void cfq_exit_cic(struct cfq_io_context *cic)
 	}
 }
 
-static void cfq_exit_single_io_context(struct io_context *ioc,
-				       struct cfq_io_context *cic)
-{
-	struct cfq_data *cfqd = cic_to_cfqd(cic);
-
-	if (cfqd) {
-		struct request_queue *q = cfqd->queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-
-		/*
-		 * Ensure we get a fresh copy of the ->key to prevent
-		 * race between exiting task and queue
-		 */
-		smp_read_barrier_depends();
-		if (cic->key == cfqd)
-			cfq_exit_cic(cic);
-
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-}
-
-/*
- * The process that ioc belongs to has exited, we need to clean up
- * and put the internal structures we have that belongs to that process.
- */
-static void cfq_exit_io_context(struct io_context *ioc)
-{
-	call_for_each_cic(ioc, cfq_exit_single_io_context);
-}
-
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
@@ -2855,8 +2823,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		cic->ttime.last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
 		INIT_HLIST_NODE(&cic->cic_list);
-		cic->dtor = cfq_free_io_context;
-		cic->exit = cfq_exit_io_context;
+		cic->exit = cfq_exit_cic;
+		cic->release = cfq_release_cic;
 		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
@@ -3726,7 +3694,7 @@ static void cfq_put_request(struct request *rq)
 	BUG_ON(!cfqq->allocated[rw]);
 	cfqq->allocated[rw]--;
 
-	put_io_context(RQ_CIC(rq)->ioc);
+	put_io_context(RQ_CIC(rq)->ioc, cfqq->cfqd->queue);
 
 	rq->elevator_private[0] = NULL;
 	rq->elevator_private[1] = NULL;
@@ -3937,8 +3905,12 @@ static void cfq_exit_queue(struct elevator_queue *e)
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
 							struct cfq_io_context,
 							queue_list);
+		struct io_context *ioc = cic->ioc;
 
+		spin_lock(&ioc->lock);
 		cfq_exit_cic(cic);
+		cfq_release_cic(cic);
+		spin_unlock(&ioc->lock);
 	}
 
 	cfq_put_async_queues(cfqd);