author      Tejun Heo <tj@kernel.org>        2011-12-13 18:33:39 -0500
committer   Jens Axboe <axboe@kernel.dk>     2011-12-13 18:33:39 -0500
commit      b2efa05265d62bc29f3a64400fad4b44340eedb8 (patch)
tree        4555f0b5f0025c099997b81f7b3f8bc48b44220d /block
parent      f1a4f4d35ff30a328d5ea28f6cc826b2083111d2 (diff)
block, cfq: unlink cfq_io_context's immediately
A cic is the association between an io_context and a request_queue. A cic
is linked from both the ioc and the q and should be destroyed when either
one goes away. As ioc and q each have their own lock, locking becomes a
bit complex: either lock order works for removal from one side but not
from the other.
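For orientation, here is a minimal sketch of that two-way linkage, abridged
to the fields this patch actually touches (the real kernel structures carry
many more members):

	/* Abridged sketch -- not the complete kernel definitions. */
	struct io_context {
		atomic_long_t refcount;
		spinlock_t lock;		/* inner lock, nests inside queue_lock */
		struct hlist_head cic_list;	/* all cic's belonging to this ioc */
	};

	struct cfq_io_context {
		struct io_context *ioc;		/* owning ioc */
		struct request_queue *q;	/* queue this cic belongs to */
		struct hlist_node cic_list;	/* link on ioc->cic_list */
		struct list_head queue_list;	/* link on cfqd->cic_list */
	};

Tearing a cic down therefore means unlinking it from two lists guarded by
two different locks, which is where the ordering problem comes from.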
Currently, cfq tries to circumvent this locking-order issue with RCU.
ioc->lock nests inside queue_lock, but the radix tree and the cic's are
also protected by RCU, allowing either side to walk its lists without
grabbing a lock.
This rather unconventional use of RCU quickly devolves into an extremely
fragile convolution. For example, the following oops is from cfqd going
away too soon after racing ioc and q exits.
general protection fault: 0000 [#1] PREEMPT SMP
CPU 2
Modules linked in:
[ 88.503444]
Pid: 599, comm: hexdump Not tainted 3.1.0-rc10-work+ #158 Bochs Bochs
RIP: 0010:[<ffffffff81397628>] [<ffffffff81397628>] cfq_exit_single_io_context+0x58/0xf0
...
Call Trace:
[<ffffffff81395a4a>] call_for_each_cic+0x5a/0x90
[<ffffffff81395ab5>] cfq_exit_io_context+0x15/0x20
[<ffffffff81389130>] exit_io_context+0x100/0x140
[<ffffffff81098a29>] do_exit+0x579/0x850
[<ffffffff81098d5b>] do_group_exit+0x5b/0xd0
[<ffffffff81098de7>] sys_exit_group+0x17/0x20
[<ffffffff81b02f2b>] system_call_fastpath+0x16/0x1b
The only real hot path here is cic lookup during request initialization,
and avoiding extra locking there requires only a very confined use of RCU.
This patch makes cic removal from both the ioc and the request_queue
perform double locking and unlink the cic immediately.
* From the q side, the change is almost trivial, as ioc->lock nests
  inside queue_lock: the queue exit path just grabs each ioc->lock as it
  walks its cic_list and unlinks the cic.
* From the ioc side, it's a bit more difficult because of the inverted
  lock order: the ioc needs its own lock to walk its cic_list but can't
  then grab the matching queue_lock, so it has to perform an unlock-relock
  dance.
Unlinking is now done wholly from put_io_context(), and the fast path is
optimized by using the queue_lock the caller already holds, which is by
far the most common case. If the ioc accessed multiple devices, the other
queue_locks are taken with trylock. In the unlikely case the fast path
fails, it falls back to the full double-locking dance from a workqueue
(see the sketch below).
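The sketch below condenses that two-stage scheme. It is illustrative only:
IRQ-flag handling, the lockdep nesting subclass, and queue reference
counting are stripped out; the complete versions are put_io_context() and
ioc_release_fn() in the diff that follows.

	/* Fast path: reuse the caller's queue_lock, trylock any other queue. */
	void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
	{
		struct request_queue *last_q = locked_q;

		if (!atomic_long_dec_and_test(&ioc->refcount))
			return;

		spin_lock(&ioc->lock);
		while (!hlist_empty(&ioc->cic_list)) {
			struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
						struct cfq_io_context, cic_list);
			struct request_queue *this_q = cic->q;

			if (this_q != last_q) {
				/* need a different queue_lock than the one held */
				if (last_q && last_q != locked_q)
					spin_unlock(last_q->queue_lock);
				last_q = NULL;
				if (!spin_trylock(this_q->queue_lock))
					break;		/* give up, defer to workqueue */
				last_q = this_q;
				continue;
			}
			/* both ioc->lock and this_q->queue_lock held: unlink now */
			cic->exit(cic);
			cic->release(cic);
		}
		if (last_q && last_q != locked_q)
			spin_unlock(last_q->queue_lock);
		spin_unlock(&ioc->lock);

		if (hlist_empty(&ioc->cic_list))
			kmem_cache_free(iocontext_cachep, ioc);
		else
			/* slow path: ioc_release_fn() does the full dance */
			schedule_work(&ioc->release_work);
	}

Breaking out of the loop on trylock failure is what keeps the fast path
free of new lock-ordering dependencies; whatever couldn't be handled stays
on ioc->cic_list for the workqueue to clean up.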
Double locking isn't the prettiest thing in the world, but it's *far*
simpler and more understandable than the RCU trick, and it doesn't add any
meaningful overhead.
This still leaves a lot of now-unnecessary RCU logic; future patches will
trim it.
-v2: Vivek pointed out that cic->q was being dereferenced after
cic->release() was called. Updated to use local variable @this_q
instead.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c     2
-rw-r--r--  block/blk-ioc.c      166
-rw-r--r--  block/cfq-iosched.c   44
3 files changed, 146 insertions(+), 66 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index dc00835aab6a..278869358049 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1649,7 +1649,7 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
 	if (ioc) {
 		ioc_cgroup_changed(ioc);
-		put_io_context(ioc);
+		put_io_context(ioc, NULL);
 	}
 }
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 6f59fbad93d9..fb23965595da 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,55 +29,164 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-static void cfq_dtor(struct io_context *ioc)
+/*
+ * Releasing ioc may nest into another put_io_context() leading to nested
+ * fast path release. As the ioc's can't be the same, this is okay but
+ * makes lockdep whine. Keep track of nesting and use it as subclass.
+ */
+#ifdef CONFIG_LOCKDEP
+#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
+#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
+#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
+#else
+#define ioc_release_depth(q)		0
+#define ioc_release_depth_inc(q)	do { } while (0)
+#define ioc_release_depth_dec(q)	do { } while (0)
+#endif
+
+/*
+ * Slow path for ioc release in put_io_context(). Performs double-lock
+ * dancing to unlink all cic's and then frees ioc.
+ */
+static void ioc_release_fn(struct work_struct *work)
 {
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	struct io_context *ioc = container_of(work, struct io_context,
+					      release_work);
+	struct request_queue *last_q = NULL;
+
+	spin_lock_irq(&ioc->lock);
+
+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			/*
+			 * Need to switch to @this_q. Once we release
+			 * @ioc->lock, it can go away along with @cic.
+			 * Hold on to it.
+			 */
+			__blk_get_queue(this_q);
+
+			/*
+			 * blk_put_queue() might sleep thanks to kobject
+			 * idiocy. Always release both locks, put and
+			 * restart.
+			 */
+			if (last_q) {
+				spin_unlock(last_q->queue_lock);
+				spin_unlock_irq(&ioc->lock);
+				blk_put_queue(last_q);
+			} else {
+				spin_unlock_irq(&ioc->lock);
+			}
+
+			last_q = this_q;
+			spin_lock_irq(this_q->queue_lock);
+			spin_lock(&ioc->lock);
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}
 
-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->dtor(ioc);
+	if (last_q) {
+		spin_unlock(last_q->queue_lock);
+		spin_unlock_irq(&ioc->lock);
+		blk_put_queue(last_q);
+	} else {
+		spin_unlock_irq(&ioc->lock);
 	}
+
+	kmem_cache_free(iocontext_cachep, ioc);
 }
 
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
+ * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero.
+ * zero. If the caller is holding queue_lock of a queue, it can indicate
+ * that with @locked_q. This is an optimization hint and the caller is
+ * allowed to pass in %NULL even when it's holding a queue_lock.
  */
-void put_io_context(struct io_context *ioc)
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 {
+	struct request_queue *last_q = locked_q;
+	unsigned long flags;
+
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+	if (locked_q)
+		lockdep_assert_held(locked_q->queue_lock);
 
 	if (!atomic_long_dec_and_test(&ioc->refcount))
 		return;
 
-	rcu_read_lock();
-	cfq_dtor(ioc);
-	rcu_read_unlock();
-
-	kmem_cache_free(iocontext_cachep, ioc);
-}
-EXPORT_SYMBOL(put_io_context);
+	/*
+	 * Destroy @ioc. This is a bit messy because cic's are chained
+	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
+	 * The inner ioc->lock should be held to walk our cic_list and then
+	 * for each cic the outer matching queue_lock should be grabbed.
+	 * ie. We need to do reverse-order double lock dancing.
+	 *
+	 * Another twist is that we are often called with one of the
+	 * matching queue_locks held as indicated by @locked_q, which
+	 * prevents performing double-lock dance for other queues.
+	 *
+	 * So, we do it in two stages. The fast path uses the queue_lock
+	 * the caller is holding and, if other queues need to be accessed,
+	 * uses trylock to avoid introducing locking dependency. This can
+	 * handle most cases, especially if @ioc was performing IO on only
+	 * single device.
+	 *
+	 * If trylock doesn't cut it, we defer to @ioc->release_work which
+	 * can do all the double-locking dancing.
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags,
+				 ioc_release_depth(locked_q));
+
+	while (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
+							 struct cfq_io_context,
+							 cic_list);
+		struct request_queue *this_q = cic->q;
+
+		if (this_q != last_q) {
+			if (last_q && last_q != locked_q)
+				spin_unlock(last_q->queue_lock);
+			last_q = NULL;
+
+			if (!spin_trylock(this_q->queue_lock))
+				break;
+			last_q = this_q;
+			continue;
+		}
+		ioc_release_depth_inc(this_q);
+		cic->exit(cic);
+		cic->release(cic);
+		ioc_release_depth_dec(this_q);
+	}
 
-static void cfq_exit(struct io_context *ioc)
-{
-	rcu_read_lock();
+	if (last_q && last_q != locked_q)
+		spin_unlock(last_q->queue_lock);
 
-	if (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic;
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
-		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
-								cic_list);
-		cic->exit(ioc);
-	}
-	rcu_read_unlock();
-}
+	/* if no cic's left, we're done; otherwise, kick release_work */
+	if (hlist_empty(&ioc->cic_list))
+		kmem_cache_free(iocontext_cachep, ioc);
+	else
+		schedule_work(&ioc->release_work);
 }
+EXPORT_SYMBOL(put_io_context);
 
 /* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
@@ -92,10 +201,8 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks))
-		cfq_exit(ioc);
-
-	put_io_context(ioc);
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context(ioc, NULL);
 }
 
 static struct io_context *create_task_io_context(struct task_struct *task,
@@ -115,6 +222,7 @@ static struct io_context *create_task_io_context(struct task_struct *task,
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->cic_list);
+	INIT_WORK(&ioc->release_work, ioc_release_fn);
 
 	/* try to install, somebody might already have beaten us to it */
 	task_lock(task);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e617b088c59b..6cc606560402 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1778,7 +1778,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqd->active_queue = NULL;
 
 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->ioc);
+		put_io_context(cfqd->active_cic->ioc, cfqd->queue);
 		cfqd->active_cic = NULL;
 	}
 }
@@ -2812,38 +2812,6 @@ static void cfq_exit_cic(struct cfq_io_context *cic)
 	}
 }
 
-static void cfq_exit_single_io_context(struct io_context *ioc,
-				       struct cfq_io_context *cic)
-{
-	struct cfq_data *cfqd = cic_to_cfqd(cic);
-
-	if (cfqd) {
-		struct request_queue *q = cfqd->queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-
-		/*
-		 * Ensure we get a fresh copy of the ->key to prevent
-		 * race between exiting task and queue
-		 */
-		smp_read_barrier_depends();
-		if (cic->key == cfqd)
-			cfq_exit_cic(cic);
-
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-}
-
-/*
- * The process that ioc belongs to has exited, we need to clean up
- * and put the internal structures we have that belongs to that process.
- */
-static void cfq_exit_io_context(struct io_context *ioc)
-{
-	call_for_each_cic(ioc, cfq_exit_single_io_context);
-}
-
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
@@ -2855,8 +2823,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		cic->ttime.last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
 		INIT_HLIST_NODE(&cic->cic_list);
-		cic->dtor = cfq_free_io_context;
-		cic->exit = cfq_exit_io_context;
+		cic->exit = cfq_exit_cic;
+		cic->release = cfq_release_cic;
 		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
@@ -3726,7 +3694,7 @@ static void cfq_put_request(struct request *rq)
 		BUG_ON(!cfqq->allocated[rw]);
 		cfqq->allocated[rw]--;
 
-		put_io_context(RQ_CIC(rq)->ioc);
+		put_io_context(RQ_CIC(rq)->ioc, cfqq->cfqd->queue);
 
 		rq->elevator_private[0] = NULL;
 		rq->elevator_private[1] = NULL;
@@ -3937,8 +3905,12 @@ static void cfq_exit_queue(struct elevator_queue *e)
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
 							struct cfq_io_context,
 							queue_list);
+		struct io_context *ioc = cic->ioc;
 
+		spin_lock(&ioc->lock);
 		cfq_exit_cic(cic);
+		cfq_release_cic(cic);
+		spin_unlock(&ioc->lock);
 	}
 
 	cfq_put_async_queues(cfqd);