author     Tejun Heo <tj@kernel.org>        2011-12-13 18:33:39 -0500
committer  Jens Axboe <axboe@kernel.dk>     2011-12-13 18:33:39 -0500
commit     b2efa05265d62bc29f3a64400fad4b44340eedb8
tree       4555f0b5f0025c099997b81f7b3f8bc48b44220d
parent     f1a4f4d35ff30a328d5ea28f6cc826b2083111d2
block, cfq: unlink cfq_io_context's immediately
cic is the association between an io_context and a request_queue.  A cic
is linked from both the ioc and the q and should be destroyed when
either one goes away.  As ioc and q each have their own lock, locking
becomes a bit complex - each side's natural lock order works for removal
from that side but not from the other.
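To make the ordering problem concrete, here is an illustrative sketch of
the two removal paths and the lock order each naturally wants (not the
actual cfq code; the function names are made up):

    /*
     * Illustrative sketch only.  A cic hangs off both the ioc and the q,
     * so tearing one down needs both ioc->lock and queue_lock.
     */

    /* q-side exit: queue_lock already held, ioc->lock nests inside - OK */
    static void unlink_cic_from_q(struct request_queue *q,
                                  struct cfq_io_context *cic)
    {
            lockdep_assert_held(q->queue_lock);
            spin_lock(&cic->ioc->lock);     /* inner lock, correct order */
            /* ... unlink cic from both the q and ioc lists ... */
            spin_unlock(&cic->ioc->lock);
    }

    /*
     * ioc-side exit: ioc->lock must be held to walk ioc->cic_list, but
     * grabbing the matching queue_lock from under it inverts the order
     * above and can deadlock against the q-side path.
     */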
Currently, cfq tries to circumvent this locking order issue with RCU.
ioc->lock nests inside queue_lock, but the radix tree and the cic's are
also protected by RCU, allowing either side to walk its lists without
grabbing either lock.
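The lockless ioc-side walk is roughly the following shape (a paraphrase
of the existing call_for_each_cic() seen in the trace below, not a
verbatim copy):

    /* ioc side walks its cic's without queue_lock: only rcu_read_lock()
     * is taken, so the cic/cfqd can go away under the walker when the
     * ioc and q exits race. */
    static void call_for_each_cic(struct io_context *ioc,
                                  void (*func)(struct io_context *,
                                               struct cfq_io_context *))
    {
            struct cfq_io_context *cic;
            struct hlist_node *n;

            rcu_read_lock();
            hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
                    func(ioc, cic);
            rcu_read_unlock();
    }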
This rather unconventional use of RCU quickly devolves into an extremely
fragile convolution.  For example, the following oops is from cfqd going
away too soon after an ioc exit and a q exit raced.
general protection fault: 0000 [#1] PREEMPT SMP
CPU 2
Modules linked in:
[ 88.503444]
Pid: 599, comm: hexdump Not tainted 3.1.0-rc10-work+ #158 Bochs Bochs
RIP: 0010:[<ffffffff81397628>] [<ffffffff81397628>] cfq_exit_single_io_context+0x58/0xf0
...
Call Trace:
[<ffffffff81395a4a>] call_for_each_cic+0x5a/0x90
[<ffffffff81395ab5>] cfq_exit_io_context+0x15/0x20
[<ffffffff81389130>] exit_io_context+0x100/0x140
[<ffffffff81098a29>] do_exit+0x579/0x850
[<ffffffff81098d5b>] do_group_exit+0x5b/0xd0
[<ffffffff81098de7>] sys_exit_group+0x17/0x20
[<ffffffff81b02f2b>] system_call_fastpath+0x16/0x1b
The only real hot path here is cic lookup during request initialization,
and avoiding extra locking there requires only a very confined use of
RCU.  This patch makes cic removal from both ioc and request_queue
perform double-locking and unlink immediately.
* From the q side, the change is almost trivial as ioc->lock nests
  inside queue_lock.  It just needs to grab each ioc->lock while walking
  cic_list and unlink the cic.
* From the ioc side, it's a bit more difficult because of the inverted
  lock order.  ioc needs its lock to walk its cic_list but can't grab
  the matching queue_lock, so it has to perform an unlock-relock dance.
  Unlinking is now wholly done from put_io_context() and the fast path
  is optimized by using the queue_lock the caller already holds, which
  is by far the most common case.  If the ioc accessed multiple devices,
  the other queue_locks are attempted with trylock.  In the unlikely
  case the fast path fails, it falls back to the full double-locking
  dance from a workqueue; a simplified sketch of this path follows below.
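In rough terms, the new ioc-side teardown in put_io_context() looks like
the sketch below.  This is a simplified illustration, not the code from
the patch: the refcount sanity checks, irq flag handling and the lockdep
release-depth annotation are omitted, and beyond the names visible in
the diff (cic_list, release_work, exit/release, locked_q) and the block
layer's iocontext_cachep, everything is assumed.

    void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
    {
            if (!atomic_long_dec_and_test(&ioc->refcount))
                    return;

            spin_lock_irq(&ioc->lock);

            while (!hlist_empty(&ioc->cic_list)) {
                    struct cfq_io_context *cic =
                            hlist_entry(ioc->cic_list.first,
                                        struct cfq_io_context, cic_list);
                    /* cache the queue before release frees the cic (-v2 note) */
                    struct request_queue *this_q = cic->q;

                    if (this_q != locked_q &&
                        !spin_trylock(this_q->queue_lock)) {
                            /* can't take queue_lock in the wrong order here;
                             * punt the remaining unlinking to the workqueue */
                            spin_unlock_irq(&ioc->lock);
                            schedule_work(&ioc->release_work);
                            return;
                    }

                    cic->exit(cic);
                    cic->release(cic);      /* unlinks from ioc and q, frees cic */

                    if (this_q != locked_q)
                            spin_unlock(this_q->queue_lock);
            }

            spin_unlock_irq(&ioc->lock);
            kmem_cache_free(iocontext_cachep, ioc);
    }

The common case - a task that only ever did I/O to the queue the caller
already has locked - never takes any extra queue_lock; trylock only
matters when the ioc spans multiple devices.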
Double-locking isn't the prettiest thing in the world, but it's *far*
simpler and more understandable than the RCU trick without adding any
meaningful overhead.
This still leaves a lot of now-unnecessary RCU logic.  Future patches
will trim it.
-v2: Vivek pointed out that cic->q was being dereferenced after
cic->release() was called. Updated to use local variable @this_q
instead.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/blkdev.h    |  3 +++
-rw-r--r--  include/linux/iocontext.h | 12 ++++++++----
2 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d1b6f4ed1f96..65c2f8c70089 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -393,6 +393,9 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
+#ifdef CONFIG_LOCKDEP
+	int			ioc_release_depth;
+#endif
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 2c2b6da96b3c..01e863128780 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -3,6 +3,7 @@
 
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
 
 struct cfq_queue;
 struct cfq_ttime {
@@ -33,8 +34,8 @@ struct cfq_io_context {
 
 	unsigned long changed;
 
-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
+	void (*exit)(struct cfq_io_context *);
+	void (*release)(struct cfq_io_context *);
 
 	struct rcu_head rcu_head;
 };
@@ -61,6 +62,8 @@ struct io_context {
 	struct radix_tree_root radix_root;
 	struct hlist_head cic_list;
 	void __rcu *ioc_data;
+
+	struct work_struct release_work;
 };
 
 static inline struct io_context *ioc_task_link(struct io_context *ioc)
@@ -79,7 +82,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
-void put_io_context(struct io_context *ioc);
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
@@ -87,7 +90,8 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
 #else
 struct io_context;
-static inline void put_io_context(struct io_context *ioc) { }
+static inline void put_io_context(struct io_context *ioc,
+				  struct request_queue *locked_q) { }
 static inline void exit_io_context(struct task_struct *task) { }
 #endif
 
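As the iocontext.h hunks above show, put_io_context() now takes the
queue whose queue_lock the caller already holds, or NULL when none is
held.  A hypothetical caller-side sketch (the surrounding variables are
assumptions, not code from the patch):

    /* task exit: no queue_lock held, put_io_context() may punt to the
     * workqueue if it cannot trylock the queues it needs */
    put_io_context(ioc, NULL);

    /* completion path inside the elevator: q->queue_lock is already held,
     * so the common single-device case unlinks the cic with no extra
     * locking at all */
    put_io_context(cic->ioc, q);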