author     Tejun Heo <tj@kernel.org>       2011-12-13 18:33:42 -0500
committer  Jens Axboe <axboe@kernel.dk>    2011-12-13 18:33:42 -0500
commit     7e5a8794492e43e9eebb68a98a23be055888ccd0
tree       cc049a23b2c994f910d3101860bc1c2ecb7aa35f /block
parent     3d3c2379feb177a5fd55bb0ed76776dc9d4f3243
block, cfq: move io_cq exit/release to blk-ioc.c
With the kmem_cache now managed by blk-ioc, io_cq exit/release can be moved
to blk-ioc too. The odd io_cq->exit/release() callbacks are replaced with a
single elevator_ops->elevator_exit_icq_fn(); unlinking from both ioc and q,
and the freeing, are handled automatically by blk-ioc. The elevator
operation only needs to perform the exit work specific to the elevator
itself; in cfq's case, exiting the cfqq's.
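
For orientation, the hook amounts to a single callback that blk-ioc invokes
while it already holds both ioc->lock and queue_lock. The one-liner below is
a sketch inferred from the cfq hunks in this diff (cfq_exit_icq() takes a
struct io_cq *); the verbatim declaration lives in include/linux/elevator.h,
which is outside this patch, and uses a typedef there:

	/* sketch: called by blk-ioc with both ioc->lock and queue_lock held */
	void (*elevator_exit_icq_fn)(struct io_cq *icq);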
Also, clearing of io_cq's on q detach is moved to block core and
automatically performed on elevator switch and q release.
Because the q an io_cq points to might be freed before the RCU callback for
the io_cq runs, blk-ioc code must remember which cache the io_cq needs to be
freed to when the io_cq is released. A new field, io_cq->__rcu_icq_cache, is
added for this purpose. As both the new field and the rcu_head are used only
after the io_cq is released, while the q_node and ioc_node fields are not
used past that point, each pair is placed in a union.
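
The io_cq layout this implies, reconstructed here for illustration (the
actual definition lives in include/linux/iocontext.h, which this diff does
not touch), pairs each linkage field with the release-time field that reuses
its storage:

	struct io_cq {
		struct request_queue    *q;
		struct io_context       *ioc;

		union {
			struct list_head   q_node;            /* while linked: q->icq_list */
			struct kmem_cache  *__rcu_icq_cache;  /* after release: cache to free to */
		};
		union {
			struct hlist_node  ioc_node;          /* while linked: ioc->icq_list */
			struct rcu_head    __rcu_head;        /* after release: RCU deferral */
		};
	};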
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')

 block/blk-ioc.c     | 76
 block/blk-sysfs.c   |  6
 block/blk.h         |  1
 block/cfq-iosched.c | 47
 block/elevator.c    |  3

 5 files changed, 78 insertions(+), 55 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 87ecc98b8ade..0910a5584d38 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -44,6 +44,51 @@ EXPORT_SYMBOL(get_io_context);
 #define ioc_release_depth_dec(q)        do { } while (0)
 #endif
 
+static void icq_free_icq_rcu(struct rcu_head *head)
+{
+        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
+
+        kmem_cache_free(icq->__rcu_icq_cache, icq);
+}
+
+/*
+ * Exit and free an icq.  Called with both ioc and q locked.
+ */
+static void ioc_exit_icq(struct io_cq *icq)
+{
+        struct io_context *ioc = icq->ioc;
+        struct request_queue *q = icq->q;
+        struct elevator_type *et = q->elevator->type;
+
+        lockdep_assert_held(&ioc->lock);
+        lockdep_assert_held(q->queue_lock);
+
+        radix_tree_delete(&ioc->icq_tree, icq->q->id);
+        hlist_del_init(&icq->ioc_node);
+        list_del_init(&icq->q_node);
+
+        /*
+         * Both setting lookup hint to and clearing it from @icq are done
+         * under queue_lock.  If it's not pointing to @icq now, it never
+         * will.  Hint assignment itself can race safely.
+         */
+        if (rcu_dereference_raw(ioc->icq_hint) == icq)
+                rcu_assign_pointer(ioc->icq_hint, NULL);
+
+        if (et->ops.elevator_exit_icq_fn) {
+                ioc_release_depth_inc(q);
+                et->ops.elevator_exit_icq_fn(icq);
+                ioc_release_depth_dec(q);
+        }
+
+        /*
+         * @icq->q might have gone away by the time RCU callback runs
+         * making it impossible to determine icq_cache.  Record it in @icq.
+         */
+        icq->__rcu_icq_cache = et->icq_cache;
+        call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
+}
+
 /*
  * Slow path for ioc release in put_io_context().  Performs double-lock
  * dancing to unlink all icq's and then frees ioc.
@@ -87,10 +132,7 @@ static void ioc_release_fn(struct work_struct *work)
                        spin_lock(&ioc->lock);
                        continue;
                }
-               ioc_release_depth_inc(this_q);
-               icq->exit(icq);
-               icq->release(icq);
-               ioc_release_depth_dec(this_q);
+               ioc_exit_icq(icq);
        }
 
        if (last_q) {
@@ -167,10 +209,7 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
                        last_q = this_q;
                        continue;
                }
-               ioc_release_depth_inc(this_q);
-               icq->exit(icq);
-               icq->release(icq);
-               ioc_release_depth_dec(this_q);
+               ioc_exit_icq(icq);
        }
 
        if (last_q && last_q != locked_q)
@@ -203,6 +242,27 @@ void exit_io_context(struct task_struct *task)
        put_io_context(ioc, NULL);
 }
 
+/**
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
+ *
+ * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
+ */
+void ioc_clear_queue(struct request_queue *q)
+{
+        lockdep_assert_held(q->queue_lock);
+
+        while (!list_empty(&q->icq_list)) {
+                struct io_cq *icq = list_entry(q->icq_list.next,
+                                               struct io_cq, q_node);
+                struct io_context *ioc = icq->ioc;
+
+                spin_lock(&ioc->lock);
+                ioc_exit_icq(icq);
+                spin_unlock(&ioc->lock);
+        }
+}
+
 void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
                                int node)
 {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 5b4b4ab5e785..cf150011d808 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -479,8 +479,12 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_sync_queue(q);
 
-       if (q->elevator)
+       if (q->elevator) {
+               spin_lock_irq(q->queue_lock);
+               ioc_clear_queue(q);
+               spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
+       }
 
        blk_throtl_exit(q);
 
diff --git a/block/blk.h b/block/blk.h
index 3c510a4b5054..ed4d9bf2ab16 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -200,6 +200,7 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
+void ioc_clear_queue(struct request_queue *q);
 
 void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
                                int node);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 06e59abcb57f..f6d315551496 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2674,26 +2674,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
        cfq_put_cfqg(cfqg);
 }
 
-static void cfq_icq_free_rcu(struct rcu_head *head)
-{
-       kmem_cache_free(cfq_icq_pool,
-                       icq_to_cic(container_of(head, struct io_cq, rcu_head)));
-}
-
-static void cfq_icq_free(struct io_cq *icq)
-{
-       call_rcu(&icq->rcu_head, cfq_icq_free_rcu);
-}
-
-static void cfq_release_icq(struct io_cq *icq)
-{
-       struct io_context *ioc = icq->ioc;
-
-       radix_tree_delete(&ioc->icq_tree, icq->q->id);
-       hlist_del(&icq->ioc_node);
-       cfq_icq_free(icq);
-}
-
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
 {
        struct cfq_queue *__cfqq, *next;
@@ -2731,17 +2711,6 @@ static void cfq_exit_icq(struct io_cq *icq)
 {
        struct cfq_io_cq *cic = icq_to_cic(icq);
        struct cfq_data *cfqd = cic_to_cfqd(cic);
-       struct io_context *ioc = icq->ioc;
-
-       list_del_init(&icq->q_node);
-
-       /*
-        * Both setting lookup hint to and clearing it from @icq are done
-        * under queue_lock.  If it's not pointing to @icq now, it never
-        * will.  Hint assignment itself can race safely.
-        */
-       if (rcu_dereference_raw(ioc->icq_hint) == icq)
-               rcu_assign_pointer(ioc->icq_hint, NULL);
 
        if (cic->cfqq[BLK_RW_ASYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
@@ -2764,8 +2733,6 @@ static struct cfq_io_cq *cfq_alloc_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
                cic->ttime.last_end_request = jiffies;
                INIT_LIST_HEAD(&cic->icq.q_node);
                INIT_HLIST_NODE(&cic->icq.ioc_node);
-               cic->icq.exit = cfq_exit_icq;
-               cic->icq.release = cfq_release_icq;
        }
 
        return cic;
@@ -3034,7 +3001,7 @@ out:
        if (ret)
                printk(KERN_ERR "cfq: icq link failed!\n");
        if (icq)
-               cfq_icq_free(icq);
+               kmem_cache_free(cfq_icq_pool, icq);
        return ret;
 }
 
@@ -3774,17 +3741,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
        if (cfqd->active_queue)
                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
-       while (!list_empty(&q->icq_list)) {
-               struct io_cq *icq = list_entry(q->icq_list.next,
-                                              struct io_cq, q_node);
-               struct io_context *ioc = icq->ioc;
-
-               spin_lock(&ioc->lock);
-               cfq_exit_icq(icq);
-               cfq_release_icq(icq);
-               spin_unlock(&ioc->lock);
-       }
-
        cfq_put_async_queues(cfqd);
        cfq_release_cfq_groups(cfqd);
 
@@ -4019,6 +3975,7 @@ static struct elevator_type iosched_cfq = {
        .elevator_completed_req_fn =    cfq_completed_request,
        .elevator_former_req_fn =       elv_rb_former_request,
        .elevator_latter_req_fn =       elv_rb_latter_request,
+       .elevator_exit_icq_fn =         cfq_exit_icq,
        .elevator_set_req_fn =          cfq_set_request,
        .elevator_put_req_fn =          cfq_put_request,
        .elevator_may_queue_fn =        cfq_may_queue,
diff --git a/block/elevator.c b/block/elevator.c
index cca049fb45c8..91e18f8af9be 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -979,8 +979,9 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
                goto fail_register;
        }
 
-       /* done, replace the old one with new one and turn off BYPASS */
+       /* done, clear io_cq's, switch elevators and turn off BYPASS */
        spin_lock_irq(q->queue_lock);
+       ioc_clear_queue(q);
        old_elevator = q->elevator;
        q->elevator = e;
        spin_unlock_irq(q->queue_lock);