author	Tejun Heo <tj@kernel.org>	2011-12-13 18:33:42 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:42 -0500
commit	7e5a8794492e43e9eebb68a98a23be055888ccd0 (patch)
tree	cc049a23b2c994f910d3101860bc1c2ecb7aa35f /block/blk-ioc.c
parent	3d3c2379feb177a5fd55bb0ed76776dc9d4f3243 (diff)
block, cfq: move io_cq exit/release to blk-ioc.c
With kmem_cache managed by blk-ioc, io_cq exit/release can be moved to blk-ioc too. The odd ->io_cq->exit/release() callbacks are replaced with elevator_ops->elevator_exit_icq_fn(); unlinking from both ioc and q, and freeing, are handled automatically by blk-ioc. The elevator operation only needs to perform the exit work specific to the elevator - in cfq's case, exiting the cfqq's.

Also, clearing of io_cq's on q detach is moved to block core and performed automatically on elevator switch and q release.

Because the q an io_cq points to might be freed before the RCU callback for the io_cq runs, blk-ioc code must remember which cache the io_cq needs to be freed to when the io_cq is released. The new field io_cq->__rcu_icq_cache is added for this purpose. As both the new field and rcu_head are used only after the io_cq is released, while the q_node/ioc_node fields aren't used at that point, the two pairs are placed in unions.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
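For orientation, here is a minimal sketch of the union layout the message describes. The member pairing and the surrounding fields are assumptions for illustration, not a verbatim copy of include/linux/iocontext.h; only the names __rcu_icq_cache, __rcu_head, q_node and ioc_node come from the patch.

/* Sketch only: exact member order and surrounding fields are assumed. */
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node/ioc_node link the icq while it is live; once
	 * ioc_exit_icq() has unlinked it they are dead storage, so the
	 * release-time fields can share their space.
	 */
	union {
		struct list_head	q_node;		  /* on q->icq_list */
		struct kmem_cache	*__rcu_icq_cache; /* cache to free into */
	};
	union {
		struct hlist_node	ioc_node;	  /* on ioc->icq_list */
		struct rcu_head		__rcu_head;	  /* deferred kmem_cache_free() */
	};
};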
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--	block/blk-ioc.c	76
1 file changed, 68 insertions(+), 8 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 87ecc98b8ade..0910a5584d38 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -44,6 +44,51 @@ EXPORT_SYMBOL(get_io_context);
 #define ioc_release_depth_dec(q)	do { } while (0)
 #endif
 
+static void icq_free_icq_rcu(struct rcu_head *head)
+{
+	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
+
+	kmem_cache_free(icq->__rcu_icq_cache, icq);
+}
+
+/*
+ * Exit and free an icq.  Called with both ioc and q locked.
+ */
+static void ioc_exit_icq(struct io_cq *icq)
+{
+	struct io_context *ioc = icq->ioc;
+	struct request_queue *q = icq->q;
+	struct elevator_type *et = q->elevator->type;
+
+	lockdep_assert_held(&ioc->lock);
+	lockdep_assert_held(q->queue_lock);
+
+	radix_tree_delete(&ioc->icq_tree, icq->q->id);
+	hlist_del_init(&icq->ioc_node);
+	list_del_init(&icq->q_node);
+
+	/*
+	 * Both setting lookup hint to and clearing it from @icq are done
+	 * under queue_lock.  If it's not pointing to @icq now, it never
+	 * will.  Hint assignment itself can race safely.
+	 */
+	if (rcu_dereference_raw(ioc->icq_hint) == icq)
+		rcu_assign_pointer(ioc->icq_hint, NULL);
+
+	if (et->ops.elevator_exit_icq_fn) {
+		ioc_release_depth_inc(q);
+		et->ops.elevator_exit_icq_fn(icq);
+		ioc_release_depth_dec(q);
+	}
+
+	/*
+	 * @icq->q might have gone away by the time RCU callback runs
+	 * making it impossible to determine icq_cache.  Record it in @icq.
+	 */
+	icq->__rcu_icq_cache = et->icq_cache;
+	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
+}
+
 /*
  * Slow path for ioc release in put_io_context().  Performs double-lock
  * dancing to unlink all icq's and then frees ioc.
@@ -87,10 +132,7 @@ static void ioc_release_fn(struct work_struct *work)
 			spin_lock(&ioc->lock);
 			continue;
 		}
-		ioc_release_depth_inc(this_q);
-		icq->exit(icq);
-		icq->release(icq);
-		ioc_release_depth_dec(this_q);
+		ioc_exit_icq(icq);
 	}
 
 	if (last_q) {
@@ -167,10 +209,7 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 			last_q = this_q;
 			continue;
 		}
-		ioc_release_depth_inc(this_q);
-		icq->exit(icq);
-		icq->release(icq);
-		ioc_release_depth_dec(this_q);
+		ioc_exit_icq(icq);
 	}
 
 	if (last_q && last_q != locked_q)
@@ -203,6 +242,27 @@ void exit_io_context(struct task_struct *task)
 	put_io_context(ioc, NULL);
 }
 
+/**
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
+ *
+ * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
+ */
+void ioc_clear_queue(struct request_queue *q)
+{
+	lockdep_assert_held(q->queue_lock);
+
+	while (!list_empty(&q->icq_list)) {
+		struct io_cq *icq = list_entry(q->icq_list.next,
+					       struct io_cq, q_node);
+		struct io_context *ioc = icq->ioc;
+
+		spin_lock(&ioc->lock);
+		ioc_exit_icq(icq);
+		spin_unlock(&ioc->lock);
+	}
+}
+
 void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 				int node)
 {
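As a usage sketch of the new hook from an elevator's point of view: the elevator name "foo", its private struct, and the foo_exit_private() helper below are hypothetical; only elevator_ops->elevator_exit_icq_fn() and the pattern of embedding an io_cq in the elevator's per-(task, queue) data come from this series.

/* Hypothetical elevator-private icq wrapper (cfq keeps its own type the
 * same way); the embedded io_cq lets container_of() recover the wrapper. */
struct foo_io_cq {
	struct io_cq	icq;		/* blk-ioc managed part */
	int		nr_dispatched;	/* illustrative private state */
};

/* Exit hook: only elevator-specific teardown happens here.  Unlinking from
 * ioc/q and the RCU-deferred kmem_cache_free() are done by blk-ioc around
 * this call in ioc_exit_icq(). */
static void foo_exit_icq(struct io_cq *icq)
{
	struct foo_io_cq *fic = container_of(icq, struct foo_io_cq, icq);

	foo_exit_private(fic);		/* hypothetical helper */
}

static struct elevator_type iosched_foo = {
	.ops = {
		.elevator_exit_icq_fn	= foo_exit_icq,
		/* ... remaining elevator_ops ... */
	},
	.elevator_name	= "foo",
};

On the queue side nothing extra is needed: elevator switch and queue release call ioc_clear_queue() under q->queue_lock, which walks q->icq_list and funnels every icq through ioc_exit_icq() shown above.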