author		Tejun Heo <tj@kernel.org>	2011-12-13 18:33:42 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:42 -0500
commit		47fdd4ca96bf4b28ac4d05d7a6e382df31d3d758 (patch)
tree		bcb928575b66511345b00102a7e8cace84526e3e /block
parent		a612fddf0d8090f2877305c9168b6c1a34fb5d90 (diff)
block, cfq: move io_cq lookup to blk-ioc.c
Now that all io_cq related data structures are in the block core layer,
io_cq lookup can be moved from cfq-iosched.c to blk-ioc.c.
The lookup logic from cfq_cic_lookup() is moved to ioc_lookup_icq() with
parameter/return type changes (cfqd -> request_queue, cfq_io_cq ->
io_cq), and cfq_cic_lookup() becomes a thin wrapper around
ioc_lookup_icq().
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
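For quick reference, the interface change reads as follows (prototypes and wrapper copied from the diff below); callers keep using cfq_cic_lookup(), which now merely forwards to the block-core helper:

	/* old: lookup implemented privately in cfq-iosched.c */
	static struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
						struct io_context *ioc);

	/* new: generic lookup exported from blk-ioc.c */
	struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);

	/* cfq-iosched.c keeps its old interface as a thin wrapper */
	static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
						       struct io_context *ioc)
	{
		if (ioc)
			return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
		return NULL;
	}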
Diffstat (limited to 'block')
-rw-r--r--	block/blk-ioc.c		36
-rw-r--r--	block/blk.h		1
-rw-r--r--	block/cfq-iosched.c	48
3 files changed, 45 insertions, 40 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index dc5e69d335a0..87ecc98b8ade 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -266,6 +266,42 @@ struct io_context *get_task_io_context(struct task_struct *task,
 }
 EXPORT_SYMBOL(get_task_io_context);
 
+/**
+ * ioc_lookup_icq - lookup io_cq from ioc
+ * @ioc: the associated io_context
+ * @q: the associated request_queue
+ *
+ * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
+ * with @q->queue_lock held.
+ */
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
+{
+	struct io_cq *icq;
+
+	lockdep_assert_held(q->queue_lock);
+
+	/*
+	 * icq's are indexed from @ioc using radix tree and hint pointer,
+	 * both of which are protected with RCU.  All removals are done
+	 * holding both q and ioc locks, and we're holding q lock - if we
+	 * find a icq which points to us, it's guaranteed to be valid.
+	 */
+	rcu_read_lock();
+	icq = rcu_dereference(ioc->icq_hint);
+	if (icq && icq->q == q)
+		goto out;
+
+	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
+	if (icq && icq->q == q)
+		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
+	else
+		icq = NULL;
+out:
+	rcu_read_unlock();
+	return icq;
+}
+EXPORT_SYMBOL(ioc_lookup_icq);
+
 void ioc_set_changed(struct io_context *ioc, int which)
 {
 	struct io_cq *icq;
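The lookup is a two-level scheme: the per-io_context hint pointer (the last icq that matched) is checked first, and only on a miss does the code fall back to the radix tree keyed by q->id, refreshing the hint afterwards. Because removals take both the queue and ioc locks and the caller holds queue_lock, a matching icq cannot disappear under the lookup, and a lost race on the hint update merely costs one extra tree walk later. Below is a stand-alone analogue of that hint-then-index pattern, stripped of RCU and kernel types; all names are illustrative and not from the patch:

	#include <stdio.h>

	struct ctx {
		int qid;		/* which "queue" this context belongs to */
	};

	struct ctx_index {
		struct ctx *hint;	/* last hit; may be stale, only an optimization */
		struct ctx *slots[8];	/* stand-in for the radix tree, indexed by qid */
	};

	/* Check the hint first, fall back to the full index, refresh the hint. */
	static struct ctx *ctx_lookup(struct ctx_index *idx, int qid)
	{
		struct ctx *c = idx->hint;

		if (c && c->qid == qid)			/* fast path: hint still matches */
			return c;

		c = (qid >= 0 && qid < 8) ? idx->slots[qid] : NULL;
		if (c && c->qid == qid)
			idx->hint = c;			/* losing a race here would be harmless */
		else
			c = NULL;
		return c;
	}

	int main(void)
	{
		struct ctx a = { .qid = 3 };
		struct ctx_index idx = { .hint = NULL, .slots = { [3] = &a } };

		printf("first lookup:  %p\n", (void *)ctx_lookup(&idx, 3));	/* via slots[] */
		printf("second lookup: %p\n", (void *)ctx_lookup(&idx, 3));	/* via hint */
		printf("miss:          %p\n", (void *)ctx_lookup(&idx, 5));	/* NULL */
		return 0;
	}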
diff --git a/block/blk.h b/block/blk.h
index 4943770e0792..3c510a4b5054 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -199,6 +199,7 @@ static inline int blk_do_io_stat(struct request *rq)
  * Internal io_context interface
  */
 void get_io_context(struct io_context *ioc);
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
 
 void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
 				int node);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9bc5ecc1b336..048fa699adf9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -468,7 +468,6 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
-static struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *, struct io_context *);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 {
@@ -476,6 +475,14 @@ static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 	return container_of(icq, struct cfq_io_cq, icq);
 }
 
+static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
+					       struct io_context *ioc)
+{
+	if (ioc)
+		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
+	return NULL;
+}
+
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
 {
 	return cic->cfqq[is_sync];
@@ -2971,45 +2978,6 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 }
 
 /**
- * cfq_cic_lookup - lookup cfq_io_cq
- * @cfqd: the associated cfq_data
- * @ioc: the associated io_context
- *
- * Look up cfq_io_cq associated with @cfqd - @ioc pair.  Must be called
- * with queue_lock held.
- */
-static struct cfq_io_cq *
-cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
-{
-	struct request_queue *q = cfqd->queue;
-	struct io_cq *icq;
-
-	lockdep_assert_held(cfqd->queue->queue_lock);
-	if (unlikely(!ioc))
-		return NULL;
-
-	/*
-	 * icq's are indexed from @ioc using radix tree and hint pointer,
-	 * both of which are protected with RCU.  All removals are done
-	 * holding both q and ioc locks, and we're holding q lock - if we
-	 * find a icq which points to us, it's guaranteed to be valid.
-	 */
-	rcu_read_lock();
-	icq = rcu_dereference(ioc->icq_hint);
-	if (icq && icq->q == q)
-		goto out;
-
-	icq = radix_tree_lookup(&ioc->icq_tree, cfqd->queue->id);
-	if (icq && icq->q == q)
-		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
-	else
-		icq = NULL;
-out:
-	rcu_read_unlock();
-	return icq_to_cic(icq);
-}
-
-/**
  * cfq_create_cic - create and link a cfq_io_cq
  * @cfqd: cfqd of interest
  * @gfp_mask: allocation mask
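Call sites inside CFQ are untouched by this patch. For context, a typical lookup site in this era's cfq-iosched.c has roughly the shape below (a from-memory sketch modeled on cfq_allow_merge(), not part of this diff); it already runs with q->queue_lock held, which is exactly what ioc_lookup_icq() now asserts:

	/* Sketch of a typical caller (shape of cfq_allow_merge() in this era);
	 * illustrative only, not part of this patch.  Runs under q->queue_lock. */
	static int cfq_allow_merge_sketch(struct request_queue *q, struct request *rq,
					  struct bio *bio)
	{
		struct cfq_data *cfqd = q->elevator->elevator_data;
		struct cfq_io_cq *cic;
		struct cfq_queue *cfqq;

		/*
		 * Look up the cfqq that this bio will be queued with; a task with
		 * no io_context, or no icq for this queue yet, simply gets no merge.
		 */
		cic = cfq_cic_lookup(cfqd, current->io_context);
		if (!cic)
			return false;

		cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
		return cfqq == RQ_CFQQ(rq);
	}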