author    Tejun Heo <tj@kernel.org>     2011-12-13 18:33:41 -0500
committer Jens Axboe <axboe@kernel.dk>  2011-12-13 18:33:41 -0500
commit    c58698073218f2c8f2fc5982fa3938c2d3803b9f (patch)
tree      ccd2403fd8674051a062efd2c22e6fdd179b7b8f /block
parent    22f746e235a5cbee2a6ca9887b1be2aa7d31fe71 (diff)
block, cfq: reorganize cfq_io_context into generic and cfq specific parts
Currently io_context and cfq logic are mixed without a clear boundary.
Most of io_context is independent of cfq, but the cfq_io_context
handling logic is dispersed between the generic ioc code and cfq.
cfq_io_context represents the association between an io_context and a
request_queue, which is a concept useful outside of cfq, but it also
contains fields that are useful only to cfq.
This patch takes out the generic part and puts it into io_cq (io
context-queue) and the rest into cfq_io_cq (the cic moniker remains the
same), which contains io_cq. The following changes are made together
(a sketch of the resulting layout follows the list):
* cfq_ttime and cfq_io_cq now live in cfq-iosched.c.
* All related fields, functions and constants are renamed accordingly.
* ioc->ioc_data is now a "struct io_cq *" instead of a "void *" and is
  renamed to icq_hint.
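
For orientation, here is a minimal sketch of the layout the patch ends up with, reconstructed from the hunks below. The cfq_io_cq definition and icq_to_cic() are taken from the cfq-iosched.c hunk; the io_cq field list is only inferred from how the fields are used in this diff (the structure itself is declared outside block/, which this diffstat does not cover), so the real declaration may differ in detail.

```c
/*
 * Generic part: one io_cq per (io_context, request_queue) pair.
 * NOTE: io_cq is declared outside block/ and not shown in this diff;
 * the field list below is inferred from its usage in the hunks.
 */
struct io_cq {
	struct request_queue	*q;		/* associated queue */
	struct io_context	*ioc;		/* associated io_context */

	struct list_head	q_node;		/* on cfqd->icq_list (was queue_list) */
	struct hlist_node	ioc_node;	/* on ioc->icq_list (was cic_list) */

	unsigned long		changed;	/* ICQ_IOPRIO/CGROUP_CHANGED bits */
	struct rcu_head		rcu_head;

	void (*exit)(struct io_cq *);		/* e.g. cfq_exit_icq() */
	void (*release)(struct io_cq *);	/* e.g. cfq_release_icq() */
};

/* cfq-specific part, as defined in the cfq-iosched.c hunk below */
struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];	/* indexed by is_sync */
	struct cfq_ttime	ttime;		/* think-time stats, now cfq-private */
};

/* conversion helper added by the patch */
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}
```

Keeping io_cq as the first member makes icq_to_cic() a zero-offset container_of(), so a NULL io_cq converts to a NULL cfq_io_cq for free, which cfq_cic_lookup() relies on when a lookup fails.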
This prepares for the io_context API cleanup. Documentation is
currently sparse; it will be added later.
The changes in this patch are mechanical and cause no functional
change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-ioc.c      |  58
-rw-r--r--  block/cfq-iosched.c  | 248
2 files changed, 161 insertions, 145 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index e23c797b4685..dc5e69d335a0 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(get_io_context); | |||
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Slow path for ioc release in put_io_context(). Performs double-lock | 48 | * Slow path for ioc release in put_io_context(). Performs double-lock |
49 | * dancing to unlink all cic's and then frees ioc. | 49 | * dancing to unlink all icq's and then frees ioc. |
50 | */ | 50 | */ |
51 | static void ioc_release_fn(struct work_struct *work) | 51 | static void ioc_release_fn(struct work_struct *work) |
52 | { | 52 | { |
@@ -56,11 +56,10 @@ static void ioc_release_fn(struct work_struct *work) | |||
56 | 56 | ||
57 | spin_lock_irq(&ioc->lock); | 57 | spin_lock_irq(&ioc->lock); |
58 | 58 | ||
59 | while (!hlist_empty(&ioc->cic_list)) { | 59 | while (!hlist_empty(&ioc->icq_list)) { |
60 | struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first, | 60 | struct io_cq *icq = hlist_entry(ioc->icq_list.first, |
61 | struct cfq_io_context, | 61 | struct io_cq, ioc_node); |
62 | cic_list); | 62 | struct request_queue *this_q = icq->q; |
63 | struct request_queue *this_q = cic->q; | ||
64 | 63 | ||
65 | if (this_q != last_q) { | 64 | if (this_q != last_q) { |
66 | /* | 65 | /* |
@@ -89,8 +88,8 @@ static void ioc_release_fn(struct work_struct *work) | |||
89 | continue; | 88 | continue; |
90 | } | 89 | } |
91 | ioc_release_depth_inc(this_q); | 90 | ioc_release_depth_inc(this_q); |
92 | cic->exit(cic); | 91 | icq->exit(icq); |
93 | cic->release(cic); | 92 | icq->release(icq); |
94 | ioc_release_depth_dec(this_q); | 93 | ioc_release_depth_dec(this_q); |
95 | } | 94 | } |
96 | 95 | ||
@@ -131,10 +130,10 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q) | |||
131 | return; | 130 | return; |
132 | 131 | ||
133 | /* | 132 | /* |
134 | * Destroy @ioc. This is a bit messy because cic's are chained | 133 | * Destroy @ioc. This is a bit messy because icq's are chained |
135 | * from both ioc and queue, and ioc->lock nests inside queue_lock. | 134 | * from both ioc and queue, and ioc->lock nests inside queue_lock. |
136 | * The inner ioc->lock should be held to walk our cic_list and then | 135 | * The inner ioc->lock should be held to walk our icq_list and then |
137 | * for each cic the outer matching queue_lock should be grabbed. | 136 | * for each icq the outer matching queue_lock should be grabbed. |
138 | * ie. We need to do reverse-order double lock dancing. | 137 | * ie. We need to do reverse-order double lock dancing. |
139 | * | 138 | * |
140 | * Another twist is that we are often called with one of the | 139 | * Another twist is that we are often called with one of the |
@@ -153,11 +152,10 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q) | |||
153 | spin_lock_irqsave_nested(&ioc->lock, flags, | 152 | spin_lock_irqsave_nested(&ioc->lock, flags, |
154 | ioc_release_depth(locked_q)); | 153 | ioc_release_depth(locked_q)); |
155 | 154 | ||
156 | while (!hlist_empty(&ioc->cic_list)) { | 155 | while (!hlist_empty(&ioc->icq_list)) { |
157 | struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first, | 156 | struct io_cq *icq = hlist_entry(ioc->icq_list.first, |
158 | struct cfq_io_context, | 157 | struct io_cq, ioc_node); |
159 | cic_list); | 158 | struct request_queue *this_q = icq->q; |
160 | struct request_queue *this_q = cic->q; | ||
161 | 159 | ||
162 | if (this_q != last_q) { | 160 | if (this_q != last_q) { |
163 | if (last_q && last_q != locked_q) | 161 | if (last_q && last_q != locked_q) |
@@ -170,8 +168,8 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q) | |||
170 | continue; | 168 | continue; |
171 | } | 169 | } |
172 | ioc_release_depth_inc(this_q); | 170 | ioc_release_depth_inc(this_q); |
173 | cic->exit(cic); | 171 | icq->exit(icq); |
174 | cic->release(cic); | 172 | icq->release(icq); |
175 | ioc_release_depth_dec(this_q); | 173 | ioc_release_depth_dec(this_q); |
176 | } | 174 | } |
177 | 175 | ||
@@ -180,8 +178,8 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q) | |||
180 | 178 | ||
181 | spin_unlock_irqrestore(&ioc->lock, flags); | 179 | spin_unlock_irqrestore(&ioc->lock, flags); |
182 | 180 | ||
183 | /* if no cic's left, we're done; otherwise, kick release_work */ | 181 | /* if no icq is left, we're done; otherwise, kick release_work */ |
184 | if (hlist_empty(&ioc->cic_list)) | 182 | if (hlist_empty(&ioc->icq_list)) |
185 | kmem_cache_free(iocontext_cachep, ioc); | 183 | kmem_cache_free(iocontext_cachep, ioc); |
186 | else | 184 | else |
187 | schedule_work(&ioc->release_work); | 185 | schedule_work(&ioc->release_work); |
@@ -219,8 +217,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags, | |||
219 | atomic_long_set(&ioc->refcount, 1); | 217 | atomic_long_set(&ioc->refcount, 1); |
220 | atomic_set(&ioc->nr_tasks, 1); | 218 | atomic_set(&ioc->nr_tasks, 1); |
221 | spin_lock_init(&ioc->lock); | 219 | spin_lock_init(&ioc->lock); |
222 | INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH); | 220 | INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH); |
223 | INIT_HLIST_HEAD(&ioc->cic_list); | 221 | INIT_HLIST_HEAD(&ioc->icq_list); |
224 | INIT_WORK(&ioc->release_work, ioc_release_fn); | 222 | INIT_WORK(&ioc->release_work, ioc_release_fn); |
225 | 223 | ||
226 | /* try to install, somebody might already have beaten us to it */ | 224 | /* try to install, somebody might already have beaten us to it */ |
@@ -270,11 +268,11 @@ EXPORT_SYMBOL(get_task_io_context); | |||
270 | 268 | ||
271 | void ioc_set_changed(struct io_context *ioc, int which) | 269 | void ioc_set_changed(struct io_context *ioc, int which) |
272 | { | 270 | { |
273 | struct cfq_io_context *cic; | 271 | struct io_cq *icq; |
274 | struct hlist_node *n; | 272 | struct hlist_node *n; |
275 | 273 | ||
276 | hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list) | 274 | hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) |
277 | set_bit(which, &cic->changed); | 275 | set_bit(which, &icq->changed); |
278 | } | 276 | } |
279 | 277 | ||
280 | /** | 278 | /** |
@@ -282,8 +280,8 @@ void ioc_set_changed(struct io_context *ioc, int which) | |||
282 | * @ioc: io_context of interest | 280 | * @ioc: io_context of interest |
283 | * @ioprio: new ioprio | 281 | * @ioprio: new ioprio |
284 | * | 282 | * |
285 | * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all | 283 | * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all |
286 | * cic's. iosched is responsible for checking the bit and applying it on | 284 | * icq's. iosched is responsible for checking the bit and applying it on |
287 | * request issue path. | 285 | * request issue path. |
288 | */ | 286 | */ |
289 | void ioc_ioprio_changed(struct io_context *ioc, int ioprio) | 287 | void ioc_ioprio_changed(struct io_context *ioc, int ioprio) |
@@ -292,7 +290,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio) | |||
292 | 290 | ||
293 | spin_lock_irqsave(&ioc->lock, flags); | 291 | spin_lock_irqsave(&ioc->lock, flags); |
294 | ioc->ioprio = ioprio; | 292 | ioc->ioprio = ioprio; |
295 | ioc_set_changed(ioc, CIC_IOPRIO_CHANGED); | 293 | ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED); |
296 | spin_unlock_irqrestore(&ioc->lock, flags); | 294 | spin_unlock_irqrestore(&ioc->lock, flags); |
297 | } | 295 | } |
298 | 296 | ||
@@ -300,7 +298,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio) | |||
300 | * ioc_cgroup_changed - notify cgroup change | 298 | * ioc_cgroup_changed - notify cgroup change |
301 | * @ioc: io_context of interest | 299 | * @ioc: io_context of interest |
302 | * | 300 | * |
303 | * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's. | 301 | * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's. |
304 | * iosched is responsible for checking the bit and applying it on request | 302 | * iosched is responsible for checking the bit and applying it on request |
305 | * issue path. | 303 | * issue path. |
306 | */ | 304 | */ |
@@ -309,7 +307,7 @@ void ioc_cgroup_changed(struct io_context *ioc) | |||
309 | unsigned long flags; | 307 | unsigned long flags; |
310 | 308 | ||
311 | spin_lock_irqsave(&ioc->lock, flags); | 309 | spin_lock_irqsave(&ioc->lock, flags); |
312 | ioc_set_changed(ioc, CIC_CGROUP_CHANGED); | 310 | ioc_set_changed(ioc, ICQ_CGROUP_CHANGED); |
313 | spin_unlock_irqrestore(&ioc->lock, flags); | 311 | spin_unlock_irqrestore(&ioc->lock, flags); |
314 | } | 312 | } |
315 | 313 | ||
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f7e4d161404..d2f16fcdec7f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,13 +54,12 @@ static const int cfq_hist_divisor = 4; | |||
54 | #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) | 54 | #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) |
55 | #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) | 55 | #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) |
56 | 56 | ||
57 | #define RQ_CIC(rq) \ | 57 | #define RQ_CIC(rq) icq_to_cic((rq)->elevator_private[0]) |
58 | ((struct cfq_io_context *) (rq)->elevator_private[0]) | ||
59 | #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private[1]) | 58 | #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private[1]) |
60 | #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private[2]) | 59 | #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private[2]) |
61 | 60 | ||
62 | static struct kmem_cache *cfq_pool; | 61 | static struct kmem_cache *cfq_pool; |
63 | static struct kmem_cache *cfq_ioc_pool; | 62 | static struct kmem_cache *cfq_icq_pool; |
64 | 63 | ||
65 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR | 64 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR |
66 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) | 65 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) |
@@ -69,6 +68,14 @@ static struct kmem_cache *cfq_ioc_pool; | |||
69 | #define sample_valid(samples) ((samples) > 80) | 68 | #define sample_valid(samples) ((samples) > 80) |
70 | #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node) | 69 | #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node) |
71 | 70 | ||
71 | struct cfq_ttime { | ||
72 | unsigned long last_end_request; | ||
73 | |||
74 | unsigned long ttime_total; | ||
75 | unsigned long ttime_samples; | ||
76 | unsigned long ttime_mean; | ||
77 | }; | ||
78 | |||
72 | /* | 79 | /* |
73 | * Most of our rbtree usage is for sorting with min extraction, so | 80 | * Most of our rbtree usage is for sorting with min extraction, so |
74 | * if we cache the leftmost node we don't have to walk down the tree | 81 | * if we cache the leftmost node we don't have to walk down the tree |
@@ -210,6 +217,12 @@ struct cfq_group { | |||
210 | struct cfq_ttime ttime; | 217 | struct cfq_ttime ttime; |
211 | }; | 218 | }; |
212 | 219 | ||
220 | struct cfq_io_cq { | ||
221 | struct io_cq icq; /* must be the first member */ | ||
222 | struct cfq_queue *cfqq[2]; | ||
223 | struct cfq_ttime ttime; | ||
224 | }; | ||
225 | |||
213 | /* | 226 | /* |
214 | * Per block device queue structure | 227 | * Per block device queue structure |
215 | */ | 228 | */ |
@@ -261,7 +274,7 @@ struct cfq_data { | |||
261 | struct work_struct unplug_work; | 274 | struct work_struct unplug_work; |
262 | 275 | ||
263 | struct cfq_queue *active_queue; | 276 | struct cfq_queue *active_queue; |
264 | struct cfq_io_context *active_cic; | 277 | struct cfq_io_cq *active_cic; |
265 | 278 | ||
266 | /* | 279 | /* |
267 | * async queue for each priority case | 280 | * async queue for each priority case |
@@ -284,7 +297,7 @@ struct cfq_data { | |||
284 | unsigned int cfq_group_idle; | 297 | unsigned int cfq_group_idle; |
285 | unsigned int cfq_latency; | 298 | unsigned int cfq_latency; |
286 | 299 | ||
287 | struct list_head cic_list; | 300 | struct list_head icq_list; |
288 | 301 | ||
289 | /* | 302 | /* |
290 | * Fallback dummy cfqq for extreme OOM conditions | 303 | * Fallback dummy cfqq for extreme OOM conditions |
@@ -457,24 +470,28 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, | |||
457 | static void cfq_dispatch_insert(struct request_queue *, struct request *); | 470 | static void cfq_dispatch_insert(struct request_queue *, struct request *); |
458 | static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, | 471 | static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, |
459 | struct io_context *, gfp_t); | 472 | struct io_context *, gfp_t); |
460 | static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, | 473 | static struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *, struct io_context *); |
461 | struct io_context *); | ||
462 | 474 | ||
463 | static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, | 475 | static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq) |
464 | bool is_sync) | 476 | { |
477 | /* cic->icq is the first member, %NULL will convert to %NULL */ | ||
478 | return container_of(icq, struct cfq_io_cq, icq); | ||
479 | } | ||
480 | |||
481 | static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync) | ||
465 | { | 482 | { |
466 | return cic->cfqq[is_sync]; | 483 | return cic->cfqq[is_sync]; |
467 | } | 484 | } |
468 | 485 | ||
469 | static inline void cic_set_cfqq(struct cfq_io_context *cic, | 486 | static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq, |
470 | struct cfq_queue *cfqq, bool is_sync) | 487 | bool is_sync) |
471 | { | 488 | { |
472 | cic->cfqq[is_sync] = cfqq; | 489 | cic->cfqq[is_sync] = cfqq; |
473 | } | 490 | } |
474 | 491 | ||
475 | static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic) | 492 | static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic) |
476 | { | 493 | { |
477 | return cic->q->elevator->elevator_data; | 494 | return cic->icq.q->elevator->elevator_data; |
478 | } | 495 | } |
479 | 496 | ||
480 | /* | 497 | /* |
@@ -1541,7 +1558,7 @@ static struct request * | |||
1541 | cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) | 1558 | cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) |
1542 | { | 1559 | { |
1543 | struct task_struct *tsk = current; | 1560 | struct task_struct *tsk = current; |
1544 | struct cfq_io_context *cic; | 1561 | struct cfq_io_cq *cic; |
1545 | struct cfq_queue *cfqq; | 1562 | struct cfq_queue *cfqq; |
1546 | 1563 | ||
1547 | cic = cfq_cic_lookup(cfqd, tsk->io_context); | 1564 | cic = cfq_cic_lookup(cfqd, tsk->io_context); |
@@ -1655,7 +1672,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, | |||
1655 | struct bio *bio) | 1672 | struct bio *bio) |
1656 | { | 1673 | { |
1657 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1674 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1658 | struct cfq_io_context *cic; | 1675 | struct cfq_io_cq *cic; |
1659 | struct cfq_queue *cfqq; | 1676 | struct cfq_queue *cfqq; |
1660 | 1677 | ||
1661 | /* | 1678 | /* |
@@ -1671,7 +1688,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, | |||
1671 | * and %current are guaranteed to be equal. Avoid lookup which | 1688 | * and %current are guaranteed to be equal. Avoid lookup which |
1672 | * requires queue_lock by using @rq's cic. | 1689 | * requires queue_lock by using @rq's cic. |
1673 | */ | 1690 | */ |
1674 | if (current->io_context == RQ_CIC(rq)->ioc) { | 1691 | if (current->io_context == RQ_CIC(rq)->icq.ioc) { |
1675 | cic = RQ_CIC(rq); | 1692 | cic = RQ_CIC(rq); |
1676 | } else { | 1693 | } else { |
1677 | cic = cfq_cic_lookup(cfqd, current->io_context); | 1694 | cic = cfq_cic_lookup(cfqd, current->io_context); |
@@ -1761,7 +1778,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1761 | cfqd->active_queue = NULL; | 1778 | cfqd->active_queue = NULL; |
1762 | 1779 | ||
1763 | if (cfqd->active_cic) { | 1780 | if (cfqd->active_cic) { |
1764 | put_io_context(cfqd->active_cic->ioc, cfqd->queue); | 1781 | put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue); |
1765 | cfqd->active_cic = NULL; | 1782 | cfqd->active_cic = NULL; |
1766 | } | 1783 | } |
1767 | } | 1784 | } |
@@ -1981,7 +1998,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1981 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) | 1998 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) |
1982 | { | 1999 | { |
1983 | struct cfq_queue *cfqq = cfqd->active_queue; | 2000 | struct cfq_queue *cfqq = cfqd->active_queue; |
1984 | struct cfq_io_context *cic; | 2001 | struct cfq_io_cq *cic; |
1985 | unsigned long sl, group_idle = 0; | 2002 | unsigned long sl, group_idle = 0; |
1986 | 2003 | ||
1987 | /* | 2004 | /* |
@@ -2016,7 +2033,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
2016 | * task has exited, don't wait | 2033 | * task has exited, don't wait |
2017 | */ | 2034 | */ |
2018 | cic = cfqd->active_cic; | 2035 | cic = cfqd->active_cic; |
2019 | if (!cic || !atomic_read(&cic->ioc->nr_tasks)) | 2036 | if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks)) |
2020 | return; | 2037 | return; |
2021 | 2038 | ||
2022 | /* | 2039 | /* |
@@ -2567,9 +2584,9 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
2567 | cfq_dispatch_insert(cfqd->queue, rq); | 2584 | cfq_dispatch_insert(cfqd->queue, rq); |
2568 | 2585 | ||
2569 | if (!cfqd->active_cic) { | 2586 | if (!cfqd->active_cic) { |
2570 | struct cfq_io_context *cic = RQ_CIC(rq); | 2587 | struct cfq_io_cq *cic = RQ_CIC(rq); |
2571 | 2588 | ||
2572 | atomic_long_inc(&cic->ioc->refcount); | 2589 | atomic_long_inc(&cic->icq.ioc->refcount); |
2573 | cfqd->active_cic = cic; | 2590 | cfqd->active_cic = cic; |
2574 | } | 2591 | } |
2575 | 2592 | ||
@@ -2652,24 +2669,24 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
2652 | cfq_put_cfqg(cfqg); | 2669 | cfq_put_cfqg(cfqg); |
2653 | } | 2670 | } |
2654 | 2671 | ||
2655 | static void cfq_cic_free_rcu(struct rcu_head *head) | 2672 | static void cfq_icq_free_rcu(struct rcu_head *head) |
2656 | { | 2673 | { |
2657 | kmem_cache_free(cfq_ioc_pool, | 2674 | kmem_cache_free(cfq_icq_pool, |
2658 | container_of(head, struct cfq_io_context, rcu_head)); | 2675 | icq_to_cic(container_of(head, struct io_cq, rcu_head))); |
2659 | } | 2676 | } |
2660 | 2677 | ||
2661 | static void cfq_cic_free(struct cfq_io_context *cic) | 2678 | static void cfq_icq_free(struct io_cq *icq) |
2662 | { | 2679 | { |
2663 | call_rcu(&cic->rcu_head, cfq_cic_free_rcu); | 2680 | call_rcu(&icq->rcu_head, cfq_icq_free_rcu); |
2664 | } | 2681 | } |
2665 | 2682 | ||
2666 | static void cfq_release_cic(struct cfq_io_context *cic) | 2683 | static void cfq_release_icq(struct io_cq *icq) |
2667 | { | 2684 | { |
2668 | struct io_context *ioc = cic->ioc; | 2685 | struct io_context *ioc = icq->ioc; |
2669 | 2686 | ||
2670 | radix_tree_delete(&ioc->radix_root, cic->q->id); | 2687 | radix_tree_delete(&ioc->icq_tree, icq->q->id); |
2671 | hlist_del(&cic->cic_list); | 2688 | hlist_del(&icq->ioc_node); |
2672 | cfq_cic_free(cic); | 2689 | cfq_icq_free(icq); |
2673 | } | 2690 | } |
2674 | 2691 | ||
2675 | static void cfq_put_cooperator(struct cfq_queue *cfqq) | 2692 | static void cfq_put_cooperator(struct cfq_queue *cfqq) |
@@ -2705,20 +2722,21 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
2705 | cfq_put_queue(cfqq); | 2722 | cfq_put_queue(cfqq); |
2706 | } | 2723 | } |
2707 | 2724 | ||
2708 | static void cfq_exit_cic(struct cfq_io_context *cic) | 2725 | static void cfq_exit_icq(struct io_cq *icq) |
2709 | { | 2726 | { |
2727 | struct cfq_io_cq *cic = icq_to_cic(icq); | ||
2710 | struct cfq_data *cfqd = cic_to_cfqd(cic); | 2728 | struct cfq_data *cfqd = cic_to_cfqd(cic); |
2711 | struct io_context *ioc = cic->ioc; | 2729 | struct io_context *ioc = icq->ioc; |
2712 | 2730 | ||
2713 | list_del_init(&cic->queue_list); | 2731 | list_del_init(&icq->q_node); |
2714 | 2732 | ||
2715 | /* | 2733 | /* |
2716 | * Both setting lookup hint to and clearing it from @cic are done | 2734 | * Both setting lookup hint to and clearing it from @icq are done |
2717 | * under queue_lock. If it's not pointing to @cic now, it never | 2735 | * under queue_lock. If it's not pointing to @icq now, it never |
2718 | * will. Hint assignment itself can race safely. | 2736 | * will. Hint assignment itself can race safely. |
2719 | */ | 2737 | */ |
2720 | if (rcu_dereference_raw(ioc->ioc_data) == cic) | 2738 | if (rcu_dereference_raw(ioc->icq_hint) == icq) |
2721 | rcu_assign_pointer(ioc->ioc_data, NULL); | 2739 | rcu_assign_pointer(ioc->icq_hint, NULL); |
2722 | 2740 | ||
2723 | if (cic->cfqq[BLK_RW_ASYNC]) { | 2741 | if (cic->cfqq[BLK_RW_ASYNC]) { |
2724 | cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); | 2742 | cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); |
@@ -2731,19 +2749,18 @@ static void cfq_exit_cic(struct cfq_io_context *cic) | |||
2731 | } | 2749 | } |
2732 | } | 2750 | } |
2733 | 2751 | ||
2734 | static struct cfq_io_context * | 2752 | static struct cfq_io_cq *cfq_alloc_cic(struct cfq_data *cfqd, gfp_t gfp_mask) |
2735 | cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | ||
2736 | { | 2753 | { |
2737 | struct cfq_io_context *cic; | 2754 | struct cfq_io_cq *cic; |
2738 | 2755 | ||
2739 | cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO, | 2756 | cic = kmem_cache_alloc_node(cfq_icq_pool, gfp_mask | __GFP_ZERO, |
2740 | cfqd->queue->node); | 2757 | cfqd->queue->node); |
2741 | if (cic) { | 2758 | if (cic) { |
2742 | cic->ttime.last_end_request = jiffies; | 2759 | cic->ttime.last_end_request = jiffies; |
2743 | INIT_LIST_HEAD(&cic->queue_list); | 2760 | INIT_LIST_HEAD(&cic->icq.q_node); |
2744 | INIT_HLIST_NODE(&cic->cic_list); | 2761 | INIT_HLIST_NODE(&cic->icq.ioc_node); |
2745 | cic->exit = cfq_exit_cic; | 2762 | cic->icq.exit = cfq_exit_icq; |
2746 | cic->release = cfq_release_cic; | 2763 | cic->icq.release = cfq_release_icq; |
2747 | } | 2764 | } |
2748 | 2765 | ||
2749 | return cic; | 2766 | return cic; |
@@ -2791,7 +2808,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) | |||
2791 | cfq_clear_cfqq_prio_changed(cfqq); | 2808 | cfq_clear_cfqq_prio_changed(cfqq); |
2792 | } | 2809 | } |
2793 | 2810 | ||
2794 | static void changed_ioprio(struct cfq_io_context *cic) | 2811 | static void changed_ioprio(struct cfq_io_cq *cic) |
2795 | { | 2812 | { |
2796 | struct cfq_data *cfqd = cic_to_cfqd(cic); | 2813 | struct cfq_data *cfqd = cic_to_cfqd(cic); |
2797 | struct cfq_queue *cfqq; | 2814 | struct cfq_queue *cfqq; |
@@ -2802,7 +2819,7 @@ static void changed_ioprio(struct cfq_io_context *cic) | |||
2802 | cfqq = cic->cfqq[BLK_RW_ASYNC]; | 2819 | cfqq = cic->cfqq[BLK_RW_ASYNC]; |
2803 | if (cfqq) { | 2820 | if (cfqq) { |
2804 | struct cfq_queue *new_cfqq; | 2821 | struct cfq_queue *new_cfqq; |
2805 | new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc, | 2822 | new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc, |
2806 | GFP_ATOMIC); | 2823 | GFP_ATOMIC); |
2807 | if (new_cfqq) { | 2824 | if (new_cfqq) { |
2808 | cic->cfqq[BLK_RW_ASYNC] = new_cfqq; | 2825 | cic->cfqq[BLK_RW_ASYNC] = new_cfqq; |
@@ -2836,7 +2853,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
2836 | } | 2853 | } |
2837 | 2854 | ||
2838 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | 2855 | #ifdef CONFIG_CFQ_GROUP_IOSCHED |
2839 | static void changed_cgroup(struct cfq_io_context *cic) | 2856 | static void changed_cgroup(struct cfq_io_cq *cic) |
2840 | { | 2857 | { |
2841 | struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1); | 2858 | struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1); |
2842 | struct cfq_data *cfqd = cic_to_cfqd(cic); | 2859 | struct cfq_data *cfqd = cic_to_cfqd(cic); |
@@ -2864,7 +2881,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, | |||
2864 | struct io_context *ioc, gfp_t gfp_mask) | 2881 | struct io_context *ioc, gfp_t gfp_mask) |
2865 | { | 2882 | { |
2866 | struct cfq_queue *cfqq, *new_cfqq = NULL; | 2883 | struct cfq_queue *cfqq, *new_cfqq = NULL; |
2867 | struct cfq_io_context *cic; | 2884 | struct cfq_io_cq *cic; |
2868 | struct cfq_group *cfqg; | 2885 | struct cfq_group *cfqg; |
2869 | 2886 | ||
2870 | retry: | 2887 | retry: |
@@ -2956,56 +2973,57 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, | |||
2956 | } | 2973 | } |
2957 | 2974 | ||
2958 | /** | 2975 | /** |
2959 | * cfq_cic_lookup - lookup cfq_io_context | 2976 | * cfq_cic_lookup - lookup cfq_io_cq |
2960 | * @cfqd: the associated cfq_data | 2977 | * @cfqd: the associated cfq_data |
2961 | * @ioc: the associated io_context | 2978 | * @ioc: the associated io_context |
2962 | * | 2979 | * |
2963 | * Look up cfq_io_context associated with @cfqd - @ioc pair. Must be | 2980 | * Look up cfq_io_cq associated with @cfqd - @ioc pair. Must be called |
2964 | * called with queue_lock held. | 2981 | * with queue_lock held. |
2965 | */ | 2982 | */ |
2966 | static struct cfq_io_context * | 2983 | static struct cfq_io_cq * |
2967 | cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) | 2984 | cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) |
2968 | { | 2985 | { |
2969 | struct request_queue *q = cfqd->queue; | 2986 | struct request_queue *q = cfqd->queue; |
2970 | struct cfq_io_context *cic; | 2987 | struct io_cq *icq; |
2971 | 2988 | ||
2972 | lockdep_assert_held(cfqd->queue->queue_lock); | 2989 | lockdep_assert_held(cfqd->queue->queue_lock); |
2973 | if (unlikely(!ioc)) | 2990 | if (unlikely(!ioc)) |
2974 | return NULL; | 2991 | return NULL; |
2975 | 2992 | ||
2976 | /* | 2993 | /* |
2977 | * cic's are indexed from @ioc using radix tree and hint pointer, | 2994 | * icq's are indexed from @ioc using radix tree and hint pointer, |
2978 | * both of which are protected with RCU. All removals are done | 2995 | * both of which are protected with RCU. All removals are done |
2979 | * holding both q and ioc locks, and we're holding q lock - if we | 2996 | * holding both q and ioc locks, and we're holding q lock - if we |
2980 | * find a cic which points to us, it's guaranteed to be valid. | 2997 | * find a icq which points to us, it's guaranteed to be valid. |
2981 | */ | 2998 | */ |
2982 | rcu_read_lock(); | 2999 | rcu_read_lock(); |
2983 | cic = rcu_dereference(ioc->ioc_data); | 3000 | icq = rcu_dereference(ioc->icq_hint); |
2984 | if (cic && cic->q == q) | 3001 | if (icq && icq->q == q) |
2985 | goto out; | 3002 | goto out; |
2986 | 3003 | ||
2987 | cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id); | 3004 | icq = radix_tree_lookup(&ioc->icq_tree, cfqd->queue->id); |
2988 | if (cic && cic->q == q) | 3005 | if (icq && icq->q == q) |
2989 | rcu_assign_pointer(ioc->ioc_data, cic); /* allowed to race */ | 3006 | rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */ |
2990 | else | 3007 | else |
2991 | cic = NULL; | 3008 | icq = NULL; |
2992 | out: | 3009 | out: |
2993 | rcu_read_unlock(); | 3010 | rcu_read_unlock(); |
2994 | return cic; | 3011 | return icq_to_cic(icq); |
2995 | } | 3012 | } |
2996 | 3013 | ||
2997 | /** | 3014 | /** |
2998 | * cfq_create_cic - create and link a cfq_io_context | 3015 | * cfq_create_cic - create and link a cfq_io_cq |
2999 | * @cfqd: cfqd of interest | 3016 | * @cfqd: cfqd of interest |
3000 | * @gfp_mask: allocation mask | 3017 | * @gfp_mask: allocation mask |
3001 | * | 3018 | * |
3002 | * Make sure cfq_io_context linking %current->io_context and @cfqd exists. | 3019 | * Make sure cfq_io_cq linking %current->io_context and @cfqd exists. If |
3003 | * If ioc and/or cic doesn't exist, they will be created using @gfp_mask. | 3020 | * ioc and/or cic doesn't exist, they will be created using @gfp_mask. |
3004 | */ | 3021 | */ |
3005 | static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask) | 3022 | static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask) |
3006 | { | 3023 | { |
3007 | struct request_queue *q = cfqd->queue; | 3024 | struct request_queue *q = cfqd->queue; |
3008 | struct cfq_io_context *cic = NULL; | 3025 | struct io_cq *icq = NULL; |
3026 | struct cfq_io_cq *cic; | ||
3009 | struct io_context *ioc; | 3027 | struct io_context *ioc; |
3010 | int ret = -ENOMEM; | 3028 | int ret = -ENOMEM; |
3011 | 3029 | ||
@@ -3016,26 +3034,27 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
3016 | if (!ioc) | 3034 | if (!ioc) |
3017 | goto out; | 3035 | goto out; |
3018 | 3036 | ||
3019 | cic = cfq_alloc_io_context(cfqd, gfp_mask); | 3037 | cic = cfq_alloc_cic(cfqd, gfp_mask); |
3020 | if (!cic) | 3038 | if (!cic) |
3021 | goto out; | 3039 | goto out; |
3040 | icq = &cic->icq; | ||
3022 | 3041 | ||
3023 | ret = radix_tree_preload(gfp_mask); | 3042 | ret = radix_tree_preload(gfp_mask); |
3024 | if (ret) | 3043 | if (ret) |
3025 | goto out; | 3044 | goto out; |
3026 | 3045 | ||
3027 | cic->ioc = ioc; | 3046 | icq->ioc = ioc; |
3028 | cic->q = cfqd->queue; | 3047 | icq->q = cfqd->queue; |
3029 | 3048 | ||
3030 | /* lock both q and ioc and try to link @cic */ | 3049 | /* lock both q and ioc and try to link @icq */ |
3031 | spin_lock_irq(q->queue_lock); | 3050 | spin_lock_irq(q->queue_lock); |
3032 | spin_lock(&ioc->lock); | 3051 | spin_lock(&ioc->lock); |
3033 | 3052 | ||
3034 | ret = radix_tree_insert(&ioc->radix_root, q->id, cic); | 3053 | ret = radix_tree_insert(&ioc->icq_tree, q->id, icq); |
3035 | if (likely(!ret)) { | 3054 | if (likely(!ret)) { |
3036 | hlist_add_head(&cic->cic_list, &ioc->cic_list); | 3055 | hlist_add_head(&icq->ioc_node, &ioc->icq_list); |
3037 | list_add(&cic->queue_list, &cfqd->cic_list); | 3056 | list_add(&icq->q_node, &cfqd->icq_list); |
3038 | cic = NULL; | 3057 | icq = NULL; |
3039 | } else if (ret == -EEXIST) { | 3058 | } else if (ret == -EEXIST) { |
3040 | /* someone else already did it */ | 3059 | /* someone else already did it */ |
3041 | ret = 0; | 3060 | ret = 0; |
@@ -3047,29 +3066,28 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
3047 | radix_tree_preload_end(); | 3066 | radix_tree_preload_end(); |
3048 | out: | 3067 | out: |
3049 | if (ret) | 3068 | if (ret) |
3050 | printk(KERN_ERR "cfq: cic link failed!\n"); | 3069 | printk(KERN_ERR "cfq: icq link failed!\n"); |
3051 | if (cic) | 3070 | if (icq) |
3052 | cfq_cic_free(cic); | 3071 | cfq_icq_free(icq); |
3053 | return ret; | 3072 | return ret; |
3054 | } | 3073 | } |
3055 | 3074 | ||
3056 | /** | 3075 | /** |
3057 | * cfq_get_io_context - acquire cfq_io_context and bump refcnt on io_context | 3076 | * cfq_get_cic - acquire cfq_io_cq and bump refcnt on io_context |
3058 | * @cfqd: cfqd to setup cic for | 3077 | * @cfqd: cfqd to setup cic for |
3059 | * @gfp_mask: allocation mask | 3078 | * @gfp_mask: allocation mask |
3060 | * | 3079 | * |
3061 | * Return cfq_io_context associating @cfqd and %current->io_context and | 3080 | * Return cfq_io_cq associating @cfqd and %current->io_context and |
3062 | * bump refcnt on io_context. If ioc or cic doesn't exist, they're created | 3081 | * bump refcnt on io_context. If ioc or cic doesn't exist, they're created |
3063 | * using @gfp_mask. | 3082 | * using @gfp_mask. |
3064 | * | 3083 | * |
3065 | * Must be called under queue_lock which may be released and re-acquired. | 3084 | * Must be called under queue_lock which may be released and re-acquired. |
3066 | * This function also may sleep depending on @gfp_mask. | 3085 | * This function also may sleep depending on @gfp_mask. |
3067 | */ | 3086 | */ |
3068 | static struct cfq_io_context * | 3087 | static struct cfq_io_cq *cfq_get_cic(struct cfq_data *cfqd, gfp_t gfp_mask) |
3069 | cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | ||
3070 | { | 3088 | { |
3071 | struct request_queue *q = cfqd->queue; | 3089 | struct request_queue *q = cfqd->queue; |
3072 | struct cfq_io_context *cic = NULL; | 3090 | struct cfq_io_cq *cic = NULL; |
3073 | struct io_context *ioc; | 3091 | struct io_context *ioc; |
3074 | int err; | 3092 | int err; |
3075 | 3093 | ||
@@ -3095,11 +3113,11 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
3095 | /* bump @ioc's refcnt and handle changed notifications */ | 3113 | /* bump @ioc's refcnt and handle changed notifications */ |
3096 | get_io_context(ioc); | 3114 | get_io_context(ioc); |
3097 | 3115 | ||
3098 | if (unlikely(cic->changed)) { | 3116 | if (unlikely(cic->icq.changed)) { |
3099 | if (test_and_clear_bit(CIC_IOPRIO_CHANGED, &cic->changed)) | 3117 | if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed)) |
3100 | changed_ioprio(cic); | 3118 | changed_ioprio(cic); |
3101 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | 3119 | #ifdef CONFIG_CFQ_GROUP_IOSCHED |
3102 | if (test_and_clear_bit(CIC_CGROUP_CHANGED, &cic->changed)) | 3120 | if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed)) |
3103 | changed_cgroup(cic); | 3121 | changed_cgroup(cic); |
3104 | #endif | 3122 | #endif |
3105 | } | 3123 | } |
@@ -3120,7 +3138,7 @@ __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle) | |||
3120 | 3138 | ||
3121 | static void | 3139 | static void |
3122 | cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, | 3140 | cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
3123 | struct cfq_io_context *cic) | 3141 | struct cfq_io_cq *cic) |
3124 | { | 3142 | { |
3125 | if (cfq_cfqq_sync(cfqq)) { | 3143 | if (cfq_cfqq_sync(cfqq)) { |
3126 | __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); | 3144 | __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); |
@@ -3158,7 +3176,7 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
3158 | */ | 3176 | */ |
3159 | static void | 3177 | static void |
3160 | cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | 3178 | cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
3161 | struct cfq_io_context *cic) | 3179 | struct cfq_io_cq *cic) |
3162 | { | 3180 | { |
3163 | int old_idle, enable_idle; | 3181 | int old_idle, enable_idle; |
3164 | 3182 | ||
@@ -3175,8 +3193,9 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
3175 | 3193 | ||
3176 | if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE)) | 3194 | if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE)) |
3177 | enable_idle = 0; | 3195 | enable_idle = 0; |
3178 | else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || | 3196 | else if (!atomic_read(&cic->icq.ioc->nr_tasks) || |
3179 | (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) | 3197 | !cfqd->cfq_slice_idle || |
3198 | (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) | ||
3180 | enable_idle = 0; | 3199 | enable_idle = 0; |
3181 | else if (sample_valid(cic->ttime.ttime_samples)) { | 3200 | else if (sample_valid(cic->ttime.ttime_samples)) { |
3182 | if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) | 3201 | if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) |
@@ -3308,7 +3327,7 @@ static void | |||
3308 | cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | 3327 | cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
3309 | struct request *rq) | 3328 | struct request *rq) |
3310 | { | 3329 | { |
3311 | struct cfq_io_context *cic = RQ_CIC(rq); | 3330 | struct cfq_io_cq *cic = RQ_CIC(rq); |
3312 | 3331 | ||
3313 | cfqd->rq_queued++; | 3332 | cfqd->rq_queued++; |
3314 | if (rq->cmd_flags & REQ_PRIO) | 3333 | if (rq->cmd_flags & REQ_PRIO) |
@@ -3361,7 +3380,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) | |||
3361 | struct cfq_queue *cfqq = RQ_CFQQ(rq); | 3380 | struct cfq_queue *cfqq = RQ_CFQQ(rq); |
3362 | 3381 | ||
3363 | cfq_log_cfqq(cfqd, cfqq, "insert_request"); | 3382 | cfq_log_cfqq(cfqd, cfqq, "insert_request"); |
3364 | cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc); | 3383 | cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc); |
3365 | 3384 | ||
3366 | rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); | 3385 | rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); |
3367 | list_add_tail(&rq->queuelist, &cfqq->fifo); | 3386 | list_add_tail(&rq->queuelist, &cfqq->fifo); |
@@ -3411,7 +3430,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) | |||
3411 | 3430 | ||
3412 | static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 3431 | static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
3413 | { | 3432 | { |
3414 | struct cfq_io_context *cic = cfqd->active_cic; | 3433 | struct cfq_io_cq *cic = cfqd->active_cic; |
3415 | 3434 | ||
3416 | /* If the queue already has requests, don't wait */ | 3435 | /* If the queue already has requests, don't wait */ |
3417 | if (!RB_EMPTY_ROOT(&cfqq->sort_list)) | 3436 | if (!RB_EMPTY_ROOT(&cfqq->sort_list)) |
@@ -3548,7 +3567,7 @@ static int cfq_may_queue(struct request_queue *q, int rw) | |||
3548 | { | 3567 | { |
3549 | struct cfq_data *cfqd = q->elevator->elevator_data; | 3568 | struct cfq_data *cfqd = q->elevator->elevator_data; |
3550 | struct task_struct *tsk = current; | 3569 | struct task_struct *tsk = current; |
3551 | struct cfq_io_context *cic; | 3570 | struct cfq_io_cq *cic; |
3552 | struct cfq_queue *cfqq; | 3571 | struct cfq_queue *cfqq; |
3553 | 3572 | ||
3554 | /* | 3573 | /* |
@@ -3563,7 +3582,7 @@ static int cfq_may_queue(struct request_queue *q, int rw) | |||
3563 | 3582 | ||
3564 | cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); | 3583 | cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); |
3565 | if (cfqq) { | 3584 | if (cfqq) { |
3566 | cfq_init_prio_data(cfqq, cic->ioc); | 3585 | cfq_init_prio_data(cfqq, cic->icq.ioc); |
3567 | 3586 | ||
3568 | return __cfq_may_queue(cfqq); | 3587 | return __cfq_may_queue(cfqq); |
3569 | } | 3588 | } |
@@ -3584,7 +3603,7 @@ static void cfq_put_request(struct request *rq) | |||
3584 | BUG_ON(!cfqq->allocated[rw]); | 3603 | BUG_ON(!cfqq->allocated[rw]); |
3585 | cfqq->allocated[rw]--; | 3604 | cfqq->allocated[rw]--; |
3586 | 3605 | ||
3587 | put_io_context(RQ_CIC(rq)->ioc, cfqq->cfqd->queue); | 3606 | put_io_context(RQ_CIC(rq)->icq.ioc, cfqq->cfqd->queue); |
3588 | 3607 | ||
3589 | rq->elevator_private[0] = NULL; | 3608 | rq->elevator_private[0] = NULL; |
3590 | rq->elevator_private[1] = NULL; | 3609 | rq->elevator_private[1] = NULL; |
@@ -3598,7 +3617,7 @@ static void cfq_put_request(struct request *rq) | |||
3598 | } | 3617 | } |
3599 | 3618 | ||
3600 | static struct cfq_queue * | 3619 | static struct cfq_queue * |
3601 | cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic, | 3620 | cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic, |
3602 | struct cfq_queue *cfqq) | 3621 | struct cfq_queue *cfqq) |
3603 | { | 3622 | { |
3604 | cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); | 3623 | cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); |
@@ -3613,7 +3632,7 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic, | |||
3613 | * was the last process referring to said cfqq. | 3632 | * was the last process referring to said cfqq. |
3614 | */ | 3633 | */ |
3615 | static struct cfq_queue * | 3634 | static struct cfq_queue * |
3616 | split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) | 3635 | split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq) |
3617 | { | 3636 | { |
3618 | if (cfqq_process_refs(cfqq) == 1) { | 3637 | if (cfqq_process_refs(cfqq) == 1) { |
3619 | cfqq->pid = current->pid; | 3638 | cfqq->pid = current->pid; |
@@ -3636,7 +3655,7 @@ static int | |||
3636 | cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | 3655 | cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) |
3637 | { | 3656 | { |
3638 | struct cfq_data *cfqd = q->elevator->elevator_data; | 3657 | struct cfq_data *cfqd = q->elevator->elevator_data; |
3639 | struct cfq_io_context *cic; | 3658 | struct cfq_io_cq *cic; |
3640 | const int rw = rq_data_dir(rq); | 3659 | const int rw = rq_data_dir(rq); |
3641 | const bool is_sync = rq_is_sync(rq); | 3660 | const bool is_sync = rq_is_sync(rq); |
3642 | struct cfq_queue *cfqq; | 3661 | struct cfq_queue *cfqq; |
@@ -3644,14 +3663,14 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | |||
3644 | might_sleep_if(gfp_mask & __GFP_WAIT); | 3663 | might_sleep_if(gfp_mask & __GFP_WAIT); |
3645 | 3664 | ||
3646 | spin_lock_irq(q->queue_lock); | 3665 | spin_lock_irq(q->queue_lock); |
3647 | cic = cfq_get_io_context(cfqd, gfp_mask); | 3666 | cic = cfq_get_cic(cfqd, gfp_mask); |
3648 | if (!cic) | 3667 | if (!cic) |
3649 | goto queue_fail; | 3668 | goto queue_fail; |
3650 | 3669 | ||
3651 | new_queue: | 3670 | new_queue: |
3652 | cfqq = cic_to_cfqq(cic, is_sync); | 3671 | cfqq = cic_to_cfqq(cic, is_sync); |
3653 | if (!cfqq || cfqq == &cfqd->oom_cfqq) { | 3672 | if (!cfqq || cfqq == &cfqd->oom_cfqq) { |
3654 | cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); | 3673 | cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask); |
3655 | cic_set_cfqq(cic, cfqq, is_sync); | 3674 | cic_set_cfqq(cic, cfqq, is_sync); |
3656 | } else { | 3675 | } else { |
3657 | /* | 3676 | /* |
@@ -3677,7 +3696,7 @@ new_queue: | |||
3677 | cfqq->allocated[rw]++; | 3696 | cfqq->allocated[rw]++; |
3678 | 3697 | ||
3679 | cfqq->ref++; | 3698 | cfqq->ref++; |
3680 | rq->elevator_private[0] = cic; | 3699 | rq->elevator_private[0] = &cic->icq; |
3681 | rq->elevator_private[1] = cfqq; | 3700 | rq->elevator_private[1] = cfqq; |
3682 | rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg); | 3701 | rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg); |
3683 | spin_unlock_irq(q->queue_lock); | 3702 | spin_unlock_irq(q->queue_lock); |
@@ -3791,15 +3810,14 @@ static void cfq_exit_queue(struct elevator_queue *e) | |||
3791 | if (cfqd->active_queue) | 3810 | if (cfqd->active_queue) |
3792 | __cfq_slice_expired(cfqd, cfqd->active_queue, 0); | 3811 | __cfq_slice_expired(cfqd, cfqd->active_queue, 0); |
3793 | 3812 | ||
3794 | while (!list_empty(&cfqd->cic_list)) { | 3813 | while (!list_empty(&cfqd->icq_list)) { |
3795 | struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, | 3814 | struct io_cq *icq = list_entry(cfqd->icq_list.next, |
3796 | struct cfq_io_context, | 3815 | struct io_cq, q_node); |
3797 | queue_list); | 3816 | struct io_context *ioc = icq->ioc; |
3798 | struct io_context *ioc = cic->ioc; | ||
3799 | 3817 | ||
3800 | spin_lock(&ioc->lock); | 3818 | spin_lock(&ioc->lock); |
3801 | cfq_exit_cic(cic); | 3819 | cfq_exit_icq(icq); |
3802 | cfq_release_cic(cic); | 3820 | cfq_release_icq(icq); |
3803 | spin_unlock(&ioc->lock); | 3821 | spin_unlock(&ioc->lock); |
3804 | } | 3822 | } |
3805 | 3823 | ||
@@ -3904,7 +3922,7 @@ static void *cfq_init_queue(struct request_queue *q) | |||
3904 | cfqd->oom_cfqq.ref++; | 3922 | cfqd->oom_cfqq.ref++; |
3905 | cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); | 3923 | cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); |
3906 | 3924 | ||
3907 | INIT_LIST_HEAD(&cfqd->cic_list); | 3925 | INIT_LIST_HEAD(&cfqd->icq_list); |
3908 | 3926 | ||
3909 | cfqd->queue = q; | 3927 | cfqd->queue = q; |
3910 | 3928 | ||
@@ -3942,8 +3960,8 @@ static void cfq_slab_kill(void) | |||
3942 | */ | 3960 | */ |
3943 | if (cfq_pool) | 3961 | if (cfq_pool) |
3944 | kmem_cache_destroy(cfq_pool); | 3962 | kmem_cache_destroy(cfq_pool); |
3945 | if (cfq_ioc_pool) | 3963 | if (cfq_icq_pool) |
3946 | kmem_cache_destroy(cfq_ioc_pool); | 3964 | kmem_cache_destroy(cfq_icq_pool); |
3947 | } | 3965 | } |
3948 | 3966 | ||
3949 | static int __init cfq_slab_setup(void) | 3967 | static int __init cfq_slab_setup(void) |
@@ -3952,8 +3970,8 @@ static int __init cfq_slab_setup(void) | |||
3952 | if (!cfq_pool) | 3970 | if (!cfq_pool) |
3953 | goto fail; | 3971 | goto fail; |
3954 | 3972 | ||
3955 | cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0); | 3973 | cfq_icq_pool = KMEM_CACHE(cfq_io_cq, 0); |
3956 | if (!cfq_ioc_pool) | 3974 | if (!cfq_icq_pool) |
3957 | goto fail; | 3975 | goto fail; |
3958 | 3976 | ||
3959 | return 0; | 3977 | return 0; |