 block/blk-core.c       |  1 +
 block/cfq-iosched.c    | 28 +++++++++++-----------------
 block/elevator.c       |  2 --
 include/linux/blkdev.h | 10 ++++++++--
 4 files changed, 20 insertions(+), 21 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6804fdf27eff..3c26c7f48703 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -497,6 +497,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
                     laptop_mode_timer_fn, (unsigned long) q);
         setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
         INIT_LIST_HEAD(&q->timeout_list);
+        INIT_LIST_HEAD(&q->icq_list);
         INIT_LIST_HEAD(&q->flush_queue[0]);
         INIT_LIST_HEAD(&q->flush_queue[1]);
         INIT_LIST_HEAD(&q->flush_data_in_flight);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d2f16fcdec7f..9bc5ecc1b336 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,9 +54,9 @@ static const int cfq_hist_divisor = 4;
 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
 
-#define RQ_CIC(rq)              icq_to_cic((rq)->elevator_private[0])
-#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private[1])
-#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private[2])
+#define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
+#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
+#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_icq_pool;
@@ -297,8 +297,6 @@ struct cfq_data {
         unsigned int cfq_group_idle;
         unsigned int cfq_latency;
 
-        struct list_head icq_list;
-
         /*
          * Fallback dummy cfqq for extreme OOM conditions
          */
@@ -3053,7 +3051,7 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
         ret = radix_tree_insert(&ioc->icq_tree, q->id, icq);
         if (likely(!ret)) {
                 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
-                list_add(&icq->q_node, &cfqd->icq_list);
+                list_add(&icq->q_node, &q->icq_list);
                 icq = NULL;
         } else if (ret == -EEXIST) {
                 /* someone else already did it */
@@ -3605,12 +3603,10 @@ static void cfq_put_request(struct request *rq)
 
         put_io_context(RQ_CIC(rq)->icq.ioc, cfqq->cfqd->queue);
 
-        rq->elevator_private[0] = NULL;
-        rq->elevator_private[1] = NULL;
-
         /* Put down rq reference on cfqg */
         cfq_put_cfqg(RQ_CFQG(rq));
-        rq->elevator_private[2] = NULL;
+        rq->elv.priv[0] = NULL;
+        rq->elv.priv[1] = NULL;
 
         cfq_put_queue(cfqq);
 }
@@ -3696,9 +3692,9 @@ new_queue:
         cfqq->allocated[rw]++;
 
         cfqq->ref++;
-        rq->elevator_private[0] = &cic->icq;
-        rq->elevator_private[1] = cfqq;
-        rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
+        rq->elv.icq = &cic->icq;
+        rq->elv.priv[0] = cfqq;
+        rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
         spin_unlock_irq(q->queue_lock);
         return 0;
 
@@ -3810,8 +3806,8 @@ static void cfq_exit_queue(struct elevator_queue *e)
         if (cfqd->active_queue)
                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
-        while (!list_empty(&cfqd->icq_list)) {
-                struct io_cq *icq = list_entry(cfqd->icq_list.next,
+        while (!list_empty(&q->icq_list)) {
+                struct io_cq *icq = list_entry(q->icq_list.next,
                                                struct io_cq, q_node);
                 struct io_context *ioc = icq->ioc;
 
@@ -3922,8 +3918,6 @@ static void *cfq_init_queue(struct request_queue *q)
         cfqd->oom_cfqq.ref++;
         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
-        INIT_LIST_HEAD(&cfqd->icq_list);
-
         cfqd->queue = q;
 
         init_timer(&cfqd->idle_slice_timer);
diff --git a/block/elevator.c b/block/elevator.c
index 31ffe76aed3d..c5c6214829cb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -745,8 +745,6 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 
         if (e->type->ops.elevator_set_req_fn)
                 return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
-
-        rq->elevator_private[0] = NULL;
         return 0;
 }
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 65c2f8c70089..8bca04873f53 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -111,10 +111,14 @@ struct request {
          * Three pointers are available for the IO schedulers, if they need
          * more they have to dynamically allocate it. Flush requests are
          * never put on the IO scheduler. So let the flush fields share
-         * space with the three elevator_private pointers.
+         * space with the elevator data.
          */
         union {
-                void *elevator_private[3];
+                struct {
+                        struct io_cq            *icq;
+                        void                    *priv[2];
+                } elv;
+
                 struct {
                         unsigned int            seq;
                         struct list_head        list;
@@ -357,6 +361,8 @@ struct request_queue {
         struct timer_list       timeout;
         struct list_head        timeout_list;
 
+        struct list_head        icq_list;
+
         struct queue_limits     limits;
 
         /*
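
For illustration only (not part of the commit): the patch replaces the anonymous elevator_private[3] array in struct request with a named elv member, so the io_cq pointer gets its own field and the scheduler keeps two opaque priv slots, while the per-queue icq list moves from cfq_data into request_queue. Below is a minimal, self-contained C sketch of the new request-side layout and how the set_request/put_request paths touch it; the types here are simplified stand-ins, not the real kernel definitions.

/* Illustrative sketch only -- simplified stand-ins, not the kernel types. */
#include <stdio.h>
#include <string.h>

struct io_cq { int dummy; };            /* placeholder for the real io_cq */
struct cfq_queue { int ref; };          /* placeholder for the real cfq_queue */

struct request_like {
        union {
                /* mirrors the new rq->elv fields from this patch */
                struct {
                        struct io_cq    *icq;           /* was elevator_private[0] */
                        void            *priv[2];       /* was elevator_private[1..2] */
                } elv;
                /* flush bookkeeping shares the same space, as before */
                struct {
                        unsigned int    seq;
                        void            *list;          /* stand-in for struct list_head */
                } flush;
        };
};

int main(void)
{
        struct request_like rq;
        struct io_cq icq = { 0 };
        struct cfq_queue cfqq = { 1 };

        memset(&rq, 0, sizeof(rq));

        /* set_request path: attach the icq and the scheduler-private pointers */
        rq.elv.icq = &icq;
        rq.elv.priv[0] = &cfqq;

        /* put_request path in the patched cfq: only the priv[] slots are cleared */
        rq.elv.priv[0] = NULL;
        rq.elv.priv[1] = NULL;

        printf("icq set: %d, priv[0] cleared: %d\n",
               rq.elv.icq != NULL, rq.elv.priv[0] == NULL);
        return 0;
}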