Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c | 39
1 file changed, 17 insertions, 22 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f2387b50f82e..9e8624e9e246 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -468,7 +468,7 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
-                                       struct io_context *ioc, struct bio *bio,
+                                       struct cfq_io_cq *cic, struct bio *bio,
                                        gfp_t gfp_mask);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
@@ -2560,7 +2560,7 @@ static void cfq_exit_icq(struct io_cq *icq)
         }
 }
 
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
 {
         struct task_struct *tsk = current;
         int ioprio_class;
@@ -2568,7 +2568,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
         if (!cfq_cfqq_prio_changed(cfqq))
                 return;
 
-        ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+        ioprio_class = IOPRIO_PRIO_CLASS(cic->icq.ioc->ioprio);
         switch (ioprio_class) {
         default:
                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2580,11 +2580,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
                 cfqq->ioprio_class = task_nice_ioclass(tsk);
                 break;
         case IOPRIO_CLASS_RT:
-                cfqq->ioprio = task_ioprio(ioc);
+                cfqq->ioprio = task_ioprio(cic->icq.ioc);
                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
                 break;
         case IOPRIO_CLASS_BE:
-                cfqq->ioprio = task_ioprio(ioc);
+                cfqq->ioprio = task_ioprio(cic->icq.ioc);
                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
                 break;
         case IOPRIO_CLASS_IDLE:
@@ -2613,8 +2613,8 @@ static void changed_ioprio(struct cfq_io_cq *cic, struct bio *bio)
         cfqq = cic->cfqq[BLK_RW_ASYNC];
         if (cfqq) {
                 struct cfq_queue *new_cfqq;
-                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
-                                         bio, GFP_ATOMIC);
+                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+                                         GFP_ATOMIC);
                 if (new_cfqq) {
                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
                         cfq_put_queue(cfqq);
@@ -2671,23 +2671,18 @@ static void changed_cgroup(struct cfq_io_cq *cic)
 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
-                     struct io_context *ioc, struct bio *bio, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+                     struct bio *bio, gfp_t gfp_mask)
 {
         struct blkio_cgroup *blkcg;
         struct cfq_queue *cfqq, *new_cfqq = NULL;
-        struct cfq_io_cq *cic;
         struct cfq_group *cfqg;
 
 retry:
         rcu_read_lock();
 
         blkcg = bio_blkio_cgroup(bio);
-
         cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
-
-        cic = cfq_cic_lookup(cfqd, ioc);
-        /* cic always exists here */
         cfqq = cic_to_cfqq(cic, is_sync);
 
         /*
@@ -2716,7 +2711,7 @@ retry:
 
         if (cfqq) {
                 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
-                cfq_init_prio_data(cfqq, ioc);
+                cfq_init_prio_data(cfqq, cic);
                 cfq_link_cfqq_cfqg(cfqq, cfqg);
                 cfq_log_cfqq(cfqd, cfqq, "alloced");
         } else
@@ -2746,11 +2741,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
               struct bio *bio, gfp_t gfp_mask)
 {
-        const int ioprio = task_ioprio(ioc);
-        const int ioprio_class = task_ioprio_class(ioc);
+        const int ioprio = task_ioprio(cic->icq.ioc);
+        const int ioprio_class = task_ioprio_class(cic->icq.ioc);
         struct cfq_queue **async_cfqq = NULL;
         struct cfq_queue *cfqq = NULL;
 
@@ -2760,7 +2755,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
         }
 
         if (!cfqq)
-                cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, bio, gfp_mask);
+                cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
 
         /*
          * pin the queue now that it's allocated, scheduler exit will prune it
@@ -3030,7 +3025,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
         struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
         cfq_log_cfqq(cfqd, cfqq, "insert_request");
-        cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+        cfq_init_prio_data(cfqq, RQ_CIC(rq));
 
         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
         list_add_tail(&rq->queuelist, &cfqq->fifo);
@@ -3234,7 +3229,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
 
         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
         if (cfqq) {
-                cfq_init_prio_data(cfqq, cic->icq.ioc);
+                cfq_init_prio_data(cfqq, cic);
 
                 return __cfq_may_queue(cfqq);
         }
@@ -3326,7 +3321,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
 new_queue:
         cfqq = cic_to_cfqq(cic, is_sync);
         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-                cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, bio, gfp_mask);
+                cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
                 cic_set_cfqq(cic, cfqq, is_sync);
         } else {
                 /*
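
Note on the pattern (editor's sketch, not part of the patch): every hunk above makes the same conversion — helpers such as cfq_get_queue(), cfq_find_alloc_queue() and cfq_init_prio_data() stop taking a raw struct io_context * and instead take the enclosing struct cfq_io_cq *, which callers like cfq_set_request() already hold; the io_context remains reachable as cic->icq.ioc, and cfq_find_alloc_queue() drops its cfq_cic_lookup() call entirely. A minimal stand-alone C sketch of the same refactoring follows; the types here are simplified mock-ups of the embedding relationship, not the kernel definitions.

#include <stdio.h>

/* Simplified stand-ins mirroring the embedding used by the patch:
 * cfq_io_cq embeds io_cq, which points at the io_context. */
struct io_context { int ioprio; };
struct io_cq { struct io_context *ioc; };
struct cfq_io_cq { struct io_cq icq; };

/* Before: the helper takes the io_context directly, so every
 * caller must first dig it out of the cic it already has. */
static void init_prio_old(struct io_context *ioc)
{
        printf("old: ioprio=%d\n", ioc->ioprio);
}

/* After: the helper takes the cfq_io_cq and derives the
 * io_context itself via cic->icq.ioc, as the patched code does. */
static void init_prio_new(struct cfq_io_cq *cic)
{
        printf("new: ioprio=%d\n", cic->icq.ioc->ioprio);
}

int main(void)
{
        struct io_context ioc = { .ioprio = 4 };
        struct cfq_io_cq cic = { .icq = { .ioc = &ioc } };

        init_prio_old(cic.icq.ioc);     /* caller unwraps the member */
        init_prio_new(&cic);            /* caller passes the object   */
        return 0;
}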