author		Tejun Heo <tj@kernel.org>	2012-03-05 16:15:17 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:23 -0500
commit		c1768268f9424410761da57ea71107acae7b03cc (patch)
tree		be6a534b1a15ab9df9f23e585b039776c5a5e498 /block/cfq-iosched.c
parent		549d3aa872cd1aec1ee540fd93afd9611faa0def (diff)
blkcg: don't use blkg->plid in stat related functions
blkg is scheduled to be unified for all policies, and thus there won't be
a one-to-one mapping from blkg to policy.  Update stat related functions
to take explicit @pol or @plid arguments instead of using blkg->plid.

This is painful for now, but most of the specific stat interface
functions will be replaced with a handful of generic helpers.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
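To make the new calling convention concrete, here is a minimal userspace sketch of the pattern this patch applies throughout cfq-iosched.c: rather than the stat helper deriving the policy from blkg->plid, each caller now passes the policy explicitly (in the real code, &blkio_policy_cfq). The struct and function definitions below are hypothetical stand-ins for illustration only, not the kernel's blk-cgroup types:

#include <stdio.h>

/* Hypothetical stand-ins for struct blkio_group and struct
 * blkio_policy_type; the real definitions live in blk-cgroup.h. */
struct blkio_policy { const char *name; };
struct blkio_group { struct blkio_policy *plid; /* scheduled to go away */ };

/* Old convention: the helper digs the policy out of blkg->plid,
 * which assumes a one-to-one blkg-to-policy mapping. */
static void update_dequeue_stats_old(struct blkio_group *blkg, int count)
{
	printf("%s: dequeue += %d\n", blkg->plid->name, count);
}

/* New convention: the caller names the policy explicitly, so one
 * unified blkg can carry stats for several policies at once. */
static void update_dequeue_stats(struct blkio_group *blkg,
				 struct blkio_policy *pol, int count)
{
	printf("%s: dequeue += %d\n", pol->name, count);
}

int main(void)
{
	struct blkio_policy blkio_policy_cfq = { "blkio_policy_cfq" };
	struct blkio_group blkg = { &blkio_policy_cfq };

	update_dequeue_stats_old(&blkg, 1);		   /* pre-patch style */
	update_dequeue_stats(&blkg, &blkio_policy_cfq, 1); /* post-patch style */
	return 0;
}

Once every caller names its policy this way, blkg->plid can be dropped and a single blkg can be shared by all policies, which is the unification the commit message refers to.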
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	44
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 86980023339a..11dd9d7f2edb 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -945,7 +945,8 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfq_group_service_tree_del(st, cfqg);
 	cfqg->saved_workload_slice = 0;
-	cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg), 1);
+	cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg),
+					 &blkio_policy_cfq, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1017,9 +1018,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1017 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", 1018 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1018 used_sl, cfqq->slice_dispatch, charge, 1019 used_sl, cfqq->slice_dispatch, charge,
1019 iops_mode(cfqd), cfqq->nr_sectors); 1020 iops_mode(cfqd), cfqq->nr_sectors);
1020 cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), used_sl, 1021 cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), &blkio_policy_cfq,
1021 unaccounted_sl); 1022 used_sl, unaccounted_sl);
1022 cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg)); 1023 cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg), &blkio_policy_cfq);
1023} 1024}
1024 1025
1025/** 1026/**
@@ -1463,9 +1464,11 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
 	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					rq_data_dir(rq), rq_is_sync(rq));
+					   &blkio_policy_cfq, rq_data_dir(rq),
+					   rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
 	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+					&blkio_policy_cfq,
 					cfqg_to_blkg(cfqq->cfqd->serving_group),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -1524,7 +1527,8 @@ static void cfq_remove_request(struct request *rq)
 
 	cfqq->cfqd->rq_queued--;
 	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					rq_data_dir(rq), rq_is_sync(rq));
+					   &blkio_policy_cfq, rq_data_dir(rq),
+					   rq_is_sync(rq));
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
 		cfqq->prio_pending--;
@@ -1560,7 +1564,8 @@ static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			  struct bio *bio)
 {
 	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
-					bio_data_dir(bio), cfq_bio_sync(bio));
+					   &blkio_policy_cfq, bio_data_dir(bio),
+					   cfq_bio_sync(bio));
 }
 
 static void
@@ -1583,7 +1588,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	cfqq->next_rq = rq;
 	cfq_remove_request(next);
 	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					rq_data_dir(next), rq_is_sync(next));
+					   &blkio_policy_cfq, rq_data_dir(next),
+					   rq_is_sync(next));
 
 	cfqq = RQ_CFQQ(next);
 	/*
@@ -1624,7 +1630,8 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
+	cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
+					   &blkio_policy_cfq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1633,7 +1640,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg));
+		cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg),
+							&blkio_policy_cfq);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1981,7 +1989,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
+	cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
+					       &blkio_policy_cfq);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
 			group_idle ? 1 : 0);
 }
@@ -2005,8 +2014,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
 	cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
-					blk_rq_bytes(rq), rq_data_dir(rq),
-					rq_is_sync(rq));
+					  &blkio_policy_cfq, blk_rq_bytes(rq),
+					  rq_data_dir(rq), rq_is_sync(rq));
 }
 
 /*
@@ -3094,7 +3103,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
-					cfqg_to_blkg(cfqq->cfqg));
+					cfqg_to_blkg(cfqq->cfqg),
+					&blkio_policy_cfq);
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
 	}
@@ -3122,6 +3132,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
 	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+					&blkio_policy_cfq,
 					cfqg_to_blkg(cfqd->serving_group),
 					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -3220,8 +3231,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
 	cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
-			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
-			rq_data_dir(rq), rq_is_sync(rq));
+			&blkio_policy_cfq, rq_start_time_ns(rq),
+			rq_io_start_time_ns(rq), rq_data_dir(rq),
+			rq_is_sync(rq));
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 