Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--   block/cfq-iosched.c   81
1 file changed, 64 insertions(+), 17 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f127cfb2e92..ed897b5ef315 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -55,6 +55,7 @@ static const int cfq_hist_divisor = 4;
 #define RQ_CIC(rq)              \
         ((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
+#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private3)
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
@@ -143,8 +144,6 @@ struct cfq_queue {
         struct cfq_queue *new_cfqq;
         struct cfq_group *cfqg;
         struct cfq_group *orig_cfqg;
-        /* Sectors dispatched in current dispatch round */
-        unsigned long nr_sectors;
 };
 
 /*
@@ -346,7 +345,7 @@ CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
 
-#ifdef CONFIG_DEBUG_CFQ_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
@@ -858,7 +857,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
         if (!RB_EMPTY_NODE(&cfqg->rb_node))
                 cfq_rb_erase(&cfqg->rb_node, st);
         cfqg->saved_workload_slice = 0;
-        blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
+        blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -884,8 +883,7 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
                 slice_used = cfqq->allocated_slice;
         }
 
-        cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
-                                cfqq->nr_sectors);
+        cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
         return slice_used;
 }
 
@@ -919,8 +917,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                         st->min_vdisktime);
-        blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
-                                                cfqq->nr_sectors);
+        blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+        blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -961,7 +959,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
         if (!cfqg)
                 goto done;
 
-        cfqg->weight = blkcg->weight;
         for_each_cfqg_st(cfqg, i, j, st)
                 *st = CFQ_RB_ROOT;
         RB_CLEAR_NODE(&cfqg->rb_node);
@@ -978,6 +975,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
         sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
         blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
                                         MKDEV(major, minor));
+        cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
         /* Add group on cfqd list */
         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
@@ -1004,6 +1002,12 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
         return cfqg;
 }
 
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+        atomic_inc(&cfqg->ref);
+        return cfqg;
+}
+
 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 {
         /* Currently, all async queues are mapped to root group */
@@ -1087,6 +1091,12 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 {
         return &cfqd->root_group;
 }
+
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+        return cfqg;
+}
+
 static inline void
 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
         cfqq->cfqg = cfqg;
@@ -1389,7 +1399,12 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
         elv_rb_del(&cfqq->sort_list, rq);
         cfqq->queued[rq_is_sync(rq)]--;
+        blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+                                                rq_is_sync(rq));
         cfq_add_rq_rb(rq);
+        blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+                        &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
+                        rq_is_sync(rq));
 }
 
 static struct request *
@@ -1445,6 +1460,8 @@ static void cfq_remove_request(struct request *rq)
         cfq_del_rq_rb(rq);
 
         cfqq->cfqd->rq_queued--;
+        blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+                                                rq_is_sync(rq));
         if (rq_is_meta(rq)) {
                 WARN_ON(!cfqq->meta_pending);
                 cfqq->meta_pending--;
@@ -1476,6 +1493,13 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
         }
 }
 
+static void cfq_bio_merged(struct request_queue *q, struct request *req,
+                           struct bio *bio)
+{
+        blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
+                                        cfq_bio_sync(bio));
+}
+
 static void
 cfq_merged_requests(struct request_queue *q, struct request *rq,
                     struct request *next)
@@ -1493,6 +1517,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         if (cfqq->next_rq == next)
                 cfqq->next_rq = rq;
         cfq_remove_request(next);
+        blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
+                                        rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1520,18 +1546,24 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
         return cfqq == RQ_CFQQ(rq);
 }
 
+static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+        del_timer(&cfqd->idle_slice_timer);
+        blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+}
+
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
                                    struct cfq_queue *cfqq)
 {
         if (cfqq) {
                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
                                 cfqd->serving_prio, cfqd->serving_type);
+                blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
                 cfqq->slice_start = 0;
                 cfqq->dispatch_start = jiffies;
                 cfqq->allocated_slice = 0;
                 cfqq->slice_end = 0;
                 cfqq->slice_dispatch = 0;
-                cfqq->nr_sectors = 0;
 
                 cfq_clear_cfqq_wait_request(cfqq);
                 cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1539,7 +1571,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                 cfq_clear_cfqq_fifo_expire(cfqq);
                 cfq_mark_cfqq_slice_new(cfqq);
 
-                del_timer(&cfqd->idle_slice_timer);
+                cfq_del_timer(cfqd, cfqq);
         }
 
         cfqd->active_queue = cfqq;
@@ -1555,7 +1587,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
         if (cfq_cfqq_wait_request(cfqq))
-                del_timer(&cfqd->idle_slice_timer);
+                cfq_del_timer(cfqd, cfqq);
 
         cfq_clear_cfqq_wait_request(cfqq);
         cfq_clear_cfqq_wait_busy(cfqq);
@@ -1857,6 +1889,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                 sl = cfqd->cfq_slice_idle;
 
         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+        blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1876,7 +1909,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
         elv_dispatch_sort(q, rq);
 
         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-        cfqq->nr_sectors += blk_rq_sectors(rq);
+        blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+                                        rq_data_dir(rq), rq_is_sync(rq));
 }
 
 /*
@@ -3185,11 +3219,14 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 if (cfq_cfqq_wait_request(cfqq)) {
                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
                             cfqd->busy_queues > 1) {
-                                del_timer(&cfqd->idle_slice_timer);
+                                cfq_del_timer(cfqd, cfqq);
                                 cfq_clear_cfqq_wait_request(cfqq);
                                 __blk_run_queue(cfqd->queue);
-                        } else
+                        } else {
+                                blkiocg_update_idle_time_stats(
+                                                &cfqq->cfqg->blkg);
                                 cfq_mark_cfqq_must_dispatch(cfqq);
+                        }
                 }
         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                 /*
@@ -3214,7 +3251,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
         list_add_tail(&rq->queuelist, &cfqq->fifo);
         cfq_add_rq_rb(rq);
-
+        blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+                        &cfqd->serving_group->blkg, rq_data_dir(rq),
+                        rq_is_sync(rq));
         cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
@@ -3300,6 +3339,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         WARN_ON(!cfqq->dispatched);
         cfqd->rq_in_driver--;
         cfqq->dispatched--;
+        blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
+                        rq_io_start_time_ns(rq), rq_data_dir(rq),
+                        rq_is_sync(rq));
 
         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3440,6 +3482,10 @@ static void cfq_put_request(struct request *rq)
                 rq->elevator_private = NULL;
                 rq->elevator_private2 = NULL;
 
+                /* Put down rq reference on cfqg */
+                cfq_put_cfqg(RQ_CFQG(rq));
+                rq->elevator_private3 = NULL;
+
                 cfq_put_queue(cfqq);
         }
 }
@@ -3528,6 +3574,7 @@ new_queue:
 
         rq->elevator_private = cic;
         rq->elevator_private2 = cfqq;
+        rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
         return 0;
 
 queue_fail:
@@ -3743,7 +3790,6 @@ static void *cfq_init_queue(struct request_queue *q)
          * second, in order to have larger depth for async operations.
          */
         cfqd->last_delayed_sync = jiffies - HZ;
-        INIT_RCU_HEAD(&cfqd->rcu);
         return cfqd;
 }
 
@@ -3872,6 +3918,7 @@ static struct elevator_type iosched_cfq = {
                 .elevator_merged_fn =           cfq_merged_request,
                 .elevator_merge_req_fn =        cfq_merged_requests,
                 .elevator_allow_merge_fn =      cfq_allow_merge,
+                .elevator_bio_merged_fn =       cfq_bio_merged,
                 .elevator_dispatch_fn =         cfq_dispatch_requests,
                 .elevator_add_req_fn =          cfq_insert_request,
                 .elevator_activate_req_fn =     cfq_activate_request,