commit     167400d34070ebbc408dc0f447c4ddb4bf837360
tree       19100b0511a7e3e18f6f61d90a5fe5f1c40f59c4
parent     1f940bdfc0d03265d178d9dfd840d854819f797d
author     Justin TerAvest <teravest@google.com>  2011-03-12 10:54:00 -0500
committer  Jens Axboe <jaxboe@fusionio.com>       2011-03-12 10:54:00 -0500
blk-cgroup: Add unaccounted time to timeslice_used.
There are two kinds of time that tasks are not charged for: the first seek and the extra time used over the allocated timeslice. Both of these are exported as a new unaccounted_time stat.

I think it would be good to have this reported in 'time' as well, but that is probably a separate discussion.

Signed-off-by: Justin TerAvest <teravest@google.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
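As a rough illustration of how the new stat surfaces to userspace, here is a minimal sketch (not part of this patch) that dumps a group's blkio.unaccounted_time file. The mount point and the cgroup name below are assumptions for illustration only.

/*
 * Minimal userspace sketch: print the per-device unaccounted_time values
 * for one blkio cgroup. Path and group name are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	/* assumed mount point; depends on where the blkio controller is mounted */
	const char *path = "/sys/fs/cgroup/blkio/mygroup/blkio.unaccounted_time";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* each line should look like "major:minor <value>", as with the other blkio stat files */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}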
Diffstat (limited to 'block')
-rw-r--r--   block/blk-cgroup.c   | 16
-rw-r--r--   block/blk-cgroup.h   | 12
-rw-r--r--   block/cfq-iosched.c  | 21
-rw-r--r--   block/cfq.h          |  6
4 files changed, 41 insertions(+), 14 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 455768a3eb9..77ee3c1ec1a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -371,12 +371,14 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
 
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
+				unsigned long unaccounted_time)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&blkg->stats_lock, flags);
 	blkg->stats.time += time;
+	blkg->stats.unaccounted_time += unaccounted_time;
 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
@@ -603,6 +605,9 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
 	if (type == BLKIO_STAT_SECTORS)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
 					blkg->stats.sectors, cb, dev);
+	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.unaccounted_time, cb, dev);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
 		uint64_t sum = blkg->stats.avg_queue_size_sum;
@@ -1106,6 +1111,9 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
 	case BLKIO_PROP_sectors:
 		return blkio_read_blkg_stats(blkcg, cft, cb,
 					BLKIO_STAT_SECTORS, 0);
+	case BLKIO_PROP_unaccounted_time:
+		return blkio_read_blkg_stats(blkcg, cft, cb,
+					BLKIO_STAT_UNACCOUNTED_TIME, 0);
 	case BLKIO_PROP_io_service_bytes:
 		return blkio_read_blkg_stats(blkcg, cft, cb,
 					BLKIO_STAT_SERVICE_BYTES, 1);
@@ -1262,6 +1270,12 @@ struct cftype blkio_files[] = {
 		.read_map = blkiocg_file_read_map,
 	},
 	{
+		.name = "unaccounted_time",
+		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+				BLKIO_PROP_unaccounted_time),
+		.read_map = blkiocg_file_read_map,
+	},
+	{
 		.name = "io_service_bytes",
 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
 				BLKIO_PROP_io_service_bytes),
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 57e7234c5ae..10919fae2d3 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -49,6 +49,8 @@ enum stat_type {
 	/* All the single valued stats go below this */
 	BLKIO_STAT_TIME,
 	BLKIO_STAT_SECTORS,
+	/* Time not charged to this cgroup */
+	BLKIO_STAT_UNACCOUNTED_TIME,
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	BLKIO_STAT_AVG_QUEUE_SIZE,
 	BLKIO_STAT_IDLE_TIME,
@@ -81,6 +83,7 @@ enum blkcg_file_name_prop {
 	BLKIO_PROP_io_serviced,
 	BLKIO_PROP_time,
 	BLKIO_PROP_sectors,
+	BLKIO_PROP_unaccounted_time,
 	BLKIO_PROP_io_service_time,
 	BLKIO_PROP_io_wait_time,
 	BLKIO_PROP_io_merged,
@@ -114,6 +117,8 @@ struct blkio_group_stats {
 	/* total disk time and nr sectors dispatched by this group */
 	uint64_t time;
 	uint64_t sectors;
+	/* Time not charged to this cgroup */
+	uint64_t unaccounted_time;
 	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	/* Sum of number of IOs queued across all samples */
@@ -293,7 +298,8 @@ extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
 						void *key);
 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-					unsigned long time);
+					unsigned long time,
+					unsigned long unaccounted_time);
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
 					bool direction, bool sync);
 void blkiocg_update_completion_stats(struct blkio_group *blkg,
@@ -319,7 +325,9 @@ blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
 static inline struct blkio_group *
 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-						unsigned long time) {}
+						unsigned long time,
+						unsigned long unaccounted_time)
+{}
 static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 				uint64_t bytes, bool direction, bool sync) {}
 static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c826ef81c67..89e0d1cc14b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -899,7 +899,8 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
-static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+					unsigned int *unaccounted_time)
 {
 	unsigned int slice_used;
 
@@ -918,8 +919,13 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 				       1);
 	} else {
 		slice_used = jiffies - cfqq->slice_start;
-		if (slice_used > cfqq->allocated_slice)
+		if (slice_used > cfqq->allocated_slice) {
+			*unaccounted_time = slice_used - cfqq->allocated_slice;
 			slice_used = cfqq->allocated_slice;
+		}
+		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+			*unaccounted_time += cfqq->slice_start -
+					cfqq->dispatch_start;
 	}
 
 	return slice_used;
@@ -929,12 +935,12 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 				struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl, charge;
+	unsigned int used_sl, charge, unaccounted_sl = 0;
 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 			- cfqg->service_tree_idle.count;
 
 	BUG_ON(nr_sync < 0);
-	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
 
 	if (iops_mode(cfqd))
 		charge = cfqq->slice_dispatch;
@@ -960,7 +966,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
 			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
 			iops_mode(cfqd), cfqq->nr_sectors);
-	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
+					  unaccounted_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
@@ -3296,9 +3303,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 
 	cfq_service_tree_add(cfqd, cfqq, 1);
-
-	cfqq->slice_end = 0;
-	cfq_mark_cfqq_slice_new(cfqq);
+	__cfq_set_active_queue(cfqd, cfqq);
 }
 
 /*
diff --git a/block/cfq.h b/block/cfq.h
index 54a6d90f8e8..2a155927e37 100644
--- a/block/cfq.h
+++ b/block/cfq.h
@@ -16,9 +16,9 @@ static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 }
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			unsigned long time)
+			unsigned long time, unsigned long unaccounted_time)
 {
-	blkiocg_update_timeslice_used(blkg, time);
+	blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
 }
 
 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
@@ -85,7 +85,7 @@ static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 			unsigned long dequeue) {}
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			unsigned long time) {}
+			unsigned long time, unsigned long unaccounted_time) {}
 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 			bool direction, bool sync) {}
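For reference, the accounting rule that the cfq_cfqq_slice_usage() change implements can be sketched outside the kernel as below: time used beyond the allocated slice, plus the gap between the first dispatch and the start of the slice, goes into unaccounted time. This is an illustrative sketch with jiffies and the cfq_queue fields reduced to plain integers, not kernel code.

/*
 * Standalone sketch of the unaccounted-time rule from cfq_cfqq_slice_usage().
 * Field names mirror the cfq_queue members used in the patch.
 */
#include <stdio.h>

struct slice {
	unsigned long dispatch_start;	/* when the first request was dispatched */
	unsigned long slice_start;	/* when the timeslice actually started */
	unsigned long allocated_slice;	/* length the queue was allotted */
};

static unsigned int slice_usage(const struct slice *s, unsigned long now,
				unsigned int *unaccounted)
{
	unsigned int used = now - s->slice_start;

	*unaccounted = 0;
	/* time used beyond the allocated slice is not charged */
	if (used > s->allocated_slice) {
		*unaccounted = used - s->allocated_slice;
		used = s->allocated_slice;
	}
	/* the first-seek gap before the slice started is not charged either */
	if (s->slice_start > s->dispatch_start)
		*unaccounted += s->slice_start - s->dispatch_start;

	return used;
}

int main(void)
{
	struct slice s = { .dispatch_start = 100, .slice_start = 110,
			   .allocated_slice = 40 };
	unsigned int unaccounted;
	unsigned int used = slice_usage(&s, 160, &unaccounted);

	/* expect used=40 charged; unaccounted=10 overrun + 10 first seek = 20 */
	printf("used=%u unaccounted=%u\n", used, unaccounted);
	return 0;
}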