author	Vivek Goyal <vgoyal@redhat.com>	2009-12-03 12:59:45 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2009-12-03 13:28:52 -0500
commit	dae739ebc4c590630039533a5bbd05865966094f (patch)
tree	005f98ed4c4302ea71b48ad6a074fa6ff714df4a /block
parent	58ff82f34cded3812af5b6c69b6aa626b6be2490 (diff)
blkio: Group time used accounting and workload context save restore
o This patch introduces the functionality to do the accounting of group time
  when a queue expires. This time used decides which is the group to go next.

o Also introduce the functionality to save and restore the workload type
  context within a group. It might happen that once we expire the cfq queue
  and the group, a different group will schedule in and we will lose the
  context of the workload type. Hence save and restore it upon queue expiry.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
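In short: when the active queue expires, the owning group is charged for the disk time it actually consumed, scaled by the group's weight, and is repositioned on the group service tree accordingly; the group with the smallest virtual disk time is served next. For orientation, the weight scaling applied by cfq_scale_slice() (a helper introduced earlier in this series; the body below is a sketch of its shape, with CFQ_SERVICE_SHIFT and BLKIO_WEIGHT_DEFAULT taken as given from those patches) looks roughly like:

	/*
	 * Sketch: convert used wall-clock slice into vdisktime. A group
	 * with twice the weight accrues virtual disk time at half the
	 * rate, so it is picked off the group service tree twice as often.
	 */
	static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
	{
		u64 d = delta << CFQ_SERVICE_SHIFT;

		d = d * BLKIO_WEIGHT_DEFAULT;
		do_div(d, cfqg->weight);	/* scale down by group weight */
		return d;
	}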
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	79
1 file changed, 79 insertions(+), 0 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 84887e2eb21..55d2a21f7f0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -115,6 +115,10 @@ struct cfq_queue {
 	/* fifo list of requests in sort_list */
 	struct list_head fifo;
 
+	/* time when queue got scheduled in to dispatch first request. */
+	unsigned long dispatch_start;
+	/* time when first request from queue completed and slice started. */
+	unsigned long slice_start;
 	unsigned long slice_end;
 	long slice_resid;
 	unsigned int slice_dispatch;
@@ -181,6 +185,10 @@ struct cfq_group {
 	 */
 	struct cfq_rb_root service_trees[2][3];
 	struct cfq_rb_root service_tree_idle;
+
+	unsigned long saved_workload_slice;
+	enum wl_type_t saved_workload;
+	enum wl_prio_t saved_serving_prio;
 };
 
 /*
@@ -543,6 +551,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 						    low_slice);
 		}
 	}
+	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
 	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 }
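Note the division of labour between the two new timestamps: dispatch_start is stamped when the queue is scheduled in (see __cfq_set_active_queue() below), while slice_start is stamped here, once the slice is actually sized. The gap between the two is seek/setup time, which cfq_cfqq_slice_usage() (added below) makes sure is still charged to the group:

	/*
	 * One queue activation as seen by the new accounting (sketch):
	 *
	 *  set_active         first completion               expiry
	 *      |                     |                           |
	 *  dispatch_start       slice_start ...... slice_end     |
	 *      |<---- seek time --->|<---- allocated slice ----->|
	 */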
@@ -818,6 +827,58 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
+	cfqg->saved_workload_slice = 0;
+}
+
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
+{
+	unsigned int slice_used, allocated_slice;
+
+	/*
+	 * Queue got expired before even a single request completed or
+	 * got expired immediately after first request completion.
+	 */
+	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
+		/*
+		 * Also charge the seek time incurred to the group, otherwise
+		 * if there are multiple queues in the group, each can dispatch
+		 * a single request on seeky media and cause lots of seek time
+		 * and the group will never know it.
+		 */
+		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
+					1);
+	} else {
+		slice_used = jiffies - cfqq->slice_start;
+		allocated_slice = cfqq->slice_end - cfqq->slice_start;
+		if (slice_used > allocated_slice)
+			slice_used = allocated_slice;
+	}
+
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
+	return slice_used;
+}
+
+static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
+				struct cfq_queue *cfqq)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+	unsigned int used_sl;
+
+	used_sl = cfq_cfqq_slice_usage(cfqq);
+
+	/* Can't update vdisktime while group is on service tree */
+	cfq_rb_erase(&cfqg->rb_node, st);
+	cfqg->vdisktime += cfq_scale_slice(used_sl, cfqg);
+	__cfq_group_service_tree_add(st, cfqg);
+
+	/* This group is being expired. Save the context */
+	if (time_after(cfqd->workload_expires, jiffies)) {
+		cfqg->saved_workload_slice = cfqd->workload_expires
+						- jiffies;
+		cfqg->saved_workload = cfqd->serving_type;
+		cfqg->saved_serving_prio = cfqd->serving_prio;
+	} else
+		cfqg->saved_workload_slice = 0;
 }
 
 /*
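A worked example of the two branches of cfq_cfqq_slice_usage(): if a queue goes active at jiffies=1000 and is expired at jiffies=1040 before its first request completes, slice_start is still 0, so the group is charged the 40 jiffies spent seeking rather than 0. If instead the first request completes at jiffies=1010 (slice_start=1010, slice_end=1110) and expiry comes late at jiffies=1130, the charge is clamped to the 100 jiffies actually allocated. The same rules restated as a hypothetical self-contained helper (a sketch for illustration, not kernel code; the implicit jiffies read is passed in as 'now'):

	static unsigned long slice_usage(unsigned long now,
					 unsigned long dispatch_start,
					 unsigned long slice_start,
					 unsigned long slice_end)
	{
		unsigned long used, allocated;

		/* Expired before (or exactly at) first completion:
		 * charge at least one jiffy of seek time. */
		if (!slice_start || slice_start == now) {
			used = now - dispatch_start;
			return used ? used : 1;
		}

		used = now - slice_start;
		allocated = slice_end - slice_start;
		return used > allocated ? allocated : used;	/* clamp */
	}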
@@ -833,6 +894,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	unsigned long rb_key;
 	struct cfq_rb_root *service_tree;
 	int left;
+	int new_cfqq = 1;
 
 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
 						cfqq_type(cfqq), cfqd);
@@ -861,6 +923,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
+		new_cfqq = 0;
 		/*
 		 * same position, nothing more to do
 		 */
@@ -902,6 +965,8 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	rb_link_node(&cfqq->rb_node, parent, p);
 	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
 	service_tree->count++;
+	if (add_front || !new_cfqq)
+		return;
 	cfq_group_service_tree_add(cfqd, cfqq->cfqg);
 }
 
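The add_front/new_cfqq guard above keeps the group bookkeeping honest: cfq_group_service_tree_add() should run only when a queue genuinely joins the group's service, not when an existing cfqq is merely repositioned on its service tree (new_cfqq == 0) or re-added at the front for preemption (add_front); otherwise the group's queue count on the group service tree would be inflated.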
@@ -1218,6 +1283,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 {
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active");
+		cfqq->slice_start = 0;
+		cfqq->dispatch_start = jiffies;
 		cfqq->slice_end = 0;
 		cfqq->slice_dispatch = 0;
 
@@ -1255,6 +1322,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 	}
 
+	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
+
 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
 
@@ -1263,6 +1332,9 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
 
+	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
+		cfqd->grp_service_tree.active = NULL;
+
 	if (cfqd->active_cic) {
 		put_io_context(cfqd->active_cic->ioc);
 		cfqd->active_cic = NULL;
@@ -1747,6 +1819,13 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
 	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
 
 	cfqd->serving_group = cfqg;
+
+	/* Restore the workload type data */
+	if (cfqg->saved_workload_slice) {
+		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
+		cfqd->serving_type = cfqg->saved_workload;
+		cfqd->serving_prio = cfqg->saved_serving_prio;
+	}
 	choose_service_tree(cfqd, cfqg);
 }
 
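One subtlety in the restore path: the remaining workload slice is saved as a delta (workload_expires - jiffies) rather than as an absolute expiry time, because jiffies keeps advancing while the group is off the service tree; re-arming with jiffies + saved_workload_slice hands the group the unused remainder of its workload slice instead of a slice that looks already expired. saved_workload_slice also doubles as the validity flag: cfq_group_served() stores 0 when the slice had already run out, in which case the restore is skipped and choose_service_tree() starts a fresh workload. The save/restore pair, condensed (field names as in the patch):

	/* save, in cfq_group_served(): keep only the unexpired remainder */
	if (time_after(cfqd->workload_expires, jiffies))
		cfqg->saved_workload_slice = cfqd->workload_expires - jiffies;
	else
		cfqg->saved_workload_slice = 0;	/* context no longer valid */

	/* restore, in cfq_choose_cfqg(): re-arm relative to the new now */
	if (cfqg->saved_workload_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
	}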