aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorVivek Goyal <vgoyal@redhat.com>2012-10-03 16:56:57 -0400
committerTejun Heo <tj@kernel.org>2013-01-09 11:05:09 -0500
commit4d2ceea4cb86060b03b2aa4826b365320bc78651 (patch)
tree2d848153c4feb146f603b3d2273f9a486febe992 /block
parent3bf10fea3ba3804090e82aa59fabf25f43fa74c2 (diff)
cfq-iosched: More renaming to better represent wl_class and wl_type
Some more renaming. Again making the code uniform w.r.t. the use of wl_class/class to represent IO class (RT, BE, IDLE) and using wl_type/type to represent subclass (SYNC, SYNC-IDLE, ASYNC). At places this patch shortens the string "workload" to "wl". Renamed "saved_workload" to "saved_wl_type". Renamed "saved_serving_class" to "saved_wl_class". For uniformity with "saved_wl_*" variables, renamed "serving_class" to "serving_wl_class" and renamed "serving_type" to "serving_wl_type". Again, just trying to improve upon code uniformity and improve readability. No functional change. v2: - Restored the usage of keyword "service" based on Jeff Moyer's feedback. Signed-off-by: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--block/cfq-iosched.c64
1 file changed, 33 insertions(+), 31 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7646dfddba03..8f890bfba8fd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -248,9 +248,9 @@ struct cfq_group {
248 struct cfq_rb_root service_trees[2][3]; 248 struct cfq_rb_root service_trees[2][3];
249 struct cfq_rb_root service_tree_idle; 249 struct cfq_rb_root service_tree_idle;
250 250
251 unsigned long saved_workload_slice; 251 unsigned long saved_wl_slice;
252 enum wl_type_t saved_workload; 252 enum wl_type_t saved_wl_type;
253 enum wl_class_t saved_serving_class; 253 enum wl_class_t saved_wl_class;
254 254
255 /* number of requests that are on the dispatch list or inside driver */ 255 /* number of requests that are on the dispatch list or inside driver */
256 int dispatched; 256 int dispatched;
@@ -280,8 +280,8 @@ struct cfq_data {
280 /* 280 /*
281 * The priority currently being served 281 * The priority currently being served
282 */ 282 */
283 enum wl_class_t serving_class; 283 enum wl_class_t serving_wl_class;
284 enum wl_type_t serving_type; 284 enum wl_type_t serving_wl_type;
285 unsigned long workload_expires; 285 unsigned long workload_expires;
286 struct cfq_group *serving_group; 286 struct cfq_group *serving_group;
287 287
@@ -1241,7 +1241,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1241 1241
1242 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); 1242 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1243 cfq_group_service_tree_del(st, cfqg); 1243 cfq_group_service_tree_del(st, cfqg);
1244 cfqg->saved_workload_slice = 0; 1244 cfqg->saved_wl_slice = 0;
1245 cfqg_stats_update_dequeue(cfqg); 1245 cfqg_stats_update_dequeue(cfqg);
1246} 1246}
1247 1247
@@ -1301,12 +1301,12 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1301 1301
1302 /* This group is being expired. Save the context */ 1302 /* This group is being expired. Save the context */
1303 if (time_after(cfqd->workload_expires, jiffies)) { 1303 if (time_after(cfqd->workload_expires, jiffies)) {
1304 cfqg->saved_workload_slice = cfqd->workload_expires 1304 cfqg->saved_wl_slice = cfqd->workload_expires
1305 - jiffies; 1305 - jiffies;
1306 cfqg->saved_workload = cfqd->serving_type; 1306 cfqg->saved_wl_type = cfqd->serving_wl_type;
1307 cfqg->saved_serving_class = cfqd->serving_class; 1307 cfqg->saved_wl_class = cfqd->serving_wl_class;
1308 } else 1308 } else
1309 cfqg->saved_workload_slice = 0; 1309 cfqg->saved_wl_slice = 0;
1310 1310
1311 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, 1311 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1312 st->min_vdisktime); 1312 st->min_vdisktime);
@@ -2031,7 +2031,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
2031{ 2031{
2032 if (cfqq) { 2032 if (cfqq) {
2033 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", 2033 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2034 cfqd->serving_class, cfqd->serving_type); 2034 cfqd->serving_wl_class, cfqd->serving_wl_type);
2035 cfqg_stats_update_avg_queue_size(cfqq->cfqg); 2035 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2036 cfqq->slice_start = 0; 2036 cfqq->slice_start = 0;
2037 cfqq->dispatch_start = jiffies; 2037 cfqq->dispatch_start = jiffies;
@@ -2118,8 +2118,8 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2118static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) 2118static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2119{ 2119{
2120 struct cfq_rb_root *service_tree = 2120 struct cfq_rb_root *service_tree =
2121 service_tree_for(cfqd->serving_group, cfqd->serving_class, 2121 service_tree_for(cfqd->serving_group, cfqd->serving_wl_class,
2122 cfqd->serving_type); 2122 cfqd->serving_wl_type);
2123 2123
2124 if (!cfqd->rq_queued) 2124 if (!cfqd->rq_queued)
2125 return NULL; 2125 return NULL;
@@ -2523,20 +2523,20 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2523 unsigned count; 2523 unsigned count;
2524 struct cfq_rb_root *st; 2524 struct cfq_rb_root *st;
2525 unsigned group_slice; 2525 unsigned group_slice;
2526 enum wl_class_t original_class = cfqd->serving_class; 2526 enum wl_class_t original_class = cfqd->serving_wl_class;
2527 2527
2528 /* Choose next priority. RT > BE > IDLE */ 2528 /* Choose next priority. RT > BE > IDLE */
2529 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) 2529 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2530 cfqd->serving_class = RT_WORKLOAD; 2530 cfqd->serving_wl_class = RT_WORKLOAD;
2531 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) 2531 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2532 cfqd->serving_class = BE_WORKLOAD; 2532 cfqd->serving_wl_class = BE_WORKLOAD;
2533 else { 2533 else {
2534 cfqd->serving_class = IDLE_WORKLOAD; 2534 cfqd->serving_wl_class = IDLE_WORKLOAD;
2535 cfqd->workload_expires = jiffies + 1; 2535 cfqd->workload_expires = jiffies + 1;
2536 return; 2536 return;
2537 } 2537 }
2538 2538
2539 if (original_class != cfqd->serving_class) 2539 if (original_class != cfqd->serving_wl_class)
2540 goto new_workload; 2540 goto new_workload;
2541 2541
2542 /* 2542 /*
@@ -2544,7 +2544,8 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2544 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload 2544 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2545 * expiration time 2545 * expiration time
2546 */ 2546 */
2547 st = service_tree_for(cfqg, cfqd->serving_class, cfqd->serving_type); 2547 st = service_tree_for(cfqg, cfqd->serving_wl_class,
2548 cfqd->serving_wl_type);
2548 count = st->count; 2549 count = st->count;
2549 2550
2550 /* 2551 /*
@@ -2555,9 +2556,10 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2555 2556
2556new_workload: 2557new_workload:
2557 /* otherwise select new workload type */ 2558 /* otherwise select new workload type */
2558 cfqd->serving_type = 2559 cfqd->serving_wl_type = cfq_choose_wl(cfqd, cfqg,
2559 cfq_choose_wl(cfqd, cfqg, cfqd->serving_class); 2560 cfqd->serving_wl_class);
2560 st = service_tree_for(cfqg, cfqd->serving_class, cfqd->serving_type); 2561 st = service_tree_for(cfqg, cfqd->serving_wl_class,
2562 cfqd->serving_wl_type);
2561 count = st->count; 2563 count = st->count;
2562 2564
2563 /* 2565 /*
@@ -2568,11 +2570,11 @@ new_workload:
2568 group_slice = cfq_group_slice(cfqd, cfqg); 2570 group_slice = cfq_group_slice(cfqd, cfqg);
2569 2571
2570 slice = group_slice * count / 2572 slice = group_slice * count /
2571 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_class], 2573 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2572 cfq_group_busy_queues_wl(cfqd->serving_class, cfqd, 2574 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
2573 cfqg)); 2575 cfqg));
2574 2576
2575 if (cfqd->serving_type == ASYNC_WORKLOAD) { 2577 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
2576 unsigned int tmp; 2578 unsigned int tmp;
2577 2579
2578 /* 2580 /*
@@ -2618,10 +2620,10 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
2618 cfqd->serving_group = cfqg; 2620 cfqd->serving_group = cfqg;
2619 2621
2620 /* Restore the workload type data */ 2622 /* Restore the workload type data */
2621 if (cfqg->saved_workload_slice) { 2623 if (cfqg->saved_wl_slice) {
2622 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice; 2624 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
2623 cfqd->serving_type = cfqg->saved_workload; 2625 cfqd->serving_wl_type = cfqg->saved_wl_type;
2624 cfqd->serving_class = cfqg->saved_serving_class; 2626 cfqd->serving_wl_class = cfqg->saved_wl_class;
2625 } else 2627 } else
2626 cfqd->workload_expires = jiffies - 1; 2628 cfqd->workload_expires = jiffies - 1;
2627 2629
@@ -3404,7 +3406,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3404 return true; 3406 return true;
3405 3407
3406 /* Allow preemption only if we are idling on sync-noidle tree */ 3408 /* Allow preemption only if we are idling on sync-noidle tree */
3407 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD && 3409 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3408 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && 3410 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3409 new_cfqq->service_tree->count == 2 && 3411 new_cfqq->service_tree->count == 2 &&
3410 RB_EMPTY_ROOT(&cfqq->sort_list)) 3412 RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -3456,7 +3458,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3456 * doesn't happen 3458 * doesn't happen
3457 */ 3459 */
3458 if (old_type != cfqq_type(cfqq)) 3460 if (old_type != cfqq_type(cfqq))
3459 cfqq->cfqg->saved_workload_slice = 0; 3461 cfqq->cfqg->saved_wl_slice = 0;
3460 3462
3461 /* 3463 /*
3462 * Put the new queue at the front of the of the current list, 3464 * Put the new queue at the front of the of the current list,