author	Vivek Goyal <vgoyal@redhat.com>	2012-10-03 16:56:56 -0400
committer	Tejun Heo <tj@kernel.org>	2013-01-09 11:05:08 -0500
commit	3bf10fea3ba3804090e82aa59fabf25f43fa74c2
tree	ac129e9bc5655f9ae925db7093e6e7bc60f77737 /block/cfq-iosched.c
parent	d1c3ed669a2d452cacfb48c2d171a1f364dae2ed
cfq-iosched: Properly name all references to IO class
Currently CFQ has three IO classes: RT, BE and IDLE. In many places, workloads belonging to these classes are referred to as "prio", which gets confusing because one starts to associate it with ioprio.

So this patch does a bunch of renaming to make the code easier to read. All references to the RT, BE and IDLE workloads now use the keyword "class", and all references to the subclasses SYNC, SYNC-NOIDLE and ASYNC use the keyword "type".

This makes me feel much better while reading the code. There is no functionality change in this patch.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
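To make the two renamed axes concrete, here is a minimal user-space sketch of the [class][type] lookup that service_tree_for() performs after this patch. The enum names and values mirror the diff below; rb_root_stub, group_stub and tree_for() are hypothetical stand-ins for the kernel's cfq_rb_root, cfq_group and service_tree_for(), not actual kernel code:

#include <stdio.h>

/* The "class" axis: which IO class a queue belongs to (IDLE sits apart). */
enum wl_class_t { BE_WORKLOAD = 0, RT_WORKLOAD = 1, IDLE_WORKLOAD = 2 };

/* The "type" axis: the subclass within a class. */
enum wl_type_t { ASYNC_WORKLOAD = 0, SYNC_NOIDLE_WORKLOAD = 1, SYNC_WORKLOAD = 2 };

/* Hypothetical stand-ins for the kernel's cfq_rb_root and cfq_group. */
struct rb_root_stub { int count; };
struct group_stub {
	struct rb_root_stub service_trees[2][3];	/* [class][type]: RT/BE rows */
	struct rb_root_stub service_tree_idle;		/* IDLE gets its own tree */
};

/* Mirrors service_tree_for(): IDLE is special-cased, the rest index [class][type]. */
static struct rb_root_stub *tree_for(struct group_stub *g,
				     enum wl_class_t class, enum wl_type_t type)
{
	if (class == IDLE_WORKLOAD)
		return &g->service_tree_idle;
	return &g->service_trees[class][type];
}

int main(void)
{
	struct group_stub g = { 0 };

	tree_for(&g, RT_WORKLOAD, SYNC_WORKLOAD)->count++;
	printf("RT/SYNC busy queues: %d\n",
	       g.service_trees[RT_WORKLOAD][SYNC_WORKLOAD].count);
	return 0;
}

Compiled and run, this prints "RT/SYNC busy queues: 1": "class" selects the row (RT vs BE, with IDLE on its own tree) and "type" selects the column (ASYNC, SYNC-NOIDLE, SYNC), which is exactly the distinction the rename makes explicit.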
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	67
1 file changed, 34 insertions, 33 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e62e9205b80a..7646dfddba03 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -155,7 +155,7 @@ struct cfq_queue {
  * First index in the service_trees.
  * IDLE is handled separately, so it has negative index
  */
-enum wl_prio_t {
+enum wl_class_t {
 	BE_WORKLOAD = 0,
 	RT_WORKLOAD = 1,
 	IDLE_WORKLOAD = 2,
@@ -250,7 +250,7 @@ struct cfq_group {
 
 	unsigned long saved_workload_slice;
 	enum wl_type_t saved_workload;
-	enum wl_prio_t saved_serving_prio;
+	enum wl_class_t saved_serving_class;
 
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -280,7 +280,7 @@ struct cfq_data {
 	/*
 	 * The priority currently being served
 	 */
-	enum wl_prio_t serving_prio;
+	enum wl_class_t serving_class;
 	enum wl_type_t serving_type;
 	unsigned long workload_expires;
 	struct cfq_group *serving_group;
@@ -354,16 +354,16 @@ struct cfq_data {
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
-					    enum wl_prio_t prio,
+					    enum wl_class_t class,
 					    enum wl_type_t type)
 {
 	if (!cfqg)
 		return NULL;
 
-	if (prio == IDLE_WORKLOAD)
+	if (class == IDLE_WORKLOAD)
 		return &cfqg->service_tree_idle;
 
-	return &cfqg->service_trees[prio][type];
+	return &cfqg->service_trees[class][type];
 }
 
 enum cfqq_state_flags {
@@ -732,7 +732,7 @@ static inline bool iops_mode(struct cfq_data *cfqd)
 	return false;
 }
 
-static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 {
 	if (cfq_class_idle(cfqq))
 		return IDLE_WORKLOAD;
@@ -751,16 +751,16 @@ static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
 	return SYNC_WORKLOAD;
 }
 
-static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
+static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
 					   struct cfq_data *cfqd,
 					   struct cfq_group *cfqg)
 {
-	if (wl == IDLE_WORKLOAD)
+	if (wl_class == IDLE_WORKLOAD)
 		return cfqg->service_tree_idle.count;
 
-	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
-		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
-		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
+	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count
+		+ cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count
+		+ cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 }
 
 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
@@ -1304,7 +1304,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 		cfqg->saved_workload_slice = cfqd->workload_expires
 						- jiffies;
 		cfqg->saved_workload = cfqd->serving_type;
-		cfqg->saved_serving_prio = cfqd->serving_prio;
+		cfqg->saved_serving_class = cfqd->serving_class;
 	} else
 		cfqg->saved_workload_slice = 0;
 
@@ -1616,7 +1616,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	int left;
 	int new_cfqq = 1;
 
-	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
+	service_tree = service_tree_for(cfqq->cfqg, cfqq_class(cfqq),
 						cfqq_type(cfqq));
 	if (cfq_class_idle(cfqq)) {
 		rb_key = CFQ_IDLE_DELAY;
@@ -2030,8 +2030,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 				  struct cfq_queue *cfqq)
 {
 	if (cfqq) {
-		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
-				cfqd->serving_prio, cfqd->serving_type);
+		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
+				cfqd->serving_class, cfqd->serving_type);
 		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
@@ -2118,7 +2118,7 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
 	struct cfq_rb_root *service_tree =
-		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
+		service_tree_for(cfqd->serving_group, cfqd->serving_class,
 					cfqd->serving_type);
 
 	if (!cfqd->rq_queued)
@@ -2285,7 +2285,7 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 
 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	enum wl_prio_t prio = cfqq_prio(cfqq);
+	enum wl_class_t wl_class = cfqq_class(cfqq);
 	struct cfq_rb_root *service_tree = cfqq->service_tree;
 
 	BUG_ON(!service_tree);
@@ -2295,7 +2295,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		return false;
 
 	/* We never do for idle class queues. */
-	if (prio == IDLE_WORKLOAD)
+	if (wl_class == IDLE_WORKLOAD)
 		return false;
 
 	/* We do for queues that were marked with idle window flag. */
@@ -2495,7 +2495,7 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 }
 
 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-			struct cfq_group *cfqg, enum wl_prio_t prio)
+			struct cfq_group *cfqg, enum wl_class_t wl_class)
 {
 	struct cfq_queue *queue;
 	int i;
@@ -2505,7 +2505,7 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
 	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
 		/* select the one with lowest rb_key */
-		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
+		queue = cfq_rb_first(service_tree_for(cfqg, wl_class, i));
 		if (queue &&
 		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
 			lowest_key = queue->rb_key;
@@ -2523,20 +2523,20 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned count;
 	struct cfq_rb_root *st;
 	unsigned group_slice;
-	enum wl_prio_t original_prio = cfqd->serving_prio;
+	enum wl_class_t original_class = cfqd->serving_class;
 
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
-		cfqd->serving_prio = RT_WORKLOAD;
+		cfqd->serving_class = RT_WORKLOAD;
 	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
-		cfqd->serving_prio = BE_WORKLOAD;
+		cfqd->serving_class = BE_WORKLOAD;
 	else {
-		cfqd->serving_prio = IDLE_WORKLOAD;
+		cfqd->serving_class = IDLE_WORKLOAD;
 		cfqd->workload_expires = jiffies + 1;
 		return;
 	}
 
-	if (original_prio != cfqd->serving_prio)
+	if (original_class != cfqd->serving_class)
 		goto new_workload;
 
 	/*
@@ -2544,7 +2544,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
 	 * expiration time
 	 */
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+	st = service_tree_for(cfqg, cfqd->serving_class, cfqd->serving_type);
 	count = st->count;
 
 	/*
@@ -2556,8 +2556,8 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 new_workload:
 	/* otherwise select new workload type */
 	cfqd->serving_type =
-		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+		cfq_choose_wl(cfqd, cfqg, cfqd->serving_class);
+	st = service_tree_for(cfqg, cfqd->serving_class, cfqd->serving_type);
 	count = st->count;
 
 	/*
@@ -2568,8 +2568,9 @@ new_workload:
 	group_slice = cfq_group_slice(cfqd, cfqg);
 
 	slice = group_slice * count /
-		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
-		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
+		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_class],
+		      cfq_group_busy_queues_wl(cfqd->serving_class, cfqd,
+					cfqg));
 
 	if (cfqd->serving_type == ASYNC_WORKLOAD) {
 		unsigned int tmp;
@@ -2620,7 +2621,7 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
 	if (cfqg->saved_workload_slice) {
 		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
 		cfqd->serving_type = cfqg->saved_workload;
-		cfqd->serving_prio = cfqg->saved_serving_prio;
+		cfqd->serving_class = cfqg->saved_serving_class;
 	} else
 		cfqd->workload_expires = jiffies - 1;
 
@@ -3645,7 +3646,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		service_tree = cfqq->service_tree;
 	else
 		service_tree = service_tree_for(cfqq->cfqg,
-			cfqq_prio(cfqq), cfqq_type(cfqq));
+			cfqq_class(cfqq), cfqq_type(cfqq));
 	service_tree->ttime.last_end_request = now;
 	if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
 		cfqd->last_delayed_sync = now;