author	Vivek Goyal <vgoyal@redhat.com>	2012-10-03 16:56:58 -0400
committer	Tejun Heo <tj@kernel.org>	2013-01-09 11:05:09 -0500
commit	34b98d03bd6e3f3c67af1e4933aaf19887a61192 (patch)
tree	f8c12ac87eda374852f02cc0de078e2c74146c23 /block
parent	4d2ceea4cb86060b03b2aa4826b365320bc78651 (diff)
cfq-iosched: Rename "service_tree" to "st" at some places
At quite a few places we use the keyword "service_tree". At some places,
especially local variables, I have abbreviated it to "st".

Also, at a couple of places, moved the binary operator "+" from the
beginning of a line to the end of the previous line, as per Tejun's
feedback.

v2: Reverted most of the service tree name change based on Jeff Moyer's
feedback.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
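As a quick illustration of the operator-placement style the message refers to, here is a simplified, hypothetical C sketch (not code from this patch; the struct and names are stand-ins, and the real change is visible in the cfq_group_busy_queues_wl() hunk below):

/*
 * Hypothetical, simplified example of the wrapped-expression style:
 * when a sum spans several lines, the binary "+" ends the previous
 * line instead of starting the continuation line.  The struct here
 * is a stand-in, not the real cfq_group layout.
 */
struct st_counts {
	int async;
	int sync_noidle;
	int sync;
};

static inline int busy_count(const struct st_counts *c)
{
	return c->async +
	       c->sync_noidle +
	       c->sync;
}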
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	77
1 file changed, 36 insertions(+), 41 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8f890bfba8fd..db4a1a52c3d9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -353,7 +353,7 @@ struct cfq_data {
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
-static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
+static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
 					    enum wl_class_t class,
 					    enum wl_type_t type)
 {
@@ -758,16 +758,16 @@ static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
 	if (wl_class == IDLE_WORKLOAD)
 		return cfqg->service_tree_idle.count;
 
-	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count
-		+ cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count
-		+ cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
+	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
+		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
+		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 }
 
 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 					 struct cfq_group *cfqg)
 {
-	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
-		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
+		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 }
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
@@ -1612,15 +1612,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
 	unsigned long rb_key;
-	struct cfq_rb_root *service_tree;
+	struct cfq_rb_root *st;
 	int left;
 	int new_cfqq = 1;
 
-	service_tree = service_tree_for(cfqq->cfqg, cfqq_class(cfqq),
-						cfqq_type(cfqq));
+	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
 	if (cfq_class_idle(cfqq)) {
 		rb_key = CFQ_IDLE_DELAY;
-		parent = rb_last(&service_tree->rb);
+		parent = rb_last(&st->rb);
 		if (parent && parent != &cfqq->rb_node) {
 			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 			rb_key += __cfqq->rb_key;
@@ -1638,7 +1637,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfqq->slice_resid = 0;
 	} else {
 		rb_key = -HZ;
-		__cfqq = cfq_rb_first(service_tree);
+		__cfqq = cfq_rb_first(st);
 		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
 	}
 
@@ -1647,8 +1646,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	/*
 	 * same position, nothing more to do
 	 */
-	if (rb_key == cfqq->rb_key &&
-	    cfqq->service_tree == service_tree)
+	if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
 		return;
 
 	cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
@@ -1657,8 +1655,8 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	left = 1;
 	parent = NULL;
-	cfqq->service_tree = service_tree;
-	p = &service_tree->rb.rb_node;
+	cfqq->service_tree = st;
+	p = &st->rb.rb_node;
 	while (*p) {
 		struct rb_node **n;
 
@@ -1679,12 +1677,12 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 
 	if (left)
-		service_tree->left = &cfqq->rb_node;
+		st->left = &cfqq->rb_node;
 
 	cfqq->rb_key = rb_key;
 	rb_link_node(&cfqq->rb_node, parent, p);
-	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
-	service_tree->count++;
+	rb_insert_color(&cfqq->rb_node, &st->rb);
+	st->count++;
 	if (add_front || !new_cfqq)
 		return;
 	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
@@ -2117,19 +2115,18 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
  */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-	struct cfq_rb_root *service_tree =
-		service_tree_for(cfqd->serving_group, cfqd->serving_wl_class,
-					cfqd->serving_wl_type);
+	struct cfq_rb_root *st = st_for(cfqd->serving_group,
+			cfqd->serving_wl_class, cfqd->serving_wl_type);
 
 	if (!cfqd->rq_queued)
 		return NULL;
 
 	/* There is nothing to dispatch */
-	if (!service_tree)
+	if (!st)
 		return NULL;
-	if (RB_EMPTY_ROOT(&service_tree->rb))
+	if (RB_EMPTY_ROOT(&st->rb))
 		return NULL;
-	return cfq_rb_first(service_tree);
+	return cfq_rb_first(st);
 }
 
 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
@@ -2286,10 +2283,10 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	enum wl_class_t wl_class = cfqq_class(cfqq);
-	struct cfq_rb_root *service_tree = cfqq->service_tree;
+	struct cfq_rb_root *st = cfqq->service_tree;
 
-	BUG_ON(!service_tree);
-	BUG_ON(!service_tree->count);
+	BUG_ON(!st);
+	BUG_ON(!st->count);
 
 	if (!cfqd->cfq_slice_idle)
 		return false;
@@ -2307,11 +2304,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Otherwise, we do only if they are the last ones
 	 * in their service tree.
 	 */
-	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
-	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
+	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
+	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
 		return true;
-	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
-			service_tree->count);
+	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
 	return false;
 }
 
@@ -2505,7 +2501,7 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
 	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
 		/* select the one with lowest rb_key */
-		queue = cfq_rb_first(service_tree_for(cfqg, wl_class, i));
+		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
 		if (queue &&
 		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
 			lowest_key = queue->rb_key;
@@ -2544,8 +2540,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
 	 * expiration time
 	 */
-	st = service_tree_for(cfqg, cfqd->serving_wl_class,
-				cfqd->serving_wl_type);
+	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
 	count = st->count;
 
 	/*
@@ -2558,8 +2553,7 @@ new_workload:
 	/* otherwise select new workload type */
 	cfqd->serving_wl_type = cfq_choose_wl(cfqd, cfqg,
 					cfqd->serving_wl_class);
-	st = service_tree_for(cfqg, cfqd->serving_wl_class,
-				cfqd->serving_wl_type);
+	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
 	count = st->count;
 
 	/*
@@ -3640,16 +3634,17 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
 	if (sync) {
-		struct cfq_rb_root *service_tree;
+		struct cfq_rb_root *st;
 
 		RQ_CIC(rq)->ttime.last_end_request = now;
 
 		if (cfq_cfqq_on_rr(cfqq))
-			service_tree = cfqq->service_tree;
+			st = cfqq->service_tree;
 		else
-			service_tree = service_tree_for(cfqq->cfqg,
-				cfqq_class(cfqq), cfqq_type(cfqq));
-		service_tree->ttime.last_end_request = now;
+			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
+					cfqq_type(cfqq));
+
+		st->ttime.last_end_request = now;
 		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
 			cfqd->last_delayed_sync = now;
 	}