Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  75
1 file changed, 27 insertions, 48 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2f80463ed0d..17b768d0d42f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -208,8 +208,6 @@ struct cfq_data {
 	/* Root service tree for cfq_groups */
 	struct cfq_rb_root grp_service_tree;
 	struct cfq_group root_group;
-	/* Number of active cfq groups on group service tree */
-	int nr_groups;
 
 	/*
	 * The priority currently being served
@@ -294,8 +292,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
 					    enum wl_prio_t prio,
-					    enum wl_type_t type,
-					    struct cfq_data *cfqd)
+					    enum wl_type_t type)
 {
 	if (!cfqg)
 		return NULL;
@@ -842,7 +839,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 	__cfq_group_service_tree_add(st, cfqg);
 	cfqg->on_st = true;
-	cfqd->nr_groups++;
 	st->total_weight += cfqg->weight;
 }
 
@@ -863,7 +859,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfqg->on_st = false;
-	cfqd->nr_groups--;
 	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
@@ -1150,7 +1145,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 #endif
 
 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
-						cfqq_type(cfqq), cfqd);
+						cfqq_type(cfqq));
 	if (cfq_class_idle(cfqq)) {
 		rb_key = CFQ_IDLE_DELAY;
 		parent = rb_last(&service_tree->rb);
@@ -1513,9 +1508,6 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
 
-	/* Deny merge if bio and rq don't belong to same cfq group */
-	if ((RQ_CFQQ(rq))->cfqg != cfq_get_cfqg(cfqd, 0))
-		return false;
 	/*
	 * Disallow merge of a sync bio into an async request.
	 */
@@ -1616,7 +1608,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
 	struct cfq_rb_root *service_tree =
 		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
-					cfqd->serving_type, cfqd);
+					cfqd->serving_type);
 
 	if (!cfqd->rq_queued)
 		return NULL;
@@ -1675,13 +1667,17 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 #define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
 
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			       struct request *rq)
+			       struct request *rq, bool for_preempt)
 {
 	sector_t sdist = cfqq->seek_mean;
 
 	if (!sample_valid(cfqq->seek_samples))
 		sdist = CFQQ_SEEK_THR;
 
+	/* if seek_mean is big, using it as close criteria is meaningless */
+	if (sdist > CFQQ_SEEK_THR && !for_preempt)
+		sdist = CFQQ_SEEK_THR;
+
 	return cfq_dist_from_last(cfqd, rq) <= sdist;
 }
 
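Note: cfq_rq_close() gains a for_preempt flag here. Read as a whole, the post-patch function (reconstructed from the hunk above; sample_valid(), CFQQ_SEEK_THR and cfq_dist_from_last() are defined elsewhere in cfq-iosched.c) caps a large seek_mean at the seek threshold for ordinary closeness checks, but leaves it uncapped on the preemption path:

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq, bool for_preempt)
{
	sector_t sdist = cfqq->seek_mean;

	/* too few samples to trust seek_mean: use the default threshold */
	if (!sample_valid(cfqq->seek_samples))
		sdist = CFQQ_SEEK_THR;

	/* if seek_mean is big, using it as close criteria is meaningless */
	if (sdist > CFQQ_SEEK_THR && !for_preempt)
		sdist = CFQQ_SEEK_THR;

	return cfq_dist_from_last(cfqd, rq) <= sdist;
}

The two cfqq_close() call sites below pass false (cooperator lookup wants the capped threshold); the cfq_should_preempt() call site at the end of this patch passes true.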
@@ -1709,7 +1705,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
	 * will contain the closest sector.
	 */
 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
 		return __cfqq;
 
 	if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1720,7 +1716,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 		return NULL;
 
 	__cfqq = rb_entry(node, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
 		return __cfqq;
 
 	return NULL;
@@ -1807,7 +1803,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
-	return service_tree->count == 1;
+	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
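The one-line cfq_should_idle() change is easy to misread in diff form: being the last queue on its service tree is no longer sufficient to idle; the queue must also be sync, so the idle timer is not armed on behalf of async queues. Sketch of the post-patch tail (the earlier part of the function lies outside this hunk):

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);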
@@ -1963,8 +1959,7 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 }
 
 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-				struct cfq_group *cfqg, enum wl_prio_t prio,
-				bool prio_changed)
+				struct cfq_group *cfqg, enum wl_prio_t prio)
 {
 	struct cfq_queue *queue;
 	int i;
@@ -1972,24 +1967,9 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 	unsigned long lowest_key = 0;
 	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
 
-	if (prio_changed) {
-		/*
-		 * When priorities switched, we prefer starting
-		 * from SYNC_NOIDLE (first choice), or just SYNC
-		 * over ASYNC
-		 */
-		if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-			return cur_best;
-		cur_best = SYNC_WORKLOAD;
-		if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-			return cur_best;
-
-		return ASYNC_WORKLOAD;
-	}
-
-	for (i = 0; i < 3; ++i) {
-		/* otherwise, select the one with lowest rb_key */
-		queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
+	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
+		/* select the one with lowest rb_key */
+		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
 		if (queue &&
 		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
 			lowest_key = queue->rb_key;
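With the prio_changed branch gone, cfq_choose_wl() reduces to a single scan over the workload types, and the magic bound 3 becomes the symbolic SYNC_WORKLOAD, covering the same three service trees as the old i < 3. A sketch of the whole function as it reads after this patch; the key_valid declaration, the tail of the loop body, and the final return fall outside the hunk and are reconstructed here as assumptions:

static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;	/* assumed: declared in lines elided from the hunk */
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;	/* assumed: loop tail not shown in the hunk */
			key_valid = true;
		}
	}

	return cur_best;	/* assumed: not shown in the hunk */
}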
@@ -2003,8 +1983,6 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-	enum wl_prio_t previous_prio = cfqd->serving_prio;
-	bool prio_changed;
 	unsigned slice;
 	unsigned count;
 	struct cfq_rb_root *st;
@@ -2032,24 +2010,19 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
-	prio_changed = (cfqd->serving_prio != previous_prio);
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-				cfqd);
+	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 	count = st->count;
 
 	/*
-	 * If priority didn't change, check workload expiration,
-	 * and that we still have other queues ready
+	 * check workload expiration, and that we still have other queues ready
	 */
-	if (!prio_changed && count &&
-	    !time_after(jiffies, cfqd->workload_expires))
+	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 
 	/* otherwise select new workload type */
 	cfqd->serving_type =
-		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed);
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-				cfqd);
+		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
+	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 	count = st->count;
 
 	/*
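Consolidated, the post-patch selection sequence in choose_service_tree() is linear: the current workload is kept as long as it still has queues and has not expired, otherwise a new type is chosen; no special case for a priority-class switch remains. Reconstructed from the + side of the hunk above:

	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

	/* otherwise select new workload type */
	cfqd->serving_type =
		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;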
@@ -3104,6 +3077,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return true;
 
 	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
+	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
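Ordering matters in this hunk: the new RT guard sits ahead of the sync-over-async rule, so a sync request from a non-RT queue can no longer preempt a running RT queue's timeslice. Sketch of the resulting check sequence; the body of the sync check lies outside the hunk, and its condition here is an assumption based on the surviving comment:

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))	/* assumed: not shown in the hunk */
		return true;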
@@ -3143,7 +3122,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
-	if (cfq_rq_close(cfqd, cfqq, rq))
+	if (cfq_rq_close(cfqd, cfqq, rq, true))
 		return true;
 
 	return false;