Diffstat (limited to 'block/cfq-iosched.c')
 -rw-r--r--  block/cfq-iosched.c | 158
 1 file changed, 121 insertions(+), 37 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f7dfef..4cd59b0d7c15 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
 	struct cfq_queue *new_cfqq;
 	struct cfq_group *cfqg;
 	struct cfq_group *orig_cfqg;
+	/* Number of sectors dispatched from queue in single dispatch round */
+	unsigned long nr_sectors;
 };
 
 /*
@@ -157,6 +160,7 @@ enum wl_prio_t {
 	BE_WORKLOAD = 0,
 	RT_WORKLOAD = 1,
 	IDLE_WORKLOAD = 2,
+	CFQ_PRIO_NR,
 };
 
 /*
@@ -181,10 +185,19 @@ struct cfq_group {
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
 
-	/* Per group busy queus average. Useful for workload slice calc. */
-	unsigned int busy_queues_avg[2];
 	/*
-	 * rr lists of queues with requests, onle rr for each priority class.
+	 * Per group busy queus average. Useful for workload slice calc. We
+	 * create the array for each prio class but at run time it is used
+	 * only for RT and BE class and slot for IDLE class remains unused.
+	 * This is primarily done to avoid confusion and a gcc warning.
+	 */
+	unsigned int busy_queues_avg[CFQ_PRIO_NR];
+	/*
+	 * rr lists of queues with requests. We maintain service trees for
+	 * RT and BE classes. These trees are subdivided in subclasses
+	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
+	 * class there is no subclassification and all the cfq queues go on
+	 * a single tree service_tree_idle.
 	 * Counts are embedded in the cfq_rb_root
 	 */
 	struct cfq_rb_root service_trees[2][3];
@@ -198,6 +211,8 @@ struct cfq_group {
 	struct hlist_node cfqd_node;
 	atomic_t ref;
 #endif
+	/* number of requests that are on the dispatch list or inside driver */
+	int dispatched;
 };
 
 /*
@@ -216,7 +231,6 @@ struct cfq_data {
 	enum wl_type_t serving_type;
 	unsigned long workload_expires;
 	struct cfq_group *serving_group;
-	bool noidle_tree_requires_idle;
 
 	/*
 	 * Each priority tree is sorted by next_request position. These
@@ -271,6 +285,7 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
 	unsigned int cfq_group_isolation;
 
@@ -378,6 +393,21 @@ CFQ_CFQQ_FNS(wait_busy);
 		&cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+	/*
+	 * If we are not idling on queues and it is a NCQ drive, parallel
+	 * execution of requests is on and measuring time is not possible
+	 * in most of the cases until and unless we drive shallower queue
+	 * depths and that becomes a performance bottleneck. In such cases
+	 * switch to start providing fairness in terms of number of IOs.
+	 */
+	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+		return true;
+	else
+		return false;
+}
+
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
 	if (cfq_class_idle(cfqq))
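
The iops_mode() helper added above is the heart of this change: when queue idling is disabled (cfq_slice_idle set to 0) and the device is doing tagged command queueing (hw_tag), wall-clock slices no longer measure service received, so the scheduler accounts fairness in dispatched IOs instead. The body is just a boolean test; a minimal standalone sketch of the equivalent predicate, with a stub struct standing in for the real cfq_data, might look like this:

    #include <stdbool.h>

    /* Stub with only the two fields the predicate reads (illustrative). */
    struct cfq_data_stub {
            unsigned int cfq_slice_idle;    /* 0 means queue idling is off */
            int hw_tag;                     /* nonzero once NCQ behaviour is seen */
    };

    /* Equivalent to the patch's if/else: charge in IOs, not time. */
    static inline bool iops_mode_stub(const struct cfq_data_stub *cfqd)
    {
            return !cfqd->cfq_slice_idle && cfqd->hw_tag;
    }
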
@@ -906,7 +936,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 		slice_used = cfqq->allocated_slice;
 	}
 
-	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
 	return slice_used;
 }
 
@@ -914,19 +943,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 			struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl, charge_sl;
+	unsigned int used_sl, charge;
 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 			- cfqg->service_tree_idle.count;
 
 	BUG_ON(nr_sync < 0);
-	used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-	if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-		charge_sl = cfqq->allocated_slice;
+	if (iops_mode(cfqd))
+		charge = cfqq->slice_dispatch;
+	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+		charge = cfqq->allocated_slice;
 
 	/* Can't update vdisktime while group is on service tree */
 	cfq_rb_erase(&cfqg->rb_node, st);
-	cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/* This group is being expired. Save the context */
@@ -940,6 +971,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
+			iops_mode(cfqd), cfqq->nr_sectors);
 	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
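
In cfq_group_served() the renamed charge variable can now hold one of three quantities before it is scaled into the group's vdisktime: the dispatch count in iops mode, the whole allocated slice for an async queue with no sync siblings, or the time actually used. A hedged sketch of just that selection, with flattened stand-in types rather than the kernel's structures:

    /* Inputs the charging decision reads, flattened for illustration. */
    struct served_stats {
            unsigned int slice_used;        /* time the queue really consumed */
            unsigned int allocated_slice;   /* slice it was granted */
            unsigned int slice_dispatch;    /* requests dispatched in the slice */
    };

    static unsigned int pick_charge(const struct served_stats *s,
                                    int iops_mode, int queue_is_sync, int nr_sync)
    {
            if (iops_mode)
                    return s->slice_dispatch;       /* fairness in IOs */
            if (!queue_is_sync && !nr_sync)
                    return s->allocated_slice;      /* async-only group: full slice */
            return s->slice_used;                   /* default: time used */
    }
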
@@ -952,8 +986,8 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
 	return NULL;
 }
 
-void
-cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
+void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
+					unsigned int weight)
 {
 	cfqg_of_blkg(blkg)->weight = weight;
 }
@@ -994,10 +1028,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 */
 	atomic_set(&cfqg->ref, 1);
 
-	/* Add group onto cgroup list */
-	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	/*
+	 * Add group onto cgroup list. It might happen that bdi->dev is
+	 * not initiliazed yet. Initialize this new group without major
+	 * and minor info and this info will be filled in once a new thread
+	 * comes for IO. See code above.
+	 */
+	if (bdi->dev) {
+		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
+	} else
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+					0);
+
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
 	/* Add group on cfqd list */
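
The hunk above closes a startup race: cfq_find_alloc_cfqg() can run before the backing_dev_info has a registered device, in which case bdi->dev is NULL and the old unconditional dev_name() call would dereference it. The group is then registered with device number 0, to be filled in later when IO arrives. A userspace-compilable sketch of the guarded parse (parse_devno() and its string argument are illustrative, not kernel API):

    #include <stdio.h>

    /* Parse "major:minor" if a device name exists, else report 0. The
     * shift mirrors the kernel's MKDEV(major, minor) with 20 minor bits. */
    static unsigned int parse_devno(const char *name)
    {
            unsigned int major = 0, minor = 0;

            if (name)
                    sscanf(name, "%u:%u", &major, &minor);
            return (major << 20) | minor;
    }
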
@@ -1587,6 +1631,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 		cfqq->allocated_slice = 0;
 		cfqq->slice_end = 0;
 		cfqq->slice_dispatch = 0;
+		cfqq->nr_sectors = 0;
 
 		cfq_clear_cfqq_wait_request(cfqq);
 		cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1884,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(!service_tree);
 	BUG_ON(!service_tree->count);
 
+	if (!cfqd->cfq_slice_idle)
+		return false;
+
 	/* We never do for idle class queues. */
 	if (prio == IDLE_WORKLOAD)
 		return false;
@@ -1863,7 +1911,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 	struct cfq_io_context *cic;
-	unsigned long sl;
+	unsigned long sl, group_idle = 0;
 
 	/*
 	 * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1927,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	/*
 	 * idle is disabled, either manually or by past process history
 	 */
-	if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-		return;
+	if (!cfq_should_idle(cfqd, cfqq)) {
+		/* no queue idling. Check for group idling */
+		if (cfqd->cfq_group_idle)
+			group_idle = cfqd->cfq_group_idle;
+		else
+			return;
+	}
 
 	/*
 	 * still active requests from this queue, don't idle
@@ -1907,13 +1960,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		return;
 	}
 
+	/* There are other queues in the group, don't do group idle */
+	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+		return;
+
 	cfq_mark_cfqq_wait_request(cfqq);
 
-	sl = cfqd->cfq_slice_idle;
+	if (group_idle)
+		sl = cfqd->cfq_group_idle;
+	else
+		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+			group_idle ? 1 : 0);
 }
 
 /*
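
Taken together, the cfq_arm_slice_timer() hunks above arm the idle timer for one of two windows: the usual per-queue cfq_slice_idle, or cfq_group_idle when queue idling is not warranted but the group would otherwise lose its slice; group idling is skipped when a sibling queue can keep the group busy. The selection, compressed into one illustrative helper (parameter names assumed, other early-return checks in the function omitted):

    /* Returns the window to arm the idle timer with, or 0 for "don't idle". */
    static unsigned long idle_window(int should_idle_queue, int nr_queues_in_group,
                                     unsigned long slice_idle,
                                     unsigned long group_idle)
    {
            if (should_idle_queue)
                    return slice_idle;      /* normal per-queue idling */
            if (!group_idle)
                    return 0;               /* group idling disabled too */
            if (nr_queues_in_group > 1)
                    return 0;               /* a sibling keeps the group busy */
            return group_idle;              /* idle on behalf of the group */
    }
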
@@ -1929,9 +1990,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 	cfq_remove_request(rq);
 	cfqq->dispatched++;
+	(RQ_CFQG(rq))->dispatched++;
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+	cfqq->nr_sectors += blk_rq_sectors(rq);
 	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2126,7 +2189,6 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	slice = max_t(unsigned, slice, CFQ_MIN_TT);
 	cfq_log(cfqd, "workload slice:%d", slice);
 	cfqd->workload_expires = jiffies + slice;
-	cfqd->noidle_tree_requires_idle = false;
 }
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
@@ -2198,7 +2260,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 			cfqq = NULL;
 			goto keep_queue;
 		} else
-			goto expire;
+			goto check_group_idle;
 	}
 
 	/*
2204 | /* | 2266 | /* |
@@ -2226,8 +2288,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
2226 | * flight or is idling for a new request, allow either of these | 2288 | * flight or is idling for a new request, allow either of these |
2227 | * conditions to happen (or time out) before selecting a new queue. | 2289 | * conditions to happen (or time out) before selecting a new queue. |
2228 | */ | 2290 | */ |
2229 | if (timer_pending(&cfqd->idle_slice_timer) || | 2291 | if (timer_pending(&cfqd->idle_slice_timer)) { |
2230 | (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { | 2292 | cfqq = NULL; |
2293 | goto keep_queue; | ||
2294 | } | ||
2295 | |||
2296 | if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { | ||
2297 | cfqq = NULL; | ||
2298 | goto keep_queue; | ||
2299 | } | ||
2300 | |||
2301 | /* | ||
2302 | * If group idle is enabled and there are requests dispatched from | ||
2303 | * this group, wait for requests to complete. | ||
2304 | */ | ||
2305 | check_group_idle: | ||
2306 | if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 | ||
2307 | && cfqq->cfqg->dispatched) { | ||
2231 | cfqq = NULL; | 2308 | cfqq = NULL; |
2232 | goto keep_queue; | 2309 | goto keep_queue; |
2233 | } | 2310 | } |
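
cfq_select_queue() now splits the old compound condition into separate checks so the new check_group_idle label can be jumped to directly (via the 'goto check_group_idle' hunk above) when the active queue is empty: if this is the group's only queue and its requests are still in flight, the queue is kept so completions can rearm group idling rather than handing the disk to another group. The decision as one illustrative predicate (parameter names assumed):

    /* Nonzero => keep the empty active queue; zero => expire and reselect. */
    static int keep_empty_queue(int idle_timer_pending, int queue_dispatched,
                                int should_idle_queue, unsigned int group_idle,
                                int nr_queues_in_group, int group_dispatched)
    {
            if (idle_timer_pending)
                    return 1;       /* an idle window is already armed */
            if (queue_dispatched && should_idle_queue)
                    return 1;       /* wait for this queue's own IO */
            /* check_group_idle: last queue of the group, IO still in flight */
            return group_idle && nr_queues_in_group == 1 && group_dispatched;
    }
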
@@ -3108,7 +3185,9 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
 		cfq_mark_cfqq_deep(cfqq);
 
-	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
+	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
+		enable_idle = 0;
+	else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
 	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
@@ -3375,6 +3454,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
+	(RQ_CFQG(rq))->dispatched--;
 	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
 			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
 			rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3484,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		 * the queue.
 		 */
 		if (cfq_should_wait_busy(cfqd, cfqq)) {
-			cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+			unsigned long extend_sl = cfqd->cfq_slice_idle;
+			if (!cfqd->cfq_slice_idle)
+				extend_sl = cfqd->cfq_group_idle;
+			cfqq->slice_end = jiffies + extend_sl;
 			cfq_mark_cfqq_wait_busy(cfqq);
 			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
 		}
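
Since cfq_slice_idle may now legitimately be 0 (that is what enables iops mode), the wait-busy slice extension above falls back to cfq_group_idle so a queue that just went empty still briefly holds the group's slot. The fallback in isolation, as an illustrative one-liner:

    /* Extension applied to slice_end when marking a queue wait-busy. */
    static unsigned long wait_busy_extension(unsigned long slice_idle,
                                             unsigned long group_idle)
    {
            return slice_idle ? slice_idle : group_idle;
    }
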
@@ -3421,17 +3504,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 			cfq_slice_expired(cfqd, 1);
 		else if (sync && cfqq_empty &&
 			 !cfq_close_cooperator(cfqd, cfqq)) {
-			cfqd->noidle_tree_requires_idle |=
-				!(rq->cmd_flags & REQ_NOIDLE);
-			/*
-			 * Idling is enabled for SYNC_WORKLOAD.
-			 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-			 * only if we processed at least one !REQ_NOIDLE request
-			 */
-			if (cfqd->serving_type == SYNC_WORKLOAD
-			    || cfqd->noidle_tree_requires_idle
-			    || cfqq->cfqg->nr_cfqq == 1)
-				cfq_arm_slice_timer(cfqd);
+			cfq_arm_slice_timer(cfqd);
 		}
 	}
 
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_group_idle = cfq_group_idle;
 	cfqd->cfq_latency = 1;
 	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
 	CFQ_ATTR(group_isolation),
 	__ATTR_NULL
@@ -4013,6 +4090,7 @@ static struct blkio_policy_type blkio_policy_cfq = {
 	.blkio_unlink_group_fn = cfq_unlink_blkio_group,
 	.blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
 	},
+	.plid = BLKIO_POLICY_PROP,
 };
 #else
 static struct blkio_policy_type blkio_policy_cfq;
@@ -4028,6 +4106,12 @@ static int __init cfq_init(void)
 	if (!cfq_slice_idle)
 		cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	if (!cfq_group_idle)
+		cfq_group_idle = 1;
+#else
+		cfq_group_idle = 0;
+#endif
 	if (cfq_slab_setup())
 		return -ENOMEM;
 
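
Finally, cfq_init() gives cfq_group_idle the same floor treatment cfq_slice_idle already gets: HZ/125 truncates to 0 on HZ=100 kernels, so a zero default is bumped to one jiffy, while builds without CONFIG_CFQ_GROUP_IOSCHED force it to 0 so the group-idle branches above become dead code. A condensed, userspace-compilable sketch of that fixup (HZ value assumed for illustration):

    #define HZ 100                          /* assumed; division below yields 0 */

    static int cfq_slice_idle = HZ / 125;
    static int cfq_group_idle = HZ / 125;

    static void fixup_idle_defaults(void)
    {
            if (!cfq_slice_idle)
                    cfq_slice_idle = 1;     /* at least one jiffy */
    #ifdef CONFIG_CFQ_GROUP_IOSCHED
            if (!cfq_group_idle)
                    cfq_group_idle = 1;
    #else
            cfq_group_idle = 0;             /* group scheduling compiled out */
    #endif
    }
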