Diffstat (limited to 'block')

 block/blk-cgroup.c  |   2
 block/blk-core.c    |   6
 block/blk-map.c     |   2
 block/blk-sysfs.c   |   1
 block/blk.h         |   8
 block/cfq-iosched.c | 119
 block/elevator.c    |  44

7 files changed, 144 insertions, 38 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a6809645d212..2fef1ef931a0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 
 	/* Currently we do not support hierarchy deeper than two level (0,1) */
 	if (parent != cgroup->top_cgroup)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EPERM);
 
 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 	if (!blkcg)
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1a1e7e63cc..32a1c123dfb3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = (bio->bi_rw & REQ_SYNC);
-	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
-	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const bool sync = !!(bio->bi_rw & REQ_SYNC);
+	const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+	const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
 	if ((bio->bi_rw & REQ_HARDBARRIER) &&
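The two changes in this hunk are easy to misread as one fix. A minimal userspace sketch of both, assuming a hypothetical flag above bit 31 (REQ_HIGH below is invented for illustration; in this kernel the failfast bits were still low, so the truncation was latent rather than live):

#include <stdbool.h>
#include <stdio.h>

#define REQ_SYNC (1UL << 4)   /* hypothetical bit position */
#define REQ_HIGH (1UL << 36)  /* hypothetical flag above bit 31 */

int main(void)
{
        unsigned long rw = REQ_SYNC | REQ_HIGH;

        /* C99 _Bool already normalizes any nonzero value to 1, so the
         * added !! is about clarity rather than correctness: */
        bool sync = rw & REQ_SYNC;          /* true */
        bool sync2 = !!(rw & REQ_SYNC);     /* also true */

        /* The width change is the real fix: storing a masked unsigned
         * long into unsigned int silently drops bits above 31. */
        unsigned int ff32 = rw & REQ_HIGH;  /* 0 on LP64: bit 36 truncated */
        unsigned long ff64 = rw & REQ_HIGH; /* flag preserved */

        printf("%d %d %u %lu\n", sync, sync2, ff32, ff64);
        return 0;
}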
diff --git a/block/blk-map.c b/block/blk-map.c
index c65d7593f7f1..ade0a08c9099 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << REQ_WRITE);
+		bio->bi_rw |= REQ_WRITE;
 
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 001ab18078f5..0749b89c6885 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
 		kobject_del(&q->kobj);
 		blk_trace_remove_sysfs(disk_to_dev(disk));
+		kobject_put(&dev->kobj);
 		return ret;
 	}
 
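The one-line fix balances a reference taken earlier in blk_register_queue() (presumably the kobject_get() on the device's kobject passed to kobject_add()) that the error path previously leaked. A toy model of the pattern, with a hypothetical plain counter standing in for struct kobject:

#include <stdio.h>

struct obj { int refcount; };

static void get(struct obj *o) { o->refcount++; }
static void put(struct obj *o)
{
        if (--o->refcount == 0)
                printf("released\n");
}

/* Hypothetical register function mirroring blk_register_queue(): a
 * reference taken before anything can fail must also be dropped on the
 * failure path, which is what the added kobject_put() does. */
static int register_thing(struct obj *dev, int simulate_failure)
{
        get(dev);
        if (simulate_failure) {
                /* ... undo partial registration ... */
                put(dev);       /* the fix: balance the early get */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct obj dev = { .refcount = 1 };

        register_thing(&dev, 1);  /* failure path, reference balanced */
        put(&dev);                /* final reference, prints "released" */
        return 0;
}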
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87141e4..d6b911ac002c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 static inline int blk_cpu_to_group(int cpu)
 {
+	int group = NR_CPUS;
 #ifdef CONFIG_SCHED_MC
 	const struct cpumask *mask = cpu_coregroup_mask(cpu);
-	return cpumask_first(mask);
+	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	return cpumask_first(topology_thread_cpumask(cpu));
+	group = cpumask_first(topology_thread_cpumask(cpu));
 #else
 	return cpu;
 #endif
+	if (likely(group < NR_CPUS))
+		return group;
+	return cpu;
 }
 
 /*
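A userspace model of the guard being added, with a hypothetical find_first() standing in for cpumask_first(); the scenario where the topology mask comes back empty (e.g. transiently around CPU hotplug) is an assumption for illustration:

#include <stdio.h>

#define NR_CPUS 8

/* Returns the first set bit, or NR_CPUS if the mask is empty — the
 * same convention as cpumask_first() on an empty mask. */
static int find_first(unsigned int mask)
{
        for (int i = 0; i < NR_CPUS; i++)
                if (mask & (1u << i))
                        return i;
        return NR_CPUS;
}

static int cpu_to_group(int cpu, unsigned int coregroup_mask)
{
        int group = find_first(coregroup_mask);

        if (group < NR_CPUS)    /* the added guard */
                return group;
        return cpu;             /* fall back to the CPU itself */
}

int main(void)
{
        printf("%d\n", cpu_to_group(3, 0x06)); /* -> 1 */
        printf("%d\n", cpu_to_group(3, 0x00)); /* empty mask -> 3 */
        return 0;
}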
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f7dfef..9eba291eb6fd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
 	struct cfq_queue *new_cfqq;
 	struct cfq_group *cfqg;
 	struct cfq_group *orig_cfqg;
+	/* Number of sectors dispatched from queue in single dispatch round */
+	unsigned long nr_sectors;
 };
 
 /*
@@ -198,6 +201,8 @@ struct cfq_group {
 	struct hlist_node cfqd_node;
 	atomic_t ref;
 #endif
+	/* number of requests that are on the dispatch list or inside driver */
+	int dispatched;
 };
 
 /*
@@ -271,6 +276,7 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
 	unsigned int cfq_group_isolation;
 
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
 			&cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+	/*
+	 * If we are not idling on queues and it is an NCQ drive, parallel
+	 * execution of requests is on and measuring time is not possible
+	 * in most of the cases until and unless we drive shallower queue
+	 * depths and that becomes a performance bottleneck. In such cases
+	 * switch to start providing fairness in terms of number of IOs.
+	 */
+	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+		return true;
+	else
+		return false;
+}
+
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
 	if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 		slice_used = cfqq->allocated_slice;
 	}
 
-	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
 	return slice_used;
 }
 
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 			struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl, charge_sl;
+	unsigned int used_sl, charge;
 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 			- cfqg->service_tree_idle.count;
 
 	BUG_ON(nr_sync < 0);
-	used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-	if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-		charge_sl = cfqq->allocated_slice;
+	if (iops_mode(cfqd))
+		charge = cfqq->slice_dispatch;
+	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+		charge = cfqq->allocated_slice;
 
 	/* Can't update vdisktime while group is on service tree */
 	cfq_rb_erase(&cfqg->rb_node, st);
-	cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 					st->min_vdisktime);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
+			iops_mode(cfqd), cfqq->nr_sectors);
 	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
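Pulling the new charging policy out of its kernel context, a standalone sketch (field names mirror the patch; the function is a simplification, not the kernel code):

#include <stdio.h>

struct sketch_queue {
        unsigned int slice_used;       /* time consumed, in jiffies */
        unsigned int allocated_slice;  /* time the queue was handed */
        unsigned int slice_dispatch;   /* requests dispatched this round */
        int is_sync;
};

static unsigned int group_charge(const struct sketch_queue *q,
                                 int iops_mode, int group_has_sync_queues)
{
        if (iops_mode)
                return q->slice_dispatch;   /* NCQ + no idling: charge by IOs */
        if (!q->is_sync && !group_has_sync_queues)
                return q->allocated_slice;  /* async-only group: full slice */
        return q->slice_used;               /* default: time actually used */
}

int main(void)
{
        struct sketch_queue q = { .slice_used = 7, .allocated_slice = 10,
                                  .slice_dispatch = 32, .is_sync = 1 };

        printf("%u %u\n", group_charge(&q, 0, 1),  /* 7: time-based */
                          group_charge(&q, 1, 1)); /* 32: iops-based */
        return 0;
}

The returned charge feeds cfq_scale_slice(), so in iops mode a group's vdisktime advances per request rather than per jiffy of service, which is what turns group fairness into an IO-count measure on NCQ drives that are not being idled.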
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 */
 	atomic_set(&cfqg->ref, 1);
 
-	/* Add group onto cgroup list */
-	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
-					MKDEV(major, minor));
+	/*
+	 * Add group onto cgroup list. It might happen that bdi->dev is
+	 * not initialized yet. Initialize this new group without major
+	 * and minor info and this info will be filled in once a new thread
+	 * comes for IO. See code above.
+	 */
+	if (bdi->dev) {
+		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+					MKDEV(major, minor));
+	} else
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+					0);
+
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
 	/* Add group on cfqd list */
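A minimal model of the register-now, fill-in-later pattern this hunk introduces (all names hypothetical; the MKDEV()-style encoding is shown only to make the placeholder visible):

#include <stdio.h>

struct group { unsigned int dev; /* 0 = not known yet */ };

/* Called at group creation; the backing device may not be registered. */
static void add_group(struct group *g, const char *dev_name)
{
        unsigned int major = 0, minor = 0;

        if (dev_name && sscanf(dev_name, "%u:%u", &major, &minor) == 2)
                g->dev = (major << 20) | minor; /* MKDEV()-style encoding */
        else
                g->dev = 0;                     /* fill in later, on first IO */
}

int main(void)
{
        struct group g;

        add_group(&g, NULL);   /* bdi->dev not initialized yet -> dev = 0 */
        printf("%u\n", g.dev);
        add_group(&g, "8:16"); /* later, once the device is known */
        printf("%u\n", g.dev);
        return 0;
}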
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 		cfqq->allocated_slice = 0;
 		cfqq->slice_end = 0;
 		cfqq->slice_dispatch = 0;
+		cfqq->nr_sectors = 0;
 
 		cfq_clear_cfqq_wait_request(cfqq);
 		cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(!service_tree);
 	BUG_ON(!service_tree->count);
 
+	if (!cfqd->cfq_slice_idle)
+		return false;
+
 	/* We never do for idle class queues. */
 	if (prio == IDLE_WORKLOAD)
 		return false;
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 	struct cfq_io_context *cic;
-	unsigned long sl;
+	unsigned long sl, group_idle = 0;
 
 	/*
 	 * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	/*
 	 * idle is disabled, either manually or by past process history
 	 */
-	if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-		return;
+	if (!cfq_should_idle(cfqd, cfqq)) {
+		/* no queue idling. Check for group idling */
+		if (cfqd->cfq_group_idle)
+			group_idle = cfqd->cfq_group_idle;
+		else
+			return;
+	}
 
 	/*
 	 * still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		return;
 	}
 
+	/* There are other queues in the group, don't do group idle */
+	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+		return;
+
 	cfq_mark_cfqq_wait_request(cfqq);
 
-	sl = cfqd->cfq_slice_idle;
+	if (group_idle)
+		sl = cfqd->cfq_group_idle;
+	else
+		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+			group_idle ? 1 : 0);
 }
 
 /*
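The timer-arming policy after this hunk, reduced to a standalone decision function (simplified: the real cfq_arm_slice_timer() has more early exits, e.g. for requests still in flight and think-time heuristics):

#include <stdio.h>

static unsigned long pick_idle_window(unsigned int slice_idle,
                                      unsigned int group_idle,
                                      int should_idle_queue,
                                      int queues_in_group)
{
        if (should_idle_queue)
                return slice_idle;      /* classic per-queue idling */
        if (group_idle && queues_in_group == 1)
                return group_idle;      /* last queue: idle for the group */
        return 0;                       /* don't arm the timer at all */
}

int main(void)
{
        printf("%lu\n", pick_idle_window(8, 8, 1, 3)); /* 8: queue idle */
        printf("%lu\n", pick_idle_window(0, 8, 0, 1)); /* 8: group idle */
        printf("%lu\n", pick_idle_window(0, 8, 0, 3)); /* 0: others queued */
        return 0;
}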
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 	cfq_remove_request(rq);
 	cfqq->dispatched++;
+	(RQ_CFQG(rq))->dispatched++;
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+	cfqq->nr_sectors += blk_rq_sectors(rq);
 	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 			cfqq = NULL;
 			goto keep_queue;
 		} else
-			goto expire;
+			goto check_group_idle;
 	}
 
 	/*
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * flight or is idling for a new request, allow either of these
 	 * conditions to happen (or time out) before selecting a new queue.
 	 */
-	if (timer_pending(&cfqd->idle_slice_timer) ||
-	    (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+	if (timer_pending(&cfqd->idle_slice_timer)) {
+		cfqq = NULL;
+		goto keep_queue;
+	}
+
+	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+		cfqq = NULL;
+		goto keep_queue;
+	}
+
+	/*
+	 * If group idle is enabled and there are requests dispatched from
+	 * this group, wait for requests to complete.
+	 */
+check_group_idle:
+	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+	    && cfqq->cfqg->dispatched) {
 		cfqq = NULL;
 		goto keep_queue;
 	}
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
+	(RQ_CFQG(rq))->dispatched--;
 	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
 			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
 			rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		 * the queue.
 		 */
 		if (cfq_should_wait_busy(cfqd, cfqq)) {
-			cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+			unsigned long extend_sl = cfqd->cfq_slice_idle;
+			if (!cfqd->cfq_slice_idle)
+				extend_sl = cfqd->cfq_group_idle;
+			cfqq->slice_end = jiffies + extend_sl;
 			cfq_mark_cfqq_wait_busy(cfqq);
 			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
 		}
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_group_idle = cfq_group_idle;
 	cfqd->cfq_latency = 1;
 	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
 	CFQ_ATTR(group_isolation),
 	__ATTR_NULL
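With group_idle wired into cfq_attrs and the SHOW/STORE macros above, the tunable should appear next to slice_idle under the scheduler's sysfs directory — on a typical setup, /sys/block/<dev>/queue/iosched/group_idle — and, given the trailing 1 (the conversion flag) in its SHOW_FUNCTION/STORE_FUNCTION entries, values are presumably exchanged in milliseconds and stored internally in jiffies, like slice_idle.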
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void)
 	if (!cfq_slice_idle)
 		cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	if (!cfq_group_idle)
+		cfq_group_idle = 1;
+#else
+	cfq_group_idle = 0;
+#endif
 	if (cfq_slab_setup())
 		return -ENOMEM;
 
diff --git a/block/elevator.c b/block/elevator.c
index ec585c9554d3..205b09a5bd9e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
 	struct elevator_queue *old_elevator, *e;
 	void *data;
+	int err;
 
 	/*
 	 * Allocate new elevator
 	 */
 	e = elevator_alloc(q, new_e);
 	if (!e)
-		return 0;
+		return -ENOMEM;
 
 	data = elevator_init_queue(q, e);
 	if (!data) {
 		kobject_put(&e->kobj);
-		return 0;
+		return -ENOMEM;
 	}
 
 	/*
@@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 	__elv_unregister_queue(old_elevator);
 
-	if (elv_register_queue(q))
+	err = elv_register_queue(q);
+	if (err)
 		goto fail_register;
 
 	/*
@@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
-	return 1;
+	return 0;
 
 fail_register:
 	/*
@@ -1071,17 +1073,19 @@ fail_register:
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 	spin_unlock_irq(q->queue_lock);
 
-	return 0;
+	return err;
 }
 
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-			  size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
 {
 	char elevator_name[ELV_NAME_MAX];
 	struct elevator_type *e;
 
 	if (!q->elevator)
-		return count;
+		return -ENXIO;
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
 	e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 
 	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
 		elevator_put(e);
-		return count;
+		return 0;
 	}
 
-	if (!elevator_switch(q, e))
-		printk(KERN_ERR "elevator: switch to %s failed\n",
-							elevator_name);
-	return count;
+	return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+			  size_t count)
+{
+	int ret;
+
+	if (!q->elevator)
+		return count;
+
+	ret = elevator_change(q, name);
+	if (!ret)
+		return count;
+
+	printk(KERN_ERR "elevator: switch to %s failed\n", name);
+	return ret;
 }
 
 ssize_t elv_iosched_show(struct request_queue *q, char *name)
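Since elevator_change() is now exported, an in-kernel caller can switch schedulers without going through sysfs. A hedged usage sketch, assuming the prototype is made visible via linux/elevator.h (the companion header change is not part of this diff), with a hypothetical driver helper as the caller:

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/printk.h>

/* Hypothetical driver helper: prefer "noop" on a virtual disk whose
 * backing store does its own scheduling. */
static int pick_noop_scheduler(struct request_queue *q)
{
	int err;

	err = elevator_change(q, "noop"); /* 0 on success, -errno on failure */
	if (err)
		pr_warn("could not switch to noop: %d\n", err);
	return err;
}

Note the return convention elevator_switch() adopts in this patch: 0 for success and a negative errno on failure, which elv_iosched_store() translates back into the sysfs convention of returning the byte count on success.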