author     Jens Axboe <jaxboe@fusionio.com>  2010-10-19 03:13:04 -0400
committer  Jens Axboe <jaxboe@fusionio.com>  2010-10-19 03:13:04 -0400
commit     fa251f89903d73989e2f63e13d0eaed1e07ce0da (patch)
tree       3f7fe779941e3b6d67754dd7c44a32f48ea47c74 /block
parent     dd3932eddf428571762596e17b65f5dc92ca361b (diff)
parent     cd07202cc8262e1669edff0d97715f3dd9260917 (diff)
Merge branch 'v2.6.36-rc8' into for-2.6.37/barrier
Conflicts:
	block/blk-core.c
	drivers/block/loop.c
	mm/swapfile.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   |   2
-rw-r--r--  block/blk-core.c     |   6
-rw-r--r--  block/blk-map.c      |   2
-rw-r--r--  block/blk-merge.c    |  12
-rw-r--r--  block/blk-sysfs.c    |   1
-rw-r--r--  block/blk.h          |   8
-rw-r--r--  block/cfq-iosched.c  | 119
-rw-r--r--  block/elevator.c     |  52
8 files changed, 162 insertions(+), 40 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a6809645d212..2fef1ef931a0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 
 	/* Currently we do not support hierarchy deeper than two level (0,1) */
 	if (parent != cgroup->top_cgroup)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EPERM);
 
 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 	if (!blkcg)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2a5b19204546..a840523e3b40 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1182,9 +1182,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = (bio->bi_rw & REQ_SYNC);
-	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
-	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const bool sync = !!(bio->bi_rw & REQ_SYNC);
+	const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+	const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
 
diff --git a/block/blk-map.c b/block/blk-map.c
index c65d7593f7f1..ade0a08c9099 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << REQ_WRITE);
+		bio->bi_rw |= REQ_WRITE;
 
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3b0cd4249671..eafc94f68d79 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -362,6 +362,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 		return 0;
 
 	/*
+	 * Don't merge file system requests and discard requests
+	 */
+	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+		return 0;
+
+	/*
+	 * Don't merge discard requests and secure discard requests
+	 */
+	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+		return 0;
+
+	/*
 	 * not contiguous
 	 */
 	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 001ab18078f5..0749b89c6885 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
 		kobject_del(&q->kobj);
 		blk_trace_remove_sysfs(disk_to_dev(disk));
+		kobject_put(&dev->kobj);
 		return ret;
 	}
 
diff --git a/block/blk.h b/block/blk.h
index a09c18b19116..faf94f2acb12 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -148,14 +148,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 static inline int blk_cpu_to_group(int cpu)
 {
+	int group = NR_CPUS;
 #ifdef CONFIG_SCHED_MC
 	const struct cpumask *mask = cpu_coregroup_mask(cpu);
-	return cpumask_first(mask);
+	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	return cpumask_first(topology_thread_cpumask(cpu));
+	group = cpumask_first(topology_thread_cpumask(cpu));
 #else
 	return cpu;
 #endif
+	if (likely(group < NR_CPUS))
+		return group;
+	return cpu;
 }
 
 /*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f7dfef..9eba291eb6fd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
 	struct cfq_queue *new_cfqq;
 	struct cfq_group *cfqg;
 	struct cfq_group *orig_cfqg;
+	/* Number of sectors dispatched from queue in single dispatch round */
+	unsigned long nr_sectors;
 };
 
 /*
@@ -198,6 +201,8 @@ struct cfq_group {
 	struct hlist_node cfqd_node;
 	atomic_t ref;
 #endif
+	/* number of requests that are on the dispatch list or inside driver */
+	int dispatched;
 };
 
 /*
@@ -271,6 +276,7 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
 	unsigned int cfq_group_isolation;
 
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
 			&cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+	/*
+	 * If we are not idling on queues and it is a NCQ drive, parallel
+	 * execution of requests is on and measuring time is not possible
+	 * in most of the cases until and unless we drive shallower queue
+	 * depths and that becomes a performance bottleneck. In such cases
+	 * switch to start providing fairness in terms of number of IOs.
+	 */
+	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+		return true;
+	else
+		return false;
+}
+
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
 	if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 		slice_used = cfqq->allocated_slice;
 	}
 
-	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
 	return slice_used;
 }
 
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 				struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl, charge_sl;
+	unsigned int used_sl, charge;
 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 			- cfqg->service_tree_idle.count;
 
 	BUG_ON(nr_sync < 0);
-	used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-	if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-		charge_sl = cfqq->allocated_slice;
+	if (iops_mode(cfqd))
+		charge = cfqq->slice_dispatch;
+	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+		charge = cfqq->allocated_slice;
 
 	/* Can't update vdisktime while group is on service tree */
 	cfq_rb_erase(&cfqg->rb_node, st);
-	cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
+			iops_mode(cfqd), cfqq->nr_sectors);
 	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 */
 	atomic_set(&cfqg->ref, 1);
 
-	/* Add group onto cgroup list */
-	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	/*
+	 * Add group onto cgroup list. It might happen that bdi->dev is
+	 * not initiliazed yet. Initialize this new group without major
+	 * and minor info and this info will be filled in once a new thread
+	 * comes for IO. See code above.
+	 */
+	if (bdi->dev) {
+		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
+	} else
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+					0);
+
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
 	/* Add group on cfqd list */
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	cfqq->allocated_slice = 0;
 	cfqq->slice_end = 0;
 	cfqq->slice_dispatch = 0;
+	cfqq->nr_sectors = 0;
 
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(!service_tree);
 	BUG_ON(!service_tree->count);
 
+	if (!cfqd->cfq_slice_idle)
+		return false;
+
 	/* We never do for idle class queues. */
 	if (prio == IDLE_WORKLOAD)
 		return false;
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 	struct cfq_io_context *cic;
-	unsigned long sl;
+	unsigned long sl, group_idle = 0;
 
 	/*
 	 * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	/*
 	 * idle is disabled, either manually or by past process history
 	 */
-	if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-		return;
+	if (!cfq_should_idle(cfqd, cfqq)) {
+		/* no queue idling. Check for group idling */
+		if (cfqd->cfq_group_idle)
+			group_idle = cfqd->cfq_group_idle;
+		else
+			return;
+	}
 
 	/*
 	 * still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		return;
 	}
 
+	/* There are other queues in the group, don't do group idle */
+	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+		return;
+
 	cfq_mark_cfqq_wait_request(cfqq);
 
-	sl = cfqd->cfq_slice_idle;
+	if (group_idle)
+		sl = cfqd->cfq_group_idle;
+	else
+		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+			group_idle ? 1 : 0);
 }
 
 /*
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 	cfq_remove_request(rq);
 	cfqq->dispatched++;
+	(RQ_CFQG(rq))->dispatched++;
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+	cfqq->nr_sectors += blk_rq_sectors(rq);
 	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 			cfqq = NULL;
 			goto keep_queue;
 		} else
-			goto expire;
+			goto check_group_idle;
 	}
 
 	/*
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * flight or is idling for a new request, allow either of these
 	 * conditions to happen (or time out) before selecting a new queue.
 	 */
-	if (timer_pending(&cfqd->idle_slice_timer) ||
-	    (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+	if (timer_pending(&cfqd->idle_slice_timer)) {
+		cfqq = NULL;
+		goto keep_queue;
+	}
+
+	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+		cfqq = NULL;
+		goto keep_queue;
+	}
+
+	/*
+	 * If group idle is enabled and there are requests dispatched from
+	 * this group, wait for requests to complete.
+	 */
+check_group_idle:
+	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+	    && cfqq->cfqg->dispatched) {
 		cfqq = NULL;
 		goto keep_queue;
 	}
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
+	(RQ_CFQG(rq))->dispatched--;
 	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
 			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
 			rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	 * the queue.
 	 */
 	if (cfq_should_wait_busy(cfqd, cfqq)) {
-		cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+		unsigned long extend_sl = cfqd->cfq_slice_idle;
+		if (!cfqd->cfq_slice_idle)
+			extend_sl = cfqd->cfq_group_idle;
+		cfqq->slice_end = jiffies + extend_sl;
 		cfq_mark_cfqq_wait_busy(cfqq);
 		cfq_log_cfqq(cfqd, cfqq, "will busy wait");
 	}
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_group_idle = cfq_group_idle;
 	cfqd->cfq_latency = 1;
 	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
 	CFQ_ATTR(group_isolation),
 	__ATTR_NULL
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void)
 	if (!cfq_slice_idle)
 		cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	if (!cfq_group_idle)
+		cfq_group_idle = 1;
+#else
+		cfq_group_idle = 0;
+#endif
 	if (cfq_slab_setup())
 		return -ENOMEM;
 
diff --git a/block/elevator.c b/block/elevator.c
index 241c69c45c5f..282e8308f7e2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -877,6 +877,7 @@ int elv_register_queue(struct request_queue *q)
 			}
 		}
 		kobject_uevent(&e->kobj, KOBJ_ADD);
+		e->registered = 1;
 	}
 	return error;
 }
@@ -886,6 +887,7 @@ static void __elv_unregister_queue(struct elevator_queue *e)
 {
 	kobject_uevent(&e->kobj, KOBJ_REMOVE);
 	kobject_del(&e->kobj);
+	e->registered = 0;
 }
 
 void elv_unregister_queue(struct request_queue *q)
@@ -948,18 +950,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
 	struct elevator_queue *old_elevator, *e;
 	void *data;
+	int err;
 
 	/*
 	 * Allocate new elevator
 	 */
 	e = elevator_alloc(q, new_e);
 	if (!e)
-		return 0;
+		return -ENOMEM;
 
 	data = elevator_init_queue(q, e);
 	if (!data) {
 		kobject_put(&e->kobj);
-		return 0;
+		return -ENOMEM;
 	}
 
 	/*
@@ -980,10 +983,13 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 	spin_unlock_irq(q->queue_lock);
 
-	__elv_unregister_queue(old_elevator);
+	if (old_elevator->registered) {
+		__elv_unregister_queue(old_elevator);
 
-	if (elv_register_queue(q))
-		goto fail_register;
+		err = elv_register_queue(q);
+		if (err)
+			goto fail_register;
+	}
 
 	/*
 	 * finally exit old elevator and turn off BYPASS.
@@ -995,7 +1001,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
-	return 1;
+	return 0;
 
 fail_register:
 	/*
@@ -1010,17 +1016,19 @@ fail_register:
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 	spin_unlock_irq(q->queue_lock);
 
-	return 0;
+	return err;
 }
 
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-			  size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
 {
 	char elevator_name[ELV_NAME_MAX];
 	struct elevator_type *e;
 
 	if (!q->elevator)
-		return count;
+		return -ENXIO;
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
 	e = elevator_get(strstrip(elevator_name));
@@ -1031,13 +1039,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 
 	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
 		elevator_put(e);
-		return count;
+		return 0;
 	}
 
-	if (!elevator_switch(q, e))
-		printk(KERN_ERR "elevator: switch to %s failed\n",
-							elevator_name);
-	return count;
+	return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+			  size_t count)
+{
+	int ret;
+
+	if (!q->elevator)
+		return count;
+
+	ret = elevator_change(q, name);
+	if (!ret)
+		return count;
+
+	printk(KERN_ERR "elevator: switch to %s failed\n", name);
+	return ret;
 }
 
 ssize_t elv_iosched_show(struct request_queue *q, char *name)