Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 119
 1 file changed, 101 insertions(+), 18 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f7dfef..9eba291eb6fd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
 	struct cfq_queue *new_cfqq;
 	struct cfq_group *cfqg;
 	struct cfq_group *orig_cfqg;
+	/* Number of sectors dispatched from queue in single dispatch round */
+	unsigned long nr_sectors;
 };
 
 /*
@@ -198,6 +201,8 @@ struct cfq_group {
 	struct hlist_node cfqd_node;
 	atomic_t ref;
 #endif
+	/* number of requests that are on the dispatch list or inside driver */
+	int dispatched;
 };
 
 /*
@@ -271,6 +276,7 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
 	unsigned int cfq_group_isolation;
 
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
 			&cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+	/*
+	 * If we are not idling on queues and it is an NCQ drive, requests
+	 * execute in parallel and measuring time is not possible in most
+	 * cases unless we drive shallower queue depths, and that becomes a
+	 * performance bottleneck. In such cases switch to providing
+	 * fairness in terms of number of IOs.
+	 */
+	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+		return true;
+	else
+		return false;
+}
+
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
 	if (cfq_class_idle(cfqq))
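
Review note: the new helper's body can be collapsed into a single return of the boolean expression. A functionally identical, slightly more idiomatic form (editorial sketch only, not what this patch applies) would be:

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/* Charge groups by IOs dispatched rather than by time when queue
	 * idling is off and the drive does internal queueing (NCQ); see
	 * the comment in the hunk above. */
	return !cfqd->cfq_slice_idle && cfqd->hw_tag;
}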
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 		slice_used = cfqq->allocated_slice;
 	}
 
-	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
 	return slice_used;
 }
 
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 					struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl, charge_sl;
+	unsigned int used_sl, charge;
 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 			- cfqg->service_tree_idle.count;
 
 	BUG_ON(nr_sync < 0);
-	used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-	if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-		charge_sl = cfqq->allocated_slice;
+	if (iops_mode(cfqd))
+		charge = cfqq->slice_dispatch;
+	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+		charge = cfqq->allocated_slice;
 
 	/* Can't update vdisktime while group is on service tree */
 	cfq_rb_erase(&cfqg->rb_node, st);
-	cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 					st->min_vdisktime);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
+			iops_mode(cfqd), cfqq->nr_sectors);
 	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
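
In iops_mode() the group is now charged by the number of requests it dispatched during the slice (cfqq->slice_dispatch) instead of by elapsed time, and the charge is scaled against the group's weight before advancing vdisktime. The standalone sketch below models that accounting under stated assumptions: cfq_scale_slice() is approximated as charge * default_weight / group_weight, and the default weight of 500 is an assumption for illustration, not taken from this patch.

#include <stdio.h>

#define DEFAULT_WEIGHT 500	/* assumed blkio default weight, illustrative only */

/* Rough stand-in for cfq_scale_slice(): charge scaled inversely with weight. */
static unsigned long long scale_charge(unsigned long charge, unsigned int weight)
{
	return (unsigned long long)charge * DEFAULT_WEIGHT / weight;
}

int main(void)
{
	unsigned long long vdisktime_a = 0, vdisktime_b = 0;

	/* Time mode: both groups used an 8-jiffy slice (8 ms at HZ=1000). */
	vdisktime_a += scale_charge(8, 500);	/* weight 500  -> +8 */
	vdisktime_b += scale_charge(8, 1000);	/* weight 1000 -> +4 */

	/* IOPS mode: the charge is the number of requests dispatched instead. */
	vdisktime_a += scale_charge(16, 500);	/* 16 IOs -> +16 */
	vdisktime_b += scale_charge(16, 1000);	/* 16 IOs -> +8  */

	/* The group with the smallest vdisktime is served next, so the
	 * higher-weight group falls behind more slowly in either mode. */
	printf("A=%llu B=%llu\n", vdisktime_a, vdisktime_b);
	return 0;
}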
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 */
 	atomic_set(&cfqg->ref, 1);
 
-	/* Add group onto cgroup list */
-	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	/*
+	 * Add group onto cgroup list. It might happen that bdi->dev is
+	 * not initialized yet. Initialize this new group without major
+	 * and minor info and this info will be filled in once a new thread
+	 * comes for IO. See code above.
+	 */
+	if (bdi->dev) {
+		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
+	} else
+		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+					0);
+
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
 	/* Add group on cfqd list */
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	cfqq->allocated_slice = 0;
 	cfqq->slice_end = 0;
 	cfqq->slice_dispatch = 0;
+	cfqq->nr_sectors = 0;
 
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(!service_tree);
 	BUG_ON(!service_tree->count);
 
+	if (!cfqd->cfq_slice_idle)
+		return false;
+
 	/* We never do for idle class queues. */
 	if (prio == IDLE_WORKLOAD)
 		return false;
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 	struct cfq_io_context *cic;
-	unsigned long sl;
+	unsigned long sl, group_idle = 0;
 
 	/*
 	 * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	/*
 	 * idle is disabled, either manually or by past process history
 	 */
-	if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-		return;
+	if (!cfq_should_idle(cfqd, cfqq)) {
+		/* no queue idling. Check for group idling */
+		if (cfqd->cfq_group_idle)
+			group_idle = cfqd->cfq_group_idle;
+		else
+			return;
+	}
 
 	/*
 	 * still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		return;
 	}
 
+	/* There are other queues in the group, don't do group idle */
+	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+		return;
+
 	cfq_mark_cfqq_wait_request(cfqq);
 
-	sl = cfqd->cfq_slice_idle;
+	if (group_idle)
+		sl = cfqd->cfq_group_idle;
+	else
+		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+			group_idle ? 1 : 0);
 }
 
 /*
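
Taken together, the hunks above mean the idle timer can now be armed for one of two reasons: the classic per-queue window (slice_idle) when cfq_should_idle() holds, or the new per-group window (group_idle) when queue idling is off but this is the last queue in its group. A simplified standalone sketch of that selection, ignoring the function's other early exits (pending requests, think-time checks) and using stand-in types that are not the kernel's:

/* Stand-in parameters; these are not the kernel's cfq_data/cfq_queue. */
struct idle_params {
	unsigned long slice_idle;	/* per-queue idle window, jiffies */
	unsigned long group_idle;	/* per-group idle window, jiffies */
	int should_idle_queue;		/* outcome of cfq_should_idle() */
	int queues_in_group;		/* cfqq->cfqg->nr_cfqq */
};

/* Returns the idle window to arm, or 0 when no idling should happen. */
static unsigned long pick_idle_window(const struct idle_params *p)
{
	if (p->should_idle_queue)
		return p->slice_idle;		/* classic per-queue idling */
	if (p->group_idle && p->queues_in_group == 1)
		return p->group_idle;		/* idle on behalf of the group */
	return 0;				/* move on to the next group */
}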
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 	cfq_remove_request(rq);
 	cfqq->dispatched++;
+	(RQ_CFQG(rq))->dispatched++;
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+	cfqq->nr_sectors += blk_rq_sectors(rq);
 	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 			cfqq = NULL;
 			goto keep_queue;
 		} else
-			goto expire;
+			goto check_group_idle;
 	}
 
 	/*
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * flight or is idling for a new request, allow either of these
 	 * conditions to happen (or time out) before selecting a new queue.
 	 */
-	if (timer_pending(&cfqd->idle_slice_timer) ||
-	    (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+	if (timer_pending(&cfqd->idle_slice_timer)) {
+		cfqq = NULL;
+		goto keep_queue;
+	}
+
+	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+		cfqq = NULL;
+		goto keep_queue;
+	}
+
+	/*
+	 * If group idle is enabled and there are requests dispatched from
+	 * this group, wait for requests to complete.
+	 */
+check_group_idle:
+	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+	    && cfqq->cfqg->dispatched) {
 		cfqq = NULL;
 		goto keep_queue;
 	}
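
The rewritten block splits the old combined condition into explicit keep_queue cases and adds the check_group_idle fall-through used when the active queue's slice has expired with no requests left: without it, a group whose last queue briefly went empty would be expired immediately and lose its share whenever slice_idle is 0. A condensed sketch of the keep-or-expire decision for that path, using stand-in fields rather than the kernel structures:

/* Stand-in fields; a condensed view of the three keep_queue branches. */
struct select_state {
	int timer_pending;		/* idle_slice_timer is armed */
	int queue_dispatched;		/* requests from this queue in flight */
	int should_idle_queue;		/* cfq_should_idle() */
	unsigned long group_idle;	/* cfqd->cfq_group_idle */
	int queues_in_group;		/* cfqq->cfqg->nr_cfqq */
	int group_dispatched;		/* cfqq->cfqg->dispatched */
};

/* Non-zero: keep the expired queue active instead of selecting a new one. */
static int keep_current_queue(const struct select_state *s)
{
	if (s->timer_pending)
		return 1;
	if (s->queue_dispatched && s->should_idle_queue)
		return 1;
	if (s->group_idle && s->queues_in_group == 1 && s->group_dispatched)
		return 1;
	return 0;
}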
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
+	(RQ_CFQG(rq))->dispatched--;
 	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
 			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
 			rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	 * the queue.
 	 */
 	if (cfq_should_wait_busy(cfqd, cfqq)) {
-		cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+		unsigned long extend_sl = cfqd->cfq_slice_idle;
+		if (!cfqd->cfq_slice_idle)
+			extend_sl = cfqd->cfq_group_idle;
+		cfqq->slice_end = jiffies + extend_sl;
 		cfq_mark_cfqq_wait_busy(cfqq);
 		cfq_log_cfqq(cfqd, cfqq, "will busy wait");
 	}
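
The wait-busy extension previously always used slice_idle; since slice_idle may now be 0 in IOPS mode, the grace window falls back to group_idle so the group's last queue still gets a chance to receive new IO before the group is expired. In sketch form (illustrative helper, not part of the patch):

/* Illustrative only: the busy-wait extension granted to a group's last
 * queue once its slice has been used up. */
static unsigned long wait_busy_window(unsigned long slice_idle,
				      unsigned long group_idle)
{
	/* With per-queue idling disabled (IOPS mode), fall back to the
	 * group idle window instead of extending the slice by zero. */
	return slice_idle ? slice_idle : group_idle;
}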
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_group_idle = cfq_group_idle;
 	cfqd->cfq_latency = 1;
 	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
 	CFQ_ATTR(group_isolation),
 	__ATTR_NULL
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void)
 	if (!cfq_slice_idle)
 		cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	if (!cfq_group_idle)
+		cfq_group_idle = 1;
+#else
+	cfq_group_idle = 0;
+#endif
 	if (cfq_slab_setup())
 		return -ENOMEM;
 
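
With this, group_idle defaults to HZ/125 (8 ms at HZ=1000), the same as slice_idle, is forced to 0 when CONFIG_CFQ_GROUP_IOSCHED is not built in, and is exported as a per-device tunable alongside slice_idle (for CFQ that normally means /sys/block/<dev>/queue/iosched/group_idle). The configuration this patch targets is slice_idle=0 on fast, queueing storage with group_idle left at its default, which switches accounting to IOPS mode while still preserving group fairness. A small worked example of the default value (illustrative user-space code, not part of the patch):

#include <stdio.h>

/* Mirror of the boot-time default: cfq_group_idle = HZ / 125, floored to
 * one jiffy by cfq_init() when CONFIG_CFQ_GROUP_IOSCHED is enabled. */
static unsigned int default_group_idle(unsigned int hz)
{
	unsigned int jiffies = hz / 125;

	return jiffies ? jiffies : 1;
}

int main(void)
{
	/* HZ=1000 -> 8 jiffies (8 ms); HZ=100 -> 0, floored to 1 jiffy (10 ms). */
	printf("HZ=1000: %u jiffies\n", default_group_idle(1000));
	printf("HZ=100:  %u jiffies\n", default_group_idle(100));
	return 0;
}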