 block/cfq-iosched.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 56 insertions(+), 9 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3fc6be110c1d..85e48192754d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -198,6 +199,8 @@ struct cfq_group {
         struct hlist_node cfqd_node;
         atomic_t ref;
 #endif
+        /* number of requests that are on the dispatch list or inside driver */
+        int dispatched;
 };
 
 /*
@@ -271,6 +274,7 @@ struct cfq_data {
         unsigned int cfq_slice[2];
         unsigned int cfq_slice_async_rq;
         unsigned int cfq_slice_idle;
+        unsigned int cfq_group_idle;
         unsigned int cfq_latency;
         unsigned int cfq_group_isolation;
 
@@ -1884,7 +1888,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
         struct cfq_queue *cfqq = cfqd->active_queue;
         struct cfq_io_context *cic;
-        unsigned long sl;
+        unsigned long sl, group_idle = 0;
 
         /*
          * SSD device without seek penalty, disable idling. But only do so
@@ -1900,8 +1904,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         /*
          * idle is disabled, either manually or by past process history
          */
-        if (!cfq_should_idle(cfqd, cfqq))
-                return;
+        if (!cfq_should_idle(cfqd, cfqq)) {
+                /* no queue idling. Check for group idling */
+                if (cfqd->cfq_group_idle)
+                        group_idle = cfqd->cfq_group_idle;
+                else
+                        return;
+        }
 
         /*
          * still active requests from this queue, don't idle
@@ -1928,13 +1937,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                 return;
         }
 
+        /* There are other queues in the group, don't do group idle */
+        if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+                return;
+
         cfq_mark_cfqq_wait_request(cfqq);
 
-        sl = cfqd->cfq_slice_idle;
+        if (group_idle)
+                sl = cfqd->cfq_group_idle;
+        else
+                sl = cfqd->cfq_slice_idle;
 
         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
         cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+                        group_idle ? 1 : 0);
 }
 
 /*
@@ -1950,6 +1967,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
         cfq_remove_request(rq);
         cfqq->dispatched++;
+        (RQ_CFQG(rq))->dispatched++;
         elv_dispatch_sort(q, rq);
 
         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
@@ -2219,7 +2237,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                         cfqq = NULL;
                         goto keep_queue;
                 } else
-                        goto expire;
+                        goto check_group_idle;
         }
 
         /*
@@ -2247,8 +2265,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
          * flight or is idling for a new request, allow either of these
          * conditions to happen (or time out) before selecting a new queue.
          */
-        if (timer_pending(&cfqd->idle_slice_timer) ||
-            (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+        if (timer_pending(&cfqd->idle_slice_timer)) {
+                cfqq = NULL;
+                goto keep_queue;
+        }
+
+        if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+                cfqq = NULL;
+                goto keep_queue;
+        }
+
+        /*
+         * If group idle is enabled and there are requests dispatched from
+         * this group, wait for requests to complete.
+         */
+check_group_idle:
+        if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+            && cfqq->cfqg->dispatched) {
                 cfqq = NULL;
                 goto keep_queue;
         }
@@ -3396,6 +3429,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         WARN_ON(!cfqq->dispatched);
         cfqd->rq_in_driver--;
         cfqq->dispatched--;
+        (RQ_CFQG(rq))->dispatched--;
         cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
                         rq_start_time_ns(rq), rq_io_start_time_ns(rq),
                         rq_data_dir(rq), rq_is_sync(rq));
@@ -3425,7 +3459,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
          * the queue.
          */
         if (cfq_should_wait_busy(cfqd, cfqq)) {
-                cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+                unsigned long extend_sl = cfqd->cfq_slice_idle;
+                if (!cfqd->cfq_slice_idle)
+                        extend_sl = cfqd->cfq_group_idle;
+                cfqq->slice_end = jiffies + extend_sl;
                 cfq_mark_cfqq_wait_busy(cfqq);
                 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
         }
@@ -3871,6 +3908,7 @@ static void *cfq_init_queue(struct request_queue *q)
         cfqd->cfq_slice[1] = cfq_slice_sync;
         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
         cfqd->cfq_slice_idle = cfq_slice_idle;
+        cfqd->cfq_group_idle = cfq_group_idle;
         cfqd->cfq_latency = 1;
         cfqd->cfq_group_isolation = 0;
         cfqd->hw_tag = -1;
@@ -3943,6 +3981,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3975,6 +4014,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
                 UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3996,6 +4036,7 @@ static struct elv_fs_entry cfq_attrs[] = {
         CFQ_ATTR(slice_async),
         CFQ_ATTR(slice_async_rq),
         CFQ_ATTR(slice_idle),
+        CFQ_ATTR(group_idle),
         CFQ_ATTR(low_latency),
         CFQ_ATTR(group_isolation),
         __ATTR_NULL
@@ -4049,6 +4090,12 @@ static int __init cfq_init(void)
         if (!cfq_slice_idle)
                 cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+        if (!cfq_group_idle)
+                cfq_group_idle = 1;
+#else
+        cfq_group_idle = 0;
+#endif
         if (cfq_slab_setup())
                 return -ENOMEM;
 