author    Jens Axboe <axboe@suse.de>  2006-06-16 05:23:00 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>  2006-06-23 11:10:39 -0400
commit    caaa5f9f0a75d1dc5e812e69afdbb8720e077fd3
tree      07aba6bac5a57cb3efd23762acfd719e255775ab
parent    35e6077cb16f93517ba5a51ba849b186d2474d60
[PATCH] cfq-iosched: many performance fixes
This is a collection of patches that greatly improve CFQ performance in some circumstances.

- Change the idling logic to only kick in after a request is done and we are deciding what to do. Before, the idling included the request service time, so it was hard to adjust. Now it's true think/idle time.

- Take advantage of TCQ/NCQ/queueing for seeky sync workloads, but keep it in control for sync and sequential (or close to) workloads.

- Expire queues immediately and move on to other busy queues, if we are not going to idle after the current one finishes.

- Don't rearm the idle timer if there are no busy queues. Just leave the system idle.

Signed-off-by: Jens Axboe <axboe@suse.de>
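The idling change described above boils down to a per-process decision made after a request completes. Below is a minimal standalone sketch of that decision (illustrative only: the struct and function names here are made up; the real logic lives in cfq_arm_slice_timer() and cfq_update_idle_window() in the diff that follows):

/* Illustrative sketch, not the kernel code. */
#include <stdbool.h>

#define SEEKY_THRESHOLD	(128 * 1024)	/* same cutoff as CIC_SEEKY() below */

struct proc_io_stats {
	bool		hw_tag;		/* device does its own queueing (TCQ/NCQ) */
	unsigned long	seek_mean;	/* mean seek distance of this process */
	unsigned long	ttime_mean;	/* mean think time after a completion */
	unsigned long	slice_idle;	/* configured idle window, in jiffies */
};

/*
 * After a request completes and this queue has nothing pending: idle and
 * wait for the process to submit more I/O, or expire the queue and move
 * on to another busy queue immediately?
 */
static bool should_idle_for(const struct proc_io_stats *s)
{
	bool seeky = s->seek_mean > SEEKY_THRESHOLD;

	/* Seeky sync workloads on queueing hardware: don't idle, let
	 * TCQ/NCQ absorb the seeks across queues instead. */
	if (!s->slice_idle || (s->hw_tag && seeky))
		return false;

	/* Idle only if the process historically submits its next request
	 * within the idle window -- true think time, not service time. */
	return s->ttime_mean <= s->slice_idle;
}

In the patch, the per-process statistics (seek_mean, ttime_mean) are kept in the cfq_io_context, and the check is applied both when arming the idle timer and when updating the idle window, as shown below.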
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 116
1 file changed, 76 insertions(+), 40 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 84b75f88c279..13c4793fdf5f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,7 +26,7 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 70;
+static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_IDLE_GRACE		(HZ / 10)
 #define CFQ_SLICE_SCALE		(5)
@@ -906,6 +906,8 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 	return cfqq;
 }
 
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 {
@@ -939,7 +941,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * fair distribution of slice time for a process doing back-to-back
 	 * seeks. so allow a little bit of time for him to submit a new rq
 	 */
-	if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
 		sl = 2;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
@@ -1038,8 +1040,10 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 */
 	if (!RB_EMPTY(&cfqq->sort_list))
 		goto keep_queue;
-	else if (cfq_cfqq_class_sync(cfqq) &&
-		 time_before(now, cfqq->slice_end)) {
+	else if (cfq_cfqq_dispatched(cfqq)) {
+		cfqq = NULL;
+		goto keep_queue;
+	} else if (cfq_cfqq_class_sync(cfqq)) {
 		if (cfq_arm_slice_timer(cfqd, cfqq))
 			return NULL;
 	}
@@ -1088,8 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	} while (dispatched < max_dispatch);
 
 	/*
-	 * if slice end isn't set yet, set it. if at least one request was
-	 * sync, use the sync time slice value
+	 * if slice end isn't set yet, set it.
 	 */
 	if (!cfqq->slice_end)
 		cfq_set_prio_slice(cfqd, cfqq);
@@ -1100,7 +1103,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 */
 	if ((!cfq_cfqq_sync(cfqq) &&
 	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))
+	    cfq_class_idle(cfqq) ||
+	    !cfq_cfqq_idle_window(cfqq))
 		cfq_slice_expired(cfqd, 0);
 
 	return dispatched;
@@ -1109,10 +1113,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static int
 cfq_forced_dispatch_cfqqs(struct list_head *list)
 {
-	int dispatched = 0;
 	struct cfq_queue *cfqq, *next;
 	struct cfq_rq *crq;
+	int dispatched;
 
+	dispatched = 0;
 	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
 		while ((crq = cfqq->next_crq)) {
 			cfq_dispatch_insert(cfqq->cfqd->queue, crq);
@@ -1120,6 +1125,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list)
 		}
 		BUG_ON(!list_empty(&cfqq->fifo));
 	}
+
 	return dispatched;
 }
 
@@ -1146,7 +1152,8 @@ static int
 cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq, *prev_cfqq;
+	int dispatched;
 
 	if (!cfqd->busy_queues)
 		return 0;
@@ -1154,10 +1161,17 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);
 
-	cfqq = cfq_select_queue(cfqd);
-	if (cfqq) {
+	dispatched = 0;
+	prev_cfqq = NULL;
+	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
 		int max_dispatch;
 
+		/*
+		 * Don't repeat dispatch from the previous queue.
+		 */
+		if (prev_cfqq == cfqq)
+			break;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
@@ -1166,10 +1180,19 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 		if (cfq_class_idle(cfqq))
 			max_dispatch = 1;
 
-		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+
+		/*
+		 * If the dispatch cfqq has idling enabled and is still
+		 * the active queue, break out.
+		 */
+		if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
+			break;
+
+		prev_cfqq = cfqq;
 	}
 
-	return 0;
+	return dispatched;
 }
 
 /*
@@ -1375,24 +1398,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 	struct cfq_queue *cfqq;
-	if (cfqd) {
-		spin_lock(cfqd->queue->queue_lock);
-		cfqq = cic->cfqq[ASYNC];
-		if (cfqq) {
-			struct cfq_queue *new_cfqq;
-			new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
-						 cic->ioc->task, GFP_ATOMIC);
-			if (new_cfqq) {
-				cic->cfqq[ASYNC] = new_cfqq;
-				cfq_put_queue(cfqq);
-			}
-		}
-		cfqq = cic->cfqq[SYNC];
-		if (cfqq)
-			cfq_mark_cfqq_prio_changed(cfqq);
 
-		spin_unlock(cfqd->queue->queue_lock);
+	if (unlikely(!cfqd))
+		return;
+
+	spin_lock(cfqd->queue->queue_lock);
+
+	cfqq = cic->cfqq[ASYNC];
+	if (cfqq) {
+		struct cfq_queue *new_cfqq;
+		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+					 GFP_ATOMIC);
+		if (new_cfqq) {
+			cic->cfqq[ASYNC] = new_cfqq;
+			cfq_put_queue(cfqq);
+		}
 	}
+
+	cfqq = cic->cfqq[SYNC];
+	if (cfqq)
+		cfq_mark_cfqq_prio_changed(cfqq);
+
+	spin_unlock(cfqd->queue->queue_lock);
 }
 
 /*
@@ -1461,8 +1488,7 @@ retry:
 		 * set ->slice_left to allow preemption for a new process
 		 */
 		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-		if (!cfqd->hw_tag)
-			cfq_mark_cfqq_idle_window(cfqq);
+		cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_init_prio_data(cfqq);
 	}
@@ -1653,7 +1679,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int enable_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
+	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+	    (cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1683,7 +1710,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return 0;
 
 	if (!cfqq)
-		return 1;
+		return 0;
 
 	if (cfq_class_idle(cfqq))
 		return 1;
@@ -1715,7 +1742,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
 
 	cfqq->slice_end = cfqq->slice_left + jiffies;
-	__cfq_slice_expired(cfqd, cfqq, 1);
+	cfq_slice_expired(cfqd, 1);
 	__cfq_set_active_queue(cfqd, cfqq);
 }
 
@@ -1834,11 +1861,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 			cfqq->service_last = now;
 			cfq_resort_rr_list(cfqq, 0);
 		}
-		cfq_schedule_dispatch(cfqd);
 	}
 
-	if (cfq_crq_is_sync(crq))
+	if (sync)
 		crq->io_context->last_end_request = now;
+
+	/*
+	 * If this is the active queue, check if it needs to be expired,
+	 * or if we want to idle in case it has no pending requests.
+	 */
+	if (cfqd->active_queue == cfqq) {
+		if (time_after(now, cfqq->slice_end))
+			cfq_slice_expired(cfqd, 0);
+		else if (sync && RB_EMPTY(&cfqq->sort_list)) {
+			if (!cfq_arm_slice_timer(cfqd, cfqq))
+				cfq_schedule_dispatch(cfqd);
+		}
+	}
 }
 
 static struct request *
@@ -2106,11 +2145,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 	 * only expire and reinvoke request handler, if there are
 	 * other queues with pending requests
 	 */
-	if (!cfqd->busy_queues) {
-		cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
-		add_timer(&cfqd->idle_slice_timer);
+	if (!cfqd->busy_queues)
 		goto out_cont;
-	}
 
 	/*
 	 * not expired and it has a request pending, let it dispatch