author	Jens Axboe <axboe@suse.de>	2005-10-20 10:42:29 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2005-10-28 02:45:08 -0400
commit	b4878f245ec8e168cdd1f170f823a750b7dd4af5 (patch)
tree	de784c2a7e1174e4843807998f0356bf92ee78be /drivers
parent	d9ebb192aa13a026edc6faff137dcb14f2c91731 (diff)
[PATCH] 02/05: update ioscheds to use generic dispatch queue
This patch updates all four ioscheds to use the generic dispatch queue.
There is one behavior change in as-iosched.

* In as-iosched, when force dispatching (ELEVATOR_INSERT_BACK),
  batch_data_dir is reset to REQ_SYNC and changed_batch and new_batch
  are cleared to zero. This prevents AS from doing an incorrect
  update_write_batch after the forced dispatched requests are finished.

* In cfq-iosched, cfqd->rq_in_driver currently counts the number of
  activated (removed) requests to determine whether queue-kicking is
  needed and cfq_max_depth has been reached. With the generic dispatch
  queue, I think counting the number of dispatched requests would be
  more appropriate.

* cfq_max_depth can be lowered to 1 again.

Original from Tejun Heo, modified version applied.

Signed-off-by: Jens Axboe <axboe@suse.de>
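For context on the interface this series moves to: an io scheduler no longer keeps a private dispatch list or implements the next_req/insert(where) hooks. It queues requests internally from its elevator_add_req_fn and hands them to the block layer's dispatch queue from its elevator_dispatch_fn, using the elv_dispatch_sort()/elv_dispatch_add_tail() helpers seen in the hunks below. What follows is a minimal sketch of that contract, distilled from the noop and deadline changes in this patch; the example_data structure and the function names are hypothetical, only the hook signatures and the elv_dispatch_* helpers come from the patch itself.

/*
 * Hypothetical minimal io scheduler against the new dispatch interface.
 * Only the hook signatures and elv_dispatch_add_tail()/elv_dispatch_sort()
 * are taken from this patch; everything else is illustrative.
 */
struct example_data {
	struct list_head fifo;		/* requests not yet dispatched */
};

static void example_add_request(request_queue_t *q, struct request *rq)
{
	struct example_data *ed = q->elevator->elevator_data;

	/* queue privately; nothing is put on q->queue_head here anymore */
	list_add_tail(&rq->queuelist, &ed->fifo);
}

static int example_dispatch(request_queue_t *q, int force)
{
	struct example_data *ed = q->elevator->elevator_data;
	int dispatched = 0;

	/*
	 * Move requests to the generic dispatch queue. When force is set
	 * (barriers, elevator switch), everything must be drained at once.
	 */
	while (!list_empty(&ed->fifo)) {
		struct request *rq = list_entry_rq(ed->fifo.next);

		list_del_init(&rq->queuelist);
		elv_dispatch_add_tail(q, rq);	/* or elv_dispatch_sort() */
		dispatched++;

		if (!force)
			break;
	}

	return dispatched;	/* 0 means nothing was available */
}

These would be wired up as .elevator_add_req_fn and .elevator_dispatch_fn in struct elevator_type, as the real schedulers do at the bottom of each file below.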
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/as-iosched.c	290
-rw-r--r--	drivers/block/cfq-iosched.c	340
-rw-r--r--	drivers/block/deadline-iosched.c	95
-rw-r--r--	drivers/block/noop-iosched.c	17
4 files changed, 187 insertions(+), 555 deletions(-)
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 95c0a3690b0f..1775ffe9edc7 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -98,7 +98,6 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
 	return ioc;
 }
 
+static void as_put_io_context(struct as_rq *arq)
+{
+	struct as_io_context *aic;
+
+	if (unlikely(!arq->io_context))
+		return;
+
+	aic = arq->io_context->aic;
+
+	if (arq->is_sync == REQ_SYNC && aic) {
+		spin_lock(&aic->lock);
+		set_bit(AS_TASK_IORUNNING, &aic->state);
+		aic->last_end_request = jiffies;
+		spin_unlock(&aic->lock);
+	}
+
+	put_io_context(arq->io_context);
+}
+
 /*
  * the back merge hash support functions
  */
@@ -950,23 +968,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 
 	WARN_ON(!list_empty(&rq->queuelist));
 
-	if (arq->state == AS_RQ_PRESCHED) {
-		WARN_ON(arq->io_context);
-		goto out;
-	}
-
-	if (arq->state == AS_RQ_MERGED)
-		goto out_ioc;
-
 	if (arq->state != AS_RQ_REMOVED) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 		goto out;
 	}
 
-	if (!blk_fs_request(rq))
-		goto out;
-
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
@@ -1001,21 +1008,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	}
 }
 
-out_ioc:
-	if (!arq->io_context)
-		goto out;
-
-	if (arq->is_sync == REQ_SYNC) {
-		struct as_io_context *aic = arq->io_context->aic;
-		if (aic) {
-			spin_lock(&aic->lock);
-			set_bit(AS_TASK_IORUNNING, &aic->state);
-			aic->last_end_request = jiffies;
-			spin_unlock(&aic->lock);
-		}
-	}
-
-	put_io_context(arq->io_context);
+	as_put_io_context(arq);
 out:
 	arq->state = AS_RQ_POSTSCHED;
 }
@@ -1052,68 +1045,6 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 }
 
 /*
- * as_remove_dispatched_request is called to remove a request which has gone
- * to the dispatch list.
- */
-static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-	struct as_io_context *aic;
-
-	if (!arq) {
-		WARN_ON(1);
-		return;
-	}
-
-	WARN_ON(arq->state != AS_RQ_DISPATCHED);
-	WARN_ON(ON_RB(&arq->rb_node));
-	if (arq->io_context && arq->io_context->aic) {
-		aic = arq->io_context->aic;
-		if (aic) {
-			WARN_ON(!atomic_read(&aic->nr_dispatched));
-			atomic_dec(&aic->nr_dispatched);
-		}
-	}
-}
-
-/*
- * as_remove_request is called when a driver has finished with a request.
- * This should be only called for dispatched requests, but for some reason
- * a POWER4 box running hwscan it does not.
- */
-static void as_remove_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-
-	if (unlikely(arq->state == AS_RQ_NEW))
-		goto out;
-
-	if (ON_RB(&arq->rb_node)) {
-		if (arq->state != AS_RQ_QUEUED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		/*
-		 * We'll lose the aliased request(s) here. I don't think this
-		 * will ever happen, but if it does, hopefully someone will
-		 * report it.
-		 */
-		WARN_ON(!list_empty(&rq->queuelist));
-		as_remove_queued_request(q, rq);
-	} else {
-		if (arq->state != AS_RQ_DISPATCHED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		as_remove_dispatched_request(q, rq);
-	}
-out:
-	arq->state = AS_RQ_REMOVED;
-}
-
-/*
  * as_fifo_expired returns 0 if there are no expired reads on the fifo,
  * 1 otherwise. It is ratelimited so that we only perform the check once per
  * `fifo_expire' interval. Otherwise a large number of expired requests
@@ -1165,7 +1096,6 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	struct list_head *insert;
 	const int data_dir = arq->is_sync;
 
 	BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1128,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	/*
 	 * take it off the sort and fifo list, add to dispatch queue
 	 */
-	insert = ad->dispatch->prev;
-
 	while (!list_empty(&rq->queuelist)) {
 		struct request *__rq = list_entry_rq(rq->queuelist.next);
 		struct as_rq *__arq = RQ_DATA(__rq);
 
-		list_move_tail(&__rq->queuelist, ad->dispatch);
+		list_del(&__rq->queuelist);
+
+		elv_dispatch_add_tail(ad->q, __rq);
 
 		if (__arq->io_context && __arq->io_context->aic)
 			atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1148,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	as_remove_queued_request(ad->q, rq);
 	WARN_ON(arq->state != AS_RQ_QUEUED);
 
-	list_add(&rq->queuelist, insert);
+	elv_dispatch_sort(ad->q, rq);
+
 	arq->state = AS_RQ_DISPATCHED;
 	if (arq->io_context && arq->io_context->aic)
 		atomic_inc(&arq->io_context->aic->nr_dispatched);
@@ -1230,12 +1161,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(struct as_data *ad)
+static int as_dispatch_request(request_queue_t *q, int force)
 {
+	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
 	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
 
+	if (unlikely(force)) {
+		/*
+		 * Forced dispatch, accounting is useless.  Reset
+		 * accounting states and dump fifo_lists.  Note that
+		 * batch_data_dir is reset to REQ_SYNC to avoid
+		 * screwing write batch accounting as write batch
+		 * accounting occurs on W->R transition.
+		 */
+		int dispatched = 0;
+
+		ad->batch_data_dir = REQ_SYNC;
+		ad->changed_batch = 0;
+		ad->new_batch = 0;
+
+		while (ad->next_arq[REQ_SYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+		while (ad->next_arq[REQ_ASYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+		return dispatched;
+	}
+
 	/* Signal that the write batch was uncontended, so we can't time it */
 	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
 		if (ad->current_write_count == 0 || !writes)
@@ -1359,20 +1320,6 @@ fifo_expired:
 	return 1;
 }
 
-static struct request *as_next_request(request_queue_t *q)
-{
-	struct as_data *ad = q->elevator->elevator_data;
-	struct request *rq = NULL;
-
-	/*
-	 * if there are still requests on the dispatch queue, grab the first
-	 */
-	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
-		rq = list_entry_rq(ad->dispatch->next);
-
-	return rq;
-}
-
 /*
  * Add arq to a list behind alias
  */
@@ -1410,11 +1357,19 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia
 /*
  * add arq to rbtree and fifo
  */
-static void as_add_request(struct as_data *ad, struct as_rq *arq)
+static void as_add_request(request_queue_t *q, struct request *rq)
 {
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
 	struct as_rq *alias;
 	int data_dir;
 
+	if (arq->state != AS_RQ_PRESCHED) {
+		printk("arq->state: %d\n", arq->state);
+		WARN_ON(1);
+	}
+	arq->state = AS_RQ_NEW;
+
 	if (rq_data_dir(arq->request) == READ
 			|| current->flags&PF_SYNCWRITE)
 		arq->is_sync = 1;
@@ -1463,96 +1418,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 	arq->state = AS_RQ_QUEUED;
 }
 
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state == AS_RQ_REMOVED) {
-			arq->state = AS_RQ_DISPATCHED;
-			if (arq->io_context && arq->io_context->aic)
-				atomic_inc(&arq->io_context->aic->nr_dispatched);
-		}
-	} else
-		WARN_ON(blk_fs_request(rq)
-			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
-
-	/* Stop anticipating - let this request get through */
-	as_antic_stop(ad);
-}
-
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
-{
-	as_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
-}
-
-/*
- * Account a request that is inserted directly onto the dispatch queue.
- * arq->io_context->aic->nr_dispatched should not need to be incremented
- * because only new requests should come through here: requeues go through
- * our explicit requeue handler.
- */
-static void as_account_queued_request(struct as_data *ad, struct request *rq)
-{
-	if (blk_fs_request(rq)) {
-		struct as_rq *arq = RQ_DATA(rq);
-		arq->state = AS_RQ_DISPATCHED;
-		ad->nr_dispatched++;
-	}
+	WARN_ON(arq->state != AS_RQ_DISPATCHED);
+	arq->state = AS_RQ_REMOVED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_dec(&arq->io_context->aic->nr_dispatched);
 }
 
-static void
-as_insert_request(request_queue_t *q, struct request *rq, int where)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state != AS_RQ_PRESCHED) {
-			printk("arq->state: %d\n", arq->state);
-			WARN_ON(1);
-		}
-		arq->state = AS_RQ_NEW;
-	}
-
-	/* barriers must flush the reorder queue */
-	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-			&& where == ELEVATOR_INSERT_SORT)) {
-		WARN_ON(1);
-		where = ELEVATOR_INSERT_BACK;
-	}
-
-	switch (where) {
-		case ELEVATOR_INSERT_BACK:
-			while (ad->next_arq[REQ_SYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-
-			while (ad->next_arq[REQ_ASYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-
-			list_add_tail(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_FRONT:
-			list_add(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_SORT:
-			BUG_ON(!blk_fs_request(rq));
-			as_add_request(ad, arq);
-			break;
-		default:
-			BUG();
-			return;
-	}
+	WARN_ON(arq->state != AS_RQ_REMOVED);
+	arq->state = AS_RQ_DISPATCHED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_inc(&arq->io_context->aic->nr_dispatched);
 }
 
 /*
@@ -1565,12 +1448,8 @@ static int as_queue_empty(request_queue_t *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
-	if (!list_empty(&ad->fifo_list[REQ_ASYNC])
-		|| !list_empty(&ad->fifo_list[REQ_SYNC])
-		|| !list_empty(ad->dispatch))
-		return 0;
-
-	return 1;
+	return list_empty(&ad->fifo_list[REQ_ASYNC])
+		&& list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
 static struct request *
@@ -1763,6 +1642,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
 	 * kill knowledge of next, this one is a goner
 	 */
 	as_remove_queued_request(q, next);
+	as_put_io_context(anext);
 
 	anext->state = AS_RQ_MERGED;
 }
@@ -1782,7 +1662,7 @@ static void as_work_handler(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (as_next_request(q))
+	if (!as_queue_empty(q))
 		q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1797,7 +1677,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 		return;
 	}
 
-	if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+		     arq->state != AS_RQ_PRESCHED &&
+		     arq->state != AS_RQ_MERGED)) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 	}
@@ -1907,7 +1789,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
 	ad->sort_list[REQ_SYNC] = RB_ROOT;
 	ad->sort_list[REQ_ASYNC] = RB_ROOT;
-	ad->dispatch = &q->queue_head;
 	ad->fifo_expire[REQ_SYNC] = default_read_expire;
 	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
 	ad->antic_expire = default_antic_expire;
@@ -2072,10 +1953,9 @@ static struct elevator_type iosched_as = {
 	.elevator_merge_fn =		as_merge,
 	.elevator_merged_fn =		as_merged_request,
 	.elevator_merge_req_fn =	as_merged_requests,
-	.elevator_next_req_fn =		as_next_request,
-	.elevator_add_req_fn =		as_insert_request,
-	.elevator_remove_req_fn =	as_remove_request,
-	.elevator_requeue_req_fn =	as_requeue_request,
+	.elevator_dispatch_fn =		as_dispatch_request,
+	.elevator_add_req_fn =		as_add_request,
+	.elevator_activate_req_fn =	as_activate_request,
 	.elevator_deactivate_req_fn =	as_deactivate_request,
 	.elevator_queue_empty_fn =	as_queue_empty,
 	.elevator_completed_req_fn =	as_completed_request,
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64ec..7b14160e0798 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -84,7 +84,6 @@ static int cfq_max_depth = 2;
 	(node)->rb_left = NULL;	\
 } while (0)
 #define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
-#define ON_RB(node)		((node)->rb_color != RB_NONE)
 #define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)		(rq)->sector
 
@@ -271,10 +270,7 @@ CFQ_CFQQ_FNS(expired);
 #undef CFQ_CFQQ_FNS
 
 enum cfq_rq_state_flags {
-	CFQ_CRQ_FLAG_in_flight = 0,
-	CFQ_CRQ_FLAG_in_driver,
-	CFQ_CRQ_FLAG_is_sync,
-	CFQ_CRQ_FLAG_requeued,
+	CFQ_CRQ_FLAG_is_sync = 0,
 };
 
 #define CFQ_CRQ_FNS(name)						\
@@ -291,14 +287,11 @@ static inline int cfq_crq_##name(const struct cfq_rq *crq) \
 	return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;	\
 }
 
-CFQ_CRQ_FNS(in_flight);
-CFQ_CRQ_FNS(in_driver);
 CFQ_CRQ_FNS(is_sync);
-CFQ_CRQ_FNS(requeued);
 #undef CFQ_CRQ_FNS
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
-static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
 
 #define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
@@ -347,18 +340,13 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 	return NULL;
 }
 
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
-	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+	if (!cfqd->rq_in_driver && cfqd->busy_queues)
 		kblockd_schedule_work(&cfqd->unplug_work);
 }
 
@@ -366,7 +354,7 @@ static int cfq_queue_empty(request_queue_t *q)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
-	return !cfq_pending_requests(cfqd);
+	return !cfqd->busy_queues;
 }
 
 /*
@@ -386,11 +374,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 	if (crq2 == NULL)
 		return crq1;
 
-	if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
-		return crq1;
-	else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
-		return crq2;
-
 	if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
 		return crq1;
 	else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
@@ -461,10 +444,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
 	struct rb_node *rbnext, *rbprev;
 
-	rbnext = NULL;
-	if (ON_RB(&last->rb_node))
-		rbnext = rb_next(&last->rb_node);
-	if (!rbnext) {
+	if (!(rbnext = rb_next(&last->rb_node))) {
 		rbnext = rb_first(&cfqq->sort_list);
 		if (rbnext == &last->rb_node)
 			rbnext = NULL;
@@ -545,13 +525,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
  * the pending list according to last request service
  */
 static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
 
-	cfq_resort_rr_list(cfqq, requeue);
+	cfq_resort_rr_list(cfqq, 0);
 }
 
 static inline void
@@ -571,22 +551,19 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 {
 	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int sync = cfq_crq_is_sync(crq);
 
-	if (ON_RB(&crq->rb_node)) {
-		struct cfq_data *cfqd = cfqq->cfqd;
-		const int sync = cfq_crq_is_sync(crq);
-
-		BUG_ON(!cfqq->queued[sync]);
-		cfqq->queued[sync]--;
+	BUG_ON(!cfqq->queued[sync]);
+	cfqq->queued[sync]--;
 
-		cfq_update_next_crq(crq);
+	cfq_update_next_crq(crq);
 
-		rb_erase(&crq->rb_node, &cfqq->sort_list);
-		RB_CLEAR_COLOR(&crq->rb_node);
+	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	RB_CLEAR_COLOR(&crq->rb_node);
 
-		if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
-			cfq_del_cfqq_rr(cfqd, cfqq);
-	}
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static struct cfq_rq *
@@ -627,12 +604,12 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 	 * if that happens, put the alias on the dispatch list
 	 */
 	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
-		cfq_dispatch_sort(cfqd->queue, __alias);
+		cfq_dispatch_insert(cfqd->queue, __alias);
 
 	rb_insert_color(&crq->rb_node, &cfqq->sort_list);
 
 	if (!cfq_cfqq_on_rr(cfqq))
-		cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
+		cfq_add_cfqq_rr(cfqd, cfqq);
 
 	/*
 	 * check if this request is a better next-serve candidate
@@ -643,10 +620,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 static inline void
 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
-	if (ON_RB(&crq->rb_node)) {
-		rb_erase(&crq->rb_node, &cfqq->sort_list);
-		cfqq->queued[cfq_crq_is_sync(crq)]--;
-	}
+	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	cfqq->queued[cfq_crq_is_sync(crq)]--;
 
 	cfq_add_crq_rb(crq);
 }
@@ -676,49 +651,28 @@ out:
 	return NULL;
 }
 
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_rq *crq = RQ_DATA(rq);
-
-	if (crq) {
-		struct cfq_queue *cfqq = crq->cfq_queue;
 
-		if (cfq_crq_in_driver(crq)) {
-			cfq_clear_crq_in_driver(crq);
-			WARN_ON(!cfqd->rq_in_driver);
-			cfqd->rq_in_driver--;
-		}
-		if (cfq_crq_in_flight(crq)) {
-			const int sync = cfq_crq_is_sync(crq);
-
-			cfq_clear_crq_in_flight(crq);
-			WARN_ON(!cfqq->on_dispatch[sync]);
-			cfqq->on_dispatch[sync]--;
-		}
-		cfq_mark_crq_requeued(crq);
-	}
+	cfqd->rq_in_driver++;
 }
 
-/*
- * make sure the service time gets corrected on reissue of this request
- */
-static void cfq_requeue_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	cfq_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
+	WARN_ON(!cfqd->rq_in_driver);
+	cfqd->rq_in_driver--;
 }
 
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
+static void cfq_remove_request(struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
 
-	if (crq) {
-		list_del_init(&rq->queuelist);
-		cfq_del_crq_rb(crq);
-		cfq_remove_merge_hints(q, crq);
-
-	}
+	list_del_init(&rq->queuelist);
+	cfq_del_crq_rb(crq);
+	cfq_remove_merge_hints(rq->q, crq);
 }
 
 static int
@@ -762,7 +716,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req)
 	cfq_del_crq_hash(crq);
 	cfq_add_crq_hash(cfqd, crq);
 
-	if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+	if (rq_rb_key(req) != crq->rb_key) {
 		struct cfq_queue *cfqq = crq->cfq_queue;
 
 		cfq_update_next_crq(crq);
@@ -785,7 +739,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	    time_before(next->start_time, rq->start_time))
 		list_move(&rq->queuelist, &next->queuelist);
 
-	cfq_remove_request(q, next);
+	cfq_remove_request(next);
 }
 
 static inline void
@@ -992,53 +946,15 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	return 1;
 }
 
-/*
- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
- * this function sector sorts the selected request to minimize seeks. we start
- * at cfqd->last_sector, not 0.
- */
-static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = crq->cfq_queue;
-	struct list_head *head = &q->queue_head, *entry = head;
-	struct request *__rq;
-	sector_t last;
-
-	list_del(&crq->request->queuelist);
-
-	last = cfqd->last_sector;
-	list_for_each_entry_reverse(__rq, head, queuelist) {
-		struct cfq_rq *__crq = RQ_DATA(__rq);
-
-		if (blk_barrier_rq(__rq))
-			break;
-		if (!blk_fs_request(__rq))
-			break;
-		if (cfq_crq_requeued(__crq))
-			break;
-
-		if (__rq->sector <= crq->request->sector)
-			break;
-		if (__rq->sector > last && crq->request->sector < last) {
-			last = crq->request->sector + crq->request->nr_sectors;
-			break;
-		}
-		entry = &__rq->queuelist;
-	}
-
-	cfqd->last_sector = last;
 
 	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
-
-	cfq_del_crq_rb(crq);
-	cfq_remove_merge_hints(q, crq);
-
-	cfq_mark_crq_in_flight(crq);
-	cfq_clear_crq_requeued(crq);
-
+	cfq_remove_request(crq->request);
 	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
-	list_add_tail(&crq->request->queuelist, entry);
+	elv_dispatch_sort(q, crq->request);
 }
 
 /*
@@ -1159,7 +1075,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	/*
 	 * finally, insert request into driver dispatch list
 	 */
-	cfq_dispatch_sort(cfqd->queue, crq);
+	cfq_dispatch_insert(cfqd->queue, crq);
 
 	cfqd->dispatch_slice++;
 	dispatched++;
@@ -1194,7 +1110,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static int
-cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
@@ -1204,12 +1120,25 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
 
 	cfqq = cfq_select_queue(cfqd, force);
 	if (cfqq) {
+		int max_dispatch;
+
+		/*
+		 * if idle window is disabled, allow queue buildup
+		 */
+		if (!cfq_cfqq_idle_window(cfqq) &&
+		    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+			return 0;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
 
-		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
+		if (!force) {
+			max_dispatch = cfqd->cfq_quantum;
+			if (cfq_class_idle(cfqq))
+				max_dispatch = 1;
+		} else
+			max_dispatch = INT_MAX;
 
 		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
 	}
@@ -1217,93 +1146,6 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
 	return 0;
 }
 
-static inline void cfq_account_dispatch(struct cfq_rq *crq)
-{
-	struct cfq_queue *cfqq = crq->cfq_queue;
-	struct cfq_data *cfqd = cfqq->cfqd;
-
-	if (unlikely(!blk_fs_request(crq->request)))
-		return;
-
-	/*
-	 * accounted bit is necessary since some drivers will call
-	 * elv_next_request() many times for the same request (eg ide)
-	 */
-	if (cfq_crq_in_driver(crq))
-		return;
-
-	cfq_mark_crq_in_driver(crq);
-	cfqd->rq_in_driver++;
-}
-
-static inline void
-cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
-	struct cfq_data *cfqd = cfqq->cfqd;
-	unsigned long now;
-
-	if (!cfq_crq_in_driver(crq))
-		return;
-
-	now = jiffies;
-
-	WARN_ON(!cfqd->rq_in_driver);
-	cfqd->rq_in_driver--;
-
-	if (!cfq_class_idle(cfqq))
-		cfqd->last_end_request = now;
-
-	if (!cfq_cfqq_dispatched(cfqq)) {
-		if (cfq_cfqq_on_rr(cfqq)) {
-			cfqq->service_last = now;
-			cfq_resort_rr_list(cfqq, 0);
-		}
-		if (cfq_cfqq_expired(cfqq)) {
-			__cfq_slice_expired(cfqd, cfqq, 0);
-			cfq_schedule_dispatch(cfqd);
-		}
-	}
-
-	if (cfq_crq_is_sync(crq))
-		crq->io_context->last_end_request = now;
-}
-
-static struct request *cfq_next_request(request_queue_t *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct request *rq;
-
-	if (!list_empty(&q->queue_head)) {
-		struct cfq_rq *crq;
-dispatch:
-		rq = list_entry_rq(q->queue_head.next);
-
-		crq = RQ_DATA(rq);
-		if (crq) {
-			struct cfq_queue *cfqq = crq->cfq_queue;
-
-			/*
-			 * if idle window is disabled, allow queue buildup
-			 */
-			if (!cfq_crq_in_driver(crq) &&
-			    !cfq_cfqq_idle_window(cfqq) &&
-			    !blk_barrier_rq(rq) &&
-			    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
-				return NULL;
-
-			cfq_remove_merge_hints(q, crq);
-			cfq_account_dispatch(crq);
-		}
-
-		return rq;
-	}
-
-	if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
-		goto dispatch;
-
-	return NULL;
-}
-
 /*
  * task holds one reference to the queue, dropped when task exits. each crq
  * in-flight on this queue also holds a reference, dropped when crq is freed.
@@ -1816,8 +1658,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
+static void cfq_insert_request(request_queue_t *q, struct request *rq)
 {
+	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_rq *crq = RQ_DATA(rq);
 	struct cfq_queue *cfqq = crq->cfq_queue;
 
@@ -1837,56 +1680,37 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
 	cfq_crq_enqueued(cfqd, cfqq, crq);
 }
 
-static void
-cfq_insert_request(request_queue_t *q, struct request *rq, int where)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	switch (where) {
-		case ELEVATOR_INSERT_BACK:
-			while (cfq_dispatch_requests(q, INT_MAX, 1))
-				;
-			list_add_tail(&rq->queuelist, &q->queue_head);
-			/*
-			 * If we were idling with pending requests on
-			 * inactive cfqqs, force dispatching will
-			 * remove the idle timer and the queue won't
-			 * be kicked by __make_request() afterward.
-			 * Kick it here.
-			 */
-			cfq_schedule_dispatch(cfqd);
-			break;
-		case ELEVATOR_INSERT_FRONT:
-			list_add(&rq->queuelist, &q->queue_head);
-			break;
-		case ELEVATOR_INSERT_SORT:
-			BUG_ON(!blk_fs_request(rq));
-			cfq_enqueue(cfqd, rq);
-			break;
-		default:
-			printk("%s: bad insert point %d\n", __FUNCTION__,where);
-			return;
-	}
-}
-
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int sync = cfq_crq_is_sync(crq);
+	unsigned long now;
 
-	if (unlikely(!blk_fs_request(rq)))
-		return;
+	now = jiffies;
 
-	cfqq = crq->cfq_queue;
+	WARN_ON(!cfqd->rq_in_driver);
+	WARN_ON(!cfqq->on_dispatch[sync]);
+	cfqd->rq_in_driver--;
+	cfqq->on_dispatch[sync]--;
 
-	if (cfq_crq_in_flight(crq)) {
-		const int sync = cfq_crq_is_sync(crq);
+	if (!cfq_class_idle(cfqq))
+		cfqd->last_end_request = now;
 
-		WARN_ON(!cfqq->on_dispatch[sync]);
-		cfqq->on_dispatch[sync]--;
+	if (!cfq_cfqq_dispatched(cfqq)) {
+		if (cfq_cfqq_on_rr(cfqq)) {
+			cfqq->service_last = now;
+			cfq_resort_rr_list(cfqq, 0);
+		}
+		if (cfq_cfqq_expired(cfqq)) {
+			__cfq_slice_expired(cfqd, cfqq, 0);
+			cfq_schedule_dispatch(cfqd);
+		}
 	}
 
-	cfq_account_completion(cfqq, crq);
+	if (cfq_crq_is_sync(crq))
+		crq->io_context->last_end_request = now;
 }
 
 static struct request *
@@ -2118,9 +1942,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	INIT_HLIST_NODE(&crq->hash);
 	crq->cfq_queue = cfqq;
 	crq->io_context = cic;
-	cfq_clear_crq_in_flight(crq);
-	cfq_clear_crq_in_driver(crq);
-	cfq_clear_crq_requeued(crq);
 
 	if (rw == READ || process_sync(tsk))
 		cfq_mark_crq_is_sync(crq);
@@ -2201,7 +2022,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 		 * only expire and reinvoke request handler, if there are
 		 * other queues with pending requests
 		 */
-		if (!cfq_pending_requests(cfqd)) {
+		if (!cfqd->busy_queues) {
 			cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
 			add_timer(&cfqd->idle_slice_timer);
 			goto out_cont;
@@ -2576,10 +2397,9 @@ static struct elevator_type iosched_cfq = {
 	.elevator_merge_fn =		cfq_merge,
 	.elevator_merged_fn =		cfq_merged_request,
 	.elevator_merge_req_fn =	cfq_merged_requests,
-	.elevator_next_req_fn =		cfq_next_request,
+	.elevator_dispatch_fn =		cfq_dispatch_requests,
 	.elevator_add_req_fn =		cfq_insert_request,
-	.elevator_remove_req_fn =	cfq_remove_request,
-	.elevator_requeue_req_fn =	cfq_requeue_request,
+	.elevator_activate_req_fn =	cfq_activate_request,
 	.elevator_deactivate_req_fn =	cfq_deactivate_request,
 	.elevator_queue_empty_fn =	cfq_queue_empty,
 	.elevator_completed_req_fn =	cfq_completed_request,
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index 52a3ae5289a0..07de4d24ddba 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -50,7 +50,6 @@ struct deadline_data {
 	 * next in sort order. read, write or both are NULL
 	 */
 	struct deadline_rq *next_drq[2];
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
@@ -239,10 +238,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
 	}
 
-	if (ON_RB(&drq->rb_node)) {
-		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-		RB_CLEAR(&drq->rb_node);
-	}
+	BUG_ON(!ON_RB(&drq->rb_node));
+	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+	RB_CLEAR(&drq->rb_node);
 }
 
 static struct request *
@@ -286,7 +284,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir)
 /*
  * add drq to rbtree and fifo
  */
-static inline void
+static void
 deadline_add_request(struct request_queue *q, struct request *rq)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -315,14 +313,11 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
 	struct deadline_rq *drq = RQ_DATA(rq);
+	struct deadline_data *dd = q->elevator->elevator_data;
 
-	if (drq) {
-		struct deadline_data *dd = q->elevator->elevator_data;
-
-		list_del_init(&drq->fifo);
-		deadline_remove_merge_hints(q, drq);
-		deadline_del_drq_rb(dd, drq);
-	}
+	list_del_init(&drq->fifo);
+	deadline_remove_merge_hints(q, drq);
+	deadline_del_drq_rb(dd, drq);
 }
 
 static int
@@ -452,7 +447,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
 	request_queue_t *q = drq->request->q;
 
 	deadline_remove_request(q, drq->request);
-	list_add_tail(&drq->request->queuelist, dd->dispatch);
+	elv_dispatch_add_tail(q, drq->request);
 }
 
 /*
@@ -502,8 +497,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
  * deadline_dispatch_requests selects the best request according to
  * read/write expire, fifo_batch, etc
  */
-static int deadline_dispatch_requests(struct deadline_data *dd)
+static int deadline_dispatch_requests(request_queue_t *q, int force)
 {
+	struct deadline_data *dd = q->elevator->elevator_data;
 	const int reads = !list_empty(&dd->fifo_list[READ]);
 	const int writes = !list_empty(&dd->fifo_list[WRITE]);
 	struct deadline_rq *drq;
@@ -597,65 +593,12 @@ dispatch_request:
 	return 1;
 }
 
-static struct request *deadline_next_request(request_queue_t *q)
-{
-	struct deadline_data *dd = q->elevator->elevator_data;
-	struct request *rq;
-
-	/*
-	 * if there are still requests on the dispatch queue, grab the first one
-	 */
-	if (!list_empty(dd->dispatch)) {
-dispatch:
-		rq = list_entry_rq(dd->dispatch->next);
-		return rq;
-	}
-
-	if (deadline_dispatch_requests(dd))
-		goto dispatch;
-
-	return NULL;
-}
-
-static void
-deadline_insert_request(request_queue_t *q, struct request *rq, int where)
-{
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	/* barriers must flush the reorder queue */
-	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-			&& where == ELEVATOR_INSERT_SORT))
-		where = ELEVATOR_INSERT_BACK;
-
-	switch (where) {
-		case ELEVATOR_INSERT_BACK:
-			while (deadline_dispatch_requests(dd))
-				;
-			list_add_tail(&rq->queuelist, dd->dispatch);
-			break;
-		case ELEVATOR_INSERT_FRONT:
-			list_add(&rq->queuelist, dd->dispatch);
-			break;
-		case ELEVATOR_INSERT_SORT:
-			BUG_ON(!blk_fs_request(rq));
-			deadline_add_request(q, rq);
-			break;
-		default:
-			printk("%s: bad insert point %d\n", __FUNCTION__,where);
-			return;
-	}
-}
-
 static int deadline_queue_empty(request_queue_t *q)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 
-	if (!list_empty(&dd->fifo_list[WRITE])
-	    || !list_empty(&dd->fifo_list[READ])
-	    || !list_empty(dd->dispatch))
-		return 0;
-
-	return 1;
+	return list_empty(&dd->fifo_list[WRITE])
+		&& list_empty(&dd->fifo_list[READ]);
 }
 
 static struct request *
@@ -733,7 +676,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
 	dd->sort_list[READ] = RB_ROOT;
 	dd->sort_list[WRITE] = RB_ROOT;
-	dd->dispatch = &q->queue_head;
 	dd->fifo_expire[READ] = read_expire;
 	dd->fifo_expire[WRITE] = write_expire;
 	dd->writes_starved = writes_starved;
@@ -748,10 +690,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct deadline_rq *drq = RQ_DATA(rq);
 
-	if (drq) {
-		mempool_free(drq, dd->drq_pool);
-		rq->elevator_private = NULL;
-	}
+	mempool_free(drq, dd->drq_pool);
+	rq->elevator_private = NULL;
 }
 
 static int
@@ -917,9 +857,8 @@ static struct elevator_type iosched_deadline = {
 	.elevator_merge_fn =		deadline_merge,
 	.elevator_merged_fn =		deadline_merged_request,
 	.elevator_merge_req_fn =	deadline_merged_requests,
-	.elevator_next_req_fn =		deadline_next_request,
-	.elevator_add_req_fn =		deadline_insert_request,
-	.elevator_remove_req_fn =	deadline_remove_request,
+	.elevator_dispatch_fn =		deadline_dispatch_requests,
+	.elevator_add_req_fn =		deadline_add_request,
 	.elevator_queue_empty_fn =	deadline_queue_empty,
 	.elevator_former_req_fn =	deadline_former_request,
 	.elevator_latter_req_fn =	deadline_latter_request,
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c
index b1730b62c37e..bc2252b6f2e5 100644
--- a/drivers/block/noop-iosched.c
+++ b/drivers/block/noop-iosched.c
@@ -28,13 +28,9 @@ static void elevator_noop_merge_requests(request_queue_t *q, struct request *req
 	list_del_init(&next->queuelist);
 }
 
-static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
-				      int where)
+static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
 {
-	if (where == ELEVATOR_INSERT_FRONT)
-		list_add(&rq->queuelist, &q->queue_head);
-	else
-		list_add_tail(&rq->queuelist, &q->queue_head);
+	elv_dispatch_add_tail(q, rq);
 
 	/*
 	 * new merges must not precede this barrier
@@ -45,19 +41,16 @@ static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
 	q->last_merge = rq;
 }
 
-static struct request *elevator_noop_next_request(request_queue_t *q)
+static int elevator_noop_dispatch(request_queue_t *q, int force)
 {
-	if (!list_empty(&q->queue_head))
-		return list_entry_rq(q->queue_head.next);
-
-	return NULL;
+	return 0;
 }
 
 static struct elevator_type elevator_noop = {
 	.ops = {
 		.elevator_merge_fn = elevator_noop_merge,
 		.elevator_merge_req_fn = elevator_noop_merge_requests,
-		.elevator_next_req_fn = elevator_noop_next_request,
+		.elevator_dispatch_fn = elevator_noop_dispatch,
 		.elevator_add_req_fn = elevator_noop_add_request,
 	},
 	.elevator_name = "noop",