author    Linus Torvalds <torvalds@g5.osdl.org>  2005-10-28 11:53:49 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-10-28 11:53:49 -0400
commit    28d721e24c88496ff8e9c4a0959bdc1415c0658e (patch)
tree      0652161bbbcbfddf47c7ddb25d2db8ecd4cbec89 /drivers/block/as-iosched.c
parent    0ee40c6628434f0535da31deeacc28b61e80d810 (diff)
parent    cb19833dccb32f97cacbfff834b53523915f13f6 (diff)
Merge branch 'generic-dispatch' of git://brick.kernel.dk/data/git/linux-2.6-block
Diffstat (limited to 'drivers/block/as-iosched.c')
-rw-r--r--  drivers/block/as-iosched.c  325
1 file changed, 89 insertions, 236 deletions
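
The substance of this merge is the switch from the anticipatory scheduler's private ->dispatch list to the block layer's generic dispatch queue: as_dispatch_request() now takes (request_queue_t *q, int force) and hands requests to the core via elv_dispatch_sort()/elv_dispatch_add_tail(), while as_next_request(), as_insert_request(), as_remove_request() and as_requeue_request() go away. As a rough illustration of the interface only (not part of this commit; the fifo_* names are invented for the example, the elv_* calls and elevator fields are the ones used in the diff below), a minimal FIFO elevator written against the same hooks might look like this:

/*
 * Illustrative sketch, assuming the 2.6.14-era elevator API exercised by
 * this diff (elevator_dispatch_fn, elevator_add_req_fn, elv_dispatch_sort,
 * list_entry_rq).  The fifo_* names are hypothetical.
 */
struct fifo_data {
	struct list_head queue;		/* scheduler-private request list */
};

static void fifo_add_request(request_queue_t *q, struct request *rq)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	/* queue the request inside the scheduler; the core no longer
	 * looks at a per-scheduler dispatch list */
	list_add_tail(&rq->queuelist, &fd->queue);
}

static int fifo_dispatch(request_queue_t *q, int force)
{
	struct fifo_data *fd = q->elevator->elevator_data;
	struct request *rq;

	if (list_empty(&fd->queue))
		return 0;

	/* hand the oldest queued request to the generic dispatch queue */
	rq = list_entry_rq(fd->queue.next);
	list_del_init(&rq->queuelist);
	elv_dispatch_sort(q, rq);
	return 1;
}

static int fifo_queue_empty(request_queue_t *q)
{
	struct fifo_data *fd = q->elevator->elevator_data;

	return list_empty(&fd->queue);
}

A force of 1 asks the scheduler to drain everything it holds; that is what the new unlikely(force) branch in as_dispatch_request() implements for the anticipatory scheduler below.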
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 1f08e14697e9..4081c36c8c19 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -98,7 +98,6 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
 	return ioc;
 }
 
+static void as_put_io_context(struct as_rq *arq)
+{
+	struct as_io_context *aic;
+
+	if (unlikely(!arq->io_context))
+		return;
+
+	aic = arq->io_context->aic;
+
+	if (arq->is_sync == REQ_SYNC && aic) {
+		spin_lock(&aic->lock);
+		set_bit(AS_TASK_IORUNNING, &aic->state);
+		aic->last_end_request = jiffies;
+		spin_unlock(&aic->lock);
+	}
+
+	put_io_context(arq->io_context);
+}
+
 /*
  * the back merge hash support functions
  */
@@ -261,14 +279,6 @@ static inline void as_del_arq_hash(struct as_rq *arq)
 	__as_del_arq_hash(arq);
 }
 
-static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
-{
-	as_del_arq_hash(arq);
-
-	if (q->last_merge == arq->request)
-		q->last_merge = NULL;
-}
-
 static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
@@ -312,7 +322,7 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 		BUG_ON(!arq->on_hash);
 
 		if (!rq_mergeable(__rq)) {
-			as_remove_merge_hints(ad->q, arq);
+			as_del_arq_hash(arq);
 			continue;
 		}
 
@@ -950,23 +960,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 
 	WARN_ON(!list_empty(&rq->queuelist));
 
-	if (arq->state == AS_RQ_PRESCHED) {
-		WARN_ON(arq->io_context);
-		goto out;
-	}
-
-	if (arq->state == AS_RQ_MERGED)
-		goto out_ioc;
-
 	if (arq->state != AS_RQ_REMOVED) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 		goto out;
 	}
 
-	if (!blk_fs_request(rq))
-		goto out;
-
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
@@ -1001,21 +1000,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 		}
 	}
 
-out_ioc:
-	if (!arq->io_context)
-		goto out;
-
-	if (arq->is_sync == REQ_SYNC) {
-		struct as_io_context *aic = arq->io_context->aic;
-		if (aic) {
-			spin_lock(&aic->lock);
-			set_bit(AS_TASK_IORUNNING, &aic->state);
-			aic->last_end_request = jiffies;
-			spin_unlock(&aic->lock);
-		}
-	}
-
-	put_io_context(arq->io_context);
+	as_put_io_context(arq);
 out:
 	arq->state = AS_RQ_POSTSCHED;
 }
@@ -1047,73 +1032,11 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
 	list_del_init(&arq->fifo);
-	as_remove_merge_hints(q, arq);
+	as_del_arq_hash(arq);
 	as_del_arq_rb(ad, arq);
 }
 
 /*
- * as_remove_dispatched_request is called to remove a request which has gone
- * to the dispatch list.
- */
-static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-	struct as_io_context *aic;
-
-	if (!arq) {
-		WARN_ON(1);
-		return;
-	}
-
-	WARN_ON(arq->state != AS_RQ_DISPATCHED);
-	WARN_ON(ON_RB(&arq->rb_node));
-	if (arq->io_context && arq->io_context->aic) {
-		aic = arq->io_context->aic;
-		if (aic) {
-			WARN_ON(!atomic_read(&aic->nr_dispatched));
-			atomic_dec(&aic->nr_dispatched);
-		}
-	}
-}
-
-/*
- * as_remove_request is called when a driver has finished with a request.
- * This should be only called for dispatched requests, but for some reason
- * a POWER4 box running hwscan it does not.
- */
-static void as_remove_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-
-	if (unlikely(arq->state == AS_RQ_NEW))
-		goto out;
-
-	if (ON_RB(&arq->rb_node)) {
-		if (arq->state != AS_RQ_QUEUED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		/*
-		 * We'll lose the aliased request(s) here. I don't think this
-		 * will ever happen, but if it does, hopefully someone will
-		 * report it.
-		 */
-		WARN_ON(!list_empty(&rq->queuelist));
-		as_remove_queued_request(q, rq);
-	} else {
-		if (arq->state != AS_RQ_DISPATCHED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		as_remove_dispatched_request(q, rq);
-	}
-out:
-	arq->state = AS_RQ_REMOVED;
-}
-
-/*
  * as_fifo_expired returns 0 if there are no expired reads on the fifo,
  * 1 otherwise. It is ratelimited so that we only perform the check once per
  * `fifo_expire' interval. Otherwise a large number of expired requests
@@ -1165,7 +1088,6 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	struct list_head *insert;
 	const int data_dir = arq->is_sync;
 
 	BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1120,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	/*
 	 * take it off the sort and fifo list, add to dispatch queue
 	 */
-	insert = ad->dispatch->prev;
-
 	while (!list_empty(&rq->queuelist)) {
 		struct request *__rq = list_entry_rq(rq->queuelist.next);
 		struct as_rq *__arq = RQ_DATA(__rq);
 
-		list_move_tail(&__rq->queuelist, ad->dispatch);
+		list_del(&__rq->queuelist);
+
+		elv_dispatch_add_tail(ad->q, __rq);
 
 		if (__arq->io_context && __arq->io_context->aic)
 			atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1140,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	as_remove_queued_request(ad->q, rq);
 	WARN_ON(arq->state != AS_RQ_QUEUED);
 
-	list_add(&rq->queuelist, insert);
+	elv_dispatch_sort(ad->q, rq);
+
 	arq->state = AS_RQ_DISPATCHED;
 	if (arq->io_context && arq->io_context->aic)
 		atomic_inc(&arq->io_context->aic->nr_dispatched);
@@ -1230,12 +1153,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(struct as_data *ad)
+static int as_dispatch_request(request_queue_t *q, int force)
 {
+	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
 	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
 
+	if (unlikely(force)) {
+		/*
+		 * Forced dispatch, accounting is useless. Reset
+		 * accounting states and dump fifo_lists. Note that
+		 * batch_data_dir is reset to REQ_SYNC to avoid
+		 * screwing write batch accounting as write batch
+		 * accounting occurs on W->R transition.
+		 */
+		int dispatched = 0;
+
+		ad->batch_data_dir = REQ_SYNC;
+		ad->changed_batch = 0;
+		ad->new_batch = 0;
+
+		while (ad->next_arq[REQ_SYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+		while (ad->next_arq[REQ_ASYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+		return dispatched;
+	}
+
 	/* Signal that the write batch was uncontended, so we can't time it */
 	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
 		if (ad->current_write_count == 0 || !writes)
@@ -1359,20 +1312,6 @@ fifo_expired:
 	return 1;
 }
 
-static struct request *as_next_request(request_queue_t *q)
-{
-	struct as_data *ad = q->elevator->elevator_data;
-	struct request *rq = NULL;
-
-	/*
-	 * if there are still requests on the dispatch queue, grab the first
-	 */
-	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
-		rq = list_entry_rq(ad->dispatch->next);
-
-	return rq;
-}
-
 /*
  * Add arq to a list behind alias
  */
@@ -1404,17 +1343,25 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia
 	/*
 	 * Don't want to have to handle merges.
 	 */
-	as_remove_merge_hints(ad->q, arq);
+	as_del_arq_hash(arq);
 }
 
 /*
  * add arq to rbtree and fifo
  */
-static void as_add_request(struct as_data *ad, struct as_rq *arq)
+static void as_add_request(request_queue_t *q, struct request *rq)
 {
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
 	struct as_rq *alias;
 	int data_dir;
 
+	if (arq->state != AS_RQ_PRESCHED) {
+		printk("arq->state: %d\n", arq->state);
+		WARN_ON(1);
+	}
+	arq->state = AS_RQ_NEW;
+
 	if (rq_data_dir(arq->request) == READ
 			|| current->flags&PF_SYNCWRITE)
 		arq->is_sync = 1;
@@ -1437,12 +1384,8 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 		arq->expires = jiffies + ad->fifo_expire[data_dir];
 		list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
 
-		if (rq_mergeable(arq->request)) {
+		if (rq_mergeable(arq->request))
 			as_add_arq_hash(ad, arq);
-
-			if (!ad->q->last_merge)
-				ad->q->last_merge = arq->request;
-		}
 
 		as_update_arq(ad, arq);	/* keep state machine up to date */
 
 	} else {
@@ -1463,96 +1406,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 	arq->state = AS_RQ_QUEUED;
 }
 
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state == AS_RQ_REMOVED) {
-			arq->state = AS_RQ_DISPATCHED;
-			if (arq->io_context && arq->io_context->aic)
-				atomic_inc(&arq->io_context->aic->nr_dispatched);
-		}
-	} else
-		WARN_ON(blk_fs_request(rq)
-			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
-
-	/* Stop anticipating - let this request get through */
-	as_antic_stop(ad);
-}
-
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
-{
-	as_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
-}
-
-/*
- * Account a request that is inserted directly onto the dispatch queue.
- * arq->io_context->aic->nr_dispatched should not need to be incremented
- * because only new requests should come through here: requeues go through
- * our explicit requeue handler.
- */
-static void as_account_queued_request(struct as_data *ad, struct request *rq)
-{
-	if (blk_fs_request(rq)) {
-		struct as_rq *arq = RQ_DATA(rq);
-		arq->state = AS_RQ_DISPATCHED;
-		ad->nr_dispatched++;
-	}
+	WARN_ON(arq->state != AS_RQ_DISPATCHED);
+	arq->state = AS_RQ_REMOVED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_dec(&arq->io_context->aic->nr_dispatched);
 }
 
-static void
-as_insert_request(request_queue_t *q, struct request *rq, int where)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state != AS_RQ_PRESCHED) {
-			printk("arq->state: %d\n", arq->state);
-			WARN_ON(1);
-		}
-		arq->state = AS_RQ_NEW;
-	}
-
-	/* barriers must flush the reorder queue */
-	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-			&& where == ELEVATOR_INSERT_SORT)) {
-		WARN_ON(1);
-		where = ELEVATOR_INSERT_BACK;
-	}
-
-	switch (where) {
-		case ELEVATOR_INSERT_BACK:
-			while (ad->next_arq[REQ_SYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-
-			while (ad->next_arq[REQ_ASYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-
-			list_add_tail(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_FRONT:
-			list_add(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_SORT:
-			BUG_ON(!blk_fs_request(rq));
-			as_add_request(ad, arq);
-			break;
-		default:
-			BUG();
-			return;
-	}
+	WARN_ON(arq->state != AS_RQ_REMOVED);
+	arq->state = AS_RQ_DISPATCHED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_inc(&arq->io_context->aic->nr_dispatched);
 }
 
 /*
@@ -1565,12 +1436,8 @@ static int as_queue_empty(request_queue_t *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
-	if (!list_empty(&ad->fifo_list[REQ_ASYNC])
-		|| !list_empty(&ad->fifo_list[REQ_SYNC])
-		|| !list_empty(ad->dispatch))
-		return 0;
-
-	return 1;
+	return list_empty(&ad->fifo_list[REQ_ASYNC])
+		&& list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
 static struct request *
@@ -1608,15 +1475,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	int ret;
 
 	/*
-	 * try last_merge to avoid going to hash
-	 */
-	ret = elv_try_last_merge(q, bio);
-	if (ret != ELEVATOR_NO_MERGE) {
-		__rq = q->last_merge;
-		goto out_insert;
-	}
-
-	/*
 	 * see if the merge hash can satisfy a back merge
 	 */
 	__rq = as_find_arq_hash(ad, bio->bi_sector);
@@ -1644,9 +1502,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
 	return ELEVATOR_NO_MERGE;
 out:
-	if (rq_mergeable(__rq))
-		q->last_merge = __rq;
-out_insert:
 	if (ret) {
 		if (rq_mergeable(__rq))
 			as_hot_arq_hash(ad, RQ_DATA(__rq));
@@ -1693,9 +1548,6 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 		 * behind the disk head. We currently don't bother adjusting.
 		 */
 	}
-
-	if (arq->on_hash)
-		q->last_merge = req;
 }
 
 static void
@@ -1763,6 +1615,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
 	 * kill knowledge of next, this one is a goner
 	 */
 	as_remove_queued_request(q, next);
+	as_put_io_context(anext);
 
 	anext->state = AS_RQ_MERGED;
 }
@@ -1782,7 +1635,7 @@ static void as_work_handler(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (as_next_request(q))
+	if (!as_queue_empty(q))
 		q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1797,7 +1650,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 		return;
 	}
 
-	if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+		     arq->state != AS_RQ_PRESCHED &&
+		     arq->state != AS_RQ_MERGED)) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 	}
@@ -1907,7 +1762,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
 	ad->sort_list[REQ_SYNC] = RB_ROOT;
 	ad->sort_list[REQ_ASYNC] = RB_ROOT;
-	ad->dispatch = &q->queue_head;
 	ad->fifo_expire[REQ_SYNC] = default_read_expire;
 	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
 	ad->antic_expire = default_antic_expire;
@@ -2072,10 +1926,9 @@ static struct elevator_type iosched_as = {
 		.elevator_merge_fn =		as_merge,
 		.elevator_merged_fn =		as_merged_request,
 		.elevator_merge_req_fn =	as_merged_requests,
-		.elevator_next_req_fn =		as_next_request,
-		.elevator_add_req_fn =		as_insert_request,
-		.elevator_remove_req_fn =	as_remove_request,
-		.elevator_requeue_req_fn =	as_requeue_request,
+		.elevator_dispatch_fn =		as_dispatch_request,
+		.elevator_add_req_fn =		as_add_request,
+		.elevator_activate_req_fn =	as_activate_request,
 		.elevator_deactivate_req_fn =	as_deactivate_request,
 		.elevator_queue_empty_fn =	as_queue_empty,
 		.elevator_completed_req_fn =	as_completed_request,