Diffstat (limited to 'drivers/block/as-iosched.c')
-rw-r--r--  drivers/block/as-iosched.c  330
1 file changed, 92 insertions, 238 deletions
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 95c0a3690b0f..c6744ff38294 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -98,7 +98,6 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
 	return ioc;
 }
 
+static void as_put_io_context(struct as_rq *arq)
+{
+	struct as_io_context *aic;
+
+	if (unlikely(!arq->io_context))
+		return;
+
+	aic = arq->io_context->aic;
+
+	if (arq->is_sync == REQ_SYNC && aic) {
+		spin_lock(&aic->lock);
+		set_bit(AS_TASK_IORUNNING, &aic->state);
+		aic->last_end_request = jiffies;
+		spin_unlock(&aic->lock);
+	}
+
+	put_io_context(arq->io_context);
+}
+
 /*
  * the back merge hash support functions
  */
@@ -261,14 +279,6 @@ static inline void as_del_arq_hash(struct as_rq *arq)
 		__as_del_arq_hash(arq);
 }
 
-static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
-{
-	as_del_arq_hash(arq);
-
-	if (q->last_merge == arq->request)
-		q->last_merge = NULL;
-}
-
 static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
@@ -312,7 +322,7 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 		BUG_ON(!arq->on_hash);
 
 		if (!rq_mergeable(__rq)) {
-			as_remove_merge_hints(ad->q, arq);
+			as_del_arq_hash(arq);
 			continue;
 		}
 
@@ -950,23 +960,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 
 	WARN_ON(!list_empty(&rq->queuelist));
 
-	if (arq->state == AS_RQ_PRESCHED) {
-		WARN_ON(arq->io_context);
-		goto out;
-	}
-
-	if (arq->state == AS_RQ_MERGED)
-		goto out_ioc;
-
 	if (arq->state != AS_RQ_REMOVED) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 		goto out;
 	}
 
-	if (!blk_fs_request(rq))
-		goto out;
-
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
@@ -1001,21 +1000,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 		}
 	}
 
-out_ioc:
-	if (!arq->io_context)
-		goto out;
-
-	if (arq->is_sync == REQ_SYNC) {
-		struct as_io_context *aic = arq->io_context->aic;
-		if (aic) {
-			spin_lock(&aic->lock);
-			set_bit(AS_TASK_IORUNNING, &aic->state);
-			aic->last_end_request = jiffies;
-			spin_unlock(&aic->lock);
-		}
-	}
-
-	put_io_context(arq->io_context);
+	as_put_io_context(arq);
 out:
 	arq->state = AS_RQ_POSTSCHED;
 }
@@ -1047,73 +1032,11 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 		ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
 	list_del_init(&arq->fifo);
-	as_remove_merge_hints(q, arq);
+	as_del_arq_hash(arq);
 	as_del_arq_rb(ad, arq);
 }
 
 /*
- * as_remove_dispatched_request is called to remove a request which has gone
- * to the dispatch list.
- */
-static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-	struct as_io_context *aic;
-
-	if (!arq) {
-		WARN_ON(1);
-		return;
-	}
-
-	WARN_ON(arq->state != AS_RQ_DISPATCHED);
-	WARN_ON(ON_RB(&arq->rb_node));
-	if (arq->io_context && arq->io_context->aic) {
-		aic = arq->io_context->aic;
-		if (aic) {
-			WARN_ON(!atomic_read(&aic->nr_dispatched));
-			atomic_dec(&aic->nr_dispatched);
-		}
-	}
-}
-
-/*
- * as_remove_request is called when a driver has finished with a request.
- * This should be only called for dispatched requests, but for some reason
- * a POWER4 box running hwscan it does not.
- */
-static void as_remove_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-
-	if (unlikely(arq->state == AS_RQ_NEW))
-		goto out;
-
-	if (ON_RB(&arq->rb_node)) {
-		if (arq->state != AS_RQ_QUEUED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		/*
-		 * We'll lose the aliased request(s) here. I don't think this
-		 * will ever happen, but if it does, hopefully someone will
-		 * report it.
-		 */
-		WARN_ON(!list_empty(&rq->queuelist));
-		as_remove_queued_request(q, rq);
-	} else {
-		if (arq->state != AS_RQ_DISPATCHED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		as_remove_dispatched_request(q, rq);
-	}
-out:
-	arq->state = AS_RQ_REMOVED;
-}
-
-/*
  * as_fifo_expired returns 0 if there are no expired reads on the fifo,
  * 1 otherwise. It is ratelimited so that we only perform the check once per
  * `fifo_expire' interval. Otherwise a large number of expired requests
@@ -1165,7 +1088,6 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	struct list_head *insert;
 	const int data_dir = arq->is_sync;
 
 	BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1120,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	/*
 	 * take it off the sort and fifo list, add to dispatch queue
 	 */
-	insert = ad->dispatch->prev;
-
 	while (!list_empty(&rq->queuelist)) {
 		struct request *__rq = list_entry_rq(rq->queuelist.next);
 		struct as_rq *__arq = RQ_DATA(__rq);
 
-		list_move_tail(&__rq->queuelist, ad->dispatch);
+		list_del(&__rq->queuelist);
+
+		elv_dispatch_add_tail(ad->q, __rq);
 
 		if (__arq->io_context && __arq->io_context->aic)
 			atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1140,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	as_remove_queued_request(ad->q, rq);
 	WARN_ON(arq->state != AS_RQ_QUEUED);
 
-	list_add(&rq->queuelist, insert);
+	elv_dispatch_sort(ad->q, rq);
+
 	arq->state = AS_RQ_DISPATCHED;
 	if (arq->io_context && arq->io_context->aic)
 		atomic_inc(&arq->io_context->aic->nr_dispatched);
@@ -1230,12 +1153,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(struct as_data *ad)
+static int as_dispatch_request(request_queue_t *q, int force)
 {
+	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
 	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
 
+	if (unlikely(force)) {
+		/*
+		 * Forced dispatch, accounting is useless. Reset
+		 * accounting states and dump fifo_lists. Note that
+		 * batch_data_dir is reset to REQ_SYNC to avoid
+		 * screwing write batch accounting as write batch
+		 * accounting occurs on W->R transition.
+		 */
+		int dispatched = 0;
+
+		ad->batch_data_dir = REQ_SYNC;
+		ad->changed_batch = 0;
+		ad->new_batch = 0;
+
+		while (ad->next_arq[REQ_SYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+		while (ad->next_arq[REQ_ASYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+		return dispatched;
+	}
+
 	/* Signal that the write batch was uncontended, so we can't time it */
 	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
 		if (ad->current_write_count == 0 || !writes)
@@ -1359,20 +1312,6 @@ fifo_expired:
 	return 1;
 }
 
-static struct request *as_next_request(request_queue_t *q)
-{
-	struct as_data *ad = q->elevator->elevator_data;
-	struct request *rq = NULL;
-
-	/*
-	 * if there are still requests on the dispatch queue, grab the first
-	 */
-	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
-		rq = list_entry_rq(ad->dispatch->next);
-
-	return rq;
-}
-
 /*
  * Add arq to a list behind alias
  */
@@ -1404,17 +1343,26 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia
 	/*
 	 * Don't want to have to handle merges.
 	 */
-	as_remove_merge_hints(ad->q, arq);
+	as_del_arq_hash(arq);
+	arq->request->flags |= REQ_NOMERGE;
 }
 
 /*
  * add arq to rbtree and fifo
  */
-static void as_add_request(struct as_data *ad, struct as_rq *arq)
+static void as_add_request(request_queue_t *q, struct request *rq)
 {
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
 	struct as_rq *alias;
 	int data_dir;
 
+	if (arq->state != AS_RQ_PRESCHED) {
+		printk("arq->state: %d\n", arq->state);
+		WARN_ON(1);
+	}
+	arq->state = AS_RQ_NEW;
+
 	if (rq_data_dir(arq->request) == READ
 			|| current->flags&PF_SYNCWRITE)
 		arq->is_sync = 1;
@@ -1437,12 +1385,8 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 		arq->expires = jiffies + ad->fifo_expire[data_dir];
 		list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
 
-		if (rq_mergeable(arq->request)) {
+		if (rq_mergeable(arq->request))
 			as_add_arq_hash(ad, arq);
-
-			if (!ad->q->last_merge)
-				ad->q->last_merge = arq->request;
-		}
 		as_update_arq(ad, arq); /* keep state machine up to date */
 
 	} else {
@@ -1463,96 +1407,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 	arq->state = AS_RQ_QUEUED;
 }
 
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state == AS_RQ_REMOVED) {
-			arq->state = AS_RQ_DISPATCHED;
-			if (arq->io_context && arq->io_context->aic)
-				atomic_inc(&arq->io_context->aic->nr_dispatched);
-		}
-	} else
-		WARN_ON(blk_fs_request(rq)
-			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
-
-	/* Stop anticipating - let this request get through */
-	as_antic_stop(ad);
-}
-
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
-{
-	as_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
-}
-
-/*
- * Account a request that is inserted directly onto the dispatch queue.
- * arq->io_context->aic->nr_dispatched should not need to be incremented
- * because only new requests should come through here: requeues go through
- * our explicit requeue handler.
- */
-static void as_account_queued_request(struct as_data *ad, struct request *rq)
-{
-	if (blk_fs_request(rq)) {
-		struct as_rq *arq = RQ_DATA(rq);
-		arq->state = AS_RQ_DISPATCHED;
-		ad->nr_dispatched++;
-	}
+	WARN_ON(arq->state != AS_RQ_DISPATCHED);
+	arq->state = AS_RQ_REMOVED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_dec(&arq->io_context->aic->nr_dispatched);
 }
 
-static void
-as_insert_request(request_queue_t *q, struct request *rq, int where)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state != AS_RQ_PRESCHED) {
-			printk("arq->state: %d\n", arq->state);
-			WARN_ON(1);
-		}
-		arq->state = AS_RQ_NEW;
-	}
-
-	/* barriers must flush the reorder queue */
-	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-			&& where == ELEVATOR_INSERT_SORT)) {
-		WARN_ON(1);
-		where = ELEVATOR_INSERT_BACK;
-	}
-
-	switch (where) {
-		case ELEVATOR_INSERT_BACK:
-			while (ad->next_arq[REQ_SYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-
-			while (ad->next_arq[REQ_ASYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-
-			list_add_tail(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_FRONT:
-			list_add(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_SORT:
-			BUG_ON(!blk_fs_request(rq));
-			as_add_request(ad, arq);
-			break;
-		default:
-			BUG();
-			return;
-	}
+	WARN_ON(arq->state != AS_RQ_REMOVED);
+	arq->state = AS_RQ_DISPATCHED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_inc(&arq->io_context->aic->nr_dispatched);
 }
 
 /*
@@ -1565,12 +1437,8 @@ static int as_queue_empty(request_queue_t *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
-	if (!list_empty(&ad->fifo_list[REQ_ASYNC])
-		|| !list_empty(&ad->fifo_list[REQ_SYNC])
-		|| !list_empty(ad->dispatch))
-		return 0;
-
-	return 1;
+	return list_empty(&ad->fifo_list[REQ_ASYNC])
+		&& list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
 static struct request *
@@ -1608,15 +1476,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	int ret;
 
 	/*
-	 * try last_merge to avoid going to hash
-	 */
-	ret = elv_try_last_merge(q, bio);
-	if (ret != ELEVATOR_NO_MERGE) {
-		__rq = q->last_merge;
-		goto out_insert;
-	}
-
-	/*
 	 * see if the merge hash can satisfy a back merge
 	 */
 	__rq = as_find_arq_hash(ad, bio->bi_sector);
@@ -1644,9 +1503,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
 	return ELEVATOR_NO_MERGE;
 out:
-	if (rq_mergeable(__rq))
-		q->last_merge = __rq;
-out_insert:
 	if (ret) {
 		if (rq_mergeable(__rq))
 			as_hot_arq_hash(ad, RQ_DATA(__rq));
@@ -1693,9 +1549,6 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 		 * behind the disk head. We currently don't bother adjusting.
 		 */
 	}
-
-	if (arq->on_hash)
-		q->last_merge = req;
 }
 
 static void
@@ -1763,6 +1616,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
 	 * kill knowledge of next, this one is a goner
 	 */
 	as_remove_queued_request(q, next);
+	as_put_io_context(anext);
 
 	anext->state = AS_RQ_MERGED;
 }
@@ -1782,7 +1636,7 @@ static void as_work_handler(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (as_next_request(q))
+	if (!as_queue_empty(q))
 		q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1797,7 +1651,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 		return;
 	}
 
-	if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+		     arq->state != AS_RQ_PRESCHED &&
+		     arq->state != AS_RQ_MERGED)) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 	}
@@ -1807,7 +1663,7 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 }
 
 static int as_set_request(request_queue_t *q, struct request *rq,
-			  struct bio *bio, int gfp_mask)
+			  struct bio *bio, gfp_t gfp_mask)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
@@ -1907,7 +1763,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
 	ad->sort_list[REQ_SYNC] = RB_ROOT;
 	ad->sort_list[REQ_ASYNC] = RB_ROOT;
-	ad->dispatch = &q->queue_head;
 	ad->fifo_expire[REQ_SYNC] = default_read_expire;
 	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
 	ad->antic_expire = default_antic_expire;
@@ -2072,10 +1927,9 @@ static struct elevator_type iosched_as = {
 		.elevator_merge_fn = as_merge,
 		.elevator_merged_fn = as_merged_request,
 		.elevator_merge_req_fn = as_merged_requests,
-		.elevator_next_req_fn = as_next_request,
-		.elevator_add_req_fn = as_insert_request,
-		.elevator_remove_req_fn = as_remove_request,
-		.elevator_requeue_req_fn = as_requeue_request,
+		.elevator_dispatch_fn = as_dispatch_request,
+		.elevator_add_req_fn = as_add_request,
+		.elevator_activate_req_fn = as_activate_request,
 		.elevator_deactivate_req_fn = as_deactivate_request,
 		.elevator_queue_empty_fn = as_queue_empty,
 		.elevator_completed_req_fn = as_completed_request,
@@ -2119,8 +1973,8 @@ static int __init as_init(void)
 
 static void __exit as_exit(void)
 {
-	kmem_cache_destroy(arq_pool);
 	elv_unregister(&iosched_as);
+	kmem_cache_destroy(arq_pool);
 }
 
 module_init(as_init);
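
Note (not part of the patch text above): after this change the scheduler no longer owns a dispatch list (ad->dispatch is gone). The block layer pulls work through the new elevator_dispatch_fn hook, as_dispatch_request(q, force), which hands requests over via elv_dispatch_sort()/elv_dispatch_add_tail() and, when force is non-zero, drains both FIFO lists. The sketch below is a stand-alone user-space model of that pull-with-force contract only; the toy_* names are invented for illustration, and it omits the real code's locking, anticipation and batch accounting.

/*
 * Illustrative model only, not kernel code: a queue whose scheduler keeps
 * pending requests in two FIFOs and hands them over on demand, one at a
 * time normally, or all at once when the caller forces a drain.
 */
#include <stdio.h>

#define NREQ 8

struct toy_queue {
	int fifo[2][NREQ];	/* pending requests: [0] = async, [1] = sync */
	int count[2];		/* pending requests per list */
	int dispatched;		/* requests handed to the "driver" */
};

/* Move one request from the given fifo to the "dispatch list". */
static void toy_move_to_dispatch(struct toy_queue *q, int sync)
{
	printf("dispatch %s request %d\n",
	       sync ? "sync" : "async", q->fifo[sync][--q->count[sync]]);
	q->dispatched++;
}

/*
 * Modeled on the shape of the new dispatch hook: returns how many requests
 * were moved.  With force set, both lists are dumped, mirroring what the
 * patched as_dispatch_request() does when the elevator must be emptied.
 */
static int toy_dispatch(struct toy_queue *q, int force)
{
	int moved = 0;

	if (force) {
		while (q->count[1]) { toy_move_to_dispatch(q, 1); moved++; }
		while (q->count[0]) { toy_move_to_dispatch(q, 0); moved++; }
		return moved;
	}

	/* Normal case: hand over a single request, sync first. */
	if (q->count[1]) {
		toy_move_to_dispatch(q, 1);
		return 1;
	}
	if (q->count[0]) {
		toy_move_to_dispatch(q, 0);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct toy_queue q = { .fifo = { { 10, 11, 12 }, { 20, 21 } },
			       .count = { 3, 2 } };

	toy_dispatch(&q, 0);	/* one request at a time ... */
	toy_dispatch(&q, 1);	/* ... or drain everything when forced */
	printf("total dispatched: %d\n", q.dispatched);
	return 0;
}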