Diffstat (limited to 'drivers/block/as-iosched.c')
-rw-r--r--	drivers/block/as-iosched.c	290
1 file changed, 85 insertions(+), 205 deletions(-)
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 95c0a3690b0f..1775ffe9edc7 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -98,7 +98,6 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
 	return ioc;
 }
 
+static void as_put_io_context(struct as_rq *arq)
+{
+	struct as_io_context *aic;
+
+	if (unlikely(!arq->io_context))
+		return;
+
+	aic = arq->io_context->aic;
+
+	if (arq->is_sync == REQ_SYNC && aic) {
+		spin_lock(&aic->lock);
+		set_bit(AS_TASK_IORUNNING, &aic->state);
+		aic->last_end_request = jiffies;
+		spin_unlock(&aic->lock);
+	}
+
+	put_io_context(arq->io_context);
+}
+
 /*
  * the back merge hash support functions
  */
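The new as_put_io_context() helper consolidates the completion-side io_context accounting that was previously open-coded in as_completed_request() (see the hunks below); as_merged_requests() gains a second call site further down. For orientation, the fields it touches live in struct as_io_context; an abridged sketch of that structure as declared in include/linux/blkdev.h of this era (comments paraphrased, think-time and seek statistics omitted):

struct as_io_context {
	spinlock_t lock;		/* protects state and statistics */
	unsigned long state;		/* AS_TASK_* bits, e.g. AS_TASK_IORUNNING */
	atomic_t nr_queued;		/* queued reads and sync writes */
	atomic_t nr_dispatched;		/* requests handed to the driver */
	unsigned long last_end_request;	/* jiffies at last sync completion */
	/* think-time and seek-pattern statistics follow */
};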
@@ -950,23 +968,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 
 	WARN_ON(!list_empty(&rq->queuelist));
 
-	if (arq->state == AS_RQ_PRESCHED) {
-		WARN_ON(arq->io_context);
-		goto out;
-	}
-
-	if (arq->state == AS_RQ_MERGED)
-		goto out_ioc;
-
 	if (arq->state != AS_RQ_REMOVED) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 		goto out;
 	}
 
-	if (!blk_fs_request(rq))
-		goto out;
-
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
@@ -1001,21 +1008,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 		}
 	}
 
-out_ioc:
-	if (!arq->io_context)
-		goto out;
-
-	if (arq->is_sync == REQ_SYNC) {
-		struct as_io_context *aic = arq->io_context->aic;
-		if (aic) {
-			spin_lock(&aic->lock);
-			set_bit(AS_TASK_IORUNNING, &aic->state);
-			aic->last_end_request = jiffies;
-			spin_unlock(&aic->lock);
-		}
-	}
-
-	put_io_context(arq->io_context);
+	as_put_io_context(arq);
 out:
 	arq->state = AS_RQ_POSTSCHED;
 }
@@ -1052,68 +1045,6 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 }
 
 /*
- * as_remove_dispatched_request is called to remove a request which has gone
- * to the dispatch list.
- */
-static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-	struct as_io_context *aic;
-
-	if (!arq) {
-		WARN_ON(1);
-		return;
-	}
-
-	WARN_ON(arq->state != AS_RQ_DISPATCHED);
-	WARN_ON(ON_RB(&arq->rb_node));
-	if (arq->io_context && arq->io_context->aic) {
-		aic = arq->io_context->aic;
-		if (aic) {
-			WARN_ON(!atomic_read(&aic->nr_dispatched));
-			atomic_dec(&aic->nr_dispatched);
-		}
-	}
-}
-
-/*
- * as_remove_request is called when a driver has finished with a request.
- * This should be only called for dispatched requests, but for some reason
- * a POWER4 box running hwscan it does not.
- */
-static void as_remove_request(request_queue_t *q, struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-
-	if (unlikely(arq->state == AS_RQ_NEW))
-		goto out;
-
-	if (ON_RB(&arq->rb_node)) {
-		if (arq->state != AS_RQ_QUEUED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		/*
-		 * We'll lose the aliased request(s) here. I don't think this
-		 * will ever happen, but if it does, hopefully someone will
-		 * report it.
-		 */
-		WARN_ON(!list_empty(&rq->queuelist));
-		as_remove_queued_request(q, rq);
-	} else {
-		if (arq->state != AS_RQ_DISPATCHED) {
-			printk("arq->state %d\n", arq->state);
-			WARN_ON(1);
-			goto out;
-		}
-		as_remove_dispatched_request(q, rq);
-	}
-out:
-	arq->state = AS_RQ_REMOVED;
-}
-
-/*
  * as_fifo_expired returns 0 if there are no expired reads on the fifo,
  * 1 otherwise. It is ratelimited so that we only perform the check once per
  * `fifo_expire' interval. Otherwise a large number of expired requests
@@ -1165,7 +1096,6 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	struct list_head *insert;
 	const int data_dir = arq->is_sync;
 
 	BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1128,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	/*
 	 * take it off the sort and fifo list, add to dispatch queue
 	 */
-	insert = ad->dispatch->prev;
-
 	while (!list_empty(&rq->queuelist)) {
 		struct request *__rq = list_entry_rq(rq->queuelist.next);
 		struct as_rq *__arq = RQ_DATA(__rq);
 
-		list_move_tail(&__rq->queuelist, ad->dispatch);
+		list_del(&__rq->queuelist);
+
+		elv_dispatch_add_tail(ad->q, __rq);
 
 		if (__arq->io_context && __arq->io_context->aic)
 			atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1148,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	as_remove_queued_request(ad->q, rq);
 	WARN_ON(arq->state != AS_RQ_QUEUED);
 
-	list_add(&rq->queuelist, insert);
+	elv_dispatch_sort(ad->q, rq);
+
 	arq->state = AS_RQ_DISPATCHED;
 	if (arq->io_context && arq->io_context->aic)
 		atomic_inc(&arq->io_context->aic->nr_dispatched);
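These two generic helpers replace the scheduler-private dispatch list: elv_dispatch_add_tail() appends to the queue's dispatch list, while elv_dispatch_sort() inserts in roughly ascending sector order. A simplified sketch of the sort variant, assuming the block-layer internals of this series (the real block-layer version also handles barriers, already-started requests, and the queue's sector boundary, all omitted here):

void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	struct list_head *entry;

	/*
	 * Scan the dispatch queue back to front and stop at the first
	 * request that starts at or below our sector, so rq ends up
	 * roughly sorted for the disk head.
	 */
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (rq->sector >= pos->sector)
			break;
	}
	list_add(&rq->queuelist, entry);
}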
@@ -1230,12 +1161,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(struct as_data *ad)
+static int as_dispatch_request(request_queue_t *q, int force)
 {
+	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
 	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
 
+	if (unlikely(force)) {
+		/*
+		 * Forced dispatch, accounting is useless.  Reset
+		 * accounting states and dump fifo_lists.  Note that
+		 * batch_data_dir is reset to REQ_SYNC to avoid
+		 * screwing write batch accounting as write batch
+		 * accounting occurs on W->R transition.
+		 */
+		int dispatched = 0;
+
+		ad->batch_data_dir = REQ_SYNC;
+		ad->changed_batch = 0;
+		ad->new_batch = 0;
+
+		while (ad->next_arq[REQ_SYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+		while (ad->next_arq[REQ_ASYNC]) {
+			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+			dispatched++;
+		}
+		ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+		return dispatched;
+	}
+
 	/* Signal that the write batch was uncontended, so we can't time it */
 	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
 		if (ad->current_write_count == 0 || !writes)
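The force path is what lets the elevator core drain a scheduler completely, for instance when switching I/O schedulers at runtime: the core keeps calling the dispatch hook with force set until it returns 0. A plausible sketch of that drain loop (the kernel later factored exactly this pattern into an elv_drain_elevator() helper; the name is used here for illustration):

static void elv_drain_elevator(request_queue_t *q)
{
	/* flush everything the scheduler still holds onto the dispatch queue */
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
}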
@@ -1359,20 +1320,6 @@ fifo_expired:
 	return 1;
 }
 
-static struct request *as_next_request(request_queue_t *q)
-{
-	struct as_data *ad = q->elevator->elevator_data;
-	struct request *rq = NULL;
-
-	/*
-	 * if there are still requests on the dispatch queue, grab the first
-	 */
-	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
-		rq = list_entry_rq(ad->dispatch->next);
-
-	return rq;
-}
-
 /*
  * Add arq to a list behind alias
  */
@@ -1410,11 +1357,19 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia
 /*
  * add arq to rbtree and fifo
  */
-static void as_add_request(struct as_data *ad, struct as_rq *arq)
+static void as_add_request(request_queue_t *q, struct request *rq)
 {
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
 	struct as_rq *alias;
 	int data_dir;
 
+	if (arq->state != AS_RQ_PRESCHED) {
+		printk("arq->state: %d\n", arq->state);
+		WARN_ON(1);
+	}
+	arq->state = AS_RQ_NEW;
+
 	if (rq_data_dir(arq->request) == READ
 			|| current->flags&PF_SYNCWRITE)
 		arq->is_sync = 1;
@@ -1463,96 +1418,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 	arq->state = AS_RQ_QUEUED;
 }
 
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state == AS_RQ_REMOVED) {
-			arq->state = AS_RQ_DISPATCHED;
-			if (arq->io_context && arq->io_context->aic)
-				atomic_inc(&arq->io_context->aic->nr_dispatched);
-		}
-	} else
-		WARN_ON(blk_fs_request(rq)
-			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
-
-	/* Stop anticipating - let this request get through */
-	as_antic_stop(ad);
-}
-
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
-{
-	as_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
-}
-
-/*
- * Account a request that is inserted directly onto the dispatch queue.
- * arq->io_context->aic->nr_dispatched should not need to be incremented
- * because only new requests should come through here: requeues go through
- * our explicit requeue handler.
- */
-static void as_account_queued_request(struct as_data *ad, struct request *rq)
-{
-	if (blk_fs_request(rq)) {
-		struct as_rq *arq = RQ_DATA(rq);
-		arq->state = AS_RQ_DISPATCHED;
-		ad->nr_dispatched++;
-	}
+	WARN_ON(arq->state != AS_RQ_DISPATCHED);
+	arq->state = AS_RQ_REMOVED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_dec(&arq->io_context->aic->nr_dispatched);
 }
 
-static void
-as_insert_request(request_queue_t *q, struct request *rq, int where)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (arq) {
-		if (arq->state != AS_RQ_PRESCHED) {
-			printk("arq->state: %d\n", arq->state);
-			WARN_ON(1);
-		}
-		arq->state = AS_RQ_NEW;
-	}
-
-	/* barriers must flush the reorder queue */
-	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-			&& where == ELEVATOR_INSERT_SORT)) {
-		WARN_ON(1);
-		where = ELEVATOR_INSERT_BACK;
-	}
-
-	switch (where) {
-		case ELEVATOR_INSERT_BACK:
-			while (ad->next_arq[REQ_SYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-
-			while (ad->next_arq[REQ_ASYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-
-			list_add_tail(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_FRONT:
-			list_add(&rq->queuelist, ad->dispatch);
-			as_account_queued_request(ad, rq);
-			as_antic_stop(ad);
-			break;
-		case ELEVATOR_INSERT_SORT:
-			BUG_ON(!blk_fs_request(rq));
-			as_add_request(ad, arq);
-			break;
-		default:
-			BUG();
-			return;
-	}
+	WARN_ON(arq->state != AS_RQ_REMOVED);
+	arq->state = AS_RQ_DISPATCHED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_inc(&arq->io_context->aic->nr_dispatched);
 }
 
 /*
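With activate/deactivate reduced to pure accounting, the arq state machine becomes explicit: dispatch marks a request AS_RQ_DISPATCHED, the driver taking it (activate) moves it to AS_RQ_REMOVED, and a requeue (deactivate) moves it back. For orientation, the states involved, from the enum arq_state already present in as-iosched.c (abridged, comments paraphrased):

enum arq_state {
	AS_RQ_NEW = 0,		/* fresh, not on any list yet */
	AS_RQ_QUEUED,		/* on the scheduler's rbtree and fifo */
	AS_RQ_DISPATCHED,	/* moved to the dispatch queue */
	AS_RQ_PRESCHED,		/* debug state: scheduler does not own it */
	AS_RQ_REMOVED,		/* taken by the driver */
	AS_RQ_MERGED,		/* merged into another request */
	AS_RQ_POSTSCHED,	/* completed, about to be freed */
};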
@@ -1565,12 +1448,8 @@ static int as_queue_empty(request_queue_t *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
-	if (!list_empty(&ad->fifo_list[REQ_ASYNC])
-		|| !list_empty(&ad->fifo_list[REQ_SYNC])
-		|| !list_empty(ad->dispatch))
-		return 0;
-
-	return 1;
+	return list_empty(&ad->fifo_list[REQ_ASYNC])
+		&& list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
 static struct request *
@@ -1763,6 +1642,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
 	 * kill knowledge of next, this one is a goner
 	 */
 	as_remove_queued_request(q, next);
+	as_put_io_context(anext);
 
 	anext->state = AS_RQ_MERGED;
 }
@@ -1782,7 +1662,7 @@ static void as_work_handler(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (as_next_request(q))
+	if (!as_queue_empty(q))
 		q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -1797,7 +1677,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
 		return;
 	}
 
-	if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+		     arq->state != AS_RQ_PRESCHED &&
+		     arq->state != AS_RQ_MERGED)) {
 		printk("arq->state %d\n", arq->state);
 		WARN_ON(1);
 	}
@@ -1907,7 +1789,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
 	ad->sort_list[REQ_SYNC] = RB_ROOT;
 	ad->sort_list[REQ_ASYNC] = RB_ROOT;
-	ad->dispatch = &q->queue_head;
 	ad->fifo_expire[REQ_SYNC] = default_read_expire;
 	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
 	ad->antic_expire = default_antic_expire;
@@ -2072,10 +1953,9 @@ static struct elevator_type iosched_as = {
 	.elevator_merge_fn =		as_merge,
 	.elevator_merged_fn =		as_merged_request,
 	.elevator_merge_req_fn =	as_merged_requests,
-	.elevator_next_req_fn =		as_next_request,
-	.elevator_add_req_fn =		as_insert_request,
-	.elevator_remove_req_fn =	as_remove_request,
-	.elevator_requeue_req_fn =	as_requeue_request,
+	.elevator_dispatch_fn =		as_dispatch_request,
+	.elevator_add_req_fn =		as_add_request,
+	.elevator_activate_req_fn =	as_activate_request,
 	.elevator_deactivate_req_fn =	as_deactivate_request,
 	.elevator_queue_empty_fn =	as_queue_empty,
 	.elevator_completed_req_fn =	as_completed_request,
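The ops table sums up the API change: elevator_next_req_fn, elevator_remove_req_fn and elevator_requeue_req_fn are gone, replaced by a single elevator_dispatch_fn feeding the generic dispatch queue, plus activate/deactivate notifications. For comparison, a minimal scheduler against the same contract, modelled on the noop scheduler's conversion in this patch series (illustrative sketch; a struct noop_data holding a single list_head is assumed):

static int noop_dispatch(request_queue_t *q, int force)
{
	struct noop_data *nd = q->elevator->elevator_data;

	if (!list_empty(&nd->queue)) {
		struct request *rq = list_entry_rq(nd->queue.next);

		list_del_init(&rq->queuelist);
		elv_dispatch_sort(q, rq);	/* hand it to the generic queue */
		return 1;
	}
	return 0;
}

static void noop_add_request(request_queue_t *q, struct request *rq)
{
	struct noop_data *nd = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &nd->queue);
}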