author	Jens Axboe <axboe@suse.de>	2005-10-20 10:42:29 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2005-10-28 02:45:08 -0400
commit	b4878f245ec8e168cdd1f170f823a750b7dd4af5 (patch)
tree	de784c2a7e1174e4843807998f0356bf92ee78be /drivers/block/cfq-iosched.c
parent	d9ebb192aa13a026edc6faff137dcb14f2c91731 (diff)
[PATCH] 02/05: update ioscheds to use generic dispatch queue
This patch updates all four ioscheds to use the generic dispatch queue.
There's one behavior change in as-iosched.

* In as-iosched, when force dispatching (ELEVATOR_INSERT_BACK),
  batch_data_dir is reset to REQ_SYNC, and changed_batch and new_batch
  are cleared to zero. This prevents AS from doing an incorrect
  update_write_batch after the force-dispatched requests have finished.

* In cfq-iosched, cfqd->rq_in_driver currently counts the number of
  activated (removed) requests to determine whether queue-kicking is
  needed and whether cfq_max_depth has been reached. With the generic
  dispatch queue, I think counting the number of dispatched requests
  would be more appropriate.

* cfq_max_depth can be lowered to 1 again.

Original from Tejun Heo, modified version applied.

Signed-off-by: Jens Axboe <axboe@suse.de>
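In cfq terms, the rq_in_driver accounting change means the counter is now
maintained by the new activate/deactivate elevator hooks instead of by
cfq_account_dispatch()/cfq_account_completion(). Condensed from the diff
below (a sketch of the two hooks only, not the whole conversion):

static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/* request handed to the driver: count it as dispatched */
	cfqd->rq_in_driver++;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/* driver gave the request back (requeue): undo the count */
	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}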
Diffstat (limited to 'drivers/block/cfq-iosched.c')
-rw-r--r--	drivers/block/cfq-iosched.c	340
1 files changed, 80 insertions, 260 deletions
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64ec..7b14160e0798 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -84,7 +84,6 @@ static int cfq_max_depth = 2;
 	(node)->rb_left = NULL;	\
 } while (0)
 #define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
-#define ON_RB(node)	((node)->rb_color != RB_NONE)
 #define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)	(rq)->sector
 
@@ -271,10 +270,7 @@ CFQ_CFQQ_FNS(expired);
 #undef CFQ_CFQQ_FNS
 
 enum cfq_rq_state_flags {
-	CFQ_CRQ_FLAG_in_flight = 0,
-	CFQ_CRQ_FLAG_in_driver,
-	CFQ_CRQ_FLAG_is_sync,
-	CFQ_CRQ_FLAG_requeued,
+	CFQ_CRQ_FLAG_is_sync = 0,
 };
 
 #define CFQ_CRQ_FNS(name)	\
@@ -291,14 +287,11 @@ static inline int cfq_crq_##name(const struct cfq_rq *crq) \
 	return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;	\
 }
 
-CFQ_CRQ_FNS(in_flight);
-CFQ_CRQ_FNS(in_driver);
 CFQ_CRQ_FNS(is_sync);
-CFQ_CRQ_FNS(requeued);
 #undef CFQ_CRQ_FNS
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
-static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
 
 #define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
@@ -347,18 +340,13 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 	return NULL;
 }
 
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
-	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+	if (!cfqd->rq_in_driver && cfqd->busy_queues)
 		kblockd_schedule_work(&cfqd->unplug_work);
 }
 
@@ -366,7 +354,7 @@ static int cfq_queue_empty(request_queue_t *q)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
-	return !cfq_pending_requests(cfqd);
+	return !cfqd->busy_queues;
 }
 
 /*
@@ -386,11 +374,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 	if (crq2 == NULL)
 		return crq1;
 
-	if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
-		return crq1;
-	else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
-		return crq2;
-
 	if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
 		return crq1;
 	else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
@@ -461,10 +444,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
 	struct rb_node *rbnext, *rbprev;
 
-	rbnext = NULL;
-	if (ON_RB(&last->rb_node))
-		rbnext = rb_next(&last->rb_node);
-	if (!rbnext) {
+	if (!(rbnext = rb_next(&last->rb_node))) {
 		rbnext = rb_first(&cfqq->sort_list);
 		if (rbnext == &last->rb_node)
 			rbnext = NULL;
@@ -545,13 +525,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
  * the pending list according to last request service
  */
 static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
 
-	cfq_resort_rr_list(cfqq, requeue);
+	cfq_resort_rr_list(cfqq, 0);
 }
 
 static inline void
@@ -571,22 +551,19 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 {
 	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int sync = cfq_crq_is_sync(crq);
 
-	if (ON_RB(&crq->rb_node)) {
-		struct cfq_data *cfqd = cfqq->cfqd;
-		const int sync = cfq_crq_is_sync(crq);
-
-		BUG_ON(!cfqq->queued[sync]);
-		cfqq->queued[sync]--;
+	BUG_ON(!cfqq->queued[sync]);
+	cfqq->queued[sync]--;
 
-		cfq_update_next_crq(crq);
+	cfq_update_next_crq(crq);
 
-		rb_erase(&crq->rb_node, &cfqq->sort_list);
-		RB_CLEAR_COLOR(&crq->rb_node);
+	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	RB_CLEAR_COLOR(&crq->rb_node);
 
-		if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
-			cfq_del_cfqq_rr(cfqd, cfqq);
-	}
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static struct cfq_rq *
@@ -627,12 +604,12 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 	 * if that happens, put the alias on the dispatch list
 	 */
 	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
-		cfq_dispatch_sort(cfqd->queue, __alias);
+		cfq_dispatch_insert(cfqd->queue, __alias);
 
 	rb_insert_color(&crq->rb_node, &cfqq->sort_list);
 
 	if (!cfq_cfqq_on_rr(cfqq))
-		cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
+		cfq_add_cfqq_rr(cfqd, cfqq);
 
 	/*
 	 * check if this request is a better next-serve candidate
@@ -643,10 +620,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 static inline void
 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
-	if (ON_RB(&crq->rb_node)) {
-		rb_erase(&crq->rb_node, &cfqq->sort_list);
-		cfqq->queued[cfq_crq_is_sync(crq)]--;
-	}
+	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	cfqq->queued[cfq_crq_is_sync(crq)]--;
 
 	cfq_add_crq_rb(crq);
 }
@@ -676,49 +651,28 @@ out:
 	return NULL;
 }
 
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_rq *crq = RQ_DATA(rq);
-
-	if (crq) {
-		struct cfq_queue *cfqq = crq->cfq_queue;
 
-		if (cfq_crq_in_driver(crq)) {
-			cfq_clear_crq_in_driver(crq);
-			WARN_ON(!cfqd->rq_in_driver);
-			cfqd->rq_in_driver--;
-		}
-		if (cfq_crq_in_flight(crq)) {
-			const int sync = cfq_crq_is_sync(crq);
-
-			cfq_clear_crq_in_flight(crq);
-			WARN_ON(!cfqq->on_dispatch[sync]);
-			cfqq->on_dispatch[sync]--;
-		}
-		cfq_mark_crq_requeued(crq);
-	}
+	cfqd->rq_in_driver++;
 }
 
-/*
- * make sure the service time gets corrected on reissue of this request
- */
-static void cfq_requeue_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 {
-	cfq_deactivate_request(q, rq);
-	list_add(&rq->queuelist, &q->queue_head);
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
+	WARN_ON(!cfqd->rq_in_driver);
+	cfqd->rq_in_driver--;
 }
 
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
+static void cfq_remove_request(struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
 
-	if (crq) {
-		list_del_init(&rq->queuelist);
-		cfq_del_crq_rb(crq);
-		cfq_remove_merge_hints(q, crq);
-
-	}
+	list_del_init(&rq->queuelist);
+	cfq_del_crq_rb(crq);
+	cfq_remove_merge_hints(rq->q, crq);
 }
 
 static int
@@ -762,7 +716,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req)
 	cfq_del_crq_hash(crq);
 	cfq_add_crq_hash(cfqd, crq);
 
-	if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+	if (rq_rb_key(req) != crq->rb_key) {
 		struct cfq_queue *cfqq = crq->cfq_queue;
 
 		cfq_update_next_crq(crq);
@@ -785,7 +739,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	    time_before(next->start_time, rq->start_time))
 		list_move(&rq->queuelist, &next->queuelist);
 
-	cfq_remove_request(q, next);
+	cfq_remove_request(next);
 }
 
 static inline void
@@ -992,53 +946,15 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	return 1;
 }
 
-/*
- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
- * this function sector sorts the selected request to minimize seeks. we start
- * at cfqd->last_sector, not 0.
- */
-static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = crq->cfq_queue;
-	struct list_head *head = &q->queue_head, *entry = head;
-	struct request *__rq;
-	sector_t last;
-
-	list_del(&crq->request->queuelist);
-
-	last = cfqd->last_sector;
-	list_for_each_entry_reverse(__rq, head, queuelist) {
-		struct cfq_rq *__crq = RQ_DATA(__rq);
-
-		if (blk_barrier_rq(__rq))
-			break;
-		if (!blk_fs_request(__rq))
-			break;
-		if (cfq_crq_requeued(__crq))
-			break;
-
-		if (__rq->sector <= crq->request->sector)
-			break;
-		if (__rq->sector > last && crq->request->sector < last) {
-			last = crq->request->sector + crq->request->nr_sectors;
-			break;
-		}
-		entry = &__rq->queuelist;
-	}
-
-	cfqd->last_sector = last;
 
 	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
-
-	cfq_del_crq_rb(crq);
-	cfq_remove_merge_hints(q, crq);
-
-	cfq_mark_crq_in_flight(crq);
-	cfq_clear_crq_requeued(crq);
-
+	cfq_remove_request(crq->request);
 	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
-	list_add_tail(&crq->request->queuelist, entry);
+	elv_dispatch_sort(q, crq->request);
 }
 
 /*
@@ -1159,7 +1075,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	/*
 	 * finally, insert request into driver dispatch list
 	 */
-	cfq_dispatch_sort(cfqd->queue, crq);
+	cfq_dispatch_insert(cfqd->queue, crq);
 
 	cfqd->dispatch_slice++;
 	dispatched++;
@@ -1194,7 +1110,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static int
-cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
@@ -1204,12 +1120,25 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
 
 	cfqq = cfq_select_queue(cfqd, force);
 	if (cfqq) {
+		int max_dispatch;
+
+		/*
+		 * if idle window is disabled, allow queue buildup
+		 */
+		if (!cfq_cfqq_idle_window(cfqq) &&
+		    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+			return 0;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
 
-		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
+		if (!force) {
+			max_dispatch = cfqd->cfq_quantum;
+			if (cfq_class_idle(cfqq))
+				max_dispatch = 1;
+		} else
+			max_dispatch = INT_MAX;
 
 		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
 	}
@@ -1217,93 +1146,6 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
 	return 0;
 }
 
-static inline void cfq_account_dispatch(struct cfq_rq *crq)
-{
-	struct cfq_queue *cfqq = crq->cfq_queue;
-	struct cfq_data *cfqd = cfqq->cfqd;
-
-	if (unlikely(!blk_fs_request(crq->request)))
-		return;
-
-	/*
-	 * accounted bit is necessary since some drivers will call
-	 * elv_next_request() many times for the same request (eg ide)
-	 */
-	if (cfq_crq_in_driver(crq))
-		return;
-
-	cfq_mark_crq_in_driver(crq);
-	cfqd->rq_in_driver++;
-}
-
-static inline void
-cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
-	struct cfq_data *cfqd = cfqq->cfqd;
-	unsigned long now;
-
-	if (!cfq_crq_in_driver(crq))
-		return;
-
-	now = jiffies;
-
-	WARN_ON(!cfqd->rq_in_driver);
-	cfqd->rq_in_driver--;
-
-	if (!cfq_class_idle(cfqq))
-		cfqd->last_end_request = now;
-
-	if (!cfq_cfqq_dispatched(cfqq)) {
-		if (cfq_cfqq_on_rr(cfqq)) {
-			cfqq->service_last = now;
-			cfq_resort_rr_list(cfqq, 0);
-		}
-		if (cfq_cfqq_expired(cfqq)) {
-			__cfq_slice_expired(cfqd, cfqq, 0);
-			cfq_schedule_dispatch(cfqd);
-		}
-	}
-
-	if (cfq_crq_is_sync(crq))
-		crq->io_context->last_end_request = now;
-}
-
-static struct request *cfq_next_request(request_queue_t *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct request *rq;
-
-	if (!list_empty(&q->queue_head)) {
-		struct cfq_rq *crq;
-dispatch:
-		rq = list_entry_rq(q->queue_head.next);
-
-		crq = RQ_DATA(rq);
-		if (crq) {
-			struct cfq_queue *cfqq = crq->cfq_queue;
-
-			/*
-			 * if idle window is disabled, allow queue buildup
-			 */
-			if (!cfq_crq_in_driver(crq) &&
-			    !cfq_cfqq_idle_window(cfqq) &&
-			    !blk_barrier_rq(rq) &&
-			    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
-				return NULL;
-
-			cfq_remove_merge_hints(q, crq);
-			cfq_account_dispatch(crq);
-		}
-
-		return rq;
-	}
-
-	if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
-		goto dispatch;
-
-	return NULL;
-}
-
 /*
  * task holds one reference to the queue, dropped when task exits. each crq
  * in-flight on this queue also holds a reference, dropped when crq is freed.
@@ -1816,8 +1658,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
+static void cfq_insert_request(request_queue_t *q, struct request *rq)
 {
+	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_rq *crq = RQ_DATA(rq);
 	struct cfq_queue *cfqq = crq->cfq_queue;
 
@@ -1837,56 +1680,37 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
 	cfq_crq_enqueued(cfqd, cfqq, crq);
 }
 
-static void
-cfq_insert_request(request_queue_t *q, struct request *rq, int where)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	switch (where) {
-		case ELEVATOR_INSERT_BACK:
-			while (cfq_dispatch_requests(q, INT_MAX, 1))
-				;
-			list_add_tail(&rq->queuelist, &q->queue_head);
-			/*
-			 * If we were idling with pending requests on
-			 * inactive cfqqs, force dispatching will
-			 * remove the idle timer and the queue won't
-			 * be kicked by __make_request() afterward.
-			 * Kick it here.
-			 */
-			cfq_schedule_dispatch(cfqd);
-			break;
-		case ELEVATOR_INSERT_FRONT:
-			list_add(&rq->queuelist, &q->queue_head);
-			break;
-		case ELEVATOR_INSERT_SORT:
-			BUG_ON(!blk_fs_request(rq));
-			cfq_enqueue(cfqd, rq);
-			break;
-		default:
-			printk("%s: bad insert point %d\n", __FUNCTION__,where);
-			return;
-	}
-}
-
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int sync = cfq_crq_is_sync(crq);
+	unsigned long now;
 
-	if (unlikely(!blk_fs_request(rq)))
-		return;
+	now = jiffies;
 
-	cfqq = crq->cfq_queue;
+	WARN_ON(!cfqd->rq_in_driver);
+	WARN_ON(!cfqq->on_dispatch[sync]);
+	cfqd->rq_in_driver--;
+	cfqq->on_dispatch[sync]--;
 
-	if (cfq_crq_in_flight(crq)) {
-		const int sync = cfq_crq_is_sync(crq);
+	if (!cfq_class_idle(cfqq))
+		cfqd->last_end_request = now;
 
-		WARN_ON(!cfqq->on_dispatch[sync]);
-		cfqq->on_dispatch[sync]--;
+	if (!cfq_cfqq_dispatched(cfqq)) {
+		if (cfq_cfqq_on_rr(cfqq)) {
+			cfqq->service_last = now;
+			cfq_resort_rr_list(cfqq, 0);
+		}
+		if (cfq_cfqq_expired(cfqq)) {
+			__cfq_slice_expired(cfqd, cfqq, 0);
+			cfq_schedule_dispatch(cfqd);
+		}
 	}
 
-	cfq_account_completion(cfqq, crq);
+	if (cfq_crq_is_sync(crq))
+		crq->io_context->last_end_request = now;
 }
 
 static struct request *
@@ -2118,9 +1942,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	INIT_HLIST_NODE(&crq->hash);
 	crq->cfq_queue = cfqq;
 	crq->io_context = cic;
-	cfq_clear_crq_in_flight(crq);
-	cfq_clear_crq_in_driver(crq);
-	cfq_clear_crq_requeued(crq);
 
 	if (rw == READ || process_sync(tsk))
 		cfq_mark_crq_is_sync(crq);
@@ -2201,7 +2022,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 	 * only expire and reinvoke request handler, if there are
 	 * other queues with pending requests
 	 */
-	if (!cfq_pending_requests(cfqd)) {
+	if (!cfqd->busy_queues) {
 		cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
 		add_timer(&cfqd->idle_slice_timer);
 		goto out_cont;
@@ -2576,10 +2397,9 @@ static struct elevator_type iosched_cfq = {
 	.elevator_merge_fn = cfq_merge,
 	.elevator_merged_fn = cfq_merged_request,
 	.elevator_merge_req_fn = cfq_merged_requests,
-	.elevator_next_req_fn = cfq_next_request,
+	.elevator_dispatch_fn = cfq_dispatch_requests,
 	.elevator_add_req_fn = cfq_insert_request,
-	.elevator_remove_req_fn = cfq_remove_request,
-	.elevator_requeue_req_fn = cfq_requeue_request,
+	.elevator_activate_req_fn = cfq_activate_request,
 	.elevator_deactivate_req_fn = cfq_deactivate_request,
 	.elevator_queue_empty_fn = cfq_queue_empty,
 	.elevator_completed_req_fn = cfq_completed_request,
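For context on how these ops get called: with the generic dispatch queue,
the block layer pulls requests from q->queue_head and only calls back into
the scheduler when that list runs dry; the activate/deactivate hooks fire
as the driver takes or requeues a request. Roughly (a simplified sketch of
the elevator-core flow after this series, not code from this patch):

/* simplified: how elevator_dispatch_fn and the activate hook interact */
struct request *elv_next_request(request_queue_t *q)
{
	while (list_empty(&q->queue_head)) {
		/* ask the scheduler to move requests onto q->queue_head;
		 * cfq_dispatch_requests() returns the number dispatched */
		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}

	/* when the driver starts this request, the elevator core calls
	 * elevator_activate_req_fn (cfq_activate_request); a later
	 * requeue triggers elevator_deactivate_req_fn instead */
	return list_entry_rq(q->queue_head.next);
}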