author		Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 13:52:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 14:10:35 -0400
commit		c9059598ea8981d02356eead3188bf7fa4d717b8
tree		03e73b20a30e988da7c6a3e0ad93b2dc5843274d /block/elevator.c
parent		0a33f80a8373eca7f4bea3961d1346c3815fa5ed
parent		b0fd271d5fba0b2d00888363f3869e3f9b26caa9
Merge branch 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block: (153 commits)
  block: add request clone interface (v2)
  floppy: fix hibernation
  ramdisk: remove long-deprecated "ramdisk=" boot-time parameter
  fs/bio.c: add missing __user annotation
  block: prevent possible io_context->refcount overflow
  Add serial number support for virtio_blk, V4a
  block: Add missing bounce_pfn stacking and fix comments
  Revert "block: Fix bounce limit setting in DM"
  cciss: decode unit attention in SCSI error handling code
  cciss: Remove no longer needed sendcmd reject processing code
  cciss: change SCSI error handling routines to work with interrupts enabled.
  cciss: separate error processing and command retrying code in sendcmd_withirq_core()
  cciss: factor out fix target status processing code from sendcmd functions
  cciss: simplify interface of sendcmd() and sendcmd_withirq()
  cciss: factor out core of sendcmd_withirq() for use by SCSI error handling code
  cciss: Use schedule_timeout_uninterruptible in SCSI error handling code
  block: needs to set the residual length of a bidi request
  Revert "block: implement blkdev_readpages"
  block: Fix bounce limit setting in DM
  Removed reference to non-existing file Documentation/PCI/PCI-DMA-mapping.txt
  ...

Manually fix conflicts with tracing updates in:
	block/blk-sysfs.c
	drivers/ide/ide-atapi.c
	drivers/ide/ide-cd.c
	drivers/ide/ide-floppy.c
	drivers/ide/ide-tape.c
	include/trace/events/block.h
	kernel/trace/blktrace.c
Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c	185

1 file changed, 36 insertions(+), 149 deletions(-)
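Most of the churn in this diff is mechanical: direct reads of struct request's sector and nr_sectors fields are replaced by the blk_rq_pos() and blk_rq_sectors() accessors. A minimal before/after sketch of the idiom (the helper names here are invented for illustration; only the accessors come from the diff itself):

#include <linux/blkdev.h>

/* Old style: callers poke at struct request internals directly. */
static sector_t rq_end_sector_old(struct request *rq)
{
	return rq->sector + rq->nr_sectors;
}

/* New style: the accessors hide the representation, so the block
 * core can change its internal bookkeeping without touching callers. */
static sector_t rq_end_sector_new(struct request *rq)
{
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}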
diff --git a/block/elevator.c b/block/elevator.c
index e220f0c543e3..ca861927ba41 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,8 +51,7 @@ static const int elv_hash_shift = 6;
 #define ELV_HASH_FN(sec)	\
 		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
+#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
 
 /*
  * Query io scheduler to see if the current process issuing bio may be
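Note that rq_hash_key() was and remains a request's end sector: the merge hash is keyed on where a request ends, so a bio's back-merge candidate is a single lookup away. A sketch of that use, assuming this file's static elv_rqhash_find() helper:

/* A bio can back-merge onto a request exactly when the request ends
 * where the bio begins, hence hashing requests by their end sector. */
static struct request *find_back_merge_candidate(struct request_queue *q,
						 struct bio *bio)
{
	return elv_rqhash_find(q, bio->bi_sector);
}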
@@ -116,9 +115,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 	 * we can merge and sequence is ok, check if it's possible
 	 */
 	if (elv_rq_merge_ok(__rq, bio)) {
-		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
 			ret = ELEVATOR_BACK_MERGE;
-		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
 			ret = ELEVATOR_FRONT_MERGE;
 	}
 
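The merge test is plain sector arithmetic; a worked example with invented numbers:

/* Back merge:  request covers sectors [100, 108), bio starts at 108:
 *	blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector
 *	100 + 8 == 108  ->  ELEVATOR_BACK_MERGE
 *
 * Front merge: request starts at 108, bio covers [100, 108):
 *	blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector
 *	108 - 8 == 100  ->  ELEVATOR_FRONT_MERGE
 */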
@@ -306,22 +305,6 @@ void elevator_exit(struct elevator_queue *e)
 }
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hlist_del_init(&rq->hash);
@@ -383,9 +366,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
 		parent = *p;
 		__rq = rb_entry(parent, struct request, rb_node);
 
-		if (rq->sector < __rq->sector)
+		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 			p = &(*p)->rb_left;
-		else if (rq->sector > __rq->sector)
+		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
 			p = &(*p)->rb_right;
 		else
 			return __rq;
@@ -413,9 +396,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 	while (n) {
 		rq = rb_entry(n, struct request, rb_node);
 
-		if (sector < rq->sector)
+		if (sector < blk_rq_pos(rq))
 			n = n->rb_left;
-		else if (sector > rq->sector)
+		else if (sector > blk_rq_pos(rq))
 			n = n->rb_right;
 		else
 			return rq;
@@ -454,14 +437,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 			break;
 		if (pos->cmd_flags & stop_flags)
 			break;
-		if (rq->sector >= boundary) {
-			if (pos->sector < boundary)
+		if (blk_rq_pos(rq) >= boundary) {
+			if (blk_rq_pos(pos) < boundary)
 				continue;
 		} else {
-			if (pos->sector >= boundary)
+			if (blk_rq_pos(pos) >= boundary)
 				break;
 		}
-		if (rq->sector >= pos->sector)
+		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
 			break;
 	}
 
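elv_dispatch_sort() keeps the dispatch list in one-way elevator order around a boundary sector: requests at or past the boundary come first, requests that wrapped below it follow, each group in ascending order. The equivalent ordering, sketched as a standalone comparator (illustration only, not kernel code):

static int oneway_elevator_cmp(sector_t boundary, sector_t a, sector_t b)
{
	int a_wrapped = a < boundary;
	int b_wrapped = b < boundary;

	if (a_wrapped != b_wrapped)
		return a_wrapped - b_wrapped;	/* unwrapped group first */
	return (a > b) - (a < b);		/* ascending within a group */
}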
@@ -559,7 +542,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 * in_flight count again
 	 */
 	if (blk_account_rq(rq)) {
-		q->in_flight--;
+		q->in_flight[rq_is_sync(rq)]--;
 		if (blk_sorted_rq(rq))
 			elv_deactivate_rq(q, rq);
 	}
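q->in_flight is no longer a scalar: it becomes a two-element array indexed by rq_is_sync(rq), and the old total is recovered by summing both buckets, which is what the queue_in_flight() helper used below amounts to (a sketch, assuming the 2.6.31 definitions):

static inline int queue_in_flight_sketch(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];	/* async + sync */
}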
@@ -588,6 +571,9 @@ void elv_drain_elevator(struct request_queue *q)
  */
 void elv_quiesce_start(struct request_queue *q)
 {
+	if (!q->elevator)
+		return;
+
 	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
 	/*
@@ -595,7 +581,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -639,8 +625,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything.  There's no point in delaying queue
 		 * processing.
 		 */
-		blk_remove_plug(q);
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
@@ -699,7 +684,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
 	if (unplug_it && blk_queue_plugged(q)) {
 		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-				- q->in_flight;
+				- queue_in_flight(q);
 
 		if (nrq >= q->unplug_thresh)
 			__generic_unplug_device(q);
@@ -755,117 +740,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
 }
 EXPORT_SYMBOL(elv_add_request);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-
-	while (1) {
-		while (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
-		}
-
-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	int ret;
-
-	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->cmd_flags & REQ_STARTED)) {
-			/*
-			 * This is the first time the device driver
-			 * sees this request (possibly after
-			 * requeueing).  Notify IO scheduler.
-			 */
-			if (blk_sorted_rq(rq))
-				elv_activate_rq(q, rq);
-
-			/*
-			 * just mark as started even if we don't start
-			 * it, a request that has been delayed should
-			 * not be passed by new incoming requests
-			 */
-			rq->cmd_flags |= REQ_STARTED;
-			trace_block_rq_issue(q, rq);
-		}
-
-		if (!q->boundary_rq || q->boundary_rq == rq) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = NULL;
-		}
-
-		if (rq->cmd_flags & REQ_DONTPREP)
-			break;
-
-		if (q->dma_drain_size && rq->data_len) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		if (!q->prep_rq_fn)
-			break;
-
-		ret = q->prep_rq_fn(q, rq);
-		if (ret == BLKPREP_OK) {
-			break;
-		} else if (ret == BLKPREP_DEFER) {
-			/*
-			 * the request may have been (partially) prepped.
-			 * we need to keep this request in the front to
-			 * avoid resource deadlock.  REQ_STARTED will
-			 * prevent other fs requests from passing this one.
-			 */
-			if (q->dma_drain_size && rq->data_len &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
-				/*
-				 * remove the space for the drain we added
-				 * so that we don't add it again
-				 */
-				--rq->nr_phys_segments;
-			}
-
-			rq = NULL;
-			break;
-		} else if (ret == BLKPREP_KILL) {
-			rq->cmd_flags |= REQ_QUIET;
-			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
-		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
-			break;
-		}
-	}
-
-	return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
-	BUG_ON(list_empty(&rq->queuelist));
-	BUG_ON(ELV_ON_HASH(rq));
-
-	list_del_init(&rq->queuelist);
-
-	/*
-	 * the time frame between a request being removed from the lists
-	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side.
-	 */
-	if (blk_account_rq(rq))
-		q->in_flight++;
-}
-
 int elv_queue_empty(struct request_queue *q)
 {
 	struct elevator_queue *e = q->elevator;
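The functions removed above are not dropped outright: in this merge window their logic moves into blk-core.c behind the blk_peek_request()/blk_start_request()/blk_fetch_request() interface. A sketch of the driver-side replacement loop (the request_fn name and the immediate completion are invented for illustration):

static void sketch_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* blk_fetch_request() == blk_peek_request() + blk_start_request() */
	while ((rq = blk_fetch_request(q)) != NULL) {
		/* ... hand rq to the hardware here ... */
		__blk_end_request_all(rq, 0);	/* then complete it */
	}
}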
@@ -935,7 +809,12 @@ void elv_abort_queue(struct request_queue *q)
 		rq = list_entry_rq(q->queue_head.next);
 		rq->cmd_flags |= REQ_QUIET;
 		trace_block_rq_abort(q, rq);
-		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+		/*
+		 * Mark this request as started so we don't trigger
+		 * any debug logic in the end I/O path.
+		 */
+		blk_start_request(rq);
+		__blk_end_request_all(rq, -EIO);
 	}
 }
 EXPORT_SYMBOL(elv_abort_queue);
@@ -948,7 +827,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 * request is released from the driver, io must be done
 	 */
 	if (blk_account_rq(rq)) {
-		q->in_flight--;
+		q->in_flight[rq_is_sync(rq)]--;
 		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
@@ -963,11 +842,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		if (!list_empty(&q->queue_head))
 			next = list_entry_rq(q->queue_head.next);
 
-		if (!q->in_flight &&
+		if (!queue_in_flight(q) &&
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			blk_start_queueing(q);
+			__blk_run_queue(q);
 		}
 	}
 }
@@ -1175,6 +1054,9 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 	char elevator_name[ELV_NAME_MAX];
 	struct elevator_type *e;
 
+	if (!q->elevator)
+		return count;
+
 	strlcpy(elevator_name, name, sizeof(elevator_name));
 	strstrip(elevator_name);
 
@@ -1198,10 +1080,15 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 ssize_t elv_iosched_show(struct request_queue *q, char *name)
 {
 	struct elevator_queue *e = q->elevator;
-	struct elevator_type *elv = e->elevator_type;
+	struct elevator_type *elv;
 	struct elevator_type *__e;
 	int len = 0;
 
+	if (!q->elevator)
+		return sprintf(name, "none\n");
+
+	elv = e->elevator_type;
+
 	spin_lock(&elv_list_lock);
 	list_for_each_entry(__e, &elv_list, list) {
 		if (!strcmp(elv->elevator_name, __e->elevator_name))
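Together with the matching guard in elv_iosched_store(), this makes the scheduler sysfs attribute safe on queues registered without an elevator (presumably bio-based/stacked devices): writes are silently accepted and reads report "none". The observable effect, with an illustrative device name:

/*
 *	$ cat /sys/block/<dev>/queue/scheduler
 *	none
 *
 * rather than an oops from dereferencing a NULL q->elevator.
 */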