author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /block/elevator.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c  |  202
1 file changed, 74 insertions(+), 128 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 4e11559aa2b0..b0b38ce0dcb6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -113,7 +113,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-static inline int elv_try_merge(struct request *__rq, struct bio *bio)
+int elv_try_merge(struct request *__rq, struct bio *bio)
 {
 	int ret = ELEVATOR_NO_MERGE;
 
@@ -155,13 +155,8 @@ static struct elevator_type *elevator_get(const char *name)
 
 	e = elevator_find(name);
 	if (!e) {
-		char elv[ELV_NAME_MAX + strlen("-iosched")];
-
 		spin_unlock(&elv_list_lock);
-
-		snprintf(elv, sizeof(elv), "%s-iosched", name);
-
-		request_module("%s", elv);
+		request_module("%s-iosched", name);
 		spin_lock(&elv_list_lock);
 		e = elevator_find(name);
 	}
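Note: request_module() takes printf-style arguments (the patched call above relies on exactly that), so the old stack buffer and snprintf() round-trip were unnecessary. A minimal sketch of the before/after pattern, restating the hunk above for clarity rather than adding new patch content:

	/* before: format the "<name>-iosched" module alias into a local buffer */
	char elv[ELV_NAME_MAX + strlen("-iosched")];
	snprintf(elv, sizeof(elv), "%s-iosched", name);
	request_module("%s", elv);

	/* after: hand the format string straight to request_module() */
	request_module("%s-iosched", name);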
@@ -429,7 +424,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
@@ -519,6 +514,40 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
+/*
+ * Attempt to do an insertion back merge. Only check for the case where
+ * we can append 'rq' to an existing request, so we can throw 'rq' away
+ * afterwards.
+ *
+ * Returns true if we merged, false otherwise
+ */
+static bool elv_attempt_insert_merge(struct request_queue *q,
+				     struct request *rq)
+{
+	struct request *__rq;
+
+	if (blk_queue_nomerges(q))
+		return false;
+
+	/*
+	 * First try one-hit cache.
+	 */
+	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+		return true;
+
+	if (blk_queue_noxmerges(q))
+		return false;
+
+	/*
+	 * See if our hash lookup can find a potential backmerge.
+	 */
+	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+	if (__rq && blk_attempt_req_merge(q, __rq, rq))
+		return true;
+
+	return false;
+}
+
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
 	struct elevator_queue *e = q->elevator;
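The helper added above is not called directly; it is driven by the new ELEVATOR_INSERT_SORT_MERGE case added to __elv_add_request() further down in this diff. A hedged usage sketch (the calling context is illustrative, not taken from this patch; judging from elv_add_request() below, __elv_add_request() is expected to run with q->queue_lock held):

	/*
	 * Try to back-merge rq into a request that is already queued; if
	 * elv_attempt_insert_merge() fails, the switch falls through to a
	 * normal ELEVATOR_INSERT_SORT insertion.
	 */
	__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);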
@@ -536,14 +565,18 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 			     struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
+	const int next_sorted = next->cmd_flags & REQ_SORTED;
 
-	if (e->ops->elevator_merge_req_fn)
+	if (next_sorted && e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
 
 	elv_rqhash_reposition(q, rq);
-	elv_rqhash_del(q, next);
 
-	q->nr_sorted--;
+	if (next_sorted) {
+		elv_rqhash_del(q, next);
+		q->nr_sorted--;
+	}
+
 	q->last_merge = rq;
 }
 
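A hedged reading of the next_sorted guard above (inferred, not stated in the patch): with insertion merging, a request can be merged away without ever having been sort-inserted into the elevator, and such a request does not carry REQ_SORTED, so it must not be removed from the rqhash or subtracted from q->nr_sorted. In sketch form:

	/*
	 * Assumption: q->nr_sorted counts only requests that went through the
	 * I/O scheduler (those carrying REQ_SORTED). Decrementing it for an
	 * unsorted 'next' would skew the count over time.
	 */
	if (next->cmd_flags & REQ_SORTED) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}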
@@ -570,7 +603,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 
 	rq->cmd_flags &= ~REQ_STARTED;
 
-	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 void elv_drain_elevator(struct request_queue *q)
@@ -615,20 +648,28 @@ void elv_quiesce_end(struct request_queue *q)
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 }
 
-void elv_insert(struct request_queue *q, struct request *rq, int where)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
-	struct list_head *pos;
-	unsigned ordseq;
-	int unplug_it = 1;
-
 	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
+	if (rq->cmd_flags & REQ_SOFTBARRIER) {
+		/* barriers are scheduling boundary, update end_sector */
+		if (rq->cmd_type == REQ_TYPE_FS ||
+		    (rq->cmd_flags & REQ_DISCARD)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+		    (where == ELEVATOR_INSERT_SORT ||
+		     where == ELEVATOR_INSERT_SORT_MERGE))
+		where = ELEVATOR_INSERT_BACK;
+
 	switch (where) {
+	case ELEVATOR_INSERT_REQUEUE:
 	case ELEVATOR_INSERT_FRONT:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
-
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
@@ -649,6 +690,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		__blk_run_queue(q);
 		break;
 
+	case ELEVATOR_INSERT_SORT_MERGE:
+		/*
+		 * If we succeed in merging this request with one in the
+		 * queue already, we are done - rq has now been freed,
+		 * so no need to do anything further.
+		 */
+		if (elv_attempt_insert_merge(q, rq))
+			break;
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
 		       !(rq->cmd_flags & REQ_DISCARD));
@@ -668,115 +717,28 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		q->elevator->ops->elevator_add_req_fn(q, rq);
 		break;
 
-	case ELEVATOR_INSERT_REQUEUE:
-		/*
-		 * If ordered flush isn't in progress, we do front
-		 * insertion; otherwise, requests should be requeued
-		 * in ordseq order.
-		 */
+	case ELEVATOR_INSERT_FLUSH:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
-
-		/*
-		 * Most requeues happen because of a busy condition,
-		 * don't force unplug of the queue for that case.
-		 */
-		unplug_it = 0;
-
-		if (q->ordseq == 0) {
-			list_add(&rq->queuelist, &q->queue_head);
-			break;
-		}
-
-		ordseq = blk_ordered_req_seq(rq);
-
-		list_for_each(pos, &q->queue_head) {
-			struct request *pos_rq = list_entry_rq(pos);
-			if (ordseq <= blk_ordered_req_seq(pos_rq))
-				break;
-		}
-
-		list_add_tail(&rq->queuelist, pos);
+		blk_insert_flush(rq);
 		break;
-
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __func__, where);
 		BUG();
 	}
-
-	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-			- queue_in_flight(q);
-
-		if (nrq >= q->unplug_thresh)
-			__generic_unplug_device(q);
-	}
-}
-
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-		       int plug)
-{
-	if (q->ordcolor)
-		rq->cmd_flags |= REQ_ORDERED_COLOR;
-
-	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		if (rq->cmd_flags & REQ_HARDBARRIER)
-			q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update
-		 * end_sector
-		 */
-		if (rq->cmd_type == REQ_TYPE_FS ||
-		    (rq->cmd_flags & REQ_DISCARD)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
-		    where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	if (plug)
-		blk_plug_device(q);
-
-	elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-		     int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__elv_add_request(q, rq, where, plug);
+	__elv_add_request(q, rq, where);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (!list_empty(&q->queue_head))
-		return 0;
-
-	if (e->ops->elevator_queue_empty_fn)
-		return e->ops->elevator_queue_empty_fn(q);
-
-	return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
@@ -802,7 +764,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	if (e->ops->elevator_set_req_fn)
 		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
 
-	rq->elevator_private = NULL;
+	rq->elevator_private[0] = NULL;
 	return 0;
 }
 
@@ -828,6 +790,8 @@ void elv_abort_queue(struct request_queue *q)
 {
 	struct request *rq;
 
+	blk_abort_flushes(q);
+
 	while (!list_empty(&q->queue_head)) {
 		rq = list_entry_rq(q->queue_head.next);
 		rq->cmd_flags |= REQ_QUIET;
@@ -855,24 +819,6 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
-
-	/*
-	 * Check if the queue is waiting for fs requests to be
-	 * drained for flush sequence.
-	 */
-	if (unlikely(q->ordseq)) {
-		struct request *next = NULL;
-
-		if (!list_empty(&q->queue_head))
-			next = list_entry_rq(q->queue_head.next);
-
-		if (!queue_in_flight(q) &&
-		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
-		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
-			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			__blk_run_queue(q);
-		}
-	}
 }
 
 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
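Interface summary, restating what the hunks above show: elv_insert() is folded into __elv_add_request(), elv_queue_empty() is removed, and both __elv_add_request() and elv_add_request() lose their plug argument. A hedged sketch of how a caller adapts (the call sites and flag choices shown are illustrative, not taken from this patch):

	/* old calling convention, before this merge:
	 *	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	 *	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
	 */

	/* new calling convention after this merge: */
	elv_add_request(q, rq, ELEVATOR_INSERT_BACK);	    /* takes q->queue_lock itself */
	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);  /* caller already holds q->queue_lock */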