author     Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:58:35 -0500
committer  Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:58:35 -0500
commit     4c63f5646e405b5010cc9499419060bf2e838f5b (patch)
tree       df91ba315032c8ec4aafeb3ab96fdfa7c6c656e1 /block/elevator.c
parent     cafb0bfca1a73efd6d8a4a6a6a716e6134b96c24 (diff)
parent     69d60eb96ae8a73cf9b79cf28051caf973006011 (diff)
Merge branch 'for-2.6.39/stack-plug' into for-2.6.39/core
Conflicts:
	block/blk-core.c
	block/blk-flush.c
	drivers/md/raid1.c
	drivers/md/raid10.c
	drivers/md/raid5.c
	fs/nilfs2/btnode.c
	fs/nilfs2/mdt.c
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
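This merge brings the on-stack plugging work into the core branch: the old per-queue plug (blk_plug_device(), __generic_unplug_device(), and the `plug` argument threaded through the elevator entry points) is replaced by a plug that lives on the submitting task's stack. A minimal caller-side sketch of the new pattern, assuming the blk_start_plug()/blk_finish_plug() API that the stack-plug branch introduces (those names come from that branch, not from this diff):

```c
#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Sketch: batch several bios behind a per-task plug. Requests collect
 * on the task's plug list and are flushed to the device when the plug
 * is finished, replacing the old queue-level unplug threshold/timer.
 */
static void submit_bio_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* install the plug on this task */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flush the batched requests */
}
```

The elevator.c changes below fall out of that model: every reference to queue-level plugging disappears.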
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c  49
1 files changed, 8 insertions, 41 deletions
diff --git a/block/elevator.c b/block/elevator.c
index fabf3675c913..542ce826b401 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -113,7 +113,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-static inline int elv_try_merge(struct request *__rq, struct bio *bio)
+int elv_try_merge(struct request *__rq, struct bio *bio)
 {
 	int ret = ELEVATOR_NO_MERGE;
 
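The first hunk drops `static inline` so that elv_try_merge() can be called from outside elevator.c; on the stack-plug branch this kind of probe lets the submission path try to merge an incoming bio into an already-queued request before allocating a new one. A hedged sketch of such a caller (illustrative only; the wrapper function is made up):

```c
#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Illustrative: probe a queued request for a merge before falling
 * back to allocating a fresh request for the bio. */
static bool try_merge_into(struct request *rq, struct bio *bio)
{
	switch (elv_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		/* bio starts where rq ends: append it to rq */
		return true;
	case ELEVATOR_FRONT_MERGE:
		/* bio ends where rq starts: prepend it to rq */
		return true;
	default:
		return false;	/* ELEVATOR_NO_MERGE */
	}
}
```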
@@ -421,6 +421,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	struct list_head *entry;
 	int stop_flags;
 
+	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
 
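REQ_ON_PLUG marks a request that is still parked on a task's plug list, so the new BUG_ON() asserts that no such request can ever reach the dispatch sort. A sketch of the flag's intended life cycle under the stack-plug branch (reconstructed for illustration; not code from this diff):

```c
#include <linux/blkdev.h>

/* Submission side: the request stays task-private while plugged. */
static void park_on_plug(struct blk_plug *plug, struct request *rq)
{
	rq->cmd_flags |= REQ_ON_PLUG;
	list_add_tail(&rq->queuelist, &plug->list);
}

/* Flush side: clear the flag before the queue ever sees the request. */
static void hand_to_queue(struct request_queue *q, struct request *rq)
{
	rq->cmd_flags &= ~REQ_ON_PLUG;
	__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
}
```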
@@ -617,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-	int unplug_it = 1;
-
 	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
-		/*
-		 * Most requeues happen because of a busy condition,
-		 * don't force unplug of the queue for that case.
-		 * Clear unplug_it and fall through.
-		 */
-		unplug_it = 0;
-
 	case ELEVATOR_INSERT_FRONT:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->queue_head);
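With unplug_it gone there is nothing left for the requeue case to do on its own, so ELEVATOR_INSERT_REQUEUE now deliberately falls through into ELEVATOR_INSERT_FRONT. The resulting switch arm reads roughly as follows (condensed from the post-merge code):

```c
case ELEVATOR_INSERT_REQUEUE:
	/* a requeue is now just a front insertion; fall through */
case ELEVATOR_INSERT_FRONT:
	rq->cmd_flags |= REQ_SOFTBARRIER;
	list_add(&rq->queuelist, &q->queue_head);
	break;
```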
@@ -677,25 +670,17 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		blk_insert_flush(rq);
 		break;
-
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __func__, where);
 		BUG();
 	}
-
-	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-			- queue_in_flight(q);
-
-		if (nrq >= q->unplug_thresh)
-			__generic_unplug_device(q);
-	}
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-		       int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
+	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
 	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
@@ -707,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 	    where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
-	if (plug)
-		blk_plug_device(q);
-
 	elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-		     int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__elv_add_request(q, rq, where, plug);
+	__elv_add_request(q, rq, where);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (!list_empty(&q->queue_head))
-		return 0;
-
-	if (e->ops->elevator_queue_empty_fn)
-		return e->ops->elevator_queue_empty_fn(q);
-
-	return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
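Finally, elv_queue_empty() disappears outright: with the queue-level plug gone there is no "plugged but not empty" state left for callers to poll, so drivers simply drive the queue through the normal dispatch loop. A hypothetical driver-side shape (example_request_fn and the immediate completion are invented for illustration):

```c
#include <linux/blkdev.h>

/* Hypothetical request_fn: rather than asking the elevator whether it
 * is empty, fetch requests until the dispatch loop runs dry. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		/* hand rq to the hardware; complete at once for the demo */
		__blk_end_request_all(rq, 0);
	}
}
```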