author     Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
committer  Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
commit     7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree       33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /block/elevator.c
parent     73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging,
and delay users have been converted to use the new API for that.
So let's kill off the old plugging along with aops->sync_page().
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/elevator.c')
-rw-r--r--   block/elevator.c   43
1 file changed, 3 insertions(+), 40 deletions(-)
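
For readers following the conversion described in the commit message: with per-queue plugging gone, batching is expressed by the submitter holding a struct blk_plug on its own stack via blk_start_plug()/blk_finish_plug(), the interface added by the parent commit. Below is a minimal sketch of that usage pattern; the submit_batch() helper and its bio array are illustrative only, not code from this series.

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/fs.h>

/* Hypothetical helper showing explicit on-stack plugging. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	/* The plug lives on the caller's stack, not on the request_queue. */
	blk_start_plug(&plug);

	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* I/O is batched while plugged */

	/*
	 * Unplugging is explicit (and also happens if the task schedules),
	 * so there is no longer a per-queue unplug timer or threshold.
	 */
	blk_finish_plug(&plug);
}
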
diff --git a/block/elevator.c b/block/elevator.c
index 25713927c0d3..3ea208256e78 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -619,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-	int unplug_it = 1;
-
 	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
-		/*
-		 * Most requeues happen because of a busy condition,
-		 * don't force unplug of the queue for that case.
-		 * Clear unplug_it and fall through.
-		 */
-		unplug_it = 0;
-
 	case ELEVATOR_INSERT_FRONT:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->queue_head);
@@ -679,24 +670,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		blk_insert_flush(rq);
 		break;
-
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __func__, where);
 		BUG();
 	}
-
-	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-			- queue_in_flight(q);
-
-		if (nrq >= q->unplug_thresh)
-			__generic_unplug_device(q);
-	}
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-		       int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
 
@@ -711,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		 where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
-	if (plug)
-		blk_plug_device(q);
-
 	elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-		     int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__elv_add_request(q, rq, where, plug);
+	__elv_add_request(q, rq, where);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (!list_empty(&q->queue_head))
-		return 0;
-
-	if (e->ops->elevator_queue_empty_fn)
-		return e->ops->elevator_queue_empty_fn(q);
-
-	return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
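
One caller-facing consequence visible in this diff: both elv_add_request() variants drop their plug flag, and elv_queue_empty() is removed. The call site below is hypothetical and shown only to illustrate the interface change, not taken from this commit.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Hypothetical call site, illustrating the signature change. */
static void queue_rq_example(struct request_queue *q, struct request *rq)
{
	/*
	 * Before this commit the caller chose whether to plug the queue:
	 *     elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	 *
	 * Afterwards, plugging lives in the submitting task's on-stack
	 * struct blk_plug, so the flag is gone from the interface:
	 */
	elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
}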