author		Tejun Heo <tj@kernel.org>	2009-04-22 22:05:18 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-28 01:37:34 -0400
commit		158dbda0068e63c7cce7bd47c123bd1dfa5a902c (patch)
tree		2665f31350ba4f0875c7611c980b0831c22d8c98 /block/elevator.c
parent		5efccd17ceb0fc43837a331297c2c407969d7201 (diff)
block: reorganize request fetching functions
Impact: code reorganization
elv_next_request() and elv_dequeue_request() are public block layer
interfaces rather than part of the actual elevator implementation. They
mostly deal with how requests interact with the block layer and low
level drivers at the beginning of request processing, whereas
__elv_next_request() is the actual elevator request fetching interface.
Move the two functions to blk-core.c. This prepares for further
interface cleanup.
Signed-off-by: Tejun Heo <tj@kernel.org>
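
For context, this is roughly how a low level driver of this era consumes the two public interfaces being moved. It is only a minimal sketch: the function names sketch_request_fn and sketch_xfer are hypothetical stand-ins, not code from this patch; blkdev_dequeue_request() is the inline wrapper around elv_dequeue_request().

#include <linux/blkdev.h>

/* hypothetical stand-in for the driver's actual data transfer */
static void sketch_xfer(sector_t sector, unsigned long nr_sectors,
			char *buf, int write);

/* hypothetical request_fn; the block layer calls it with q->queue_lock held */
static void sketch_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		/* fail anything that is not a regular fs request */
		if (!blk_fs_request(rq)) {
			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
			continue;
		}

		/* take the request off q->queue_head before issuing it */
		blkdev_dequeue_request(rq);	/* wraps elv_dequeue_request() */

		/* hypothetical transfer of the whole request */
		sketch_xfer(rq->sector, rq->nr_sectors, rq->buffer,
			    rq_data_dir(rq));

		__blk_end_request(rq, 0, blk_rq_bytes(rq));
	}
}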
Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c	128
1 file changed, 0 insertions, 128 deletions
diff --git a/block/elevator.c b/block/elevator.c
index 2e0fb21485b7..b03b8752e18b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -53,7 +53,6 @@ static const int elv_hash_shift = 6;
 		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
@@ -310,22 +309,6 @@ void elevator_exit(struct elevator_queue *e)
 }
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hlist_del_init(&rq->hash);
@@ -758,117 +741,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
 }
 EXPORT_SYMBOL(elv_add_request);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-
-	while (1) {
-		while (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
-		}
-
-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	int ret;
-
-	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->cmd_flags & REQ_STARTED)) {
-			/*
-			 * This is the first time the device driver
-			 * sees this request (possibly after
-			 * requeueing).  Notify IO scheduler.
-			 */
-			if (blk_sorted_rq(rq))
-				elv_activate_rq(q, rq);
-
-			/*
-			 * just mark as started even if we don't start
-			 * it, a request that has been delayed should
-			 * not be passed by new incoming requests
-			 */
-			rq->cmd_flags |= REQ_STARTED;
-			trace_block_rq_issue(q, rq);
-		}
-
-		if (!q->boundary_rq || q->boundary_rq == rq) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = NULL;
-		}
-
-		if (rq->cmd_flags & REQ_DONTPREP)
-			break;
-
-		if (q->dma_drain_size && rq->data_len) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		if (!q->prep_rq_fn)
-			break;
-
-		ret = q->prep_rq_fn(q, rq);
-		if (ret == BLKPREP_OK) {
-			break;
-		} else if (ret == BLKPREP_DEFER) {
-			/*
-			 * the request may have been (partially) prepped.
-			 * we need to keep this request in the front to
-			 * avoid resource deadlock.  REQ_STARTED will
-			 * prevent other fs requests from passing this one.
-			 */
-			if (q->dma_drain_size && rq->data_len &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
-				/*
-				 * remove the space for the drain we added
-				 * so that we don't add it again
-				 */
-				--rq->nr_phys_segments;
-			}
-
-			rq = NULL;
-			break;
-		} else if (ret == BLKPREP_KILL) {
-			rq->cmd_flags |= REQ_QUIET;
-			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
-		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
-			break;
-		}
-	}
-
-	return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
-	BUG_ON(list_empty(&rq->queuelist));
-	BUG_ON(ELV_ON_HASH(rq));
-
-	list_del_init(&rq->queuelist);
-
-	/*
-	 * the time frame between a request being removed from the lists
-	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side.
-	 */
-	if (blk_account_rq(rq))
-		q->in_flight++;
-}
-
 int elv_queue_empty(struct request_queue *q)
 {
 	struct elevator_queue *e = q->elevator;
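
For contrast, the elevator-private side that __elv_next_request() falls back to when q->queue_head is empty is the elevator_dispatch_fn hook. A noop-style dispatch looks roughly like the sketch below; struct sketch_data, its FIFO and the function name are hypothetical stand-ins for an elevator's private data, not code from this patch.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* hypothetical per-queue elevator data: a plain FIFO of requests */
struct sketch_data {
	struct list_head queue;
};

/* elevator_dispatch_fn: move one pending request onto q->queue_head */
static int sketch_dispatch(struct request_queue *q, int force)
{
	struct sketch_data *sd = q->elevator->elevator_data;

	if (!list_empty(&sd->queue)) {
		struct request *rq;

		rq = list_entry(sd->queue.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		elv_dispatch_sort(q, rq);	/* places rq on q->queue_head */
		return 1;
	}
	return 0;
}

Once a dispatch function returns 1, the inner loop of __elv_next_request() above finds the request on q->queue_head and elv_next_request() can hand it to the driver.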