Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c	128
1 file changed, 0 insertions, 128 deletions
diff --git a/block/elevator.c b/block/elevator.c
index 2e0fb21485b7..b03b8752e18b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -53,7 +53,6 @@ static const int elv_hash_shift = 6;
 		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
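
The ELV_ON_HASH() test removed above encodes an invariant rather than extra state: a request counts as "on the merge hash" exactly when its rq->hash node is linked, which only works because the hash is always emptied with hlist_del_init() (see __elv_rqhash_del() below), never plain hlist_del(). The macro is deleted here, but elv_dequeue_request() (removed further down) still needs the check wherever it lands, so it presumably moves to a private block-layer header. A minimal userspace sketch of why hlist_del_init() pairs with hlist_unhashed() follows; the hlist types and helpers are simplified stand-ins for the kernel's list.h, not the real thing:

#include <assert.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

/* kernel convention: a NULL pprev marks a node that is on no list */
static int hlist_unhashed(const struct hlist_node *n)
{
	return !n->pprev;
}

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* unlink and re-poison, so a later hlist_unhashed() reads true again */
static void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		*n->pprev = n->next;
		if (n->next)
			n->next->pprev = n->pprev;
		n->next = NULL;
		n->pprev = NULL;
	}
}

int main(void)
{
	struct hlist_head hash = { NULL };
	struct hlist_node rq_hash = { NULL, NULL };

	hlist_add_head(&rq_hash, &hash);
	assert(!hlist_unhashed(&rq_hash));	/* ELV_ON_HASH() would be true */

	hlist_del_init(&rq_hash);
	assert(hlist_unhashed(&rq_hash));	/* and now false again */
	return 0;
}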
@@ -310,22 +309,6 @@ void elevator_exit(struct elevator_queue *e)
 }
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hlist_del_init(&rq->hash);
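
elv_activate_rq() and elv_deactivate_rq() above are thin guards around optional elevator_ops callbacks: a scheduler that doesn't care about (de)activation simply leaves the function pointer NULL. They are deleted with no replacement in this file, and since the request-fetching code removed below still calls elv_activate_rq(), the wrappers presumably migrate along with it (e.g. into blk-core.c) rather than disappear. A hedged, self-contained sketch of the optional-hook pattern itself; every name below is made up for illustration:

#include <stdio.h>

struct sched_ops {
	/* optional: may be NULL if the scheduler has no activation hook */
	void (*activate_req_fn)(int tag);
};

static void cfq_like_activate(int tag)
{
	printf("activate request %d\n", tag);
}

/* the wrapper's whole job: tolerate a missing hook */
static void activate_rq(const struct sched_ops *ops, int tag)
{
	if (ops->activate_req_fn)
		ops->activate_req_fn(tag);
}

int main(void)
{
	struct sched_ops cfq_like = { cfq_like_activate };
	struct sched_ops noop_like = { NULL };

	activate_rq(&cfq_like, 1);	/* calls the hook */
	activate_rq(&noop_like, 2);	/* safely does nothing */
	return 0;
}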
@@ -758,117 +741,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
 }
 EXPORT_SYMBOL(elv_add_request);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-
-	while (1) {
-		while (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
-		}
-
-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	int ret;
-
-	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->cmd_flags & REQ_STARTED)) {
-			/*
-			 * This is the first time the device driver
-			 * sees this request (possibly after
-			 * requeueing).  Notify IO scheduler.
-			 */
-			if (blk_sorted_rq(rq))
-				elv_activate_rq(q, rq);
-
-			/*
-			 * just mark it as started even if we don't start
-			 * it: a request that has been delayed must not
-			 * be passed by new incoming requests
-			 */
-			rq->cmd_flags |= REQ_STARTED;
-			trace_block_rq_issue(q, rq);
-		}
-
-		if (!q->boundary_rq || q->boundary_rq == rq) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = NULL;
-		}
-
-		if (rq->cmd_flags & REQ_DONTPREP)
-			break;
-
-		if (q->dma_drain_size && rq->data_len) {
-			/*
-			 * make sure space for the drain appears; we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		if (!q->prep_rq_fn)
-			break;
-
-		ret = q->prep_rq_fn(q, rq);
-		if (ret == BLKPREP_OK) {
-			break;
-		} else if (ret == BLKPREP_DEFER) {
-			/*
-			 * the request may have been (partially) prepped.
-			 * we need to keep this request at the front to
-			 * avoid resource deadlock.  REQ_STARTED will
-			 * prevent other fs requests from passing this one.
-			 */
-			if (q->dma_drain_size && rq->data_len &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
-				/*
-				 * remove the space for the drain we added
-				 * so that we don't add it again
-				 */
-				--rq->nr_phys_segments;
-			}
-
-			rq = NULL;
-			break;
-		} else if (ret == BLKPREP_KILL) {
-			rq->cmd_flags |= REQ_QUIET;
-			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
-		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
-			break;
-		}
-	}
-
-	return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
-	BUG_ON(list_empty(&rq->queuelist));
-	BUG_ON(ELV_ON_HASH(rq));
-
-	list_del_init(&rq->queuelist);
-
-	/*
-	 * the time frame between a request being removed from the lists
-	 * and it being freed is accounted as io in progress at the
-	 * driver side.
-	 */
-	if (blk_account_rq(rq))
-		q->in_flight++;
-}
-
 int elv_queue_empty(struct request_queue *q)
 {
 	struct elevator_queue *e = q->elevator;
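
The bulk of this removal is the request-fetching path. __elv_next_request() is a refill loop: hand out the head of q->queue_head if the ordered-request logic (blk_do_ordered()) allows it, otherwise ask the io scheduler's dispatch_fn to push more requests onto the queue, and give up when it has nothing left. elv_next_request() layers per-request bookkeeping on top: mark the request REQ_STARTED exactly once, maintain the boundary/end_sector hints, reserve a DMA-drain segment when needed, then run the driver's prep_rq_fn and act on its BLKPREP_* verdict. Nothing replaces these exported functions in this file, so the whole path presumably moves into the core block code. A compact userspace model of the verdict handling follows; struct req, fetch_request, and the flag fields are simplified stand-ins, and BLKPREP_KILL is modeled as merely failing the request rather than completing it with -EIO and looping as the real code does:

#include <stdio.h>
#include <stddef.h>

enum blkprep { BLKPREP_OK, BLKPREP_DEFER, BLKPREP_KILL };

struct req {
	int started;	/* REQ_STARTED: set once, never cleared here */
	int dontprep;	/* REQ_DONTPREP: prep already done on an earlier pass */
	int nr_segs;	/* physical segment count seen by the driver */
	int failed;
};

typedef enum blkprep (*prep_fn)(struct req *rq);

/* returns rq when it is ready for the driver, NULL to back off */
static struct req *fetch_request(struct req *rq, prep_fn prep, int need_drain)
{
	/* started even if deferred: later requests must not pass this one */
	rq->started = 1;

	if (rq->dontprep)
		return rq;

	if (need_drain)
		rq->nr_segs++;		/* reserve room for the drain buffer */

	switch (prep(rq)) {
	case BLKPREP_OK:
		rq->dontprep = 1;	/* in the kernel the driver's prep sets this */
		return rq;
	case BLKPREP_DEFER:
		/* out of resources: keep rq at the front, retry later ... */
		if (need_drain)
			rq->nr_segs--;	/* ... without the drain segment doubling up */
		return NULL;
	case BLKPREP_KILL:
	default:
		rq->failed = 1;		/* real code ends the request with -EIO */
		return NULL;
	}
}

static enum blkprep always_defer(struct req *rq)
{
	(void)rq;
	return BLKPREP_DEFER;
}

int main(void)
{
	struct req rq = { 0, 0, 1, 0 };

	if (!fetch_request(&rq, always_defer, 1))
		printf("deferred: started=%d segs=%d\n", rq.started, rq.nr_segs);
	return 0;
}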
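
elv_dequeue_request(), also removed, is pure bookkeeping: it asserts the request is queued but no longer on the merge hash, unlinks it, and charges the window between dequeue and completion to the driver by bumping q->in_flight. A minimal sketch of that accounting idea, with hypothetical names:

#include <assert.h>

struct io_queue { int in_flight; };

/* dequeue starts the request's "at the driver" accounting window */
static void dequeue_rq(struct io_queue *q)
{
	q->in_flight++;
}

/* completion (freeing) ends it */
static void end_rq(struct io_queue *q)
{
	q->in_flight--;
}

int main(void)
{
	struct io_queue q = { 0 };

	dequeue_rq(&q);
	assert(q.in_flight == 1);	/* one request at the driver */
	end_rq(&q);
	assert(q.in_flight == 0);
	return 0;
}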