 block/blk-core.c |  95 ++++++++++++++
 block/blk.h      |  37 ++++++
 block/elevator.c | 128 --------------------
 3 files changed, 132 insertions(+), 128 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 406a93e526b6..678ede23ed0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1712,6 +1712,101 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
+struct request *elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+	int ret;
+
+	while ((rq = __elv_next_request(q)) != NULL) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
+			/*
+			 * This is the first time the device driver
+			 * sees this request (possibly after
+			 * requeueing). Notify IO scheduler.
+			 */
+			if (blk_sorted_rq(rq))
+				elv_activate_rq(q, rq);
+
+			/*
+			 * just mark as started even if we don't start
+			 * it, a request that has been delayed should
+			 * not be passed by new incoming requests
+			 */
+			rq->cmd_flags |= REQ_STARTED;
+			trace_block_rq_issue(q, rq);
+		}
+
+		if (!q->boundary_rq || q->boundary_rq == rq) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = NULL;
+		}
+
+		if (rq->cmd_flags & REQ_DONTPREP)
+			break;
+
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * make sure space for the drain appears we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+		}
+
+		if (!q->prep_rq_fn)
+			break;
+
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
+			break;
+		} else if (ret == BLKPREP_DEFER) {
+			/*
+			 * the request may have been (partially) prepped.
+			 * we need to keep this request in the front to
+			 * avoid resource deadlock. REQ_STARTED will
+			 * prevent other fs requests from passing this one.
+			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+			}
+
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			rq->cmd_flags |= REQ_QUIET;
+			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+		} else {
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+			break;
+		}
+	}
+
+	return rq;
+}
+EXPORT_SYMBOL(elv_next_request);
+
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
+{
+	BUG_ON(list_empty(&rq->queuelist));
+	BUG_ON(ELV_ON_HASH(rq));
+
+	list_del_init(&rq->queuelist);
+
+	/*
+	 * the time frame between a request being removed from the lists
+	 * and to it is freed is accounted as io that is in progress at
+	 * the driver side.
+	 */
+	if (blk_account_rq(rq))
+		q->in_flight++;
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
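
[Usage note, not part of the patch: elv_next_request() and elv_dequeue_request() are the request_fn-era driver interface being relocated above. A minimal driver-side sketch is shown below; the driver name, the error policy for non-fs requests, and the immediate completion are assumptions for illustration only, and real drivers also deal with partial completions and requeueing.]

	#include <linux/blkdev.h>

	/* Hypothetical request handler; request_fn is invoked with the queue lock held. */
	static void mydev_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = elv_next_request(q)) != NULL) {
			if (!blk_fs_request(rq)) {
				/* not a filesystem request: fail it in this simple sketch */
				__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
				continue;
			}

			/* take the request off the queue before handing it to the hardware */
			elv_dequeue_request(q, rq);

			/* ... program the transfer described by rq here ... */

			__blk_end_request(rq, 0, blk_rq_bytes(rq));
		}
	}

The BLKPREP_DEFER handling above is why such a loop may see the same request again on the next invocation: a deferred request stays at the head of the queue and keeps REQ_STARTED so newer requests cannot pass it.
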
diff --git a/block/blk.h b/block/blk.h
index 79c85f7c9ff5..9b2c324e4407 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -43,6 +43,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
 	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 }
 
+/*
+ * Internal elevator interface
+ */
+#define ELV_ON_HASH(rq)	(!hlist_unhashed(&(rq)->hash))
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+
+	while (1) {
+		while (!list_empty(&q->queue_head)) {
+			rq = list_entry_rq(q->queue_head.next);
+			if (blk_do_ordered(q, &rq))
+				return rq;
+		}
+
+		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+			return NULL;
+	}
+}
+
+static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e->ops->elevator_activate_req_fn)
+		e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e->ops->elevator_deactivate_req_fn)
+		e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
 #ifdef CONFIG_FAIL_IO_TIMEOUT
 int blk_should_fake_timeout(struct request_queue *);
 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
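
[Usage note, not part of the patch: elv_activate_rq() and elv_deactivate_rq() simply forward to optional elevator_ops hooks; the former is called from elv_next_request() when a sorted request is first handed to the driver, the latter when such a request is requeued. The sketch below shows how a scheduler could wire up those two hooks; the names and empty callbacks are invented for illustration, and a real scheduler (see cfq-iosched.c) also fills in dispatch, add, merge and init/exit operations.]

	#include <linux/module.h>
	#include <linux/blkdev.h>
	#include <linux/elevator.h>

	/* Invoked via elv_activate_rq() when the driver starts this request. */
	static void example_activate_req(struct request_queue *q, struct request *rq)
	{
		/* e.g. account the request as now being owned by the driver */
	}

	/* Invoked via elv_deactivate_rq() when the driver requeues a started request. */
	static void example_deactivate_req(struct request_queue *q, struct request *rq)
	{
		/* undo whatever the activate hook accounted */
	}

	static struct elevator_type iosched_example = {
		.ops = {
			.elevator_activate_req_fn	= example_activate_req,
			.elevator_deactivate_req_fn	= example_deactivate_req,
		},
		.elevator_name	= "example",
		.elevator_owner	= THIS_MODULE,
	};
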
diff --git a/block/elevator.c b/block/elevator.c
index 2e0fb21485b7..b03b8752e18b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -53,7 +53,6 @@ static const int elv_hash_shift = 6;
 	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
 #define rq_hash_key(rq)	((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq)	(!hlist_unhashed(&(rq)->hash))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
@@ -310,22 +309,6 @@ void elevator_exit(struct elevator_queue *e)
 }
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hlist_del_init(&rq->hash);
@@ -758,117 +741,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
 }
 EXPORT_SYMBOL(elv_add_request);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-
-	while (1) {
-		while (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
-		}
-
-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	int ret;
-
-	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->cmd_flags & REQ_STARTED)) {
-			/*
-			 * This is the first time the device driver
-			 * sees this request (possibly after
-			 * requeueing). Notify IO scheduler.
-			 */
-			if (blk_sorted_rq(rq))
-				elv_activate_rq(q, rq);
-
-			/*
-			 * just mark as started even if we don't start
-			 * it, a request that has been delayed should
-			 * not be passed by new incoming requests
-			 */
-			rq->cmd_flags |= REQ_STARTED;
-			trace_block_rq_issue(q, rq);
-		}
-
-		if (!q->boundary_rq || q->boundary_rq == rq) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = NULL;
-		}
-
-		if (rq->cmd_flags & REQ_DONTPREP)
-			break;
-
-		if (q->dma_drain_size && rq->data_len) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		if (!q->prep_rq_fn)
-			break;
-
-		ret = q->prep_rq_fn(q, rq);
-		if (ret == BLKPREP_OK) {
-			break;
-		} else if (ret == BLKPREP_DEFER) {
-			/*
-			 * the request may have been (partially) prepped.
-			 * we need to keep this request in the front to
-			 * avoid resource deadlock. REQ_STARTED will
-			 * prevent other fs requests from passing this one.
-			 */
-			if (q->dma_drain_size && rq->data_len &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
-				/*
-				 * remove the space for the drain we added
-				 * so that we don't add it again
-				 */
-				--rq->nr_phys_segments;
-			}
-
-			rq = NULL;
-			break;
-		} else if (ret == BLKPREP_KILL) {
-			rq->cmd_flags |= REQ_QUIET;
-			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
-		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
-			break;
-		}
-	}
-
-	return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
-	BUG_ON(list_empty(&rq->queuelist));
-	BUG_ON(ELV_ON_HASH(rq));
-
-	list_del_init(&rq->queuelist);
-
-	/*
-	 * the time frame between a request being removed from the lists
-	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side.
-	 */
-	if (blk_account_rq(rq))
-		q->in_flight++;
-}
-
 int elv_queue_empty(struct request_queue *q)
 {
 	struct elevator_queue *e = q->elevator;