author	Tejun Heo <tj@kernel.org>	2009-04-22 22:05:18 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-28 01:37:34 -0400
commit	158dbda0068e63c7cce7bd47c123bd1dfa5a902c (patch)
tree	2665f31350ba4f0875c7611c980b0831c22d8c98 /block/blk-core.c
parent	5efccd17ceb0fc43837a331297c2c407969d7201 (diff)
block: reorganize request fetching functions
Impact: code reorganization

elv_next_request() and elv_dequeue_request() are public block layer
interfaces rather than actual elevator implementation. They mostly deal
with how requests interact with the block layer and low level drivers at
the beginning of request processing, whereas __elv_next_request() is the
actual elevator request fetching interface.

Move the two functions to blk-core.c. This prepares for further
interface cleanup.

Signed-off-by: Tejun Heo <tj@kernel.org>
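For context (not part of the patch), this is roughly how a low level driver of
that era consumes requests from its request_fn using the two interfaces being
moved. It is a minimal sketch only: my_request_fn() and my_transfer() are
hypothetical names, and the queue is assumed to have been set up with
blk_init_queue(), so the queue lock is held when request_fn runs.

#include <linux/blkdev.h>

/* Hypothetical stand-in for programming the hardware; a real driver would
 * issue the I/O here and complete the request later, e.g. from its
 * interrupt handler via blk_end_request(). */
static void my_transfer(struct request *rq)
{
}

/* Called with the queue lock held (blk_init_queue() setup assumed). */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* elv_next_request() only peeks: it marks the request started and
	 * runs ->prep_rq_fn, but leaves it on the queue. */
	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			/* not a filesystem request: take it off the queue
			 * and fail it */
			elv_dequeue_request(q, rq);
			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
			continue;
		}

		/* dequeue before handing the request to hardware; this is
		 * where q->in_flight accounting starts */
		elv_dequeue_request(q, rq);
		my_transfer(rq);
	}
}

The split matters because elv_next_request() does not dequeue: a driver that
wants in-flight accounting has to remove the request itself with
elv_dequeue_request() before passing it to the hardware.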
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	95
1 file changed, 95 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 406a93e526b6..678ede23ed0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1712,6 +1712,101 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
+struct request *elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+	int ret;
+
+	while ((rq = __elv_next_request(q)) != NULL) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
+			/*
+			 * This is the first time the device driver
+			 * sees this request (possibly after
+			 * requeueing). Notify IO scheduler.
+			 */
+			if (blk_sorted_rq(rq))
+				elv_activate_rq(q, rq);
+
+			/*
+			 * just mark as started even if we don't start
+			 * it, a request that has been delayed should
+			 * not be passed by new incoming requests
+			 */
+			rq->cmd_flags |= REQ_STARTED;
+			trace_block_rq_issue(q, rq);
+		}
+
+		if (!q->boundary_rq || q->boundary_rq == rq) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = NULL;
+		}
+
+		if (rq->cmd_flags & REQ_DONTPREP)
+			break;
+
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * make sure space for the drain appears we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+		}
+
+		if (!q->prep_rq_fn)
+			break;
+
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
+			break;
+		} else if (ret == BLKPREP_DEFER) {
+			/*
+			 * the request may have been (partially) prepped.
+			 * we need to keep this request in the front to
+			 * avoid resource deadlock. REQ_STARTED will
+			 * prevent other fs requests from passing this one.
+			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+			}
+
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			rq->cmd_flags |= REQ_QUIET;
+			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+		} else {
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+			break;
+		}
+	}
+
+	return rq;
+}
+EXPORT_SYMBOL(elv_next_request);
+
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
+{
+	BUG_ON(list_empty(&rq->queuelist));
+	BUG_ON(ELV_ON_HASH(rq));
+
+	list_del_init(&rq->queuelist);
+
+	/*
+	 * the time frame between a request being removed from the lists
+	 * and to it is freed is accounted as io that is in progress at
+	 * the driver side.
+	 */
+	if (blk_account_rq(rq))
+		q->in_flight++;
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed