author    Tejun Heo <tj@kernel.org>  2009-04-22 22:05:18 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2009-04-28 01:37:35 -0400
commit    2e60e02297cf54e367567f2d85b2ca56b1c4a906 (patch)
tree      ea824afcaff58be894799a011f74d80c3560f372  /block/blk-core.c
parent    0b302d5aa7975006fa2ec3d66386610b9b36c669 (diff)
block: clean up request completion API
Request completion has gone through several changes and became a bit
messy over the time.  Clean it up.

1. end_that_request_data() is a thin wrapper around
   __end_that_request_first() which checks whether bio is NULL before
   doing anything and handles bidi completion.  blk_update_request()
   is a thin wrapper around end_that_request_data() which clears
   nr_sectors on the last iteration but doesn't use the bidi
   completion.

   Clean it up by moving the initial bio NULL check and nr_sectors
   clearing on the last iteration into __end_that_request_first() and
   renaming it to blk_update_request(), which makes blk_end_io() the
   only user of end_that_request_data().  Collapse
   end_that_request_data() into blk_end_io().

2. There are four visible completion variants - blk_end_request(),
   __blk_end_request(), blk_end_bidi_request() and end_request().
   blk_end_request() and blk_end_bidi_request() use blk_end_io() as
   the backend, but __blk_end_request() and end_request() use a
   separate implementation in __blk_end_request() due to different
   locking rules.  blk_end_bidi_request() is identical to
   blk_end_io().

   Collapse blk_end_io() into blk_end_bidi_request(), separate out
   request update into the internal helper blk_update_bidi_request()
   and add __blk_end_bidi_request().  Redefine [__]blk_end_request()
   as thin inline wrappers around [__]blk_end_bidi_request(), as
   sketched below.

3. As the whole request issue/completion usages are about to be
   modified and audited, it's a good chance to convert the completion
   functions to return bool, which better indicates the intended
   meaning of the return values.

4. The function name end_that_request_last() is from the days when it
   was a public interface and is slightly confusing.  Give it a proper
   internal name - blk_finish_request().

5. Add a description explaining that blk_end_bidi_request() can be
   safely used for uni requests, as suggested by Boaz Harrosh.

The only visible behavior change is from #1: nr_sectors counts are
cleared after the final iteration no matter which function is used to
complete the request.  I couldn't find any place where the code
assumes those nr_sectors counters contain the values for the last
segment, and this change is good as it makes the API much more
consistent - the end result is now the same whether a request is
completed using [__]blk_end_request() alone or in combination with
blk_update_request().

API further cleaned up per Christoph's suggestion.

[ Impact: cleanup, rq->*nr_sectors always updated after req completion ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Boaz Harrosh <bharrosh@panasas.com>
Cc: Christoph Hellwig <hch@infradead.org>
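The thin wrappers from point #2 live in include/linux/blkdev.h, which is not
part of the blk-core.c diff shown on this page.  A minimal sketch of how they
can be defined, assuming the signatures visible in the diff below (treat the
exact blkdev.h form as illustrative, not a quote of the commit):

static inline bool blk_end_request(struct request *rq, int error,
                                   unsigned int nr_bytes)
{
        /* uni request: no bidi bytes to complete */
        return blk_end_bidi_request(rq, error, nr_bytes, 0);
}

static inline bool __blk_end_request(struct request *rq, int error,
                                     unsigned int nr_bytes)
{
        /* caller already holds the queue lock */
        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}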
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  226
1 file changed, 75 insertions(+), 151 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f277ea0e599..89cc05d9a7a9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1808,25 +1808,35 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
 }
 
 /**
- * __end_that_request_first - end I/O on a request
- * @req:      the request being processed
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq:       the request being processed
  * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * @nr_bytes: number of bytes to complete @rq
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq doesn't have leftover.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ *
+ *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ *     %false return from this function.
 *
 * Return:
- *     %0 - we are done with this request, call end_that_request_last()
- *     %1 - still buffers pending for this request
+ *     %false - this request doesn't have any more data
+ *     %true  - this request has more data
 **/
-static int __end_that_request_first(struct request *req, int error,
-                                    int nr_bytes)
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 {
         int total_bytes, bio_nbytes, next_idx = 0;
         struct bio *bio;
 
+        if (!req->bio)
+                return false;
+
         trace_block_rq_complete(req->q, req);
 
         /*
@@ -1903,8 +1913,16 @@ static int __end_that_request_first(struct request *req, int error,
         /*
          * completely done
          */
-        if (!req->bio)
-                return 0;
+        if (!req->bio) {
+                /*
+                 * Reset counters so that the request stacking driver
+                 * can find how many bytes remain in the request
+                 * later.
+                 */
+                req->nr_sectors = req->hard_nr_sectors = 0;
+                req->current_nr_sectors = req->hard_cur_sectors = 0;
+                return false;
+        }
 
         /*
          * if the request wasn't completed, update state
@@ -1918,29 +1936,31 @@ static int __end_that_request_first(struct request *req, int error,
 
         blk_recalc_rq_sectors(req, total_bytes >> 9);
         blk_recalc_rq_segments(req);
-        return 1;
+        return true;
 }
+EXPORT_SYMBOL_GPL(blk_update_request);
 
-static int end_that_request_data(struct request *rq, int error,
-                                 unsigned int nr_bytes, unsigned int bidi_bytes)
+static bool blk_update_bidi_request(struct request *rq, int error,
+                                    unsigned int nr_bytes,
+                                    unsigned int bidi_bytes)
 {
-        if (rq->bio) {
-                if (__end_that_request_first(rq, error, nr_bytes))
-                        return 1;
+        if (blk_update_request(rq, error, nr_bytes))
+                return true;
 
-                /* Bidi request must be completed as a whole */
-                if (blk_bidi_rq(rq) &&
-                    __end_that_request_first(rq->next_rq, error, bidi_bytes))
-                        return 1;
-        }
+        /* Bidi request must be completed as a whole */
+        if (unlikely(blk_bidi_rq(rq)) &&
+            blk_update_request(rq->next_rq, error, bidi_bytes))
+                return true;
 
-        return 0;
+        add_disk_randomness(rq->rq_disk);
+
+        return false;
 }
 
 /*
  * queue lock must be held
  */
-static void end_that_request_last(struct request *req, int error)
+static void blk_finish_request(struct request *req, int error)
 {
         if (blk_rq_tagged(req))
                 blk_queue_end_tag(req->q, req);
@@ -1966,161 +1986,65 @@ static void end_that_request_last(struct request *req, int error)
 }
 
 /**
- * blk_end_io - Generic end_io function to complete a request.
- * @rq:         the request being processed
+ * blk_end_bidi_request - Complete a bidi request
+ * @rq:         the request to complete
  * @error:      %0 for success, < %0 for error
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- *     If @rq has leftover, sets it up for the next range of segments.
+ *     Drivers that supports bidi can safely call this member for any
+ *     type of request, bidi or uni.  In the later case @bidi_bytes is
+ *     just ignored.
 *
 * Return:
- *     %0 - we are done with this request
- *     %1 - this request is not freed yet, it still has pending buffers.
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
 **/
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
-                      unsigned int bidi_bytes)
+bool blk_end_bidi_request(struct request *rq, int error,
+                          unsigned int nr_bytes, unsigned int bidi_bytes)
 {
         struct request_queue *q = rq->q;
-        unsigned long flags = 0UL;
-
-        if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
-                return 1;
+        unsigned long flags;
 
-        add_disk_randomness(rq->rq_disk);
+        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+                return true;
 
         spin_lock_irqsave(q->queue_lock, flags);
-        end_that_request_last(rq, error);
+        blk_finish_request(rq, error);
         spin_unlock_irqrestore(q->queue_lock, flags);
 
-        return 0;
+        return false;
 }
+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq.
- *     If @rq has leftover, sets it up for the next range of segments.
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
+ * @rq:         the request to complete
+ * @error:      %0 for success, < %0 for error
+ * @nr_bytes:   number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
+ *
+ * Description:
+ *     Identical to blk_end_bidi_request() except that queue lock is
+ *     assumed to be locked on entry and remains so on return.
 *
 * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
 **/
-int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_bidi_request(struct request *rq, int error,
+                            unsigned int nr_bytes, unsigned int bidi_bytes)
 {
-        return blk_end_io(rq, error, nr_bytes, 0);
+        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+                return true;
+
+        blk_finish_request(rq, error);
+
+        return false;
 }
-EXPORT_SYMBOL_GPL(blk_end_request);
+EXPORT_SYMBOL_GPL(__blk_end_bidi_request);
 
-/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-        if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
-                return 1;
-
-        add_disk_randomness(rq->rq_disk);
-
-        end_that_request_last(rq, error);
-
-        return 0;
-}
-EXPORT_SYMBOL_GPL(__blk_end_request);
-
-/**
- * blk_end_bidi_request - Helper function for drivers to complete bidi request.
- * @rq:         the bidi request being processed
- * @error:      %0 for success, < %0 for error
- * @nr_bytes:   number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
-                         unsigned int bidi_bytes)
-{
-        return blk_end_io(rq, error, nr_bytes, bidi_bytes);
-}
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
-
-/**
- * end_request - end I/O on the current segment of the request
- * @req:      the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends I/O on the current segment of a request. If that is the only
- *     remaining segment, the request is also completed and freed.
- *
- *     This is a remnant of how older block drivers handled I/O completions.
- *     Modern drivers typically end I/O on the full request in one go, unless
- *     they have a residual value to account for. For that case this function
- *     isn't really useful, unless the residual just happens to be the
- *     full current segment. In other words, don't use this function in new
- *     code. Use blk_end_request() or __blk_end_request() to end a request.
- **/
-void end_request(struct request *req, int uptodate)
-{
-        int error = 0;
-
-        if (uptodate <= 0)
-                error = uptodate ? uptodate : -EIO;
-
-        __blk_end_request(req, error, req->hard_cur_sectors << 9);
-}
-EXPORT_SYMBOL(end_request);
-
-/**
- * blk_update_request - Special helper function for request stacking drivers
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
- *     the request structure even if @rq doesn't have leftover.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- *     This special helper function is only for request stacking drivers
- *     (e.g. request-based dm) so that they can handle partial completion.
- *     Actual device drivers should use blk_end_request instead.
- */
-void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-        if (!end_that_request_data(rq, error, nr_bytes, 0)) {
-                /*
-                 * These members are not updated in end_that_request_data()
-                 * when all bios are completed.
-                 * Update them so that the request stacking driver can find
-                 * how many bytes remain in the request later.
-                 */
-                rq->nr_sectors = rq->hard_nr_sectors = 0;
-                rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-        }
-}
-EXPORT_SYMBOL_GPL(blk_update_request);
-
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                      struct bio *bio)
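To illustrate the reworked API from the caller's side, here is a hedged usage
sketch.  The driver functions (mydrv_finish_io, mystk_update_clone) are
hypothetical, not part of this commit; the block-layer calls they make
(blk_end_request(), blk_update_request(), blk_rq_bytes()) are the ones this
patch documents:

/* Hypothetical completion path in an ordinary device driver. */
static void mydrv_finish_io(struct request *rq, int error)
{
        /* Complete every byte of @rq; bool replaces the old 0/1 return. */
        if (blk_end_request(rq, error, blk_rq_bytes(rq)))
                return; /* not reached: blk_rq_bytes() leaves no leftover */
}

/* Hypothetical stacking-driver path handling partial completion. */
static void mystk_update_clone(struct request *orig, int error,
                               unsigned int nr_bytes)
{
        /*
         * Update @orig without finishing it.  A true return means the
         * request has leftover and is set up for the next range of
         * segments; false means all bios are done and rq->*nr_sectors
         * have been zeroed so the remaining byte count is visible.
         */
        if (blk_update_request(orig, error, nr_bytes))
                return;

        /* fully updated; actual finishing happens elsewhere */
}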