Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	226
1 file changed, 75 insertions(+), 151 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f277ea0e599..89cc05d9a7a9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1808,25 +1808,35 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
 }
 
 /**
- * __end_that_request_first - end I/O on a request
- * @req: the request being processed
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq: the request being processed
  * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * @nr_bytes: number of bytes to complete @rq
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq doesn't have leftover.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ *
+ *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ *     %false return from this function.
  *
  * Return:
- *     %0 - we are done with this request, call end_that_request_last()
- *     %1 - still buffers pending for this request
+ *     %false - this request doesn't have any more data
+ *     %true  - this request has more data
  **/
-static int __end_that_request_first(struct request *req, int error,
-				    int nr_bytes)
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 {
 	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
 
+	if (!req->bio)
+		return false;
+
 	trace_block_rq_complete(req->q, req);
 
 	/*
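
The kernel-doc added above makes blk_update_request() the partial-completion
primitive for request stacking drivers. A minimal sketch of how such a driver's
completion path might use it, assuming this patch is applied; the function and
variable names below are illustrative, not part of the patch:

	/*
	 * Hypothetical stacking-driver hook: @bytes of the lower-level clone
	 * have finished, so propagate that much completion to the original
	 * request without freeing it.
	 */
	static void stacking_complete_partial(struct request *orig, int error,
					      unsigned int bytes)
	{
		if (blk_update_request(orig, error, bytes)) {
			/* @orig has leftover; it is set up for the rest */
			return;
		}
		/*
		 * All bios are done, but blk_update_request() never completes
		 * the request structure itself; the stacking driver finishes
		 * @orig later, e.g. via blk_end_request(), once it is ready.
		 */
	}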
@@ -1903,8 +1913,16 @@ static int __end_that_request_first(struct request *req, int error,
 	/*
 	 * completely done
 	 */
-	if (!req->bio)
-		return 0;
+	if (!req->bio) {
+		/*
+		 * Reset counters so that the request stacking driver
+		 * can find how many bytes remain in the request
+		 * later.
+		 */
+		req->nr_sectors = req->hard_nr_sectors = 0;
+		req->current_nr_sectors = req->hard_cur_sectors = 0;
+		return false;
+	}
 
 	/*
 	 * if the request wasn't completed, update state
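
The counter reset above is what lets a stacking driver ask how much of a
request remains after the update. Assuming, as the patch's kernel-doc implies,
that blk_rq_bytes() reports the bytes not yet completed, the read-back is a
one-liner (sketch only):

	/* after blk_update_request(rq, error, n) has returned: */
	unsigned int resid = blk_rq_bytes(rq);	/* 0 once fully completed */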
@@ -1918,29 +1936,31 @@ static int __end_that_request_first(struct request *req, int error,
 
 	blk_recalc_rq_sectors(req, total_bytes >> 9);
 	blk_recalc_rq_segments(req);
-	return 1;
+	return true;
 }
+EXPORT_SYMBOL_GPL(blk_update_request);
 
-static int end_that_request_data(struct request *rq, int error,
-				 unsigned int nr_bytes, unsigned int bidi_bytes)
+static bool blk_update_bidi_request(struct request *rq, int error,
+				    unsigned int nr_bytes,
+				    unsigned int bidi_bytes)
 {
-	if (rq->bio) {
-		if (__end_that_request_first(rq, error, nr_bytes))
-			return 1;
+	if (blk_update_request(rq, error, nr_bytes))
+		return true;
 
-		/* Bidi request must be completed as a whole */
-		if (blk_bidi_rq(rq) &&
-		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
-			return 1;
-	}
+	/* Bidi request must be completed as a whole */
+	if (unlikely(blk_bidi_rq(rq)) &&
+	    blk_update_request(rq->next_rq, error, bidi_bytes))
+		return true;
 
-	return 0;
+	add_disk_randomness(rq->rq_disk);
+
+	return false;
 }
 
 /*
  * queue lock must be held
  */
-static void end_that_request_last(struct request *req, int error)
+static void blk_finish_request(struct request *req, int error)
 {
 	if (blk_rq_tagged(req))
 		blk_queue_end_tag(req->q, req);
@@ -1966,161 +1986,65 @@ static void end_that_request_last(struct request *req, int error)
 }
 
 /**
- * blk_end_io - Generic end_io function to complete a request.
- * @rq: the request being processed
+ * blk_end_bidi_request - Complete a bidi request
+ * @rq: the request to complete
  * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
  * Description:
  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- *     If @rq has leftover, sets it up for the next range of segments.
+ *     Drivers that supports bidi can safely call this member for any
+ *     type of request, bidi or uni.  In the later case @bidi_bytes is
+ *     just ignored.
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - this request is not freed yet, it still has pending buffers.
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
-		      unsigned int bidi_bytes)
+bool blk_end_bidi_request(struct request *rq, int error,
+			  unsigned int nr_bytes, unsigned int bidi_bytes)
 {
 	struct request_queue *q = rq->q;
-	unsigned long flags = 0UL;
-
-	if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
-		return 1;
+	unsigned long flags;
 
-	add_disk_randomness(rq->rq_disk);
+	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+		return true;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	end_that_request_last(rq, error);
+	blk_finish_request(rq, error);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	return 0;
-}
-
-/**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-	return blk_end_io(rq, error, nr_bytes, 0);
-}
-EXPORT_SYMBOL_GPL(blk_end_request);
-
-/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-	if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
-		return 1;
-
-	add_disk_randomness(rq->rq_disk);
-
-	end_that_request_last(rq, error);
-
-	return 0;
+	return false;
 }
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
- * blk_end_bidi_request - Helper function for drivers to complete bidi request.
- * @rq: the bidi request being processed
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
+ * @rq: the request to complete
  * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *     Identical to blk_end_bidi_request() except that queue lock is
+ *     assumed to be locked on entry and remains so on return.
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
-			 unsigned int bidi_bytes)
-{
-	return blk_end_io(rq, error, nr_bytes, bidi_bytes);
-}
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
-
-/**
- * end_request - end I/O on the current segment of the request
- * @req: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends I/O on the current segment of a request. If that is the only
- *     remaining segment, the request is also completed and freed.
- *
- *     This is a remnant of how older block drivers handled I/O completions.
- *     Modern drivers typically end I/O on the full request in one go, unless
- *     they have a residual value to account for. For that case this function
- *     isn't really useful, unless the residual just happens to be the
- *     full current segment. In other words, don't use this function in new
- *     code. Use blk_end_request() or __blk_end_request() to end a request.
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-void end_request(struct request *req, int uptodate)
+bool __blk_end_bidi_request(struct request *rq, int error,
+			    unsigned int nr_bytes, unsigned int bidi_bytes)
 {
-	int error = 0;
-
-	if (uptodate <= 0)
-		error = uptodate ? uptodate : -EIO;
+	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+		return true;
 
-	__blk_end_request(req, error, req->hard_cur_sectors << 9);
-}
-EXPORT_SYMBOL(end_request);
-
-/**
- * blk_update_request - Special helper function for request stacking drivers
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
- *     the request structure even if @rq doesn't have leftover.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- *     This special helper function is only for request stacking drivers
- *     (e.g. request-based dm) so that they can handle partial completion.
- *     Actual device drivers should use blk_end_request instead.
- */
-void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-	if (!end_that_request_data(rq, error, nr_bytes, 0)) {
-		/*
-		 * These members are not updated in end_that_request_data()
-		 * when all bios are completed.
-		 * Update them so that the request stacking driver can find
-		 * how many bytes remain in the request later.
-		 */
-		rq->nr_sectors = rq->hard_nr_sectors = 0;
-		rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	}
+	blk_finish_request(rq, error);
+
+	return false;
 }
-EXPORT_SYMBOL_GPL(blk_update_request);
+EXPORT_SYMBOL_GPL(__blk_end_bidi_request);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
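
With blk_end_bidi_request() and __blk_end_bidi_request() exported, the
uni-directional helpers this patch deletes from blk-core.c reduce to trivial
wrappers: per the kernel-doc, @bidi_bytes is ignored for a non-bidi request,
so passing 0 is safe. A sketch of such wrappers on top of the new API (the
my_-prefixed names mark them as illustrative, not part of the patch):

	static inline bool my_blk_end_request(struct request *rq, int error,
					      unsigned int nr_bytes)
	{
		/* takes and releases rq->q->queue_lock internally */
		return blk_end_bidi_request(rq, error, nr_bytes, 0);
	}

	static inline bool __my_blk_end_request(struct request *rq, int error,
						unsigned int nr_bytes)
	{
		/* caller must already hold rq->q->queue_lock */
		return __blk_end_bidi_request(rq, error, nr_bytes, 0);
	}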