author    Randy Dunlap <randy.dunlap@oracle.com>  2008-08-19 14:13:11 -0400
committer Jens Axboe <jens.axboe@oracle.com>      2008-10-09 02:56:03 -0400
commit    710027a48ede75428cc68eaa8ae2269b1e356e2c (patch)
tree      22cba18860b83b03613bad97c405fb5146a2d686
parent    5b99c2ffa980528a197f26c7d876cceeccce8dd5 (diff)
Add some block/ source files to the kernel-api docbook. Fix kernel-doc notation in them as needed. Fix changed function parameter names. Fix typos/spellos. In comments, change REQ_SPECIAL to REQ_TYPE_SPECIAL and REQ_BLOCK_PC to REQ_TYPE_BLOCK_PC.
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
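For readers unfamiliar with kernel-doc markup, the notation fixes throughout this patch follow conventions like the ones sketched below. This is a minimal, hypothetical example (blk_example_fn and its parameters are illustrative only, not part of this patch): constants get a '%' prefix, parameters an '@' prefix, and structures an '&' prefix so the DocBook tooling can mark them up when generating the kernel-api book.

/**
 * blk_example_fn - illustrate kernel-doc notation (hypothetical helper)
 * @q: the request queue being operated on
 * @gfp_mask: allocation flags, e.g. %GFP_KERNEL
 *
 * Description:
 *     Operates on a &struct request_queue. Constants are written as %NULL,
 *     %0 or %1, parameters as @q and @gfp_mask, and related functions with
 *     trailing parentheses, e.g. blk_example_fn(), so the generated
 *     documentation can cross-reference and highlight them.
 *
 * Return:
 *     %0 on success, a negative errno on failure.
 */
static int blk_example_fn(struct request_queue *q, gfp_t gfp_mask);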
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl |  4
-rw-r--r--  block/blk-core.c                      | 72
-rw-r--r--  block/blk-exec.c                      |  6
-rw-r--r--  block/blk-integrity.c                 |  4
-rw-r--r--  block/blk-map.c                       | 16
-rw-r--r--  block/blk-settings.c                  |  8
-rw-r--r--  block/blk-tag.c                       |  8
-rw-r--r--  block/genhd.c                         |  5
8 files changed, 64 insertions, 59 deletions
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index b7b1482f6e04..f5696ba9ae96 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -364,6 +364,10 @@ X!Edrivers/pnp/system.c
 !Eblock/blk-barrier.c
 !Eblock/blk-tag.c
 !Iblock/blk-tag.c
+!Eblock/blk-integrity.c
+!Iblock/blktrace.c
+!Iblock/genhd.c
+!Eblock/genhd.c
 </chapter>
 
 <chapter id="chrdev">
diff --git a/block/blk-core.c b/block/blk-core.c
index 2616cdd049a8..86d22e7d65c5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * request queue; this lock will be taken also from interrupt context, so irq
  * disabling is needed for it.
  *
- * Function returns a pointer to the initialized request queue, or NULL if
+ * Function returns a pointer to the initialized request queue, or %NULL if
  * it didn't succeed.
  *
  * Note:
@@ -913,7 +913,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
- * blk_insert_request - insert a special request in to a request queue
+ * blk_insert_request - insert a special request into a request queue
  * @q: request queue where request should be inserted
  * @rq: request to be inserted
  * @at_head: insert request at head or tail of queue
@@ -923,8 +923,8 @@ EXPORT_SYMBOL(blk_requeue_request);
  * Many block devices need to execute commands asynchronously, so they don't
  * block the whole kernel from preemption during request execution. This is
  * accomplished normally by inserting aritficial requests tagged as
- * REQ_SPECIAL in to the corresponding request queue, and letting them be
- * scheduled for actual execution by the request queue.
+ * REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
+ * be scheduled for actual execution by the request queue.
  *
  * We have the option of inserting the head or the tail of the queue.
  * Typically we use the tail for new ioctls and so forth. We use the head
@@ -1322,7 +1322,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 }
 
 /**
- * generic_make_request: hand a buffer to its device driver for I/O
+ * generic_make_request - hand a buffer to its device driver for I/O
  * @bio: The bio describing the location in memory and on the device.
  *
  * generic_make_request() is used to make I/O requests of block
@@ -1480,13 +1480,13 @@ void generic_make_request(struct bio *bio)
 EXPORT_SYMBOL(generic_make_request);
 
 /**
- * submit_bio: submit a bio to the block device layer for I/O
+ * submit_bio - submit a bio to the block device layer for I/O
  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  * @bio: The &struct bio which describes the I/O
  *
  * submit_bio() is very similar in purpose to generic_make_request(), and
  * uses that function to do most of the work. Both are fairly rough
- * interfaces, @bio must be presetup and ready for I/O.
+ * interfaces; @bio must be presetup and ready for I/O.
  *
  */
 void submit_bio(int rw, struct bio *bio)
@@ -1524,7 +1524,7 @@ EXPORT_SYMBOL(submit_bio);
 /**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -1532,8 +1532,8 @@ EXPORT_SYMBOL(submit_bio);
  * for the next range of segments (if any) in the cluster.
  *
  * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request, call end_that_request_last()
+ *     %1 - still buffers pending for this request
  **/
 static int __end_that_request_first(struct request *req, int error,
 				    int nr_bytes)
@@ -1544,7 +1544,7 @@ static int __end_that_request_first(struct request *req, int error,
 	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 
 	/*
-	 * for a REQ_BLOCK_PC request, we want to carry any eventual
+	 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
 	 * sense key with us all the way through
 	 */
 	if (!blk_pc_request(req))
@@ -1810,11 +1810,11 @@ EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 /**
  * end_queued_request - end all I/O on a queued request
  * @rq: the request being processed
- * @uptodate: error value or 0/1 uptodate flag
+ * @uptodate: error value or %0/%1 uptodate flag
  *
  * Description:
  *     Ends all I/O on a request, and removes it from the block layer queues.
- *     Not suitable for normal IO completion, unless the driver still has
+ *     Not suitable for normal I/O completion, unless the driver still has
  *     the request attached to the block layer.
  *
  **/
@@ -1827,7 +1827,7 @@ EXPORT_SYMBOL(end_queued_request);
 /**
  * end_dequeued_request - end all I/O on a dequeued request
  * @rq: the request being processed
- * @uptodate: error value or 0/1 uptodate flag
+ * @uptodate: error value or %0/%1 uptodate flag
  *
  * Description:
  *     Ends all I/O on a request. The request must already have been
@@ -1845,14 +1845,14 @@ EXPORT_SYMBOL(end_dequeued_request);
 /**
  * end_request - end I/O on the current segment of the request
  * @req: the request being processed
- * @uptodate: error value or 0/1 uptodate flag
+ * @uptodate: error value or %0/%1 uptodate flag
  *
  * Description:
  *     Ends I/O on the current segment of a request. If that is the only
  *     remaining segment, the request is also completed and freed.
  *
- *     This is a remnant of how older block drivers handled IO completions.
- *     Modern drivers typically end IO on the full request in one go, unless
+ *     This is a remnant of how older block drivers handled I/O completions.
+ *     Modern drivers typically end I/O on the full request in one go, unless
  *     they have a residual value to account for. For that case this function
  *     isn't really useful, unless the residual just happens to be the
  *     full current segment. In other words, don't use this function in new
@@ -1870,12 +1870,12 @@ EXPORT_SYMBOL(end_request);
 /**
  * blk_end_io - Generic end_io function to complete a request.
  * @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  * @drv_callback: function called between completion of bios in the request
  *                and completion of the request.
- *                If the callback returns non 0, this helper returns without
+ *                If the callback returns non %0, this helper returns without
  *                completion of the request.
  *
1881 * Description: 1881 * Description:
@@ -1883,8 +1883,8 @@ EXPORT_SYMBOL(end_request);
1883 * If @rq has leftover, sets it up for the next range of segments. 1883 * If @rq has leftover, sets it up for the next range of segments.
1884 * 1884 *
1885 * Return: 1885 * Return:
1886 * 0 - we are done with this request 1886 * %0 - we are done with this request
1887 * 1 - this request is not freed yet, it still has pending buffers. 1887 * %1 - this request is not freed yet, it still has pending buffers.
1888 **/ 1888 **/
1889static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, 1889static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1890 unsigned int bidi_bytes, 1890 unsigned int bidi_bytes,
@@ -1919,7 +1919,7 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -1927,8 +1927,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
  * If @rq has leftover, sets it up for the next range of segments.
  *
  * Return:
- *     0 - we are done with this request
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request
+ *     %1 - still buffers pending for this request
  **/
 int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
@@ -1939,15 +1939,15 @@ EXPORT_SYMBOL_GPL(blk_end_request);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Description:
  *     Must be called with queue lock held unlike blk_end_request().
  *
  * Return:
- *     0 - we are done with this request
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request
+ *     %1 - still buffers pending for this request
  **/
 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
@@ -1966,7 +1966,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
 /**
  * blk_end_bidi_request - Helper function for drivers to complete bidi request.
  * @rq: the bidi request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -1974,8 +1974,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
  *
  * Return:
- *     0 - we are done with this request
- *     1 - still buffers pending for this request
+ *     %0 - we are done with this request
+ *     %1 - still buffers pending for this request
  **/
 int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
 			 unsigned int bidi_bytes)
@@ -1987,11 +1987,11 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 /**
  * blk_end_request_callback - Special helper function for tricky drivers
  * @rq: the request being processed
- * @error: 0 for success, < 0 for error
+ * @error: %0 for success, < %0 for error
  * @nr_bytes: number of bytes to complete
  * @drv_callback: function called between completion of bios in the request
  *                and completion of the request.
- *                If the callback returns non 0, this helper returns without
+ *                If the callback returns non %0, this helper returns without
  *                completion of the request.
  *
  * Description:
@@ -2004,10 +2004,10 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
  *     Don't use this interface in other places anymore.
  *
  * Return:
- *     0 - we are done with this request
- *     1 - this request is not freed yet.
+ *     %0 - we are done with this request
+ *     %1 - this request is not freed yet.
  *         this request still has pending buffers or
  *         the driver doesn't want to finish this request yet.
  **/
 int blk_end_request_callback(struct request *rq, int error,
 			     unsigned int nr_bytes,
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 9bceff7674f2..6af716d1e54e 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -16,7 +16,7 @@
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
- * @error: end io status of the request
+ * @error: end I/O status of the request
  */
 static void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -41,7 +41,7 @@ static void blk_end_sync_rq(struct request *rq, int error)
  * @done: I/O completion handler
  *
  * Description:
- *    Insert a fully prepared request at the back of the io scheduler queue
+ *    Insert a fully prepared request at the back of the I/O scheduler queue
  *    for execution. Don't wait for completion.
  */
 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
  * @at_head: insert request at head or tail of queue
  *
  * Description:
- *    Insert a fully prepared request at the back of the io scheduler queue
+ *    Insert a fully prepared request at the back of the I/O scheduler queue
  *    for execution and wait for completion.
  */
 int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 3f1a8478cc38..d87606eaca1d 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -109,8 +109,8 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
 
 /**
  * blk_integrity_compare - Compare integrity profile of two block devices
- * @b1: Device to compare
- * @b2: Device to compare
+ * @bd1: Device to compare
+ * @bd2: Device to compare
  *
  * Description: Meta-devices like DM and MD need to verify that all
  *              sub-devices use the same integrity format before advertising to
diff --git a/block/blk-map.c b/block/blk-map.c
index af37e4ae62f5..ea1bf53929e4 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -85,17 +85,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 
 /**
- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -154,7 +154,7 @@ unmap_rq:
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request to map data to
  * @iov: pointer to the iovec
@@ -162,10 +162,10 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @len: I/O byte count
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  * Description:
  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
  *    supply the original rq->bio from the blk_rq_map_user() return, since
- *    the io completion may have changed rq->bio.
+ *    the I/O completion may have changed rq->bio.
  */
 int blk_rq_unmap_user(struct bio *bio)
 {
@@ -250,7 +250,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request to fill
  * @kbuf: the kernel buffer
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 539d873c820d..d70692badcdb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @page.
+ *    buffers for doing I/O to pages residing above @dma_addr.
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
@@ -229,7 +229,7 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
  *    hw data segments in a request. This would be the largest number of
- *    address/length pairs the host adapter can actually give as once
+ *    address/length pairs the host adapter can actually give at once
  *    to the device.
  **/
 void blk_queue_max_hw_segments(struct request_queue *q,
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
  * @mask: alignment mask
  *
  * description:
- *    set required memory and length aligment for direct dma transactions.
+ *    set required memory and length alignment for direct dma transactions.
  *    this is used when buiding direct io requests for the queue.
  *
  **/
@@ -426,7 +426,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
  * @mask: alignment mask
  *
  * description:
- *    update required memory and length aligment for direct dma transactions.
+ *    update required memory and length alignment for direct dma transactions.
  *    If the requested alignment is larger than the current alignment, then
  *    the current queue alignment is updated to the new value, otherwise it
  *    is left alone. The design of this is to allow multiple objects
diff --git a/block/blk-tag.c b/block/blk-tag.c
index ed5166fbc599..8a99688eb1b1 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -29,7 +29,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
  * __blk_free_tags - release a given set of tag maintenance info
  * @bqt: the tag map to free
  *
- * Tries to free the specified @bqt@. Returns true if it was
+ * Tries to free the specified @bqt. Returns true if it was
  * actually freed and false if there are still references using it
  */
 static int __blk_free_tags(struct blk_queue_tag *bqt)
@@ -78,7 +78,7 @@ void __blk_queue_free_tags(struct request_queue *q)
  * blk_free_tags - release a given set of tag maintenance info
  * @bqt: the tag map to free
  *
- * For externally managed @bqt@ frees the map. Callers of this
+ * For externally managed @bqt frees the map. Callers of this
  * function must guarantee to have released all the queues that
  * might have been using this tag map.
  */
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(blk_free_tags);
  * @q: the request queue for the device
  *
  * Notes:
- *	This is used to disabled tagged queuing to a device, yet leave
+ *	This is used to disable tagged queuing to a device, yet leave
  *	queue in function.
  **/
 void blk_queue_free_tags(struct request_queue *q)
@@ -271,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  * @rq: the request that has completed
  *
  * Description:
- *    Typically called when end_that_request_first() returns 0, meaning
+ *    Typically called when end_that_request_first() returns %0, meaning
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
diff --git a/block/genhd.c b/block/genhd.c
index e0ce23ac2ece..c114a43052de 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -211,10 +211,11 @@ void unlink_gendisk(struct gendisk *disk)
 
 /**
  * get_gendisk - get partitioning information for a given device
- * @dev: device to get partitioning information for
+ * @devt: device to get partitioning information for
+ * @part: returned partition index
  *
  * This function gets the structure containing partitioning
- * information for the given device @dev.
+ * information for the given device @devt.
  */
 struct gendisk *get_gendisk(dev_t devt, int *part)
 {