diff options
author | Randy Dunlap <randy.dunlap@oracle.com> | 2008-08-19 14:13:11 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-10-09 02:56:03 -0400 |
commit | 710027a48ede75428cc68eaa8ae2269b1e356e2c (patch) | |
tree | 22cba18860b83b03613bad97c405fb5146a2d686 /block/blk-settings.c | |
parent | 5b99c2ffa980528a197f26c7d876cceeccce8dd5 (diff) |
Add some block/ source files to the kernel-api docbook. Fix kernel-doc notation in them as needed. Fix changed function parameter names. Fix typos/spellos. In comments, change REQ_SPECIAL to REQ_TYPE_SPECIAL and REQ_BLOCK_PC to REQ_TYPE_BLOCK_PC.
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r-- | block/blk-settings.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 539d873c820d..d70692badcdb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
144 | * Different hardware can have different requirements as to what pages | 144 | * Different hardware can have different requirements as to what pages |
145 | * it can do I/O directly to. A low level driver can call | 145 | * it can do I/O directly to. A low level driver can call |
146 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | 146 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce |
147 | * buffers for doing I/O to pages residing above @page. | 147 | * buffers for doing I/O to pages residing above @dma_addr. |
148 | **/ | 148 | **/ |
149 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) | 149 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) |
150 | { | 150 | { |
@@ -229,7 +229,7 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
229 | * Description: | 229 | * Description: |
230 | * Enables a low level driver to set an upper limit on the number of | 230 | * Enables a low level driver to set an upper limit on the number of |
231 | * hw data segments in a request. This would be the largest number of | 231 | * hw data segments in a request. This would be the largest number of |
232 | * address/length pairs the host adapter can actually give as once | 232 | * address/length pairs the host adapter can actually give at once |
233 | * to the device. | 233 | * to the device. |
234 | **/ | 234 | **/ |
235 | void blk_queue_max_hw_segments(struct request_queue *q, | 235 | void blk_queue_max_hw_segments(struct request_queue *q, |
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
410 | * @mask: alignment mask | 410 | * @mask: alignment mask |
411 | * | 411 | * |
412 | * description: | 412 | * description: |
413 | * set required memory and length aligment for direct dma transactions. | 413 | * set required memory and length alignment for direct dma transactions. |
414 | * this is used when buiding direct io requests for the queue. | 414 | * this is used when buiding direct io requests for the queue. |
415 | * | 415 | * |
416 | **/ | 416 | **/ |
@@ -426,7 +426,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
426 | * @mask: alignment mask | 426 | * @mask: alignment mask |
427 | * | 427 | * |
428 | * description: | 428 | * description: |
429 | * update required memory and length aligment for direct dma transactions. | 429 | * update required memory and length alignment for direct dma transactions. |
430 | * If the requested alignment is larger than the current alignment, then | 430 | * If the requested alignment is larger than the current alignment, then |
431 | * the current queue alignment is updated to the new value, otherwise it | 431 | * the current queue alignment is updated to the new value, otherwise it |
432 | * is left alone. The design of this is to allow multiple objects | 432 | * is left alone. The design of this is to allow multiple objects |