author    Linus Torvalds <torvalds@linux-foundation.org>  2008-01-28 16:51:56 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-01-28 16:51:56 -0500
commit    8d01eddf292dcd78b640418c80fb300532799cd4
tree      dd7f816423463e5ad10f15af22fdbd5b75cc6184  /block/ll_rw_blk.c
parent    f0f0052069989b80d2a3e50c9cd2f2a650bc1aea
parent    fa0ccd837e3dddb44c7db2f128a8bb7e4eabc21a
Merge branch 'for-2.6.25' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.25' of git://git.kernel.dk/linux-2.6-block:
  block: implement drain buffers
  __bio_clone: don't calculate hw/phys segment counts
  block: allow queue dma_alignment of zero
  blktrace: Add blktrace ioctls to SCSI generic devices
Diffstat (limited to 'block/ll_rw_blk.c')
 block/ll_rw_blk.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c16fdfed8c62..1932a56f5e4b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -721,6 +721,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ *
+ * @q:  the request queue for the device
+ * @buf:  physically contiguous buffer
+ * @size:  size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the transfer size some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer.  What this API
+ * does is adjust the queue so that the buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+		unsigned int size)
+{
+	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+		return -EINVAL;
+	/* make room for appending the drain */
+	--q->max_hw_segments;
+	--q->max_phys_segments;
+	q->dma_drain_buffer = buf;
+	q->dma_drain_size = size;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
+/**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q:  the request queue for the device
  * @mask:  the memory boundary mask
@@ -1374,6 +1413,16 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
+	if (q->dma_drain_size) {
+		sg->page_link &= ~0x02;
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+	}
+
 	if (sg)
 		sg_mark_end(sg);
 
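
For driver writers, the call order implied by the kernel-doc note is worth spelling out. What follows is a minimal sketch, not part of this commit: my_setup_queue, MY_MAX_SEGMENTS, MY_DRAIN_SIZE and the kmalloc() sizing are illustrative assumptions; only blk_queue_max_hw_segments(), blk_queue_max_phys_segments() and blk_queue_dma_drain() are real block-layer calls. Setting the segment limits first lets blk_queue_dma_drain() take one segment off each limit to reserve room for the drain element.

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_MAX_SEGMENTS	128	/* illustrative: what the HBA can actually take */
#define MY_DRAIN_SIZE	256	/* illustrative: worst-case excess transfer, bytes */

static void *my_drain_buf;	/* kmalloc() memory is physically contiguous */

static int my_setup_queue(struct request_queue *q)
{
	my_drain_buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
	if (!my_drain_buf)
		return -ENOMEM;

	/* Set the limits first; blk_queue_dma_drain() decrements each
	 * by one so a drain element always fits. */
	blk_queue_max_hw_segments(q, MY_MAX_SEGMENTS);
	blk_queue_max_phys_segments(q, MY_MAX_SEGMENTS);

	return blk_queue_dma_drain(q, my_drain_buf, MY_DRAIN_SIZE);
}

If the limits are instead set after blk_queue_dma_drain(), they must be set to one fewer than the hardware maximum, exactly as the comment warns.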
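
The blk_rq_map_sg() hunk relies on a scatterlist detail that is easy to miss: in the 2.6.24-era struct scatterlist, bit 0x02 of page_link marks the end of the list, so the just-built tail has to be un-terminated before sg_next() will advance into the slot that blk_queue_dma_drain() reserved. Here is the same sequence as an annotated sketch; append_drain is a hypothetical helper name, not a kernel function.

#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hypothetical helper mirroring the new hunk in blk_rq_map_sg();
 * sg points at the last segment already mapped for the request. */
static void append_drain(struct request_queue *q, struct scatterlist *sg,
			 int *nsegs)
{
	sg->page_link &= ~0x02;		/* clear the end marker on the tail */
	sg = sg_next(sg);		/* step into the reserved slot */
	sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
		    q->dma_drain_size,
		    ((unsigned long)q->dma_drain_buffer) & (PAGE_SIZE - 1));
	(*nsegs)++;			/* the drain counts as a segment */
	sg_mark_end(sg);		/* blk_rq_map_sg() does this just after the hunk */
}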