aboutsummaryrefslogtreecommitdiffstats
path: root/block/ll_rw_blk.c
diff options
context:
space:
mode:
authorJames Bottomley <James.Bottomley@HansenPartnership.com>2008-01-10 12:30:36 -0500
committerJens Axboe <jens.axboe@oracle.com>2008-01-28 04:54:11 -0500
commitfa0ccd837e3dddb44c7db2f128a8bb7e4eabc21a (patch)
treeade071502f3e7cba423295890d828f0f301ad731 /block/ll_rw_blk.c
parent5d84070ee0a433620c57e85dac7f82faaec5fbb3 (diff)
block: implement drain buffers
These DMA drain buffer implementations in drivers are pretty horrible to do in terms of manipulating the scatterlist. Plus they're being done at least in drivers/ide and drivers/ata, so we now have code duplication.

The one use case for this, as I understand it, is AHCI controllers doing PIO mode to MMC devices but translating this to DMA at the controller level.

So, what about adding a callback to the block layer that permits the adding of the drain buffer for the problem devices? The idea is that you'd do this in slave_configure after you find one of these devices.

The beauty of doing it in the block layer is that it quietly adds the drain buffer to the end of the sg list, so it automatically gets mapped (and unmapped) without anything unusual having to be done to the scatterlist in drivers/scsi or drivers/ata and without any alteration to the transfer length.

Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--block/ll_rw_blk.c49
1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3d0422f48453..768987dc2697 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -726,6 +726,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
726EXPORT_SYMBOL(blk_queue_stack_limits); 726EXPORT_SYMBOL(blk_queue_stack_limits);
727 727
728/** 728/**
729 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
730 *
731 * @q: the request queue for the device
732 * @buf: physically contiguous buffer
733 * @size: size of the buffer in bytes
734 *
735 * Some devices have excess DMA problems and can't simply discard (or
736 * zero fill) the unwanted piece of the transfer. They have to have a
737 * real area of memory to transfer it into. The use case for this is
738 * ATAPI devices in DMA mode. If the packet command causes a transfer
739 * bigger than the transfer size some HBAs will lock up if there
740 * aren't DMA elements to contain the excess transfer. What this API
741 * does is adjust the queue so that the buf is always appended
742 * silently to the scatterlist.
743 *
744 * Note: This routine adjusts max_hw_segments to make room for
745 * appending the drain buffer. If you call
746 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
747 * calling this routine, you must set the limit to one fewer than your
748 * device can support otherwise there won't be room for the drain
749 * buffer.
750 */
751int blk_queue_dma_drain(struct request_queue *q, void *buf,
752 unsigned int size)
753{
754 if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
755 return -EINVAL;
756 /* make room for appending the drain */
757 --q->max_hw_segments;
758 --q->max_phys_segments;
759 q->dma_drain_buffer = buf;
760 q->dma_drain_size = size;
761
762 return 0;
763}
764
765EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
766
767/**
729 * blk_queue_segment_boundary - set boundary rules for segment merging 768 * blk_queue_segment_boundary - set boundary rules for segment merging
730 * @q: the request queue for the device 769 * @q: the request queue for the device
731 * @mask: the memory boundary mask 770 * @mask: the memory boundary mask
@@ -1379,6 +1418,16 @@ new_segment:
1379 bvprv = bvec; 1418 bvprv = bvec;
1380 } /* segments in rq */ 1419 } /* segments in rq */
1381 1420
1421 if (q->dma_drain_size) {
1422 sg->page_link &= ~0x02;
1423 sg = sg_next(sg);
1424 sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
1425 q->dma_drain_size,
1426 ((unsigned long)q->dma_drain_buffer) &
1427 (PAGE_SIZE - 1));
1428 nsegs++;
1429 }
1430
1382 if (sg) 1431 if (sg)
1383 sg_mark_end(sg); 1432 sg_mark_end(sg);
1384 1433