Diffstat (limited to 'block/ll_rw_blk.c')
 block/ll_rw_blk.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3d0422f48453..768987dc2697 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -726,6 +726,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
+ * blk_queue_dma_drain - Set up a drain buffer for excess DMA.
+ *
+ * @q: the request queue for the device
+ * @buf: physically contiguous buffer
+ * @size: size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the transfer size, some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer.  What this API
+ * does is adjust the queue so that the buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support, otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+		unsigned int size)
+{
+	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+		return -EINVAL;
+	/* make room for appending the drain */
+	--q->max_hw_segments;
+	--q->max_phys_segments;
+	q->dma_drain_buffer = buf;
+	q->dma_drain_size = size;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
+/**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q: the request queue for the device
  * @mask: the memory boundary mask
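For illustration, a minimal caller might look like the sketch below. It is not part of the patch: the helper name, the drain size, and the error handling are assumptions for the example, not taken from any in-tree driver. The one real constraint, per the kernel-doc note above, is the ordering against the segment-limit setup: call blk_queue_dma_drain() after the limits are configured, or set them one lower afterwards.

#include <linux/blkdev.h>
#include <linux/slab.h>

#define EXAMPLE_DRAIN_SIZE	256	/* assumed size; device-specific in practice */

static int example_setup_drain(struct request_queue *q)
{
	void *drain = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);

	if (!drain)
		return -ENOMEM;

	/*
	 * Called after blk_queue_max_hw_segments()/_max_phys_segments():
	 * the helper steals one hw and one phys segment so that
	 * blk_rq_map_sg() always has a free slot for the drain element.
	 */
	return blk_queue_dma_drain(q, drain, EXAMPLE_DRAIN_SIZE);
}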
@@ -1379,6 +1418,16 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
+	if (q->dma_drain_size) {
+		sg->page_link &= ~0x02;
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+	}
+
 	if (sg)
 		sg_mark_end(sg);
 
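In the hunk above, bit 0x02 of page_link is the scatterlist end marker (what sg_is_last() tests), so clearing it on the last mapped entry lets sg_next() step into the slot that blk_queue_dma_drain() reserved; the drain page is installed there and nsegs is bumped before sg_mark_end() re-terminates the list. From the caller's side this is transparent, as the sketch below suggests (driver names invented for the example): the drain element is already included in the count blk_rq_map_sg() returns, so the usual dma_map_sg() path needs no change.

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_rq(struct request_queue *q, struct request *rq,
			  struct device *dev, struct scatterlist *sgl)
{
	/* count already includes the drain element when dma_drain_size != 0 */
	int nsegs = blk_rq_map_sg(q, rq, sgl);

	return dma_map_sg(dev, sgl, nsegs,
			  rq_data_dir(rq) == READ ? DMA_FROM_DEVICE
						  : DMA_TO_DEVICE);
}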