 block/elevator.c       | 26 ++++++++++-
 block/ll_rw_blk.c      | 49 ++++++++++++++
 include/linux/blkdev.h |  4 ++
 3 files changed, 78 insertions(+), 1 deletion(-)
diff --git a/block/elevator.c b/block/elevator.c
index f9736fbdab0..8cd5775acd7 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -741,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
                         q->boundary_rq = NULL;
                 }
 
-                if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
+                if (rq->cmd_flags & REQ_DONTPREP)
+                        break;
+
+                if (q->dma_drain_size && rq->data_len) {
+                        /*
+                         * make sure space for the drain appears;
+                         * we know we can do this because
+                         * max_hw_segments has been adjusted to be
+                         * one fewer than the device can handle
+                         */
+                        rq->nr_phys_segments++;
+                        rq->nr_hw_segments++;
+                }
+
+                if (!q->prep_rq_fn)
                         break;
 
                 ret = q->prep_rq_fn(q, rq);
@@ -754,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
+                        if (q->dma_drain_size && rq->data_len &&
+                            !(rq->cmd_flags & REQ_DONTPREP)) {
+                                /*
+                                 * remove the space for the drain we added
+                                 * so that we don't add it again
+                                 */
+                                --rq->nr_phys_segments;
+                                --rq->nr_hw_segments;
+                        }
+
                         rq = NULL;
                         break;
                 } else if (ret == BLKPREP_KILL) {
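The defer path above only matters for drivers whose prep function follows the usual BLKPREP_* contract: if prep cannot finish, the drain segment that was just accounted for is stripped again so it is not double-counted on the next pass. As a rough illustration, here is a minimal sketch of a prep_rq_fn such a driver might install with blk_queue_prep_rq(); struct my_cmd, my_alloc_cmd() and my_prep_rq_fn() are hypothetical names, not part of this patch.

#include <linux/blkdev.h>

struct my_cmd;                          /* driver-private command (hypothetical) */
extern struct my_cmd *my_alloc_cmd(struct request_queue *q, struct request *rq);

static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
{
        struct my_cmd *cmd = my_alloc_cmd(q, rq);

        if (!cmd)
                /*
                 * Out of command slots: returning BLKPREP_DEFER makes
                 * elv_next_request() drop the drain segment it just
                 * accounted for, so the counts are not inflated a
                 * second time when this request is prepped again.
                 */
                return BLKPREP_DEFER;

        rq->special = cmd;
        rq->cmd_flags |= REQ_DONTPREP;  /* prepped; keep the drain accounting as-is */
        return BLKPREP_OK;
}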
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3d0422f4845..768987dc269 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -726,6 +726,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
+ * blk_queue_dma_drain - Set up a drain buffer for excess DMA.
+ *
+ * @q:  the request queue for the device
+ * @buf:  physically contiguous buffer
+ * @size:  size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the transfer size, some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer.  What this API
+ * does is adjust the queue so that the buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support, otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+                        unsigned int size)
+{
+        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+                return -EINVAL;
+        /* make room for appending the drain */
+        --q->max_hw_segments;
+        --q->max_phys_segments;
+        q->dma_drain_buffer = buf;
+        q->dma_drain_size = size;
+
+        return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
+/**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q:  the request queue for the device
  * @mask:  the memory boundary mask
@@ -1379,6 +1418,16 @@ new_segment:
                 bvprv = bvec;
         } /* segments in rq */
 
+        if (q->dma_drain_size) {
+                sg->page_link &= ~0x02;
+                sg = sg_next(sg);
+                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+                            q->dma_drain_size,
+                            ((unsigned long)q->dma_drain_buffer) &
+                            (PAGE_SIZE - 1));
+                nsegs++;
+        }
+
         if (sg)
                 sg_mark_end(sg);
 
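The kernel-doc above spells out the ordering constraint: program the segment limits first, then let blk_queue_dma_drain() reserve one of them. A minimal setup sketch under that assumption follows; my_setup_queue(), my_drain_buf and MY_DRAIN_SIZE are hypothetical driver-side names, and the drain size is whatever the hardware needs to soak up.

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE   256             /* hypothetical; sized for the worst-case excess */

static void *my_drain_buf;

static int my_setup_queue(struct request_queue *q, unsigned short max_segs)
{
        /* kmalloc() memory is physically contiguous, as required */
        my_drain_buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
        if (!my_drain_buf)
                return -ENOMEM;

        /* advertise the full hardware limits first ... */
        blk_queue_max_hw_segments(q, max_segs);
        blk_queue_max_phys_segments(q, max_segs);

        /*
         * ... then let the block layer reserve one segment for the drain.
         * Any later call to the max-segment setters must pass one fewer
         * than the hardware really supports.
         */
        return blk_queue_dma_drain(q, my_drain_buf, MY_DRAIN_SIZE);
}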
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a3ab575c2..e542c8fd921 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -429,6 +429,8 @@ struct request_queue
         unsigned int            max_segment_size;
 
         unsigned long           seg_boundary_mask;
+        void                    *dma_drain_buffer;
+        unsigned int            dma_drain_size;
         unsigned int            dma_alignment;
 
         struct blk_queue_tag    *queue_tags;
@@ -760,6 +762,8 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
+                               unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
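On the mapping side nothing extra is needed in the driver: once a drain is configured, blk_rq_map_sg() silently appends one scatterlist entry covering the drain buffer and includes it in the returned count. A sketch of what that looks like from a driver's DMA-setup path, with the hypothetical MY_MAX_SEGS and my_build_sglist() standing in for the hardware's true segment limit (the same value passed as max_segs above) and the driver's own helper:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#define MY_MAX_SEGS     128             /* hypothetical hardware segment limit */

static int my_build_sglist(struct request_queue *q, struct request *rq,
                           struct scatterlist *sglist)
{
        int nents;

        /* room for up to MY_MAX_SEGS entries, drain included */
        sg_init_table(sglist, MY_MAX_SEGS);
        nents = blk_rq_map_sg(q, rq, sglist);

        /*
         * With q->dma_drain_size set, the last entry points at the drain
         * buffer, so excess ATAPI data has somewhere to land instead of
         * running off the end of the scatterlist.
         */
        return nents;
}

Reserving the segment up front (by decrementing max_hw_segments and max_phys_segments in blk_queue_dma_drain()) is what guarantees this extra entry always fits within what the HBA can address.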