 drivers/dma/ioat_dma.c    | 48
 drivers/dma/iop-adma.c    | 29
 drivers/dma/mv_xor.c      | 22
 include/linux/dmaengine.h |  4
 4 files changed, 61 insertions(+), 42 deletions(-)
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 90e5b0a28cbf..171cad69f318 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -757,6 +757,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
+static void
+ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+{
+	/*
+	 * yes we are unmapping both _page and _single
+	 * alloc'd regions with unmap_page. Is this
+	 * *really* that bad?
+	 */
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
+		pci_unmap_page(ioat_chan->device->pdev,
+				pci_unmap_addr(desc, dst),
+				pci_unmap_len(desc, len),
+				PCI_DMA_FROMDEVICE);
+
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
+		pci_unmap_page(ioat_chan->device->pdev,
+				pci_unmap_addr(desc, src),
+				pci_unmap_len(desc, len),
+				PCI_DMA_TODEVICE);
+}
+
 /**
  * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
  * @chan: ioat channel to be cleaned up
@@ -817,21 +838,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 		 */
 		if (desc->async_tx.cookie) {
 			cookie = desc->async_tx.cookie;
-
-			/*
-			 * yes we are unmapping both _page and _single
-			 * alloc'd regions with unmap_page. Is this
-			 * *really* that bad?
-			 */
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, dst),
-					pci_unmap_len(desc, len),
-					PCI_DMA_FROMDEVICE);
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, src),
-					pci_unmap_len(desc, len),
-					PCI_DMA_TODEVICE);
-
+			ioat_dma_unmap(ioat_chan, desc);
 			if (desc->async_tx.callback) {
 				desc->async_tx.callback(desc->async_tx.callback_param);
 				desc->async_tx.callback = NULL;
@@ -890,16 +897,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 		if (desc->async_tx.cookie) {
 			cookie = desc->async_tx.cookie;
 			desc->async_tx.cookie = 0;
-
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, dst),
-					pci_unmap_len(desc, len),
-					PCI_DMA_FROMDEVICE);
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, src),
-					pci_unmap_len(desc, len),
-					PCI_DMA_TODEVICE);
-
+			ioat_dma_unmap(ioat_chan, desc);
 			if (desc->async_tx.callback) {
 				desc->async_tx.callback(desc->async_tx.callback_param);
 				desc->async_tx.callback = NULL;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index b57564dd0232..434013d41288 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 		struct device *dev =
 			&iop_chan->device->pdev->dev;
 		u32 len = unmap->unmap_len;
-		u32 src_cnt = unmap->unmap_src_cnt;
-		dma_addr_t addr = iop_desc_get_dest_addr(unmap,
-			iop_chan);
+		enum dma_ctrl_flags flags = desc->async_tx.flags;
+		u32 src_cnt;
+		dma_addr_t addr;
 
-		dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
-		while (src_cnt--) {
-			addr = iop_desc_get_src_addr(unmap,
-						     iop_chan,
-						     src_cnt);
-			dma_unmap_page(dev, addr, len,
-				       DMA_TO_DEVICE);
+		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+			addr = iop_desc_get_dest_addr(unmap, iop_chan);
+			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+		}
+
+		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+			src_cnt = unmap->unmap_src_cnt;
+			while (src_cnt--) {
+				addr = iop_desc_get_src_addr(unmap,
+							     iop_chan,
+							     src_cnt);
+				dma_unmap_page(dev, addr, len,
+					       DMA_TO_DEVICE);
+			}
 		}
 		desc->group_head = NULL;
 	}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 8239cfdbc2e6..a4e4494663bf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -308,13 +308,23 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 		struct device *dev =
 			&mv_chan->device->pdev->dev;
 		u32 len = unmap->unmap_len;
-		u32 src_cnt = unmap->unmap_src_cnt;
-		dma_addr_t addr = mv_desc_get_dest_addr(unmap);
+		enum dma_ctrl_flags flags = desc->async_tx.flags;
+		u32 src_cnt;
+		dma_addr_t addr;
 
-		dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
-		while (src_cnt--) {
-			addr = mv_desc_get_src_addr(unmap, src_cnt);
-			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
+		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+			addr = mv_desc_get_dest_addr(unmap);
+			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+		}
+
+		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+			src_cnt = unmap->unmap_src_cnt;
+			while (src_cnt--) {
+				addr = mv_desc_get_src_addr(unmap,
+							    src_cnt);
+				dma_unmap_page(dev, addr, len,
+					       DMA_TO_DEVICE);
+			}
 		}
 		desc->group_head = NULL;
 	}
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ba89b0f5056e..b058d6360383 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -102,10 +102,14 @@ enum dma_transaction_type {
  * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
  * acknowledges receipt, i.e. has has a chance to establish any
  * dependency chains
+ * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
+ * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
 	DMA_CTRL_ACK = (1 << 1),
+	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
+	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
 };
 
 /**
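
The two new flags are set by the client that prepares a transaction; the driver-side cleanup paths above only honor them. A minimal client-side sketch of the intended usage follows. It is hypothetical and not part of this patch: the helper name and error handling are illustrative, and it assumes the device_prep_dma_memcpy()/tx_submit() interfaces as they exist in this kernel series.

#include <linux/dmaengine.h>

/*
 * Hypothetical example, not from this patch: prepare and submit a memcpy
 * while asking the completion path to skip both unmaps. The caller then
 * owns the mappings and must unmap them itself once the copy is done,
 * e.g. after the rest of a dependency chain has used the same buffers.
 */
static dma_cookie_t submit_copy_keep_mapped(struct dma_chan *chan,
					    dma_addr_t dst, dma_addr_t src,
					    size_t len)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK |
				    DMA_COMPL_SKIP_SRC_UNMAP |
				    DMA_COMPL_SKIP_DEST_UNMAP;

	/* the prep routine records the flags with the descriptor... */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
	if (!tx)
		return -ENOMEM;

	/* ...and the cleanup paths patched above check them before unmapping */
	return tx->tx_submit(tx);
}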