author    Dan Williams <dan.j.williams@intel.com>  2008-07-04 03:13:40 -0400
committer Dan Williams <dan.j.williams@intel.com>  2008-07-08 14:59:12 -0400
commit    e1d181efb14a93cf263d6c588a5395518edf3294
tree      1792d1faa7e344401789bbcfad8102d0d93036e2 /drivers/dma
parent    848c536a37b8db4e461f14ca15fe29850151c822
dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
In some cases client code may need the dma-driver to skip the unmap of source and/or destination buffers. Setting these flags tells the driver to skip the corresponding unmap step. In this regard async_xor is currently broken: it allows the destination buffer to be unmapped while an operation is still in progress, i.e. when the number of sources exceeds the hardware channel's maximum (fixed in a subsequent patch).

Acked-by: Saeed Bishara <saeed@marvell.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Acked-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
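For reference, the flags themselves are added to enum dma_ctrl_flags in include/linux/dmaengine.h; that hunk falls outside this page's diffstat, which is limited to drivers/dma. A sketch of the enum as it looks with this series applied (bit assignments per the contemporary header, shown here only for context):

	enum dma_ctrl_flags {
		DMA_PREP_INTERRUPT = (1 << 0),
		DMA_CTRL_ACK = (1 << 1),
		DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
		DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	};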
Diffstat (limited to 'drivers/dma')
 -rw-r--r--  drivers/dma/ioat_dma.c  | 48
 -rw-r--r--  drivers/dma/iop-adma.c  | 29
 -rw-r--r--  drivers/dma/mv_xor.c    | 22
 3 files changed, 57 insertions(+), 42 deletions(-)
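Before the diff itself, a minimal client-side sketch of how code might use the new flags when it needs to keep ownership of its DMA mappings. The helper name and error handling below are illustrative only, not part of this patch; the prep and submit calls are the stock dmaengine API of this era:

	static dma_cookie_t submit_copy_keep_mappings(struct dma_chan *chan,
						      dma_addr_t dest,
						      dma_addr_t src,
						      size_t len)
	{
		/* tell the driver the client will unmap both buffers itself */
		enum dma_ctrl_flags flags = DMA_CTRL_ACK |
					    DMA_COMPL_SKIP_SRC_UNMAP |
					    DMA_COMPL_SKIP_DEST_UNMAP;
		struct dma_async_tx_descriptor *tx;

		tx = chan->device->device_prep_dma_memcpy(chan, dest, src,
							  len, flags);
		if (!tx)
			return -ENOMEM;

		/* on completion the driver now skips both unmap calls */
		return tx->tx_submit(tx);
	}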
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 90e5b0a28cbf..171cad69f318 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -757,6 +757,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 		chan->reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
+static void
+ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+{
+	/*
+	 * yes we are unmapping both _page and _single
+	 * alloc'd regions with unmap_page. Is this
+	 * *really* that bad?
+	 */
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
+		pci_unmap_page(ioat_chan->device->pdev,
+				pci_unmap_addr(desc, dst),
+				pci_unmap_len(desc, len),
+				PCI_DMA_FROMDEVICE);
+
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
+		pci_unmap_page(ioat_chan->device->pdev,
+				pci_unmap_addr(desc, src),
+				pci_unmap_len(desc, len),
+				PCI_DMA_TODEVICE);
+}
+
 /**
  * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
  * @chan: ioat channel to be cleaned up
@@ -817,21 +838,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 		 */
 		if (desc->async_tx.cookie) {
 			cookie = desc->async_tx.cookie;
-
-			/*
-			 * yes we are unmapping both _page and _single
-			 * alloc'd regions with unmap_page. Is this
-			 * *really* that bad?
-			 */
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, dst),
-					pci_unmap_len(desc, len),
-					PCI_DMA_FROMDEVICE);
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, src),
-					pci_unmap_len(desc, len),
-					PCI_DMA_TODEVICE);
-
+			ioat_dma_unmap(ioat_chan, desc);
 			if (desc->async_tx.callback) {
 				desc->async_tx.callback(desc->async_tx.callback_param);
 				desc->async_tx.callback = NULL;
@@ -890,16 +897,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 		if (desc->async_tx.cookie) {
 			cookie = desc->async_tx.cookie;
 			desc->async_tx.cookie = 0;
-
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, dst),
-					pci_unmap_len(desc, len),
-					PCI_DMA_FROMDEVICE);
-			pci_unmap_page(ioat_chan->device->pdev,
-					pci_unmap_addr(desc, src),
-					pci_unmap_len(desc, len),
-					PCI_DMA_TODEVICE);
-
+			ioat_dma_unmap(ioat_chan, desc);
 			if (desc->async_tx.callback) {
 				desc->async_tx.callback(desc->async_tx.callback_param);
 				desc->async_tx.callback = NULL;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index b57564dd0232..434013d41288 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 			struct device *dev =
 				&iop_chan->device->pdev->dev;
 			u32 len = unmap->unmap_len;
-			u32 src_cnt = unmap->unmap_src_cnt;
-			dma_addr_t addr = iop_desc_get_dest_addr(unmap,
-				iop_chan);
+			enum dma_ctrl_flags flags = desc->async_tx.flags;
+			u32 src_cnt;
+			dma_addr_t addr;
 
-			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
-			while (src_cnt--) {
-				addr = iop_desc_get_src_addr(unmap,
-							     iop_chan,
-							     src_cnt);
-				dma_unmap_page(dev, addr, len,
-					       DMA_TO_DEVICE);
+			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+				addr = iop_desc_get_dest_addr(unmap, iop_chan);
+				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+			}
+
+			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+				src_cnt = unmap->unmap_src_cnt;
+				while (src_cnt--) {
+					addr = iop_desc_get_src_addr(unmap,
+								     iop_chan,
+								     src_cnt);
+					dma_unmap_page(dev, addr, len,
+						       DMA_TO_DEVICE);
+				}
 			}
 			desc->group_head = NULL;
 		}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 8239cfdbc2e6..a4e4494663bf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -308,13 +308,23 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 			struct device *dev =
 				&mv_chan->device->pdev->dev;
 			u32 len = unmap->unmap_len;
-			u32 src_cnt = unmap->unmap_src_cnt;
-			dma_addr_t addr = mv_desc_get_dest_addr(unmap);
+			enum dma_ctrl_flags flags = desc->async_tx.flags;
+			u32 src_cnt;
+			dma_addr_t addr;
 
-			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
-			while (src_cnt--) {
-				addr = mv_desc_get_src_addr(unmap, src_cnt);
-				dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
+			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+				addr = mv_desc_get_dest_addr(unmap);
+				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+			}
+
+			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+				src_cnt = unmap->unmap_src_cnt;
+				while (src_cnt--) {
+					addr = mv_desc_get_src_addr(unmap,
+								    src_cnt);
+					dma_unmap_page(dev, addr, len,
+						       DMA_TO_DEVICE);
+				}
 			}
 			desc->group_head = NULL;
 		}