Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig           |   7
-rw-r--r--  drivers/dma/amba-pl08x.c      |   2
-rw-r--r--  drivers/dma/at_hdmac_regs.h   |   4
-rw-r--r--  drivers/dma/dmaengine.c       |   4
-rw-r--r--  drivers/dma/dmatest.c         |   8
-rw-r--r--  drivers/dma/fsldma.c          |  31
-rw-r--r--  drivers/dma/mmp_pdma.c        |   1
-rw-r--r--  drivers/dma/mv_xor.c          | 101
-rw-r--r--  drivers/dma/pl330.c           |   5
-rw-r--r--  drivers/dma/ppc4xx/adma.c     |  27
-rw-r--r--  drivers/dma/s3c24xx-dma.c     |  33
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c  |  11
-rw-r--r--  drivers/dma/txx9dmac.c        |   1
13 files changed, 93 insertions, 142 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687cc2334..c823daaf9043 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.
 
+config DMA_ENGINE_RAID
+	bool
+
 endif
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 16a2aa28f856..ec4ee5c1fe9d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1169,7 +1169,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
-	dma_descriptor_unmap(txd);
+	dma_descriptor_unmap(&vd->tx);
 	if (!txd->done)
 		pl08x_release_mux(plchan);
 
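
The fix above passes the generic descriptor instead of the driver-private txd: dma_descriptor_unmap() takes a struct dma_async_tx_descriptor *, and &vd->tx is exactly that embedded descriptor. For context, the helper is roughly the following static inline from <linux/dmaengine.h> in kernels of this era (a sketch, not quoted from this patch; check the header in your tree):

	static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
	{
		if (tx->unmap) {
			dmaengine_unmap_put(tx->unmap);	/* unmap + drop pool ref */
			tx->unmap = NULL;
		}
	}
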
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}
 
 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bdc12ef..ef63b9058f3c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;
 
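
Two independent tweaks here: the larger unmap pools (16/128/256 entries) are now keyed to the new DMA_ENGINE_RAID symbol rather than CONFIG_ASYNC_TX_DMA, and the page-to-page memcpy helper allocates its unmap data with GFP_NOWAIT, since it can be called from contexts that must not sleep and should fail fast rather than block in reclaim. A minimal caller sketch (the function name is illustrative, not from this patch):

	/* Illustrative: atomic-safe allocation of unmap bookkeeping. */
	static int example_prep_copy(struct dma_device *dev)
	{
		struct dmaengine_unmap_data *unmap;

		unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
		if (!unmap)
			return -ENOMEM;	/* caller falls back to a CPU copy */

		/* ... fill unmap->addr[], to_cnt, from_cnt, len; run transfer ... */

		dmaengine_unmap_put(unmap);
		return 0;
	}
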
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3aaf926..9dfcaf5c1288 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
 
 		um->len = params->buf_size;
 		for (i = 0; i < src_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->srcs[i];
+			void *buf = thread->srcs[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 		dsts = &um->addr[src_cnt];
 		for (i = 0; i < dst_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->dsts[i];
+			void *buf = thread->dsts[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
 
 			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 					       DMA_BIDIRECTIONAL);
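
The dmatest change keeps the buffer pointer as a void * and casts to an integer only for the offset arithmetic; both forms compute the same mapping. The idiom, assuming the buffer came from kmalloc() and so sits in the kernel linear mapping (variable names as in the loop above):

	/* Sketch: split a lowmem buffer into the page + offset pair that
	 * dma_map_page() expects. */
	void *buf = thread->srcs[i];
	struct page *pg = virt_to_page(buf);
	unsigned int pg_off = (unsigned long)buf & ~PAGE_MASK;

	um->addr[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_TO_DEVICE);
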
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16a55f2..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 				      struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);
 
 	/* Run the link descriptor callback function */
 	if (txd->callback) {
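
With unmapping handled centrally by dma_descriptor_unmap(), the get_desc_src/get_desc_dst/get_desc_cnt readbacks lose their last user in fsldma_cleanup_descriptor() and are dropped. The cookie initialization is defensive: dmaengine treats negative cookies as errors, so a future early-return path that skips dma_cookie_assign() would report failure instead of returning uninitialized stack contents. Sketch of how a submitter observes that (error handling illustrative):

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))	/* true for any negative cookie */
		return -EIO;
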
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index dcb1e05149a7..8869500ab92b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1017,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op)
 		}
 	}
 
+	platform_set_drvdata(op, pdev);
 	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
 	return 0;
 }
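
Without platform_set_drvdata() in probe, any later platform_get_drvdata() returns NULL and the first dereference oopses. The consumer is presumably the driver's remove path, along these lines (a sketch of the assumed counterpart, not part of this hunk):

	static int mmp_pdma_remove(struct platform_device *op)
	{
		struct mmp_pdma_device *pdev = platform_get_drvdata(op);

		/* would dereference NULL before this fix */
		dma_async_device_unregister(&pdev->device);
		return 0;
	}
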
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0ef4e20..53fb0c8365b0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 	hw_desc->desc_command = (1 << 31);
 }
 
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;
 
-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
 
-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}
 
 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;
 
 	dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+			       PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;
 
-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);
 
 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
 	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}
 
-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+					      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);
 
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		int i = 0;
 
 		for_each_child_of_node(pdev->dev.of_node, np) {
+			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;
 
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
-				xordev->channels[i] = NULL;
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
 				goto err_channel_add;
 			}
 
+			xordev->channels[i] = chan;
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
 			struct mv_xor_channel_data *cd;
+			struct mv_xor_chan *chan;
 			int irq;
 
 			cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cd->cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cd->cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				goto err_channel_add;
 			}
+
+			xordev->channels[i] = chan;
 		}
 	}
 
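
Both mv_xor self-tests now funnel their DMA mappings through the dmaengine unmap API instead of loose dma_map_single()/dma_unmap_*() pairs: sources are recorded first in unmap->addr[] (counted by to_cnt), destinations follow (from_cnt), all sharing a single unmap->len, and one dmaengine_unmap_put() on the cleanup path unmaps everything and releases the structure. Condensed to one source and one destination (names as in the hunks above, a sketch rather than a verbatim excerpt):

	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap)
		return -ENOMEM;

	unmap->addr[0] = dma_map_page(dma_chan->device->dev, virt_to_page(src),
				      0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				      0, PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	/* ... submit the transfer and wait for completion ... */

	dmaengine_unmap_put(unmap);	/* unmaps addr[] and frees the struct */
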
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483b8f2d..536632f6479c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-	desc->pchan = NULL;
 	desc->req.x = &desc->px;
 	desc->req.token = desc;
 	desc->rqcfg.swap = SWAP_NO;
-	desc->rqcfg.privileged = 0;
-	desc->rqcfg.insnaccess = 0;
 	desc->rqcfg.scctl = SCCTRL0;
 	desc->rqcfg.dcctl = DCCTRL0;
 	desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 	if (!pdmac)
 		return 0;
 
-	desc = kmalloc(count * sizeof(*desc), flg);
+	desc = kcalloc(count, sizeof(*desc), flg);
 	if (!desc)
 		return 0;
 
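
The two pl330 hunks are one logical change: kcalloc() returns zeroed memory (and checks count * size for overflow), so the explicit zero/NULL assignments in _init_desc() become redundant and are dropped. Sketch of the equivalence with a hypothetical struct:

	struct example_desc {
		void *pchan;	/* wanted NULL */
		int privileged;	/* wanted 0 */
	};

	/* kcalloc() == overflow-checked allocation + memset(0), so both
	 * fields already hold their desired initial values. */
	struct example_desc *desc = kcalloc(count, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return 0;	/* mirrors add_desc(): "0 descriptors added" */
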
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6b2a38..8bba298535b0 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
 }
 
 /**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
-					int value, unsigned long flags)
-{
-	struct dma_cdb *hw_desc = desc->hw_desc;
-
-	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
-	desc->hw_next = NULL;
-	desc->src_cnt = 1;
-	desc->dst_cnt = 1;
-
-	if (flags & DMA_PREP_INTERRUPT)
-		set_bit(PPC440SPE_DESC_INT, &desc->flags);
-	else
-		clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
-	hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
-	hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
-	hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
  * ppc440spe_desc_set_src_addr - set source address into the descriptor
  */
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 		struct ppc440spe_adma_chan *chan,
 		dma_cookie_t cookie)
 {
-	int i;
-
 	BUG_ON(desc->async_tx.cookie < 0);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 			ppc440spe_adma_prep_dma_interrupt;
 	}
 	pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
-		"( %s%s%s%s%s%s%s)\n",
+		"( %s%s%s%s%s%s)\n",
 		dev_name(adev->dev),
 		dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
 		dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4cb127978636..4eddedb6eb7d 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -628,42 +628,13 @@ retry:
 	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
 }
 
-static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
-{
-	struct device *dev = txd->vd.tx.chan->device->dev;
-	struct s3c24xx_sg *dsg;
-
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		else {
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		}
-	}
-
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-		else
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-	}
-}
-
 static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
 {
 	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
 
 	if (!s3cchan->slave)
-		s3c24xx_dma_unmap_buffers(txd);
+		dma_descriptor_unmap(&vd->tx);
 
 	s3c24xx_dma_free_txd(txd);
 }
@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS) {
+	if (ret == DMA_COMPLETE) {
 		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
 		return ret;
 	}
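
Two separate cleanups land here: the hand-rolled unmap walker goes away in favor of the core dma_descriptor_unmap() (the DMA_COMPL_*_UNMAP flags it keyed on are being retired by this series), and the status value DMA_SUCCESS is renamed DMA_COMPLETE. The resulting tx_status shape is the common dmaengine idiom (a sketch, not this driver's exact body):

	static enum dma_status example_tx_status(struct dma_chan *chan,
						 dma_cookie_t cookie,
						 struct dma_tx_state *txstate)
	{
		enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

		if (ret == DMA_COMPLETE)	/* formerly DMA_SUCCESS */
			return ret;

		/* ... otherwise compute and report the residue ... */
		return ret;
	}
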
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index ebad84591a6e..3083d901a414 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -60,6 +60,7 @@
 #define HPB_DMAE_DSTPR_DMSTP	BIT(0)
 
 /* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DQSTS	BIT(2)
 #define HPB_DMAE_DSTSR_DMSTS	BIT(0)
 
 /* DMA common registers */
@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)
 
 	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
 	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+
+	chan->plane_idx = 0;
+	chan->first_desc = true;
 }
 
 static const struct hpb_dmae_slave_config *
@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
 	struct hpb_dmae_chan *chan = to_chan(schan);
 	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
 
-	return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+	if (chan->xfer_mode == XFER_DOUBLE)
+		return dstsr & HPB_DMAE_DSTSR_DQSTS;
+	else
+		return dstsr & HPB_DMAE_DSTSR_DMSTS;
 }
 
 static int
@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
 	}
 
 	schan = &new_hpb_chan->shdma_chan;
+	schan->max_xfer_len = HPB_DMA_TCR_MAX;
+
 	shdma_chan_probe(sdev, schan, id);
 
 	if (pdev->id >= 0)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29f5502..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	dma_async_tx_callback callback;
 	void *param;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
-	struct txx9dmac_slave *ds = dc->chan.private;
 
 	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
 		 txd->cookie, desc);