diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 12:24:48 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 12:24:48 -0500 |
commit | 5115f3c19d17851aaff5a857f55b4a019c908775 (patch) | |
tree | 0d02cf01e12e86365f4f5e3b234f986daef181a7 /crypto | |
parent | c41b3810c09e60664433548c5218cc6ece6a8903 (diff) | |
parent | 17166a3b6e88b93189e6be5f7e1335a3cc4fa965 (diff) |
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"This is fairly big pull by my standards as I had missed last merge
window. So we have the support for device tree for slave-dmaengine,
large updates to dw_dmac driver from Andy for reusing on different
architectures. Along with this we have fixes on a bunch of the drivers"
Fix up trivial conflicts, usually due to #include line movement next to
each other.
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
ARM: dts: pl330: Add #dma-cells for generic dma binding support
DMA: PL330: Register the DMA controller with the generic DMA helpers
DMA: PL330: Add xlate function
DMA: PL330: Add new pl330 filter for DT case.
dma: tegra20-apb-dma: remove unnecessary assignment
edma: do not waste memory for dma_mask
dma: coh901318: set residue only if dma is in progress
dma: coh901318: avoid unbalanced locking
dmaengine.h: remove redundant else keyword
dma: of-dma: protect list write operation by spin_lock
dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
dma: of-dma.c: fix memory leakage
dw_dmac: apply default dma_mask if needed
dmaengine: ioat - fix spare sparse complain
dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
dw_dmac: add support for Lynxpoint DMA controllers
dw_dmac: return proper residue value
dw_dmac: fill individual length of descriptor
...
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/async_tx/async_memcpy.c | 6 | ||||
-rw-r--r-- | crypto/async_tx/async_memset.c | 1 | ||||
-rw-r--r-- | crypto/async_tx/async_tx.c | 9 | ||||
-rw-r--r-- | crypto/async_tx/async_xor.c | 4 |
4 files changed, 13 insertions, 7 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 361b5e8239bc..9e62feffb374 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c | |||
@@ -67,6 +67,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
67 | 67 | ||
68 | tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, | 68 | tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, |
69 | len, dma_prep_flags); | 69 | len, dma_prep_flags); |
70 | if (!tx) { | ||
71 | dma_unmap_page(device->dev, dma_dest, len, | ||
72 | DMA_FROM_DEVICE); | ||
73 | dma_unmap_page(device->dev, dma_src, len, | ||
74 | DMA_TO_DEVICE); | ||
75 | } | ||
70 | } | 76 | } |
71 | 77 | ||
72 | if (tx) { | 78 | if (tx) { |
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index 58e4a8752aee..05a4d1e00148 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c | |||
@@ -25,6 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/module.h> | ||
28 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
29 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
30 | #include <linux/async_tx.h> | 31 | #include <linux/async_tx.h> |
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 842120979374..7be34248b450 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c | |||
@@ -128,8 +128,8 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
128 | } | 128 | } |
129 | device->device_issue_pending(chan); | 129 | device->device_issue_pending(chan); |
130 | } else { | 130 | } else { |
131 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | 131 | if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) |
132 | panic("%s: DMA_ERROR waiting for depend_tx\n", | 132 | panic("%s: DMA error waiting for depend_tx\n", |
133 | __func__); | 133 | __func__); |
134 | tx->tx_submit(tx); | 134 | tx->tx_submit(tx); |
135 | } | 135 | } |
@@ -280,8 +280,9 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | |||
280 | * we are referring to the correct operation | 280 | * we are referring to the correct operation |
281 | */ | 281 | */ |
282 | BUG_ON(async_tx_test_ack(*tx)); | 282 | BUG_ON(async_tx_test_ack(*tx)); |
283 | if (dma_wait_for_async_tx(*tx) == DMA_ERROR) | 283 | if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) |
284 | panic("DMA_ERROR waiting for transaction\n"); | 284 | panic("%s: DMA error waiting for transaction\n", |
285 | __func__); | ||
285 | async_tx_ack(*tx); | 286 | async_tx_ack(*tx); |
286 | *tx = NULL; | 287 | *tx = NULL; |
287 | } | 288 | } |
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 154cc84381c2..8ade0a0481c6 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c | |||
@@ -230,9 +230,7 @@ EXPORT_SYMBOL_GPL(async_xor); | |||
230 | 230 | ||
231 | static int page_is_zero(struct page *p, unsigned int offset, size_t len) | 231 | static int page_is_zero(struct page *p, unsigned int offset, size_t len) |
232 | { | 232 | { |
233 | char *a = page_address(p) + offset; | 233 | return !memchr_inv(page_address(p) + offset, 0, len); |
234 | return ((*(u32 *) a) == 0 && | ||
235 | memcmp(a, a + 4, len - 4) == 0); | ||
236 | } | 234 | } |
237 | 235 | ||
238 | static inline struct dma_chan * | 236 | static inline struct dma_chan * |