 crypto/async_tx/async_memcpy.c         |  38
 crypto/async_tx/async_memset.c         |  28
 crypto/async_tx/async_tx.c             |   9
 crypto/async_tx/async_xor.c            | 124
 drivers/dma/Kconfig                    |   1
 drivers/dma/dmaengine.c                |  49
 drivers/dma/ioat_dma.c                 |  43
 drivers/dma/iop-adma.c                 | 138
 include/asm-arm/arch-iop13xx/adma.h    |  18
 include/asm-arm/hardware/iop3xx-adma.h |  30
 include/linux/async_tx.h               |  13
 include/linux/dmaengine.h              |  29
 12 files changed, 259 insertions(+), 261 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 047e533fcc5b..0f6282207b32 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -35,7 +35,7 @@
  * @src: src page
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
  * @depend_tx: memcpy depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -46,33 +46,29 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memcpy(chan, len,
-			int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memcpy asynchronously */
-		dma_addr_t addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t dma_dest, dma_src;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
-
-		addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
-		tx->tx_set_dest(addr, tx, 0);
+		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+					DMA_FROM_DEVICE);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
+		dma_src = dma_map_page(device->dev, src, src_offset, len,
+				       DMA_TO_DEVICE);
 
-		addr = dma_map_page(device->dev, src, src_offset, len, dir);
-		tx->tx_set_src(addr, tx, 0);
+		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+						    len, dma_prep_flags);
+	}
 
+	if (tx) {
+		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
-	} else { /* run the memcpy synchronously */
+	} else {
 		void *dest_buf, *src_buf;
 		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
 
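[Editor's note] For orientation, a minimal sketch (not part of the patch) of the calling convention this change moves to: the caller performs the dma_map_page() calls itself and hands bus addresses straight to the driver's prep routine, with the old int_en parameter replaced by DMA_PREP_INTERRUPT in a flags word. The helper name sketch_prep_copy is hypothetical.

/* Hypothetical helper illustrating the reworked prep interface:
 * map first, then pass dma addresses to the prep routine.  The
 * prep call may return NULL if the channel is out of descriptors. */
static struct dma_async_tx_descriptor *
sketch_prep_copy(struct dma_chan *chan, struct page *dest, struct page *src,
		 unsigned int offset, size_t len, dma_async_tx_callback cb_fn)
{
	struct dma_device *device = chan->device;
	dma_addr_t dma_dest, dma_src;
	unsigned long flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

	dma_dest = dma_map_page(device->dev, dest, offset, len,
				DMA_FROM_DEVICE);
	dma_src = dma_map_page(device->dev, src, offset, len, DMA_TO_DEVICE);

	return device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					      flags);
}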
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 66ef6351202e..09c0e83664bc 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,7 +35,7 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: memset depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -46,24 +46,24 @@ async_memset(struct page *dest, int val, unsigned int offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+						      &dest, 1, NULL, 0, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memset(chan, val, len,
-			int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memset asynchronously */
-		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t dma_dest;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
+		dma_dest = dma_map_page(device->dev, dest, offset, len,
+					DMA_FROM_DEVICE);
 
-		dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
-		tx->tx_set_dest(dma_addr, tx, 0);
+		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
+						    dma_prep_flags);
+	}
 
+	if (tx) {
+		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index bc18cbb8ea79..562882189de5 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -57,8 +57,7 @@ static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
  */
 static spinlock_t async_tx_lock;
 
-static struct list_head
-async_tx_master_list = LIST_HEAD_INIT(async_tx_master_list);
+static LIST_HEAD(async_tx_master_list);
 
 /* async_tx_issue_pending_all - start all transactions on all channels */
 void async_tx_issue_pending_all(void)
@@ -362,13 +361,13 @@ static void __exit async_tx_exit(void)
 }
 
 /**
- * async_tx_find_channel - find a channel to carry out the operation or let
+ * __async_tx_find_channel - find a channel to carry out the operation or let
  *	the transaction execute synchronously
  * @depend_tx: transaction dependency
  * @tx_type: transaction type
  */
 struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type)
 {
 	/* see if we can keep the chain on one channel */
@@ -384,7 +383,7 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	} else
 		return NULL;
 }
-EXPORT_SYMBOL_GPL(async_tx_find_channel);
+EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
 static int __init async_tx_init(void)
 {
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 2575f674dcd5..2259a4ff15cb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,35 +30,51 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-static void
-do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
+/* do_async_xor - dma map the pages and perform the xor with an engine.
+ * This routine is marked __always_inline so it can be compiled away
+ * when CONFIG_DMA_ENGINE=n
+ */
+static __always_inline struct dma_async_tx_descriptor *
+do_async_xor(struct dma_device *device,
 	struct dma_chan *chan, struct page *dest, struct page **src_list,
 	unsigned int offset, unsigned int src_cnt, size_t len,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	dma_addr_t dma_addr;
-	enum dma_data_direction dir;
+	dma_addr_t dma_dest;
+	dma_addr_t *dma_src = (dma_addr_t *) src_list;
+	struct dma_async_tx_descriptor *tx;
 	int i;
+	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
 	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_FROM_DEVICE;
-
-	dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
-	tx->tx_set_dest(dma_addr, tx, 0);
-
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_TO_DEVICE;
+	dma_dest = dma_map_page(device->dev, dest, offset, len,
+				DMA_FROM_DEVICE);
 
-	for (i = 0; i < src_cnt; i++) {
-		dma_addr = dma_map_page(device->dev, src_list[i],
-			offset, len, dir);
-		tx->tx_set_src(dma_addr, tx, i);
+	for (i = 0; i < src_cnt; i++)
+		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+					  len, DMA_TO_DEVICE);
+
+	/* Since we have clobbered the src_list we are committed
+	 * to doing this asynchronously.  Drivers force forward progress
+	 * in case they can not provide a descriptor
+	 */
+	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
+					 dma_prep_flags);
+	if (!tx) {
+		if (depend_tx)
+			dma_wait_for_async_tx(depend_tx);
+
+		while (!tx)
+			tx = device->device_prep_dma_xor(chan, dma_dest,
+							 dma_src, src_cnt, len,
+							 dma_prep_flags);
 	}
 
 	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+
+	return tx;
 }
 
 static void
@@ -102,7 +118,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- *	ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
@@ -113,14 +129,16 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+						      &dest, 1, src_list,
+						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_async_tx_callback _cb_fn;
 	void *_cb_param;
 	unsigned long local_flags;
 	int xor_src_cnt;
-	int i = 0, src_off = 0, int_en;
+	int i = 0, src_off = 0;
 
 	BUG_ON(src_cnt <= 1);
 
@@ -140,20 +158,11 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 			_cb_param = cb_param;
 		}
 
-		int_en = _cb_fn ? 1 : 0;
-
-		tx = device->device_prep_dma_xor(
-			chan, xor_src_cnt, len, int_en);
-
-		if (tx) {
-			do_async_xor(tx, device, chan, dest,
-				&src_list[src_off], offset, xor_src_cnt, len,
-				local_flags, depend_tx, _cb_fn,
-				_cb_param);
-		} else /* fall through */
-			goto xor_sync;
+		tx = do_async_xor(device, chan, dest,
+				  &src_list[src_off], offset,
+				  xor_src_cnt, len, local_flags,
+				  depend_tx, _cb_fn, _cb_param);
 	} else { /* run the xor synchronously */
-xor_sync:
 		/* in the sync case the dest is an implied source
 		 * (assumes the dest is at the src_off index)
 		 */
@@ -242,7 +251,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
@@ -254,29 +263,36 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+						      &dest, 1, src_list,
+						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
-			int_en) : NULL;
-	int i;
+	struct dma_async_tx_descriptor *tx = NULL;
 
 	BUG_ON(src_cnt <= 1);
 
-	if (tx) {
-		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t *dma_src = (dma_addr_t *) src_list;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
-		for (i = 0; i < src_cnt; i++) {
-			dma_addr = dma_map_page(device->dev, src_list[i],
-				offset, len, dir);
-			tx->tx_set_src(dma_addr, tx, i);
+		for (i = 0; i < src_cnt; i++)
+			dma_src[i] = dma_map_page(device->dev, src_list[i],
+						  offset, len, DMA_TO_DEVICE);
+
+		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
+						      len, result,
+						      dma_prep_flags);
+		if (!tx) {
+			if (depend_tx)
+				dma_wait_for_async_tx(depend_tx);
+
+			while (!tx)
+				tx = device->device_prep_dma_zero_sum(chan,
+					dma_src, src_cnt, len, result,
+					dma_prep_flags);
 		}
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -311,6 +327,16 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum);
 
 static int __init async_xor_init(void)
 {
+	#ifdef CONFIG_DMA_ENGINE
+	/* To conserve stack space the input src_list (array of page pointers)
+	 * is reused to hold the array of dma addresses passed to the driver.
+	 * This conversion is only possible when dma_addr_t is less than the
+	 * the size of a pointer.  HIGHMEM64G is known to violate this
+	 * assumption.
+	 */
+	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
+	#endif
+
 	return 0;
 }
 
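[Editor's note] The src_list reuse in do_async_xor() above deserves a closer look: the caller's array of struct page pointers is overwritten in place with bus addresses, which is only safe while sizeof(dma_addr_t) <= sizeof(struct page *) — exactly what the new BUILD_BUG_ON() enforces. A condensed sketch of the idiom (hypothetical helper name, not part of the patch):

/* Sketch of the in-place conversion done by do_async_xor(): each
 * struct page pointer slot is read, mapped, and overwritten with the
 * resulting dma_addr_t.  Each slot is read before it is written, so
 * no source page is lost.  HIGHMEM64G (64-bit dma_addr_t with 32-bit
 * pointers) breaks the size assumption, hence the Kconfig change
 * below. */
static void sketch_map_srcs(struct dma_device *device,
			    struct page **src_list, unsigned int offset,
			    unsigned int src_cnt, size_t len)
{
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	int i;

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);
}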
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c46b7c219ee9..a703deffb795 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -5,6 +5,7 @@
 menuconfig DMADEVICES
 	bool "DMA Engine support"
 	depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+	depends on !HIGHMEM64G
 	help
 	  DMA engines can do asynchronous data transfers without
 	  involving the host CPU. Currently, this framework can be
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df30339..29965231b912 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c7b..dff38accc5c1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	return device->common.chancnt;
 }
 
-static void ioat_set_src(dma_addr_t addr,
-			 struct dma_async_tx_descriptor *tx,
-			 int index)
-{
-	tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
-			  struct dma_async_tx_descriptor *tx,
-			  int index)
-{
-	tx_to_ioat_desc(tx)->dst = addr;
-}
-
 /**
  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  *                                 descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 
 	memset(desc, 0, sizeof(*desc));
 	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-	desc_sw->async_tx.tx_set_src = ioat_set_src;
-	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,8 +698,10 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 
 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -733,8 +721,10 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	u8 *dest;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int err = 0;
 
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 		goto out;
 	}
 
-	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+				 DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+				  DMA_FROM_DEVICE);
+	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+						   IOAT_TEST_SIZE, 0);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 
 	async_tx_ack(tx);
-	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-			      DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-			      DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	tx->callback = ioat_dma_test_callback;
 	tx->callback_param = (void *)0x8086;
 	cookie = tx->tx_submit(tx);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e5c62b75f36f..3986d54492bd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -284,7 +284,7 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
 	int slots_per_op)
 {
 	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
-	struct list_head chain = LIST_HEAD_INIT(chain);
+	LIST_HEAD(chain);
 	int slots_found, retry = 0;
 
 	/* start search from the last allocated descrtiptor
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	return cookie;
 }
 
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
-	/* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
-	iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 		slot->async_tx.tx_submit = iop_adma_tx_submit;
-		slot->async_tx.tx_set_dest = iop_adma_set_dest;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
 		INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+			 dma_addr_t dma_src, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -576,11 +555,12 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memcpy(grp_start, int_en);
+		iop_desc_init_memcpy(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
-	int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+			 int value, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -607,9 +587,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, int_en);
+		iop_desc_init_memset(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_block_fill_val(grp_start, value);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
 	}
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
-	int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+		      unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -641,39 +613,32 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
 	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev,
-		"%s src_cnt: %d len: %u int_en: %d\n",
-		__FUNCTION__, src_cnt, len, int_en);
+		"%s src_cnt: %d len: %u flags: %lx\n",
+		__FUNCTION__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_xor(grp_start, src_cnt, int_en);
+		iop_desc_init_xor(grp_start, src_cnt, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+		while (src_cnt--)
+			iop_desc_set_xor_src_addr(grp_start, src_cnt,
+						  dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
-				struct dma_async_tx_descriptor *tx,
-				int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
-	size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+			   unsigned int src_cnt, size_t len, u32 *result,
+			   unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -690,14 +655,16 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 			__FUNCTION__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+		while (src_cnt--)
+			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+						       dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 		goto out;
 	}
 
-	tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
 	dest_dma = dma_map_single(dma_chan->device->dev, dest,
 				  IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
 	src_dma = dma_map_single(dma_chan->device->dev, src,
 				 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
-	iop_adma_memcpy_set_src(src_dma, tx, 0);
+	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+				      IOP_ADMA_TEST_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	struct page *dest;
 	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
 	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 	dma_addr_t dma_addr, dest_dma;
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test xor */
-	tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
-				   PAGE_SIZE, 1);
 	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
-
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
-			PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	zero_sum_result = 1;
 
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-					PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-			0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test memset */
-	tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
 	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dma_addr, tx, 0);
+	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	/* test for non-zero parity sum */
 	zero_sum_result = 0;
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-					PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-			0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
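[Editor's note] On the driver side the same inversion appears as prep routines that receive the address array up front and program the descriptor immediately, replacing the per-address tx_set_src()/tx_set_dest() callbacks. A trimmed sketch of the shape iop_adma_prep_dma_xor() now takes (hypothetical helper name; slot allocation, locking and debug output elided):

/* Trimmed shape of a converted prep routine: all addresses arrive as
 * arguments and are written into the hardware descriptor on the spot. */
static struct dma_async_tx_descriptor *
sketch_prep_xor(struct iop_adma_chan *iop_chan,
		struct iop_adma_desc_slot *sw_desc, dma_addr_t dma_dest,
		dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		unsigned long flags)
{
	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;

	iop_desc_init_xor(grp_start, src_cnt, flags);
	iop_desc_set_byte_count(grp_start, iop_chan, len);
	iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
	while (src_cnt--)
		iop_desc_set_xor_src_addr(grp_start, src_cnt,
					  dma_src[src_cnt]);

	return &sw_desc->async_tx;
}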
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index 04006c1c5fd7..efd9a5eb1008 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -247,7 +247,7 @@ static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc,
 }
 
 static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -257,13 +257,13 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
 
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 }
 
 static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -274,14 +274,15 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
 	u_desc_ctrl.field.block_fill_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 }
 
 /* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
 static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		  unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -292,7 +293,7 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.src_select = src_cnt - 1;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 
@@ -301,7 +302,8 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 
 /* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
 static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -314,7 +316,7 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
 	u_desc_ctrl.field.zero_result = 1;
 	u_desc_ctrl.field.status_write_back_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 10834b54f681..5c529e6a5e3b 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -414,7 +414,7 @@ static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
 }
 
 static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
 	union {
@@ -425,14 +425,14 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.mem_to_mem_en = 1;
 	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->upper_pci_src_addr = 0;
 	hw_desc->crc_addr = 0;
 }
 
 static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
 	union {
@@ -443,12 +443,13 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
 	u_desc_ctrl.field.dest_write_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 }
 
 static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
+		     unsigned long flags)
 {
 	int i, shift;
 	u32 edcr;
@@ -509,21 +510,23 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
 
 	u_desc_ctrl.field.dest_write_en = 1;
 	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 
 	return u_desc_ctrl.value;
 }
 
 static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		  unsigned long flags)
 {
-	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
+	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
 }
 
 /* return the number of operations */
 static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
 	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
@@ -538,10 +541,10 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
 		i += slots_per_op, j++) {
 		iter = iop_hw_desc_slot_idx(hw_desc, i);
-		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
+		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
 		u_desc_ctrl.field.dest_write_en = 0;
| 543 | u_desc_ctrl.field.zero_result_en = 1; | 546 | u_desc_ctrl.field.zero_result_en = 1; |
| 544 | u_desc_ctrl.field.int_en = int_en; | 547 | u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; |
| 545 | iter->desc_ctrl = u_desc_ctrl.value; | 548 | iter->desc_ctrl = u_desc_ctrl.value; |
| 546 | 549 | ||
| 547 | /* for the subsequent descriptors preserve the store queue | 550 | /* for the subsequent descriptors preserve the store queue |
| @@ -559,7 +562,8 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) | |||
| 559 | } | 562 | } |
| 560 | 563 | ||
| 561 | static inline void | 564 | static inline void |
| 562 | iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) | 565 | iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, |
| 566 | unsigned long flags) | ||
| 563 | { | 567 | { |
| 564 | struct iop3xx_desc_aau *hw_desc = desc->hw_desc; | 568 | struct iop3xx_desc_aau *hw_desc = desc->hw_desc; |
| 565 | union { | 569 | union { |
| @@ -591,7 +595,7 @@ iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) | |||
| 591 | } | 595 | } |
| 592 | 596 | ||
| 593 | u_desc_ctrl.field.dest_write_en = 0; | 597 | u_desc_ctrl.field.dest_write_en = 0; |
| 594 | u_desc_ctrl.field.int_en = int_en; | 598 | u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; |
| 595 | hw_desc->desc_ctrl = u_desc_ctrl.value; | 599 | hw_desc->desc_ctrl = u_desc_ctrl.value; |
| 596 | } | 600 | } |
| 597 | 601 | ||
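The zero-sum loop above shows this file's idiom for editing a packed control word: start from the value returned by iop3xx_desc_init_xor, flip the fields that differ through a union of named bitfields, and store the result back as one u32. A hedged sketch of the same trick with an invented demo_ layout (the real AAU control word defines many more bits):

    #include <linux/types.h>
    #include <linux/dmaengine.h>

    union demo_desc_ctrl {
            struct {
                    unsigned int int_en:1;
                    unsigned int dest_write_en:1;
                    unsigned int zero_result_en:1;
                    unsigned int pad:29;
            } field;
            u32 value;
    };

    /* turn an xor control word into a zero-sum (check-only) one:
     * compare the sources instead of writing a destination */
    static inline u32 demo_zero_sum_ctrl(u32 xor_ctrl, unsigned long flags)
    {
            union demo_desc_ctrl u_desc_ctrl;

            u_desc_ctrl.value = xor_ctrl;
            u_desc_ctrl.field.dest_write_en = 0;
            u_desc_ctrl.field.zero_result_en = 1;
            u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
            return u_desc_ctrl.value;
    }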
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index bdca3f1b3213..eb640f0acfac 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h | |||
| @@ -47,7 +47,6 @@ struct dma_chan_ref { | |||
| 47 | * address is an implied source, whereas in the asynchronous case it must be listed | 47 | * address is an implied source, whereas in the asynchronous case it must be listed |
| 48 | * as a source. The destination address must be the first address in the source | 48 | * as a source. The destination address must be the first address in the source |
| 49 | * array. | 49 | * array. |
| 50 | * @ASYNC_TX_ASSUME_COHERENT: skip cache maintenance operations | ||
| 51 | * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a | 50 | * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a |
| 52 | * dependency chain | 51 | * dependency chain |
| 53 | * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining. | 52 | * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining. |
| @@ -55,7 +54,6 @@ struct dma_chan_ref { | |||
| 55 | enum async_tx_flags { | 54 | enum async_tx_flags { |
| 56 | ASYNC_TX_XOR_ZERO_DST = (1 << 0), | 55 | ASYNC_TX_XOR_ZERO_DST = (1 << 0), |
| 57 | ASYNC_TX_XOR_DROP_DST = (1 << 1), | 56 | ASYNC_TX_XOR_DROP_DST = (1 << 1), |
| 58 | ASYNC_TX_ASSUME_COHERENT = (1 << 2), | ||
| 59 | ASYNC_TX_ACK = (1 << 3), | 57 | ASYNC_TX_ACK = (1 << 3), |
| 60 | ASYNC_TX_DEP_ACK = (1 << 4), | 58 | ASYNC_TX_DEP_ACK = (1 << 4), |
| 61 | }; | 59 | }; |
| @@ -64,9 +62,15 @@ enum async_tx_flags { | |||
| 64 | void async_tx_issue_pending_all(void); | 62 | void async_tx_issue_pending_all(void); |
| 65 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 63 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
| 66 | void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx); | 64 | void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx); |
| 65 | #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL | ||
| 66 | #include <asm/async_tx.h> | ||
| 67 | #else | ||
| 68 | #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ | ||
| 69 | __async_tx_find_channel(dep, type) | ||
| 67 | struct dma_chan * | 70 | struct dma_chan * |
| 68 | async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 71 | __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, |
| 69 | enum dma_transaction_type tx_type); | 72 | enum dma_transaction_type tx_type); |
| 73 | #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ | ||
| 70 | #else | 74 | #else |
| 71 | static inline void async_tx_issue_pending_all(void) | 75 | static inline void async_tx_issue_pending_all(void) |
| 72 | { | 76 | { |
| @@ -88,7 +92,8 @@ async_tx_run_dependencies(struct dma_async_tx_descriptor *tx, | |||
| 88 | 92 | ||
| 89 | static inline struct dma_chan * | 93 | static inline struct dma_chan * |
| 90 | async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 94 | async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, |
| 91 | enum dma_transaction_type tx_type) | 95 | enum dma_transaction_type tx_type, struct page **dst, int dst_count, |
| 96 | struct page **src, int src_count, size_t len) | ||
| 92 | { | 97 | { |
| 93 | return NULL; | 98 | return NULL; |
| 94 | } | 99 | } |
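The ifdef above is the point of passing the page lists and length through async_tx_find_channel: an architecture that selects CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL can weigh the operation itself when picking a channel, while everyone else compiles down to the old two-argument lookup. A minimal sketch of what such an <asm/async_tx.h> might contain (the cutoff name and its value are assumptions for illustration):

    /* hypothetical arch override: small operations are cheaper on the
     * CPU, so return NULL (selecting the synchronous path) below a
     * cutoff and defer to the generic lookup otherwise */
    #define DEMO_ASYNC_TX_CUTOFF	256	/* assumed tuning value */

    static inline struct dma_chan *
    async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
                          enum dma_transaction_type tx_type,
                          struct page **dst, int dst_count,
                          struct page **src, int src_count, size_t len)
    {
            if (len < DEMO_ASYNC_TX_CUTOFF)
                    return NULL;
            return __async_tx_find_channel(depend_tx, tx_type);
    }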
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 5c84bf897593..acbb364674ff 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -95,6 +95,15 @@ enum dma_transaction_type { | |||
| 95 | #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) | 95 | #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) |
| 96 | 96 | ||
| 97 | /** | 97 | /** |
| 98 | * enum dma_prep_flags - DMA flags to augment operation preparation | ||
| 99 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of | ||
| 100 | * this transaction | ||
| 101 | */ | ||
| 102 | enum dma_prep_flags { | ||
| 103 | DMA_PREP_INTERRUPT = (1 << 0), | ||
| 104 | }; | ||
| 105 | |||
| 106 | /** | ||
| 98 | * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. | 107 | * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. |
| 99 | * See linux/cpumask.h | 108 | * See linux/cpumask.h |
| 100 | */ | 109 | */ |
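For a driver whose hardware signals completion through a per-descriptor control bit, honoring the new flag is a single test at prep time. A hedged sketch (the demo_ names and the bit position are assumptions, not from this patch):

    #define DEMO_HW_CTL_INT_EN	(1 << 3)	/* assumed hw bit */

    /* fold the dmaengine prep flags into a raw hardware control word;
     * unlike a one-bit bitfield store, the test-and-set form works no
     * matter which bit DMA_PREP_INTERRUPT occupies */
    static inline void demo_apply_prep_flags(u32 *hw_ctl,
                                             unsigned long flags)
    {
            if (flags & DMA_PREP_INTERRUPT)
                    *hw_ctl |= DEMO_HW_CTL_INT_EN;
    }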
| @@ -209,8 +218,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); | |||
| 209 | * descriptors | 218 | * descriptors |
| 210 | * @chan: target channel for this operation | 219 | * @chan: target channel for this operation |
| 211 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine | 220 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine |
| 212 | * @tx_set_dest: set a destination address in a hardware descriptor | ||
| 213 | * @tx_set_src: set a source address in a hardware descriptor | ||
| 214 | * @callback: routine to call after this operation is complete | 221 | * @callback: routine to call after this operation is complete |
| 215 | * @callback_param: general parameter to pass to the callback routine | 222 | * @callback_param: general parameter to pass to the callback routine |
| 216 | * ---async_tx api specific fields--- | 223 | * ---async_tx api specific fields--- |
| @@ -227,10 +234,6 @@ struct dma_async_tx_descriptor { | |||
| 227 | struct list_head tx_list; | 234 | struct list_head tx_list; |
| 228 | struct dma_chan *chan; | 235 | struct dma_chan *chan; |
| 229 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 236 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
| 230 | void (*tx_set_dest)(dma_addr_t addr, | ||
| 231 | struct dma_async_tx_descriptor *tx, int index); | ||
| 232 | void (*tx_set_src)(dma_addr_t addr, | ||
| 233 | struct dma_async_tx_descriptor *tx, int index); | ||
| 234 | dma_async_tx_callback callback; | 237 | dma_async_tx_callback callback; |
| 235 | void *callback_param; | 238 | void *callback_param; |
| 236 | struct list_head depend_list; | 239 | struct list_head depend_list; |
| @@ -279,15 +282,17 @@ struct dma_device { | |||
| 279 | void (*device_free_chan_resources)(struct dma_chan *chan); | 282 | void (*device_free_chan_resources)(struct dma_chan *chan); |
| 280 | 283 | ||
| 281 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 284 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
| 282 | struct dma_chan *chan, size_t len, int int_en); | 285 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
| 286 | size_t len, unsigned long flags); | ||
| 283 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( | 287 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( |
| 284 | struct dma_chan *chan, unsigned int src_cnt, size_t len, | 288 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
| 285 | int int_en); | 289 | unsigned int src_cnt, size_t len, unsigned long flags); |
| 286 | struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( | 290 | struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( |
| 287 | struct dma_chan *chan, unsigned int src_cnt, size_t len, | 291 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, |
| 288 | u32 *result, int int_en); | 292 | size_t len, u32 *result, unsigned long flags); |
| 289 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( | 293 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( |
| 290 | struct dma_chan *chan, int value, size_t len, int int_en); | 294 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, |
| 295 | unsigned long flags); | ||
| 291 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 296 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
| 292 | struct dma_chan *chan); | 297 | struct dma_chan *chan); |
| 293 | 298 | ||
