diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-08-01 15:47:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-08-01 15:47:04 -0400 |
commit | 3f6d9e0896b325c95e5155ee8e7bcb47443ad413 (patch) | |
tree | 1d81c52daca19cd4e1f08b37324eebf3a1e4e75d | |
parent | 3270c8eacc81f000e2f5f03e36f80d6617e2f5c4 (diff) | |
parent | 8c8fe97b2b8a216523e2faf1ccca66ddab634e3e (diff) |
Merge tag 'dmaengine-fix-4.2-rc5' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:
"We had a regression due to reuse of descriptor so we have reverted
that.
The rest are driver fixes:
  - at_hdmac and at_xdmac for residue, transfer width, and channel config
- pl330 final fix for dma fails and overflow issue
  - xgene resource map fix
- mv_xor big endian op fix"
* tag 'dmaengine-fix-4.2-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
Revert "dmaengine: virt-dma: don't always free descriptor upon completion"
dmaengine: mv_xor: fix big endian operation in register mode
dmaengine: xgene-dma: Fix the resource map to handle overlapping
dmaengine: at_xdmac: fix transfer data width in at_xdmac_prep_slave_sg()
dmaengine: at_hdmac: fix residue computation
dmaengine: at_xdmac: fix bug about channel configuration
dmaengine: pl330: Really fix choppy sound because of wrong residue calculation
dmaengine: pl330: Fix overflow when reporting residue in memcpy
-rw-r--r-- | Documentation/devicetree/bindings/dma/apm-xgene-dma.txt | 2 | ||||
-rw-r--r-- | arch/arm64/boot/dts/apm/apm-storm.dtsi | 2 | ||||
-rw-r--r-- | drivers/dma/at_hdmac.c | 132 | ||||
-rw-r--r-- | drivers/dma/at_hdmac_regs.h | 3 | ||||
-rw-r--r-- | drivers/dma/at_xdmac.c | 26 | ||||
-rw-r--r-- | drivers/dma/mv_xor.c | 9 | ||||
-rw-r--r-- | drivers/dma/pl330.c | 3 | ||||
-rw-r--r-- | drivers/dma/virt-dma.c | 19 | ||||
-rw-r--r-- | drivers/dma/virt-dma.h | 13 | ||||
-rw-r--r-- | drivers/dma/xgene-dma.c | 3 |
10 files changed, 121 insertions, 91 deletions
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt index d3058768b23d..c53e0b08032f 100644 --- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt +++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt | |||
@@ -35,7 +35,7 @@ Example: | |||
35 | device_type = "dma"; | 35 | device_type = "dma"; |
36 | reg = <0x0 0x1f270000 0x0 0x10000>, | 36 | reg = <0x0 0x1f270000 0x0 0x10000>, |
37 | <0x0 0x1f200000 0x0 0x10000>, | 37 | <0x0 0x1f200000 0x0 0x10000>, |
38 | <0x0 0x1b008000 0x0 0x2000>, | 38 | <0x0 0x1b000000 0x0 0x400000>, |
39 | <0x0 0x1054a000 0x0 0x100>; | 39 | <0x0 0x1054a000 0x0 0x100>; |
40 | interrupts = <0x0 0x82 0x4>, | 40 | interrupts = <0x0 0x82 0x4>, |
41 | <0x0 0xb8 0x4>, | 41 | <0x0 0xb8 0x4>, |
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 0689c3fb56e3..58093edeea2e 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi | |||
@@ -823,7 +823,7 @@ | |||
823 | device_type = "dma"; | 823 | device_type = "dma"; |
824 | reg = <0x0 0x1f270000 0x0 0x10000>, | 824 | reg = <0x0 0x1f270000 0x0 0x10000>, |
825 | <0x0 0x1f200000 0x0 0x10000>, | 825 | <0x0 0x1f200000 0x0 0x10000>, |
826 | <0x0 0x1b008000 0x0 0x2000>, | 826 | <0x0 0x1b000000 0x0 0x400000>, |
827 | <0x0 0x1054a000 0x0 0x100>; | 827 | <0x0 0x1054a000 0x0 0x100>; |
828 | interrupts = <0x0 0x82 0x4>, | 828 | interrupts = <0x0 0x82 0x4>, |
829 | <0x0 0xb8 0x4>, | 829 | <0x0 0xb8 0x4>, |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 59892126d175..d3629b7482dd 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -48,6 +48,8 @@ | |||
48 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ | 48 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ |
49 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | 49 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) |
50 | 50 | ||
51 | #define ATC_MAX_DSCR_TRIALS 10 | ||
52 | |||
51 | /* | 53 | /* |
52 | * Initial number of descriptors to allocate for each channel. This could | 54 | * Initial number of descriptors to allocate for each channel. This could |
53 | * be increased during dma usage. | 55 | * be increased during dma usage. |
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, | |||
285 | * | 287 | * |
286 | * @current_len: the number of bytes left before reading CTRLA | 288 | * @current_len: the number of bytes left before reading CTRLA |
287 | * @ctrla: the value of CTRLA | 289 | * @ctrla: the value of CTRLA |
288 | * @desc: the descriptor containing the transfer width | ||
289 | */ | 290 | */ |
290 | static inline int atc_calc_bytes_left(int current_len, u32 ctrla, | 291 | static inline int atc_calc_bytes_left(int current_len, u32 ctrla) |
291 | struct at_desc *desc) | ||
292 | { | 292 | { |
293 | return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); | 293 | u32 btsize = (ctrla & ATC_BTSIZE_MAX); |
294 | } | 294 | u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); |
295 | 295 | ||
296 | /** | 296 | /* |
297 | * atc_calc_bytes_left_from_reg - calculates the number of bytes left according | 297 | * According to the datasheet, when reading the Control A Register |
298 | * to the current value of CTRLA. | 298 | * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the |
299 | * | 299 | * number of transfers completed on the Source Interface. |
300 | * @current_len: the number of bytes left before reading CTRLA | 300 | * So btsize is always a number of source width transfers. |
301 | * @atchan: the channel to read CTRLA for | 301 | */ |
302 | * @desc: the descriptor containing the transfer width | 302 | return current_len - (btsize << src_width); |
303 | */ | ||
304 | static inline int atc_calc_bytes_left_from_reg(int current_len, | ||
305 | struct at_dma_chan *atchan, struct at_desc *desc) | ||
306 | { | ||
307 | u32 ctrla = channel_readl(atchan, CTRLA); | ||
308 | |||
309 | return atc_calc_bytes_left(current_len, ctrla, desc); | ||
310 | } | 303 | } |
311 | 304 | ||
312 | /** | 305 | /** |
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) | |||
320 | struct at_desc *desc_first = atc_first_active(atchan); | 313 | struct at_desc *desc_first = atc_first_active(atchan); |
321 | struct at_desc *desc; | 314 | struct at_desc *desc; |
322 | int ret; | 315 | int ret; |
323 | u32 ctrla, dscr; | 316 | u32 ctrla, dscr, trials; |
324 | 317 | ||
325 | /* | 318 | /* |
326 | * If the cookie doesn't match to the currently running transfer then | 319 | * If the cookie doesn't match to the currently running transfer then |
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) | |||
346 | * the channel's DSCR register and compare it against the value | 339 | * the channel's DSCR register and compare it against the value |
347 | * of the hardware linked list structure of each child | 340 | * of the hardware linked list structure of each child |
348 | * descriptor. | 341 | * descriptor. |
342 | * | ||
343 | * The CTRLA register provides us with the amount of data | ||
344 | * already read from the source for the current child | ||
345 | * descriptor. So we can compute a more accurate residue by also | ||
346 | * removing the number of bytes corresponding to this amount of | ||
347 | * data. | ||
348 | * | ||
349 | * However, the DSCR and CTRLA registers cannot be read both | ||
350 | * atomically. Hence a race condition may occur: the first read | ||
351 | * register may refer to one child descriptor whereas the second | ||
352 | * read may refer to a later child descriptor in the list | ||
353 | * because of the DMA transfer progression inbetween the two | ||
354 | * reads. | ||
355 | * | ||
356 | * One solution could have been to pause the DMA transfer, read | ||
357 | * the DSCR and CTRLA then resume the DMA transfer. Nonetheless, | ||
358 | * this approach presents some drawbacks: | ||
359 | * - If the DMA transfer is paused, RX overruns or TX underruns | ||
360 | * are more likey to occur depending on the system latency. | ||
361 | * Taking the USART driver as an example, it uses a cyclic DMA | ||
362 | * transfer to read data from the Receive Holding Register | ||
363 | * (RHR) to avoid RX overruns since the RHR is not protected | ||
364 | * by any FIFO on most Atmel SoCs. So pausing the DMA transfer | ||
365 | * to compute the residue would break the USART driver design. | ||
366 | * - The atc_pause() function masks interrupts but we'd rather | ||
367 | * avoid to do so for system latency purpose. | ||
368 | * | ||
369 | * Then we'd rather use another solution: the DSCR is read a | ||
370 | * first time, the CTRLA is read in turn, next the DSCR is read | ||
371 | * a second time. If the two consecutive read values of the DSCR | ||
372 | * are the same then we assume both refers to the very same | ||
373 | * child descriptor as well as the CTRLA value read inbetween | ||
374 | * does. For cyclic tranfers, the assumption is that a full loop | ||
375 | * is "not so fast". | ||
376 | * If the two DSCR values are different, we read again the CTRLA | ||
377 | * then the DSCR till two consecutive read values from DSCR are | ||
378 | * equal or till the maxium trials is reach. | ||
379 | * This algorithm is very unlikely not to find a stable value for | ||
380 | * DSCR. | ||
349 | */ | 381 | */ |
350 | 382 | ||
351 | ctrla = channel_readl(atchan, CTRLA); | ||
352 | rmb(); /* ensure CTRLA is read before DSCR */ | ||
353 | dscr = channel_readl(atchan, DSCR); | 383 | dscr = channel_readl(atchan, DSCR); |
384 | rmb(); /* ensure DSCR is read before CTRLA */ | ||
385 | ctrla = channel_readl(atchan, CTRLA); | ||
386 | for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { | ||
387 | u32 new_dscr; | ||
388 | |||
389 | rmb(); /* ensure DSCR is read after CTRLA */ | ||
390 | new_dscr = channel_readl(atchan, DSCR); | ||
391 | |||
392 | /* | ||
393 | * If the DSCR register value has not changed inside the | ||
394 | * DMA controller since the previous read, we assume | ||
395 | * that both the dscr and ctrla values refers to the | ||
396 | * very same descriptor. | ||
397 | */ | ||
398 | if (likely(new_dscr == dscr)) | ||
399 | break; | ||
400 | |||
401 | /* | ||
402 | * DSCR has changed inside the DMA controller, so the | ||
403 | * previouly read value of CTRLA may refer to an already | ||
404 | * processed descriptor hence could be outdated. | ||
405 | * We need to update ctrla to match the current | ||
406 | * descriptor. | ||
407 | */ | ||
408 | dscr = new_dscr; | ||
409 | rmb(); /* ensure DSCR is read before CTRLA */ | ||
410 | ctrla = channel_readl(atchan, CTRLA); | ||
411 | } | ||
412 | if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) | ||
413 | return -ETIMEDOUT; | ||
354 | 414 | ||
355 | /* for the first descriptor we can be more accurate */ | 415 | /* for the first descriptor we can be more accurate */ |
356 | if (desc_first->lli.dscr == dscr) | 416 | if (desc_first->lli.dscr == dscr) |
357 | return atc_calc_bytes_left(ret, ctrla, desc_first); | 417 | return atc_calc_bytes_left(ret, ctrla); |
358 | 418 | ||
359 | ret -= desc_first->len; | 419 | ret -= desc_first->len; |
360 | list_for_each_entry(desc, &desc_first->tx_list, desc_node) { | 420 | list_for_each_entry(desc, &desc_first->tx_list, desc_node) { |
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) | |||
365 | } | 425 | } |
366 | 426 | ||
367 | /* | 427 | /* |
368 | * For the last descriptor in the chain we can calculate | 428 | * For the current descriptor in the chain we can calculate |
369 | * the remaining bytes using the channel's register. | 429 | * the remaining bytes using the channel's register. |
370 | * Note that the transfer width of the first and last | ||
371 | * descriptor may differ. | ||
372 | */ | 430 | */ |
373 | if (!desc->lli.dscr) | 431 | ret = atc_calc_bytes_left(ret, ctrla); |
374 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); | ||
375 | } else { | 432 | } else { |
376 | /* single transfer */ | 433 | /* single transfer */ |
377 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); | 434 | ctrla = channel_readl(atchan, CTRLA); |
435 | ret = atc_calc_bytes_left(ret, ctrla); | ||
378 | } | 436 | } |
379 | 437 | ||
380 | return ret; | 438 | return ret; |
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan, | |||
726 | 784 | ||
727 | desc->txd.cookie = -EBUSY; | 785 | desc->txd.cookie = -EBUSY; |
728 | desc->total_len = desc->len = len; | 786 | desc->total_len = desc->len = len; |
729 | desc->tx_width = dwidth; | ||
730 | 787 | ||
731 | /* set end-of-link to the last link descriptor of list*/ | 788 | /* set end-of-link to the last link descriptor of list*/ |
732 | set_desc_eol(desc); | 789 | set_desc_eol(desc); |
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
804 | first->txd.cookie = -EBUSY; | 861 | first->txd.cookie = -EBUSY; |
805 | first->total_len = len; | 862 | first->total_len = len; |
806 | 863 | ||
807 | /* set transfer width for the calculation of the residue */ | ||
808 | first->tx_width = src_width; | ||
809 | prev->tx_width = src_width; | ||
810 | |||
811 | /* set end-of-link to the last link descriptor of list*/ | 864 | /* set end-of-link to the last link descriptor of list*/ |
812 | set_desc_eol(desc); | 865 | set_desc_eol(desc); |
813 | 866 | ||
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
956 | first->txd.cookie = -EBUSY; | 1009 | first->txd.cookie = -EBUSY; |
957 | first->total_len = total_len; | 1010 | first->total_len = total_len; |
958 | 1011 | ||
959 | /* set transfer width for the calculation of the residue */ | ||
960 | first->tx_width = reg_width; | ||
961 | prev->tx_width = reg_width; | ||
962 | |||
963 | /* first link descriptor of list is responsible of flags */ | 1012 | /* first link descriptor of list is responsible of flags */ |
964 | first->txd.flags = flags; /* client is in control of this ack */ | 1013 | first->txd.flags = flags; /* client is in control of this ack */ |
965 | 1014 | ||
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan, | |||
1077 | desc->txd.cookie = 0; | 1126 | desc->txd.cookie = 0; |
1078 | desc->len = len; | 1127 | desc->len = len; |
1079 | 1128 | ||
1080 | /* | ||
1081 | * Although we only need the transfer width for the first and | ||
1082 | * the last descriptor, its easier to set it to all descriptors. | ||
1083 | */ | ||
1084 | desc->tx_width = src_width; | ||
1085 | |||
1086 | atc_desc_chain(&first, &prev, desc); | 1129 | atc_desc_chain(&first, &prev, desc); |
1087 | 1130 | ||
1088 | /* update the lengths and addresses for the next loop cycle */ | 1131 | /* update the lengths and addresses for the next loop cycle */ |
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
1256 | /* First descriptor of the chain embedds additional information */ | 1299 | /* First descriptor of the chain embedds additional information */ |
1257 | first->txd.cookie = -EBUSY; | 1300 | first->txd.cookie = -EBUSY; |
1258 | first->total_len = buf_len; | 1301 | first->total_len = buf_len; |
1259 | first->tx_width = reg_width; | ||
1260 | 1302 | ||
1261 | return &first->txd; | 1303 | return &first->txd; |
1262 | 1304 | ||
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index bc8d5ebedd19..7f5a08230f76 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -112,6 +112,7 @@ | |||
112 | #define ATC_SRC_WIDTH_BYTE (0x0 << 24) | 112 | #define ATC_SRC_WIDTH_BYTE (0x0 << 24) |
113 | #define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) | 113 | #define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) |
114 | #define ATC_SRC_WIDTH_WORD (0x2 << 24) | 114 | #define ATC_SRC_WIDTH_WORD (0x2 << 24) |
115 | #define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) | ||
115 | #define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ | 116 | #define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ |
116 | #define ATC_DST_WIDTH(x) ((x) << 28) | 117 | #define ATC_DST_WIDTH(x) ((x) << 28) |
117 | #define ATC_DST_WIDTH_BYTE (0x0 << 28) | 118 | #define ATC_DST_WIDTH_BYTE (0x0 << 28) |
@@ -182,7 +183,6 @@ struct at_lli { | |||
182 | * @txd: support for the async_tx api | 183 | * @txd: support for the async_tx api |
183 | * @desc_node: node on the channed descriptors list | 184 | * @desc_node: node on the channed descriptors list |
184 | * @len: descriptor byte count | 185 | * @len: descriptor byte count |
185 | * @tx_width: transfer width | ||
186 | * @total_len: total transaction byte count | 186 | * @total_len: total transaction byte count |
187 | */ | 187 | */ |
188 | struct at_desc { | 188 | struct at_desc { |
@@ -194,7 +194,6 @@ struct at_desc { | |||
194 | struct dma_async_tx_descriptor txd; | 194 | struct dma_async_tx_descriptor txd; |
195 | struct list_head desc_node; | 195 | struct list_head desc_node; |
196 | size_t len; | 196 | size_t len; |
197 | u32 tx_width; | ||
198 | size_t total_len; | 197 | size_t total_len; |
199 | 198 | ||
200 | /* Interleaved data */ | 199 | /* Interleaved data */ |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index cf1213de7865..40afa2a16cfc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, | |||
359 | * descriptor view 2 since some fields of the configuration register | 359 | * descriptor view 2 since some fields of the configuration register |
360 | * depend on transfer size and src/dest addresses. | 360 | * depend on transfer size and src/dest addresses. |
361 | */ | 361 | */ |
362 | if (at_xdmac_chan_is_cyclic(atchan)) { | 362 | if (at_xdmac_chan_is_cyclic(atchan)) |
363 | reg = AT_XDMAC_CNDC_NDVIEW_NDV1; | 363 | reg = AT_XDMAC_CNDC_NDVIEW_NDV1; |
364 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); | 364 | else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) |
365 | } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) { | ||
366 | reg = AT_XDMAC_CNDC_NDVIEW_NDV3; | 365 | reg = AT_XDMAC_CNDC_NDVIEW_NDV3; |
367 | } else { | 366 | else |
368 | /* | ||
369 | * No need to write AT_XDMAC_CC reg, it will be done when the | ||
370 | * descriptor is fecthed. | ||
371 | */ | ||
372 | reg = AT_XDMAC_CNDC_NDVIEW_NDV2; | 367 | reg = AT_XDMAC_CNDC_NDVIEW_NDV2; |
373 | } | 368 | /* |
369 | * Even if the register will be updated from the configuration in the | ||
370 | * descriptor when using view 2 or higher, the PROT bit won't be set | ||
371 | * properly. This bit can be modified only by using the channel | ||
372 | * configuration register. | ||
373 | */ | ||
374 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); | ||
374 | 375 | ||
375 | reg |= AT_XDMAC_CNDC_NDDUP | 376 | reg |= AT_XDMAC_CNDC_NDDUP |
376 | | AT_XDMAC_CNDC_NDSUP | 377 | | AT_XDMAC_CNDC_NDSUP |
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
681 | desc->lld.mbr_sa = mem; | 682 | desc->lld.mbr_sa = mem; |
682 | desc->lld.mbr_da = atchan->sconfig.dst_addr; | 683 | desc->lld.mbr_da = atchan->sconfig.dst_addr; |
683 | } | 684 | } |
684 | desc->lld.mbr_cfg = atchan->cfg; | 685 | dwidth = at_xdmac_get_dwidth(atchan->cfg); |
685 | dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); | ||
686 | fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) | 686 | fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) |
687 | ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) | 687 | ? dwidth |
688 | : AT_XDMAC_CC_DWIDTH_BYTE; | 688 | : AT_XDMAC_CC_DWIDTH_BYTE; |
689 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ | 689 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ |
690 | | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | 690 | | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ |
691 | | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | 691 | | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ |
692 | | (len >> fixed_dwidth); /* microblock length */ | 692 | | (len >> fixed_dwidth); /* microblock length */ |
693 | desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | | ||
694 | AT_XDMAC_CC_DWIDTH(fixed_dwidth); | ||
693 | dev_dbg(chan2dev(chan), | 695 | dev_dbg(chan2dev(chan), |
694 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", | 696 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", |
695 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); | 697 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fbaf1ead2597..f1325f62563e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan, | |||
162 | config &= ~0x7; | 162 | config &= ~0x7; |
163 | config |= op_mode; | 163 | config |= op_mode; |
164 | 164 | ||
165 | if (IS_ENABLED(__BIG_ENDIAN)) | 165 | #if defined(__BIG_ENDIAN) |
166 | config |= XOR_DESCRIPTOR_SWAP; | 166 | config |= XOR_DESCRIPTOR_SWAP; |
167 | else | 167 | #else |
168 | config &= ~XOR_DESCRIPTOR_SWAP; | 168 | config &= ~XOR_DESCRIPTOR_SWAP; |
169 | #endif | ||
169 | 170 | ||
170 | writel_relaxed(config, XOR_CONFIG(chan)); | 171 | writel_relaxed(config, XOR_CONFIG(chan)); |
171 | chan->current_type = type; | 172 | chan->current_type = type; |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f513f77b1d85..ecab4ea059b4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
2328 | desc->txd.callback = last->txd.callback; | 2328 | desc->txd.callback = last->txd.callback; |
2329 | desc->txd.callback_param = last->txd.callback_param; | 2329 | desc->txd.callback_param = last->txd.callback_param; |
2330 | } | 2330 | } |
2331 | last->last = false; | 2331 | desc->last = false; |
2332 | 2332 | ||
2333 | dma_cookie_assign(&desc->txd); | 2333 | dma_cookie_assign(&desc->txd); |
2334 | 2334 | ||
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
2623 | desc->rqcfg.brst_len = 1; | 2623 | desc->rqcfg.brst_len = 1; |
2624 | 2624 | ||
2625 | desc->rqcfg.brst_len = get_burst_len(desc, len); | 2625 | desc->rqcfg.brst_len = get_burst_len(desc, len); |
2626 | desc->bytes_requested = len; | ||
2626 | 2627 | ||
2627 | desc->txd.flags = flags; | 2628 | desc->txd.flags = flags; |
2628 | 2629 | ||
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 7d2c17d8d30f..6f80432a3f0a 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c | |||
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) | |||
29 | spin_lock_irqsave(&vc->lock, flags); | 29 | spin_lock_irqsave(&vc->lock, flags); |
30 | cookie = dma_cookie_assign(tx); | 30 | cookie = dma_cookie_assign(tx); |
31 | 31 | ||
32 | list_move_tail(&vd->node, &vc->desc_submitted); | 32 | list_add_tail(&vd->node, &vc->desc_submitted); |
33 | spin_unlock_irqrestore(&vc->lock, flags); | 33 | spin_unlock_irqrestore(&vc->lock, flags); |
34 | 34 | ||
35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", | 35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", |
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg) | |||
83 | cb_data = vd->tx.callback_param; | 83 | cb_data = vd->tx.callback_param; |
84 | 84 | ||
85 | list_del(&vd->node); | 85 | list_del(&vd->node); |
86 | if (async_tx_test_ack(&vd->tx)) | 86 | |
87 | list_add(&vd->node, &vc->desc_allocated); | 87 | vc->desc_free(vd); |
88 | else | ||
89 | vc->desc_free(vd); | ||
90 | 88 | ||
91 | if (cb) | 89 | if (cb) |
92 | cb(cb_data); | 90 | cb(cb_data); |
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) | |||
98 | while (!list_empty(head)) { | 96 | while (!list_empty(head)) { |
99 | struct virt_dma_desc *vd = list_first_entry(head, | 97 | struct virt_dma_desc *vd = list_first_entry(head, |
100 | struct virt_dma_desc, node); | 98 | struct virt_dma_desc, node); |
101 | if (async_tx_test_ack(&vd->tx)) { | 99 | list_del(&vd->node); |
102 | list_move_tail(&vd->node, &vc->desc_allocated); | 100 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); |
103 | } else { | 101 | vc->desc_free(vd); |
104 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); | ||
105 | list_del(&vd->node); | ||
106 | vc->desc_free(vd); | ||
107 | } | ||
108 | } | 102 | } |
109 | } | 103 | } |
110 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); | 104 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); |
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) | |||
114 | dma_cookie_init(&vc->chan); | 108 | dma_cookie_init(&vc->chan); |
115 | 109 | ||
116 | spin_lock_init(&vc->lock); | 110 | spin_lock_init(&vc->lock); |
117 | INIT_LIST_HEAD(&vc->desc_allocated); | ||
118 | INIT_LIST_HEAD(&vc->desc_submitted); | 111 | INIT_LIST_HEAD(&vc->desc_submitted); |
119 | INIT_LIST_HEAD(&vc->desc_issued); | 112 | INIT_LIST_HEAD(&vc->desc_issued); |
120 | INIT_LIST_HEAD(&vc->desc_completed); | 113 | INIT_LIST_HEAD(&vc->desc_completed); |
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 189e75dbcb15..181b95267866 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h | |||
@@ -29,7 +29,6 @@ struct virt_dma_chan { | |||
29 | spinlock_t lock; | 29 | spinlock_t lock; |
30 | 30 | ||
31 | /* protected by vc.lock */ | 31 | /* protected by vc.lock */ |
32 | struct list_head desc_allocated; | ||
33 | struct list_head desc_submitted; | 32 | struct list_head desc_submitted; |
34 | struct list_head desc_issued; | 33 | struct list_head desc_issued; |
35 | struct list_head desc_completed; | 34 | struct list_head desc_completed; |
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan | |||
56 | struct virt_dma_desc *vd, unsigned long tx_flags) | 55 | struct virt_dma_desc *vd, unsigned long tx_flags) |
57 | { | 56 | { |
58 | extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); | 57 | extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); |
59 | unsigned long flags; | ||
60 | 58 | ||
61 | dma_async_tx_descriptor_init(&vd->tx, &vc->chan); | 59 | dma_async_tx_descriptor_init(&vd->tx, &vc->chan); |
62 | vd->tx.flags = tx_flags; | 60 | vd->tx.flags = tx_flags; |
63 | vd->tx.tx_submit = vchan_tx_submit; | 61 | vd->tx.tx_submit = vchan_tx_submit; |
64 | 62 | ||
65 | spin_lock_irqsave(&vc->lock, flags); | ||
66 | list_add_tail(&vd->node, &vc->desc_allocated); | ||
67 | spin_unlock_irqrestore(&vc->lock, flags); | ||
68 | |||
69 | return &vd->tx; | 63 | return &vd->tx; |
70 | } | 64 | } |
71 | 65 | ||
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) | |||
128 | } | 122 | } |
129 | 123 | ||
130 | /** | 124 | /** |
131 | * vchan_get_all_descriptors - obtain all allocated, submitted and issued | 125 | * vchan_get_all_descriptors - obtain all submitted and issued descriptors |
132 | * descriptors | ||
133 | * vc: virtual channel to get descriptors from | 126 | * vc: virtual channel to get descriptors from |
134 | * head: list of descriptors found | 127 | * head: list of descriptors found |
135 | * | 128 | * |
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) | |||
141 | static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, | 134 | static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, |
142 | struct list_head *head) | 135 | struct list_head *head) |
143 | { | 136 | { |
144 | list_splice_tail_init(&vc->desc_allocated, head); | ||
145 | list_splice_tail_init(&vc->desc_submitted, head); | 137 | list_splice_tail_init(&vc->desc_submitted, head); |
146 | list_splice_tail_init(&vc->desc_issued, head); | 138 | list_splice_tail_init(&vc->desc_issued, head); |
147 | list_splice_tail_init(&vc->desc_completed, head); | 139 | list_splice_tail_init(&vc->desc_completed, head); |
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, | |||
149 | 141 | ||
150 | static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) | 142 | static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) |
151 | { | 143 | { |
152 | struct virt_dma_desc *vd; | ||
153 | unsigned long flags; | 144 | unsigned long flags; |
154 | LIST_HEAD(head); | 145 | LIST_HEAD(head); |
155 | 146 | ||
156 | spin_lock_irqsave(&vc->lock, flags); | 147 | spin_lock_irqsave(&vc->lock, flags); |
157 | vchan_get_all_descriptors(vc, &head); | 148 | vchan_get_all_descriptors(vc, &head); |
158 | list_for_each_entry(vd, &head, node) | ||
159 | async_tx_clear_ack(&vd->tx); | ||
160 | spin_unlock_irqrestore(&vc->lock, flags); | 149 | spin_unlock_irqrestore(&vc->lock, flags); |
161 | 150 | ||
162 | vchan_dma_desc_free_list(vc, &head); | 151 | vchan_dma_desc_free_list(vc, &head); |
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 620fd55ec766..dff22ab01851 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c | |||
@@ -111,6 +111,7 @@ | |||
111 | #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 | 111 | #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 |
112 | #define XGENE_DMA_BLK_MEM_RDY 0xD074 | 112 | #define XGENE_DMA_BLK_MEM_RDY 0xD074 |
113 | #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF | 113 | #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF |
114 | #define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000 | ||
114 | 115 | ||
115 | /* X-Gene SoC EFUSE csr register and bit defination */ | 116 | /* X-Gene SoC EFUSE csr register and bit defination */ |
116 | #define XGENE_SOC_JTAG1_SHADOW 0x18 | 117 | #define XGENE_SOC_JTAG1_SHADOW 0x18 |
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev, | |||
1887 | return -ENOMEM; | 1888 | return -ENOMEM; |
1888 | } | 1889 | } |
1889 | 1890 | ||
1891 | pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; | ||
1892 | |||
1890 | /* Get efuse csr region */ | 1893 | /* Get efuse csr region */ |
1891 | res = platform_get_resource(pdev, IORESOURCE_MEM, 3); | 1894 | res = platform_get_resource(pdev, IORESOURCE_MEM, 3); |
1892 | if (!res) { | 1895 | if (!res) { |