diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-10-15 07:47:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-10-15 07:47:07 -0400 |
commit | 7a23c5abb930cefcef85df6dc0c8fb3e8961980c (patch) | |
tree | 2248f10da08a860b946c85b72faa97b64e0a087c | |
parent | e7a36a6ec9cf1b60273e48ee980b8920f333bd4d (diff) | |
parent | edf10919e5fc8dfd10e57ed72f651204559bc6ba (diff) |
Merge tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:
"Here are fixes for this round
- fix spinlock usage and fifo response for altera driver
- fix ti crossbar race condition
- fix edma memcpy align"
* tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: altera: fix spinlock usage
dmaengine: altera: fix response FIFO emptying
dmaengine: ti-dma-crossbar: Fix possible race condition with dma_inuse
dmaengine: edma: Align the memcpy acnt array size with the transfer
-rw-r--r-- | drivers/dma/altera-msgdma.c | 37 | ||||
-rw-r--r-- | drivers/dma/edma.c | 19 | ||||
-rw-r--r-- | drivers/dma/ti-dma-crossbar.c | 3 |
3 files changed, 40 insertions, 19 deletions
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 32905d5606ac..339186f25a2a 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c | |||
@@ -212,11 +212,12 @@ struct msgdma_device { | |||
212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) | 212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) |
213 | { | 213 | { |
214 | struct msgdma_sw_desc *desc; | 214 | struct msgdma_sw_desc *desc; |
215 | unsigned long flags; | ||
215 | 216 | ||
216 | spin_lock_bh(&mdev->lock); | 217 | spin_lock_irqsave(&mdev->lock, flags); |
217 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); | 218 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); |
218 | list_del(&desc->node); | 219 | list_del(&desc->node); |
219 | spin_unlock_bh(&mdev->lock); | 220 | spin_unlock_irqrestore(&mdev->lock, flags); |
220 | 221 | ||
221 | INIT_LIST_HEAD(&desc->tx_list); | 222 | INIT_LIST_HEAD(&desc->tx_list); |
222 | 223 | ||
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
306 | struct msgdma_device *mdev = to_mdev(tx->chan); | 307 | struct msgdma_device *mdev = to_mdev(tx->chan); |
307 | struct msgdma_sw_desc *new; | 308 | struct msgdma_sw_desc *new; |
308 | dma_cookie_t cookie; | 309 | dma_cookie_t cookie; |
310 | unsigned long flags; | ||
309 | 311 | ||
310 | new = tx_to_desc(tx); | 312 | new = tx_to_desc(tx); |
311 | spin_lock_bh(&mdev->lock); | 313 | spin_lock_irqsave(&mdev->lock, flags); |
312 | cookie = dma_cookie_assign(tx); | 314 | cookie = dma_cookie_assign(tx); |
313 | 315 | ||
314 | list_add_tail(&new->node, &mdev->pending_list); | 316 | list_add_tail(&new->node, &mdev->pending_list); |
315 | spin_unlock_bh(&mdev->lock); | 317 | spin_unlock_irqrestore(&mdev->lock, flags); |
316 | 318 | ||
317 | return cookie; | 319 | return cookie; |
318 | } | 320 | } |
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | |||
336 | struct msgdma_extended_desc *desc; | 338 | struct msgdma_extended_desc *desc; |
337 | size_t copy; | 339 | size_t copy; |
338 | u32 desc_cnt; | 340 | u32 desc_cnt; |
341 | unsigned long irqflags; | ||
339 | 342 | ||
340 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); | 343 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); |
341 | 344 | ||
342 | spin_lock_bh(&mdev->lock); | 345 | spin_lock_irqsave(&mdev->lock, irqflags); |
343 | if (desc_cnt > mdev->desc_free_cnt) { | 346 | if (desc_cnt > mdev->desc_free_cnt) { |
344 | spin_unlock_bh(&mdev->lock); | 347 | spin_unlock_bh(&mdev->lock); |
345 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 348 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
346 | return NULL; | 349 | return NULL; |
347 | } | 350 | } |
348 | mdev->desc_free_cnt -= desc_cnt; | 351 | mdev->desc_free_cnt -= desc_cnt; |
349 | spin_unlock_bh(&mdev->lock); | 352 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
350 | 353 | ||
351 | do { | 354 | do { |
352 | /* Allocate and populate the descriptor */ | 355 | /* Allocate and populate the descriptor */ |
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
397 | u32 desc_cnt = 0, i; | 400 | u32 desc_cnt = 0, i; |
398 | struct scatterlist *sg; | 401 | struct scatterlist *sg; |
399 | u32 stride; | 402 | u32 stride; |
403 | unsigned long irqflags; | ||
400 | 404 | ||
401 | for_each_sg(sgl, sg, sg_len, i) | 405 | for_each_sg(sgl, sg, sg_len, i) |
402 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); | 406 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); |
403 | 407 | ||
404 | spin_lock_bh(&mdev->lock); | 408 | spin_lock_irqsave(&mdev->lock, irqflags); |
405 | if (desc_cnt > mdev->desc_free_cnt) { | 409 | if (desc_cnt > mdev->desc_free_cnt) { |
406 | spin_unlock_bh(&mdev->lock); | 410 | spin_unlock_bh(&mdev->lock); |
407 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 411 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
408 | return NULL; | 412 | return NULL; |
409 | } | 413 | } |
410 | mdev->desc_free_cnt -= desc_cnt; | 414 | mdev->desc_free_cnt -= desc_cnt; |
411 | spin_unlock_bh(&mdev->lock); | 415 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
412 | 416 | ||
413 | avail = sg_dma_len(sgl); | 417 | avail = sg_dma_len(sgl); |
414 | 418 | ||
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev) | |||
566 | static void msgdma_issue_pending(struct dma_chan *chan) | 570 | static void msgdma_issue_pending(struct dma_chan *chan) |
567 | { | 571 | { |
568 | struct msgdma_device *mdev = to_mdev(chan); | 572 | struct msgdma_device *mdev = to_mdev(chan); |
573 | unsigned long flags; | ||
569 | 574 | ||
570 | spin_lock_bh(&mdev->lock); | 575 | spin_lock_irqsave(&mdev->lock, flags); |
571 | msgdma_start_transfer(mdev); | 576 | msgdma_start_transfer(mdev); |
572 | spin_unlock_bh(&mdev->lock); | 577 | spin_unlock_irqrestore(&mdev->lock, flags); |
573 | } | 578 | } |
574 | 579 | ||
575 | /** | 580 | /** |
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev) | |||
634 | static void msgdma_free_chan_resources(struct dma_chan *dchan) | 639 | static void msgdma_free_chan_resources(struct dma_chan *dchan) |
635 | { | 640 | { |
636 | struct msgdma_device *mdev = to_mdev(dchan); | 641 | struct msgdma_device *mdev = to_mdev(dchan); |
642 | unsigned long flags; | ||
637 | 643 | ||
638 | spin_lock_bh(&mdev->lock); | 644 | spin_lock_irqsave(&mdev->lock, flags); |
639 | msgdma_free_descriptors(mdev); | 645 | msgdma_free_descriptors(mdev); |
640 | spin_unlock_bh(&mdev->lock); | 646 | spin_unlock_irqrestore(&mdev->lock, flags); |
641 | kfree(mdev->sw_desq); | 647 | kfree(mdev->sw_desq); |
642 | } | 648 | } |
643 | 649 | ||
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data) | |||
682 | u32 count; | 688 | u32 count; |
683 | u32 __maybe_unused size; | 689 | u32 __maybe_unused size; |
684 | u32 __maybe_unused status; | 690 | u32 __maybe_unused status; |
691 | unsigned long flags; | ||
685 | 692 | ||
686 | spin_lock(&mdev->lock); | 693 | spin_lock_irqsave(&mdev->lock, flags); |
687 | 694 | ||
688 | /* Read number of responses that are available */ | 695 | /* Read number of responses that are available */ |
689 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); | 696 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); |
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data) | |||
698 | * bits. So we need to just drop these values. | 705 | * bits. So we need to just drop these values. |
699 | */ | 706 | */ |
700 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); | 707 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); |
701 | status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); | 708 | status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); |
702 | 709 | ||
703 | msgdma_complete_descriptor(mdev); | 710 | msgdma_complete_descriptor(mdev); |
704 | msgdma_chan_desc_cleanup(mdev); | 711 | msgdma_chan_desc_cleanup(mdev); |
705 | } | 712 | } |
706 | 713 | ||
707 | spin_unlock(&mdev->lock); | 714 | spin_unlock_irqrestore(&mdev->lock, flags); |
708 | } | 715 | } |
709 | 716 | ||
710 | /** | 717 | /** |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3879f80a4815..a7ea20e7b8e9 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
1143 | struct edma_desc *edesc; | 1143 | struct edma_desc *edesc; |
1144 | struct device *dev = chan->device->dev; | 1144 | struct device *dev = chan->device->dev; |
1145 | struct edma_chan *echan = to_edma_chan(chan); | 1145 | struct edma_chan *echan = to_edma_chan(chan); |
1146 | unsigned int width, pset_len; | 1146 | unsigned int width, pset_len, array_size; |
1147 | 1147 | ||
1148 | if (unlikely(!echan || !len)) | 1148 | if (unlikely(!echan || !len)) |
1149 | return NULL; | 1149 | return NULL; |
1150 | 1150 | ||
1151 | /* Align the array size (acnt block) with the transfer properties */ | ||
1152 | switch (__ffs((src | dest | len))) { | ||
1153 | case 0: | ||
1154 | array_size = SZ_32K - 1; | ||
1155 | break; | ||
1156 | case 1: | ||
1157 | array_size = SZ_32K - 2; | ||
1158 | break; | ||
1159 | default: | ||
1160 | array_size = SZ_32K - 4; | ||
1161 | break; | ||
1162 | } | ||
1163 | |||
1151 | if (len < SZ_64K) { | 1164 | if (len < SZ_64K) { |
1152 | /* | 1165 | /* |
1153 | * Transfer size less than 64K can be handled with one paRAM | 1166 | * Transfer size less than 64K can be handled with one paRAM |
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
1169 | * When the full_length is multibple of 32767 one slot can be | 1182 | * When the full_length is multibple of 32767 one slot can be |
1170 | * used to complete the transfer. | 1183 | * used to complete the transfer. |
1171 | */ | 1184 | */ |
1172 | width = SZ_32K - 1; | 1185 | width = array_size; |
1173 | pset_len = rounddown(len, width); | 1186 | pset_len = rounddown(len, width); |
1174 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ | 1187 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ |
1175 | if (unlikely(pset_len == len)) | 1188 | if (unlikely(pset_len == len)) |
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
1217 | } | 1230 | } |
1218 | dest += pset_len; | 1231 | dest += pset_len; |
1219 | src += pset_len; | 1232 | src += pset_len; |
1220 | pset_len = width = len % (SZ_32K - 1); | 1233 | pset_len = width = len % array_size; |
1221 | 1234 | ||
1222 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, | 1235 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, |
1223 | width, pset_len, DMA_MEM_TO_MEM); | 1236 | width, pset_len, DMA_MEM_TO_MEM); |
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2f65a8fde21d..f1d04b70ee67 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
262 | mutex_lock(&xbar->mutex); | 262 | mutex_lock(&xbar->mutex); |
263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, | 263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, |
264 | xbar->dma_requests); | 264 | xbar->dma_requests); |
265 | mutex_unlock(&xbar->mutex); | ||
266 | if (map->xbar_out == xbar->dma_requests) { | 265 | if (map->xbar_out == xbar->dma_requests) { |
266 | mutex_unlock(&xbar->mutex); | ||
267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | 267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); |
268 | kfree(map); | 268 | kfree(map); |
269 | return ERR_PTR(-ENOMEM); | 269 | return ERR_PTR(-ENOMEM); |
270 | } | 270 | } |
271 | set_bit(map->xbar_out, xbar->dma_inuse); | 271 | set_bit(map->xbar_out, xbar->dma_inuse); |
272 | mutex_unlock(&xbar->mutex); | ||
272 | 273 | ||
273 | map->xbar_in = (u16)dma_spec->args[0]; | 274 | map->xbar_in = (u16)dma_spec->args[0]; |
274 | 275 | ||