author     Linus Torvalds <torvalds@linux-foundation.org>  2017-12-17 16:28:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-12-17 16:28:49 -0500
commit     c43727908f5589970d954e995a6cb0dcff837dcd (patch)
tree       d1c40a19c091c719d5476b526d5a97fc84892c03
parent     b9f5fb1800d8a4a3bc6cd3152c5f3d252986cf79 (diff)
parent     2610acf46b9ed528ec2cacd717bc9d354e452b73 (diff)
Merge tag 'dmaengine-fix-4.15-rc4' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:
 "This time consisting of fixes in a bunch of drivers and the dmatest
  module:

   - Fix for disable clk on error path in fsl-edma driver

   - Disable clk fail fix in jz4740 driver

   - Fix long pending bug in dmatest driver for dangling pointer

   - Fix potential NULL pointer dereference in at_hdmac driver

   - Error handling path in ioat driver"

* tag 'dmaengine-fix-4.15-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: fsl-edma: disable clks on all error paths
  dmaengine: jz4740: disable/unprepare clk if probe fails
  dmaengine: dmatest: move callback wait queue to thread context
  dmaengine: at_hdmac: fix potential NULL pointer dereference in atc_prep_dma_interleaved
  dmaengine: ioat: Fix error handling path
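The fsl-edma and jz4740 patches below share one pattern: on a probe error path, only the clocks that were actually prepared and enabled before the failure should be unwound with clk_disable_unprepare(). The following is a minimal sketch of that pattern in isolation, not code from the patched drivers; my_engine, my_probe and MY_NR_CLOCKS are illustrative names.

#include <linux/clk.h>
#include <linux/platform_device.h>

#define MY_NR_CLOCKS	4	/* illustrative only */

struct my_engine {
	struct clk *clk[MY_NR_CLOCKS];
};

/* Disable and unprepare only the first nr_clocks clocks. */
static void my_disable_clocks(struct my_engine *eng, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(eng->clk[i]);
}

static int my_probe(struct platform_device *pdev)
{
	struct my_engine *eng = platform_get_drvdata(pdev);
	int i, ret;

	for (i = 0; i < MY_NR_CLOCKS; i++) {
		ret = clk_prepare_enable(eng->clk[i]);
		if (ret) {
			/* Undo only the i clocks enabled before the failure. */
			my_disable_clocks(eng, i);
			return ret;
		}
	}
	return 0;
}

Passing the loop index to the cleanup helper is what the fsl-edma fix adds via the new nr_clocks parameter; the jz4740 fix achieves the same effect by jumping to an err_clk label when dma_async_device_register() fails.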
-rw-r--r--  drivers/dma/at_hdmac.c     4
-rw-r--r--  drivers/dma/dma-jz4740.c   4
-rw-r--r--  drivers/dma/dmatest.c     55
-rw-r--r--  drivers/dma/fsl-edma.c    28
-rw-r--r--  drivers/dma/ioat/init.c    2
5 files changed, 52 insertions, 41 deletions
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fbab271b3bf9..a861b5b4d443 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 		       unsigned long flags)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
-	struct data_chunk	*first = xt->sgl;
+	struct data_chunk	*first;
 	struct at_desc		*desc = NULL;
 	size_t			xfer_count;
 	unsigned int		dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
 		return NULL;
 
+	first = xt->sgl;
+
 	dev_info(chan2dev(chan),
 		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
 		__func__, &xt->src_start, &xt->dst_start, xt->numf,
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index d50273fed715..afd5e10f8927 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 	ret = dma_async_device_register(dd);
 	if (ret)
-		return ret;
+		goto err_clk;
 
 	irq = platform_get_irq(pdev, 0);
 	ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 err_unregister:
 	dma_async_device_unregister(dd);
+err_clk:
+	clk_disable_unprepare(dmadev->clk);
 	return ret;
 }
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 47edc7fbf91f..ec5f9d2bc820 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COUNT_MASK	0x1f
 #define PATTERN_MEMSET_IDX	0x01
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
 struct dmatest_thread {
 	struct list_head	node;
 	struct dmatest_info	*info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
 	u8			**dsts;
 	u8			**udsts;
 	enum dma_transaction_type type;
+	wait_queue_head_t done_wait;
+	struct dmatest_done test_done;
 	bool			done;
 };
 
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-	bool			done;
-	wait_queue_head_t	*wait;
-};
 
 static void dmatest_callback(void *arg)
 {
 	struct dmatest_done *done = arg;
-
-	done->done = true;
-	wake_up_all(done->wait);
+	struct dmatest_thread *thread =
+		container_of(arg, struct dmatest_thread, done_wait);
+	if (!thread->done) {
+		done->done = true;
+		wake_up_all(done->wait);
+	} else {
+		/*
+		 * If thread->done, it means that this callback occurred
+		 * after the parent thread has cleaned up. This can
+		 * happen in the case that driver doesn't implement
+		 * the terminate_all() functionality and a dma operation
+		 * did not occur within the timeout period
+		 */
+		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+	}
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
  */
 static int dmatest_func(void *data)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
-	struct dmatest_done	done = { .wait = &done_wait };
+	struct dmatest_done	*done = &thread->test_done;
 	struct dmatest_info	*info;
 	struct dmatest_params	*params;
 	struct dma_chan		*chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		done.done = false;
+		done->done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &done;
+		tx->callback_param = done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		wait_event_freezable_timeout(done_wait, done.done,
+		wait_event_freezable_timeout(thread->done_wait, done->done,
					     msecs_to_jiffies(params->timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (!done.done) {
-			/*
-			 * We're leaving the timed out dma operation with
-			 * dangling pointer to done_wait. To make this
-			 * correct, we'll need to allocate wait_done for
-			 * each test iteration and perform "who's gonna
-			 * free it this time?" dancing. For now, just
-			 * leave it dangling.
-			 */
-			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+		if (!done->done) {
 			dmaengine_unmap_put(um);
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
 		dmatest_KBs(runtime, total_len), ret);
 
 	/* terminate all transfers on specified channels */
-	if (ret)
+	if (ret || failed_tests)
 		dmaengine_terminate_all(chan);
 
 	thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
 		thread->info = info;
 		thread->chan = dtc->chan;
 		thread->type = type;
+		thread->test_done.wait = &thread->done_wait;
+		init_waitqueue_head(&thread->done_wait);
 		smp_wmb();
 		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
 				dma_chan_name(chan), op, i);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 6775f2c74e25..c7568869284e 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
 	}
 }
 
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 {
 	int i;
 
-	for (i = 0; i < DMAMUX_NR; i++)
+	for (i = 0; i < nr_clocks; i++)
 		clk_disable_unprepare(fsl_edma->muxclk[i]);
 }
 
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
 		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(fsl_edma->muxbase[i]))
+		if (IS_ERR(fsl_edma->muxbase[i])) {
+			/* on error: disable all previously enabled clks */
+			fsl_disable_clocks(fsl_edma, i);
 			return PTR_ERR(fsl_edma->muxbase[i]);
+		}
 
 		sprintf(clkname, "dmamux%d", i);
 		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
 		if (IS_ERR(fsl_edma->muxclk[i])) {
 			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+			/* on error: disable all previously enabled clks */
+			fsl_disable_clocks(fsl_edma, i);
 			return PTR_ERR(fsl_edma->muxclk[i]);
 		}
 
 		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
-		if (ret) {
-			/* disable only clks which were enabled on error */
-			for (; i >= 0; i--)
-				clk_disable_unprepare(fsl_edma->muxclk[i]);
-
-			dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
-			return ret;
-		}
+		if (ret)
+			/* on error: disable all previously enabled clks */
+			fsl_disable_clocks(fsl_edma, i);
 
 	}
 
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	if (ret) {
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA engine. (%d)\n", ret);
-		fsl_disable_clocks(fsl_edma);
+		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 		return ret;
 	}
 
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
 		dma_async_device_unregister(&fsl_edma->dma_dev);
-		fsl_disable_clocks(fsl_edma);
+		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 		return ret;
 	}
 
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
 	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
 	of_dma_controller_free(np);
 	dma_async_device_unregister(&fsl_edma->dma_dev);
-	fsl_disable_clocks(fsl_edma);
+	fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 
 	return 0;
 }
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2f31d3d0caa6..7792a9186f9c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
 		dev_err(dev, "Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 
 unmap_dma: