Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/at_hdmac.c                     5
-rw-r--r--  drivers/dma/bcm2835-dma.c                 95
-rw-r--r--  drivers/dma/dma-axi-dmac.c                 3
-rw-r--r--  drivers/dma/dma-jz4780.c                   5
-rw-r--r--  drivers/dma/dmatest.c                    269
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac.h      2
-rw-r--r--  drivers/dma/dw/Kconfig                     2
-rw-r--r--  drivers/dma/dw/Makefile                    2
-rw-r--r--  drivers/dma/dw/core.c                    245
-rw-r--r--  drivers/dma/dw/dw.c                      138
-rw-r--r--  drivers/dma/dw/idma32.c                  160
-rw-r--r--  drivers/dma/dw/internal.h                 15
-rw-r--r--  drivers/dma/dw/pci.c                      53
-rw-r--r--  drivers/dma/dw/platform.c                 22
-rw-r--r--  drivers/dma/dw/regs.h                     30
-rw-r--r--  drivers/dma/imx-sdma.c                    25
-rw-r--r--  drivers/dma/ioat/dma.c                    12
-rw-r--r--  drivers/dma/ioat/dma.h                     2
-rw-r--r--  drivers/dma/ioat/hw.h                      3
-rw-r--r--  drivers/dma/ioat/init.c                   40
-rw-r--r--  drivers/dma/ioat/registers.h              24
-rw-r--r--  drivers/dma/mv_xor.c                       2
-rw-r--r--  drivers/dma/pl330.c                        1
-rw-r--r--  drivers/dma/qcom/bam_dma.c                 4
-rw-r--r--  drivers/dma/qcom/hidma.c                  19
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c              3
-rw-r--r--  drivers/dma/sa11x0-dma.c                   2
-rw-r--r--  drivers/dma/sprd-dma.c                    19
-rw-r--r--  drivers/dma/st_fdma.c                      6
-rw-r--r--  drivers/dma/timb_dma.c                     4
-rw-r--r--  drivers/tty/serial/8250/8250_lpss.c        1
31 files changed, 715 insertions, 498 deletions
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 01d936c9fe89..a0a9cd76c1d4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 	struct at_desc *ret = NULL;
 	unsigned long flags;
 	unsigned int i = 0;
-	LIST_HEAD(tmp_list);
 
 	spin_lock_irqsave(&atchan->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
@@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan)
 	int chan_id = atchan->chan_common.chan_id;
 	unsigned long flags;
 
-	LIST_HEAD(list);
-
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 	spin_lock_irqsave(&atchan->lock, flags);
@@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan)
 	int chan_id = atchan->chan_common.chan_id;
 	unsigned long flags;
 
-	LIST_HEAD(list);
-
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 	if (!atc_chan_is_paused(atchan))
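
The three hunks above delete LIST_HEAD() declarations that nothing in atc_desc_get(), atc_pause() or atc_resume() ever used. LIST_HEAD() declares and initializes an empty list in one statement, so an unused instance is pure dead code with no side effects. A simplified sketch of the macro, adapted from include/linux/list.h (illustrative, not verbatim):

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/*
 * Declares 'name' as an empty list: both pointers refer back to the
 * head itself, which is the canonical empty state that list_empty()
 * tests for. If nothing is ever added, the declaration has no effect.
 */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)
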
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ec8a291d62ba 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -2,9 +2,6 @@
 /*
  * BCM2835 DMA engine support
  *
- * This driver only supports cyclic DMA transfers
- * as needed for the I2S module.
- *
  * Author: Florian Meier <florian.meier@koalo.de>
  *	Copyright 2013
  *
@@ -42,7 +39,6 @@
 
 struct bcm2835_dmadev {
 	struct dma_device ddev;
-	spinlock_t lock;
 	void __iomem *base;
 	struct device_dma_parameters dma_parms;
 };
@@ -64,7 +60,6 @@ struct bcm2835_cb_entry {
 
 struct bcm2835_chan {
 	struct virt_dma_chan vc;
-	struct list_head node;
 
 	struct dma_slave_config cfg;
 	unsigned int dreq;
@@ -312,8 +307,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
 		return NULL;
 
 	/* allocate and setup the descriptor. */
-	d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
-		    gfp);
+	d = kzalloc(struct_size(d, cb_list, frames), gfp);
 	if (!d)
 		return NULL;
 
@@ -406,39 +400,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
 	}
 }
 
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static void bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-	unsigned long cs;
+	void __iomem *chan_base = c->chan_base;
 	long int timeout = 10000;
 
-	cs = readl(chan_base + BCM2835_DMA_CS);
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
+	/*
+	 * A zero control block address means the channel is idle.
+	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
+	 */
+	if (!readl(chan_base + BCM2835_DMA_ADDR))
+		return;
 
 	/* Write 0 to the active bit - Pause the DMA */
 	writel(0, chan_base + BCM2835_DMA_CS);
 
 	/* Wait for any current AXI transfer to complete */
-	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+	while ((readl(chan_base + BCM2835_DMA_CS) &
+		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
 		cpu_relax();
-		cs = readl(chan_base + BCM2835_DMA_CS);
-	}
 
-	/* We'll un-pause when we set of our next DMA */
+	/* Peripheral might be stuck and fail to signal AXI write responses */
 	if (!timeout)
-		return -ETIMEDOUT;
-
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
-
-	/* Terminate the control block chain */
-	writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-	/* Abort the whole DMA */
-	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-	       chan_base + BCM2835_DMA_CS);
-
-	return 0;
+		dev_err(c->vc.chan.device->dev,
+			"failed to complete outstanding writes\n");
+
+	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 }
 
 static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
@@ -476,8 +463,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Acknowledge interrupt */
-	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+	/*
+	 * Clear the INT flag to receive further interrupts. Keep the channel
+	 * active in case the descriptor is cyclic or in case the client has
+	 * already terminated the descriptor and issued a new one. (May happen
+	 * if this IRQ handler is threaded.) If the channel is finished, it
+	 * will remain idle despite the ACTIVE flag being set.
+	 */
+	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+	       c->chan_base + BCM2835_DMA_CS);
 
 	d = c->desc;
 
@@ -485,11 +479,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 	if (d->cyclic) {
 		/* call the cyclic callback */
 		vchan_cyclic_callback(&d->vd);
-
-		/* Keep the DMA engine running */
-		writel(BCM2835_DMA_ACTIVE,
-		       c->chan_base + BCM2835_DMA_CS);
-	} else {
+	} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
 		vchan_cookie_complete(&c->desc->vd);
 		bcm2835_dma_start_desc(c);
 	}
@@ -507,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
 
+	/*
+	 * Control blocks are 256 bit in length and must start at a 256 bit
+	 * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
+	 */
 	c->cb_pool = dma_pool_create(dev_name(dev), dev,
-				     sizeof(struct bcm2835_dma_cb), 0, 0);
+				     sizeof(struct bcm2835_dma_cb), 32, 0);
 	if (!c->cb_pool) {
 		dev_err(dev, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
@@ -777,39 +771,16 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
 static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
-	int timeout = 10000;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Prevent this channel being scheduled */
-	spin_lock(&d->lock);
-	list_del_init(&c->node);
-	spin_unlock(&d->lock);
-
-	/*
-	 * Stop DMA activity: we assume the callback will not be called
-	 * after bcm_dma_abort() returns (even if it does, it will see
-	 * c->desc is NULL and exit.)
-	 */
+	/* stop DMA activity */
 	if (c->desc) {
 		vchan_terminate_vdesc(&c->desc->vd);
 		c->desc = NULL;
-		bcm2835_dma_abort(c->chan_base);
-
-		/* Wait for stopping */
-		while (--timeout) {
-			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
-						BCM2835_DMA_ACTIVE))
-				break;
-
-			cpu_relax();
-		}
-
-		if (!timeout)
-			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+		bcm2835_dma_abort(c);
 	}
 
 	vchan_get_all_descriptors(&c->vc, &head);
@@ -837,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
 
 	c->vc.desc_free = bcm2835_dma_desc_free;
 	vchan_init(&c->vc, &d->ddev);
-	INIT_LIST_HEAD(&c->node);
 
 	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
 	c->ch = chan_id;
@@ -940,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
-	spin_lock_init(&od->lock);
 
 	platform_set_drvdata(pdev, od);
 
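
Taken together, the bcm2835 hunks collapse the old pause/abort plus terminate-time wait loop into one synchronous abort. A minimal sketch of the resulting sequence, assembled from the hunks above (sketch_abort is a hypothetical standalone rendering, not driver code; register names are the driver's, and kernel context is assumed for readl/writel/cpu_relax/dev_err):

static void sketch_abort(struct device *dev, void __iomem *chan_base)
{
	long timeout = 10000;

	/* A zero control block address means the channel is already idle */
	if (!readl(chan_base + BCM2835_DMA_ADDR))
		return;

	/* Clearing the ACTIVE flag pauses the channel */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* Bounded busy-wait for outstanding AXI writes to drain */
	while ((readl(chan_base + BCM2835_DMA_CS) &
		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
		cpu_relax();

	if (!timeout)
		dev_err(dev, "failed to complete outstanding writes\n");

	/* Reset the channel; this also clears the control block address */
	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
}
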
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 15b2453d2647..ffc0adc2f6ce 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -367,8 +367,7 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 	struct axi_dmac_desc *desc;
 	unsigned int i;
 
-	desc = kzalloc(sizeof(struct axi_dmac_desc) +
-		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
 
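
This hunk, like the bcm2835 and jz4780 ones, converts an open-coded sizeof(*p) + n * sizeof(elem) allocation into struct_size() from <linux/overflow.h>, which saturates to SIZE_MAX on arithmetic overflow so the allocator fails cleanly instead of returning an undersized buffer. A minimal sketch of the pattern with a hypothetical descriptor type (example_desc/example_alloc are illustrative, not driver code):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical type mirroring axi_dmac_desc's shape */
struct example_desc {
	unsigned int num_sgs;
	u64 sg[];		/* flexible trailing array */
};

static struct example_desc *example_alloc(unsigned int num_sgs)
{
	struct example_desc *desc;

	/*
	 * struct_size() computes sizeof(*desc) + num_sgs * sizeof(desc->sg[0]),
	 * saturating to SIZE_MAX on overflow, in which case kzalloc()
	 * returns NULL rather than an undersized buffer.
	 */
	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (desc)
		desc->num_sgs = num_sgs;
	return desc;
}
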
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a8b6225faa12..9ce0a386225b 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 	if (!soc_data)
 		return -EINVAL;
 
-	jzdma = devm_kzalloc(dev, sizeof(*jzdma)
-			     + sizeof(*jzdma->chan) * soc_data->nb_channels,
-			     GFP_KERNEL);
+	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
+			     soc_data->nb_channels), GFP_KERNEL);
 	if (!jzdma)
 		return -ENOMEM;
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..50221d467d86 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -200,15 +200,20 @@ struct dmatest_done {
 	wait_queue_head_t *wait;
 };
 
+struct dmatest_data {
+	u8		**raw;
+	u8		**aligned;
+	unsigned int	cnt;
+	unsigned int	off;
+};
+
 struct dmatest_thread {
 	struct list_head node;
 	struct dmatest_info *info;
 	struct task_struct *task;
 	struct dma_chan *chan;
-	u8 **srcs;
-	u8 **usrcs;
-	u8 **dsts;
-	u8 **udsts;
+	struct dmatest_data src;
+	struct dmatest_data dst;
 	enum dma_transaction_type type;
 	wait_queue_head_t done_wait;
 	struct dmatest_done test_done;
@@ -481,6 +486,53 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
 	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
 }
 
+static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
+{
+	unsigned int i;
+
+	for (i = 0; i < cnt; i++)
+		kfree(d->raw[i]);
+
+	kfree(d->aligned);
+	kfree(d->raw);
+}
+
+static void dmatest_free_test_data(struct dmatest_data *d)
+{
+	__dmatest_free_test_data(d, d->cnt);
+}
+
+static int dmatest_alloc_test_data(struct dmatest_data *d,
+		unsigned int buf_size, u8 align)
+{
+	unsigned int i = 0;
+
+	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!d->raw)
+		return -ENOMEM;
+
+	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!d->aligned)
+		goto err;
+
+	for (i = 0; i < d->cnt; i++) {
+		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+		if (!d->raw[i])
+			goto err;
+
+		/* align to alignment restriction */
+		if (align)
+			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
+		else
+			d->aligned[i] = d->raw[i];
+	}
+
+	return 0;
+err:
+	__dmatest_free_test_data(d, i);
+	return -ENOMEM;
+}
+
 /*
  * This function repeatedly tests DMA transfers of various lengths and
  * offsets for a given operation type until it is told to exit by
@@ -511,8 +563,9 @@ static int dmatest_func(void *data)
 	enum dma_ctrl_flags flags;
 	u8 *pq_coefs = NULL;
 	int ret;
-	int src_cnt;
-	int dst_cnt;
+	unsigned int buf_size;
+	struct dmatest_data *src;
+	struct dmatest_data *dst;
 	int i;
 	ktime_t ktime, start, diff;
 	ktime_t filltime = 0;
@@ -535,25 +588,27 @@ static int dmatest_func(void *data)
 	params = &info->params;
 	chan = thread->chan;
 	dev = chan->device;
+	src = &thread->src;
+	dst = &thread->dst;
 	if (thread->type == DMA_MEMCPY) {
 		align = params->alignment < 0 ? dev->copy_align :
 						params->alignment;
-		src_cnt = dst_cnt = 1;
+		src->cnt = dst->cnt = 1;
 	} else if (thread->type == DMA_MEMSET) {
 		align = params->alignment < 0 ? dev->fill_align :
 						params->alignment;
-		src_cnt = dst_cnt = 1;
+		src->cnt = dst->cnt = 1;
 		is_memset = true;
 	} else if (thread->type == DMA_XOR) {
 		/* force odd to ensure dst = src */
-		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
-		dst_cnt = 1;
+		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
+		dst->cnt = 1;
 		align = params->alignment < 0 ? dev->xor_align :
 						params->alignment;
 	} else if (thread->type == DMA_PQ) {
 		/* force odd to ensure dst = src */
-		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
-		dst_cnt = 2;
+		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
+		dst->cnt = 2;
 		align = params->alignment < 0 ? dev->pq_align :
 						params->alignment;
 
@@ -561,75 +616,38 @@ static int dmatest_func(void *data)
 		if (!pq_coefs)
 			goto err_thread_type;
 
-		for (i = 0; i < src_cnt; i++)
+		for (i = 0; i < src->cnt; i++)
 			pq_coefs[i] = 1;
 	} else
 		goto err_thread_type;
 
 	/* Check if buffer count fits into map count variable (u8) */
-	if ((src_cnt + dst_cnt) >= 255) {
+	if ((src->cnt + dst->cnt) >= 255) {
 		pr_err("too many buffers (%d of 255 supported)\n",
-		       src_cnt + dst_cnt);
+		       src->cnt + dst->cnt);
 		goto err_free_coefs;
 	}
 
-	if (1 << align > params->buf_size) {
+	buf_size = params->buf_size;
+	if (1 << align > buf_size) {
 		pr_err("%u-byte buffer too small for %d-byte alignment\n",
-		       params->buf_size, 1 << align);
+		       buf_size, 1 << align);
 		goto err_free_coefs;
 	}
 
-	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->srcs)
+	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
 		goto err_free_coefs;
 
-	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->usrcs)
-		goto err_usrcs;
-
-	for (i = 0; i < src_cnt; i++) {
-		thread->usrcs[i] = kmalloc(params->buf_size + align,
-					   GFP_KERNEL);
-		if (!thread->usrcs[i])
-			goto err_srcbuf;
-
-		/* align srcs to alignment restriction */
-		if (align)
-			thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
-		else
-			thread->srcs[i] = thread->usrcs[i];
-	}
-	thread->srcs[i] = NULL;
-
-	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->dsts)
-		goto err_dsts;
-
-	thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->udsts)
-		goto err_udsts;
-
-	for (i = 0; i < dst_cnt; i++) {
-		thread->udsts[i] = kmalloc(params->buf_size + align,
-					   GFP_KERNEL);
-		if (!thread->udsts[i])
-			goto err_dstbuf;
-
-		/* align dsts to alignment restriction */
-		if (align)
-			thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
-		else
-			thread->dsts[i] = thread->udsts[i];
-	}
-	thread->dsts[i] = NULL;
+	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
+		goto err_src;
 
 	set_user_nice(current, 10);
 
-	srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
 	if (!srcs)
-		goto err_dstbuf;
+		goto err_dst;
 
-	dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
 	if (!dma_pq)
 		goto err_srcs_array;
 
@@ -644,21 +662,21 @@ static int dmatest_func(void *data)
 		struct dma_async_tx_descriptor *tx = NULL;
 		struct dmaengine_unmap_data *um;
 		dma_addr_t *dsts;
-		unsigned int src_off, dst_off, len;
+		unsigned int len;
 
 		total_tests++;
 
 		if (params->transfer_size) {
-			if (params->transfer_size >= params->buf_size) {
+			if (params->transfer_size >= buf_size) {
 				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
-				       params->transfer_size, params->buf_size);
+				       params->transfer_size, buf_size);
 				break;
 			}
 			len = params->transfer_size;
 		} else if (params->norandom) {
-			len = params->buf_size;
+			len = buf_size;
 		} else {
-			len = dmatest_random() % params->buf_size + 1;
+			len = dmatest_random() % buf_size + 1;
 		}
 
@@ -670,59 +688,59 @@ static int dmatest_func(void *data)
 		total_len += len;
 
 		if (params->norandom) {
-			src_off = 0;
-			dst_off = 0;
+			src->off = 0;
+			dst->off = 0;
 		} else {
-			src_off = dmatest_random() % (params->buf_size - len + 1);
-			dst_off = dmatest_random() % (params->buf_size - len + 1);
+			src->off = dmatest_random() % (buf_size - len + 1);
+			dst->off = dmatest_random() % (buf_size - len + 1);
 
-			src_off = (src_off >> align) << align;
-			dst_off = (dst_off >> align) << align;
+			src->off = (src->off >> align) << align;
+			dst->off = (dst->off >> align) << align;
 		}
 
 		if (!params->noverify) {
 			start = ktime_get();
-			dmatest_init_srcs(thread->srcs, src_off, len,
-					  params->buf_size, is_memset);
-			dmatest_init_dsts(thread->dsts, dst_off, len,
-					  params->buf_size, is_memset);
+			dmatest_init_srcs(src->aligned, src->off, len,
+					  buf_size, is_memset);
+			dmatest_init_dsts(dst->aligned, dst->off, len,
+					  buf_size, is_memset);
 
 			diff = ktime_sub(ktime_get(), start);
 			filltime = ktime_add(filltime, diff);
 		}
 
-		um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
+		um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
 					      GFP_KERNEL);
 		if (!um) {
 			failed_tests++;
 			result("unmap data NULL", total_tests,
-			       src_off, dst_off, len, ret);
+			       src->off, dst->off, len, ret);
 			continue;
 		}
 
-		um->len = params->buf_size;
-		for (i = 0; i < src_cnt; i++) {
-			void *buf = thread->srcs[i];
+		um->len = buf_size;
+		for (i = 0; i < src->cnt; i++) {
+			void *buf = src->aligned[i];
 			struct page *pg = virt_to_page(buf);
 			unsigned long pg_off = offset_in_page(buf);
 
 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
-			srcs[i] = um->addr[i] + src_off;
+			srcs[i] = um->addr[i] + src->off;
 			ret = dma_mapping_error(dev->dev, um->addr[i]);
 			if (ret) {
 				dmaengine_unmap_put(um);
 				result("src mapping error", total_tests,
-				       src_off, dst_off, len, ret);
+				       src->off, dst->off, len, ret);
 				failed_tests++;
 				continue;
 			}
 			um->to_cnt++;
 		}
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
-		dsts = &um->addr[src_cnt];
-		for (i = 0; i < dst_cnt; i++) {
-			void *buf = thread->dsts[i];
+		dsts = &um->addr[src->cnt];
+		for (i = 0; i < dst->cnt; i++) {
+			void *buf = dst->aligned[i];
 			struct page *pg = virt_to_page(buf);
 			unsigned long pg_off = offset_in_page(buf);
 
@@ -732,7 +750,7 @@ static int dmatest_func(void *data)
 			if (ret) {
 				dmaengine_unmap_put(um);
 				result("dst mapping error", total_tests,
-				       src_off, dst_off, len, ret);
+				       src->off, dst->off, len, ret);
 				failed_tests++;
 				continue;
 			}
@@ -741,30 +759,30 @@ static int dmatest_func(void *data)
 
 		if (thread->type == DMA_MEMCPY)
 			tx = dev->device_prep_dma_memcpy(chan,
-							 dsts[0] + dst_off,
+							 dsts[0] + dst->off,
 							 srcs[0], len, flags);
 		else if (thread->type == DMA_MEMSET)
 			tx = dev->device_prep_dma_memset(chan,
-							 dsts[0] + dst_off,
-							 *(thread->srcs[0] + src_off),
+							 dsts[0] + dst->off,
+							 *(src->aligned[0] + src->off),
 							 len, flags);
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
-						      dsts[0] + dst_off,
-						      srcs, src_cnt,
+						      dsts[0] + dst->off,
+						      srcs, src->cnt,
 						      len, flags);
 		else if (thread->type == DMA_PQ) {
-			for (i = 0; i < dst_cnt; i++)
-				dma_pq[i] = dsts[i] + dst_off;
+			for (i = 0; i < dst->cnt; i++)
+				dma_pq[i] = dsts[i] + dst->off;
 			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
-						     src_cnt, pq_coefs,
+						     src->cnt, pq_coefs,
 						     len, flags);
 		}
 
 		if (!tx) {
 			dmaengine_unmap_put(um);
-			result("prep error", total_tests, src_off,
-			       dst_off, len, ret);
+			result("prep error", total_tests, src->off,
+			       dst->off, len, ret);
 			msleep(100);
 			failed_tests++;
 			continue;
@@ -777,8 +795,8 @@ static int dmatest_func(void *data)
 
 		if (dma_submit_error(cookie)) {
 			dmaengine_unmap_put(um);
-			result("submit error", total_tests, src_off,
-			       dst_off, len, ret);
+			result("submit error", total_tests, src->off,
+			       dst->off, len, ret);
 			msleep(100);
 			failed_tests++;
 			continue;
@@ -793,58 +811,58 @@ static int dmatest_func(void *data)
 		dmaengine_unmap_put(um);
 
 		if (!done->done) {
-			result("test timed out", total_tests, src_off, dst_off,
+			result("test timed out", total_tests, src->off, dst->off,
 			       len, 0);
 			failed_tests++;
 			continue;
 		} else if (status != DMA_COMPLETE) {
 			result(status == DMA_ERROR ?
 			       "completion error status" :
-			       "completion busy status", total_tests, src_off,
-			       dst_off, len, ret);
+			       "completion busy status", total_tests, src->off,
+			       dst->off, len, ret);
 			failed_tests++;
 			continue;
 		}
 
 		if (params->noverify) {
-			verbose_result("test passed", total_tests, src_off,
-				       dst_off, len, 0);
+			verbose_result("test passed", total_tests, src->off,
+				       dst->off, len, 0);
 			continue;
 		}
 
 		start = ktime_get();
 		pr_debug("%s: verifying source buffer...\n", current->comm);
-		error_count = dmatest_verify(thread->srcs, 0, src_off,
+		error_count = dmatest_verify(src->aligned, 0, src->off,
 				0, PATTERN_SRC, true, is_memset);
-		error_count += dmatest_verify(thread->srcs, src_off,
-				src_off + len, src_off,
+		error_count += dmatest_verify(src->aligned, src->off,
+				src->off + len, src->off,
 				PATTERN_SRC | PATTERN_COPY, true, is_memset);
-		error_count += dmatest_verify(thread->srcs, src_off + len,
-				params->buf_size, src_off + len,
+		error_count += dmatest_verify(src->aligned, src->off + len,
+				buf_size, src->off + len,
 				PATTERN_SRC, true, is_memset);
 
 		pr_debug("%s: verifying dest buffer...\n", current->comm);
-		error_count += dmatest_verify(thread->dsts, 0, dst_off,
+		error_count += dmatest_verify(dst->aligned, 0, dst->off,
 				0, PATTERN_DST, false, is_memset);
 
-		error_count += dmatest_verify(thread->dsts, dst_off,
-				dst_off + len, src_off,
+		error_count += dmatest_verify(dst->aligned, dst->off,
+				dst->off + len, src->off,
 				PATTERN_SRC | PATTERN_COPY, false, is_memset);
 
-		error_count += dmatest_verify(thread->dsts, dst_off + len,
-				params->buf_size, dst_off + len,
+		error_count += dmatest_verify(dst->aligned, dst->off + len,
+				buf_size, dst->off + len,
 				PATTERN_DST, false, is_memset);
 
 		diff = ktime_sub(ktime_get(), start);
 		comparetime = ktime_add(comparetime, diff);
 
 		if (error_count) {
-			result("data error", total_tests, src_off, dst_off,
+			result("data error", total_tests, src->off, dst->off,
 			       len, error_count);
 			failed_tests++;
 		} else {
-			verbose_result("test passed", total_tests, src_off,
-				       dst_off, len, 0);
+			verbose_result("test passed", total_tests, src->off,
+				       dst->off, len, 0);
 		}
 	}
 	ktime = ktime_sub(ktime_get(), ktime);
@@ -856,19 +874,10 @@ static int dmatest_func(void *data)
 	kfree(dma_pq);
 err_srcs_array:
 	kfree(srcs);
-err_dstbuf:
-	for (i = 0; thread->udsts[i]; i++)
-		kfree(thread->udsts[i]);
-	kfree(thread->udsts);
-err_udsts:
-	kfree(thread->dsts);
-err_dsts:
-err_srcbuf:
-	for (i = 0; thread->usrcs[i]; i++)
-		kfree(thread->usrcs[i]);
-	kfree(thread->usrcs);
-err_usrcs:
-	kfree(thread->srcs);
+err_dst:
+	dmatest_free_test_data(dst);
+err_src:
+	dmatest_free_test_data(src);
 err_free_coefs:
 	kfree(pq_coefs);
 err_thread_type:
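
The new helpers factor the four parallel buffer arrays into one struct dmatest_data per direction: raw[] keeps the kmalloc() cookies for kfree(), aligned[] the PTR_ALIGN()ed addresses actually handed to the DMA API. A hypothetical caller-side sketch of the lifecycle (example_setup is illustrative, not part of the patch):

static int example_setup(struct dmatest_data *src, struct dmatest_data *dst,
			 unsigned int buf_size, u8 align)
{
	src->cnt = 1;				/* e.g. a DMA_MEMCPY test */
	dst->cnt = 1;

	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
		return -ENOMEM;

	if (dmatest_alloc_test_data(dst, buf_size, align) < 0) {
		dmatest_free_test_data(src);	/* unwind, as err_src does */
		return -ENOMEM;
	}

	/* ... run transfers against src->aligned[0] / dst->aligned[0] ... */

	dmatest_free_test_data(dst);
	dmatest_free_test_data(src);
	return 0;
}
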
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index f8888dc0b8dc..18b6014cf9b4 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -75,7 +75,7 @@ struct __packed axi_dma_lli {
 	__le32 sstat;
 	__le32 dstat;
 	__le32 status_lo;
-	__le32 ststus_hi;
+	__le32 status_hi;
 	__le32 reserved_lo;
 	__le32 reserved_hi;
 };
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 04b9728c1d26..e5162690de8f 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
 #
 # DMA engine configuration for dw
 #
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index 2b949c2e4504..63ed895c09aa 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DW_DMAC_CORE)	+= dw_dmac_core.o
-dw_dmac_core-objs	:= core.o
+dw_dmac_core-objs	:= core.o dw.o idma32.o
 
 obj-$(CONFIG_DW_DMAC)		+= dw_dmac.o
 dw_dmac-objs		:= platform.o
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index dc053e62f894..21cb2a58dbd2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Core driver for the Synopsys DesignWare DMA Controller
  *
  * Copyright (C) 2007-2008 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
  * Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/bitops.h>
@@ -37,27 +34,6 @@
  * support descriptor writeback.
  */
 
-#define DWC_DEFAULT_CTLLO(_chan) ({				\
-		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
-		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
-		bool _is_slave = is_slave_direction(_dwc->direction);	\
-		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
-			DW_DMA_MSIZE_16;				\
-		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
-			DW_DMA_MSIZE_16;				\
-		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
-			_dwc->dws.p_master : _dwc->dws.m_master;	\
-		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
-			_dwc->dws.p_master : _dwc->dws.m_master;	\
-								\
-		(DWC_CTLL_DST_MSIZE(_dmsize)			\
-		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
-		 | DWC_CTLL_LLP_D_EN				\
-		 | DWC_CTLL_LLP_S_EN				\
-		 | DWC_CTLL_DMS(_dms)				\
-		 | DWC_CTLL_SMS(_sms));				\
-	})
-
 /* The set of bus widths supported by the DMA controller */
 #define DW_DMA_BUSWIDTHS			\
 	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
@@ -138,44 +114,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	dwc->descs_allocated--;
 }
 
-static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
-{
-	u32 cfghi = 0;
-	u32 cfglo = 0;
-
-	/* Set default burst alignment */
-	cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
-
-	/* Low 4 bits of the request lines */
-	cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
-	cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
-
-	/* Request line extension (2 bits) */
-	cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
-	cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
-
-	channel_writel(dwc, CFG_LO, cfglo);
-	channel_writel(dwc, CFG_HI, cfghi);
-}
-
-static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
-{
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	u32 cfghi = DWC_CFGH_FIFO_MODE;
-	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
-	bool hs_polarity = dwc->dws.hs_polarity;
-
-	cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
-	cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
-	cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
-
-	/* Set polarity of handshake interface */
-	cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
-
-	channel_writel(dwc, CFG_LO, cfglo);
-	channel_writel(dwc, CFG_HI, cfghi);
-}
-
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -183,10 +121,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
 		return;
 
-	if (dw->pdata->is_idma32)
-		dwc_initialize_chan_idma32(dwc);
-	else
-		dwc_initialize_chan_dw(dwc);
+	dw->initialize_chan(dwc);
 
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
@@ -215,37 +150,6 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		cpu_relax();
 }
 
-static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
-		       unsigned int width, size_t *len)
-{
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	u32 block;
-
-	/* Always in bytes for iDMA 32-bit */
-	if (dw->pdata->is_idma32)
-		width = 0;
-
-	if ((bytes >> width) > dwc->block_size) {
-		block = dwc->block_size;
-		*len = block << width;
-	} else {
-		block = bytes >> width;
-		*len = bytes;
-	}
-
-	return block;
-}
-
-static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
-{
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-
-	if (dw->pdata->is_idma32)
-		return IDMA32C_CTLH_BLOCK_TS(block);
-
-	return DWC_CTLH_BLOCK_TS(block) << width;
-}
-
 /*----------------------------------------------------------------------*/
 
 /* Perform single block transfer */
@@ -391,10 +295,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 /* Returns how many bytes were already received from source */
 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
 {
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	u32 ctlhi = channel_readl(dwc, CTL_HI);
 	u32 ctllo = channel_readl(dwc, CTL_LO);
 
-	return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
+	return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
 }
 
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -651,7 +556,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	unsigned int src_width;
 	unsigned int dst_width;
 	unsigned int data_width = dw->pdata->data_width[m_master];
-	u32 ctllo;
+	u32 ctllo, ctlhi;
 	u8 lms = DWC_LLP_LMS(m_master);
 
 	dev_vdbg(chan2dev(chan),
@@ -667,7 +572,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 	src_width = dst_width = __ffs(data_width | src | dest | len);
 
-	ctllo = DWC_DEFAULT_CTLLO(chan)
+	ctllo = dw->prepare_ctllo(dwc)
 			| DWC_CTLL_DST_WIDTH(dst_width)
 			| DWC_CTLL_SRC_WIDTH(src_width)
 			| DWC_CTLL_DST_INC
@@ -680,10 +585,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		if (!desc)
 			goto err_desc_get;
 
+		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);
+
 		lli_write(desc, sar, src + offset);
 		lli_write(desc, dar, dest + offset);
 		lli_write(desc, ctllo, ctllo);
-		lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
+		lli_write(desc, ctlhi, ctlhi);
 		desc->len = xfer_count;
 
 		if (!first) {
@@ -721,7 +628,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
 	struct dw_desc		*prev;
 	struct dw_desc		*first;
-	u32			ctllo;
+	u32			ctllo, ctlhi;
 	u8			m_master = dwc->dws.m_master;
 	u8			lms = DWC_LLP_LMS(m_master);
 	dma_addr_t		reg;
@@ -745,10 +652,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	case DMA_MEM_TO_DEV:
 		reg_width = __ffs(sconfig->dst_addr_width);
 		reg = sconfig->dst_addr;
-		ctllo = (DWC_DEFAULT_CTLLO(chan)
+		ctllo = dw->prepare_ctllo(dwc)
 				| DWC_CTLL_DST_WIDTH(reg_width)
 				| DWC_CTLL_DST_FIX
-				| DWC_CTLL_SRC_INC);
+				| DWC_CTLL_SRC_INC;
 
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
@@ -768,9 +675,11 @@ slave_sg_todev_fill_desc:
 			if (!desc)
 				goto err_desc_get;
 
+			ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
+
 			lli_write(desc, sar, mem);
 			lli_write(desc, dar, reg);
-			lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
+			lli_write(desc, ctlhi, ctlhi);
 			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
 			desc->len = dlen;
 
@@ -793,10 +702,10 @@ slave_sg_todev_fill_desc:
 	case DMA_DEV_TO_MEM:
 		reg_width = __ffs(sconfig->src_addr_width);
 		reg = sconfig->src_addr;
-		ctllo = (DWC_DEFAULT_CTLLO(chan)
+		ctllo = dw->prepare_ctllo(dwc)
 				| DWC_CTLL_SRC_WIDTH(reg_width)
 				| DWC_CTLL_DST_INC
-				| DWC_CTLL_SRC_FIX);
+				| DWC_CTLL_SRC_FIX;
 
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
 			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
@@ -814,9 +723,11 @@ slave_sg_fromdev_fill_desc:
 			if (!desc)
 				goto err_desc_get;
 
+			ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
+
 			lli_write(desc, sar, reg);
 			lli_write(desc, dar, mem);
-			lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
+			lli_write(desc, ctlhi, ctlhi);
 			mem_width = __ffs(data_width | mem | dlen);
 			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
 			desc->len = dlen;
@@ -876,22 +787,12 @@ EXPORT_SYMBOL_GPL(dw_dma_filter);
 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dma_slave_config *sc = &dwc->dma_sconfig;
 	struct dw_dma *dw = to_dw_dma(chan->device);
-	/*
-	 * Fix sconfig's burst size according to dw_dmac. We need to convert
-	 * them as:
-	 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
-	 *
-	 * NOTE: burst size 2 is not supported by DesignWare controller.
-	 *       iDMA 32-bit supports it.
-	 */
-	u32 s = dw->pdata->is_idma32 ? 1 : 2;
 
 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
 
-	sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
-	sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
+	dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+	dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
 
 	return 0;
 }
@@ -900,16 +801,9 @@ static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	unsigned int count = 20;	/* timeout iterations */
-	u32 cfglo;
 
-	cfglo = channel_readl(dwc, CFG_LO);
-	if (dw->pdata->is_idma32) {
-		if (drain)
-			cfglo |= IDMA32C_CFGL_CH_DRAIN;
-		else
-			cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
-	}
-	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+	dw->suspend_chan(dwc, drain);
+
 	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
 		udelay(2);
 
@@ -928,11 +822,11 @@ static int dwc_pause(struct dma_chan *chan)
 	return 0;
 }
 
-static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
 {
-	u32 cfglo = channel_readl(dwc, CFG_LO);
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 
-	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+	dw->resume_chan(dwc, drain);
 
 	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
 }
@@ -945,7 +839,7 @@ static int dwc_resume(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 
 	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
-		dwc_chan_resume(dwc);
+		dwc_chan_resume(dwc, false);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -968,7 +862,7 @@ static int dwc_terminate_all(struct dma_chan *chan)
 
 	dwc_chan_disable(dw, dwc);
 
-	dwc_chan_resume(dwc);
+	dwc_chan_resume(dwc, true);
 
 	/* active_list entries will end up before queued entries */
 	list_splice_init(&dwc->queue, &list);
@@ -1058,33 +952,7 @@ static void dwc_issue_pending(struct dma_chan *chan)
 
 /*----------------------------------------------------------------------*/
 
-/*
- * Program FIFO size of channels.
- *
- * By default full FIFO (512 bytes) is assigned to channel 0. Here we
- * slice FIFO on equal parts between channels.
- */
-static void idma32_fifo_partition(struct dw_dma *dw)
-{
-	u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
-		    IDMA32C_FP_UPDATE;
-	u64 fifo_partition = 0;
-
-	if (!dw->pdata->is_idma32)
-		return;
-
-	/* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
-	fifo_partition |= value << 0;
-
-	/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
-	fifo_partition |= value << 32;
-
-	/* Program FIFO Partition registers - 64 bytes per channel */
-	idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
-	idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
-}
-
-static void dw_dma_off(struct dw_dma *dw)
+void do_dw_dma_off(struct dw_dma *dw)
 {
 	unsigned int i;
 
@@ -1103,7 +971,7 @@ static void dw_dma_off(struct dw_dma *dw)
 	clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
 }
 
-static void dw_dma_on(struct dw_dma *dw)
+void do_dw_dma_on(struct dw_dma *dw)
 {
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 }
@@ -1139,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
 	/* Enable controller here if needed */
 	if (!dw->in_use)
-		dw_dma_on(dw);
+		do_dw_dma_on(dw);
 	dw->in_use |= dwc->mask;
 
 	return 0;
@@ -1150,7 +1018,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	unsigned long flags;
-	LIST_HEAD(list);
 
 	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
 		dwc->descs_allocated);
@@ -1177,30 +1044,25 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	/* Disable controller in case it was a last user */
 	dw->in_use &= ~dwc->mask;
 	if (!dw->in_use)
-		dw_dma_off(dw);
+		do_dw_dma_off(dw);
 
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
-int dw_dma_probe(struct dw_dma_chip *chip)
+int do_dma_probe(struct dw_dma_chip *chip)
 {
+	struct dw_dma *dw = chip->dw;
 	struct dw_dma_platform_data *pdata;
-	struct dw_dma *dw;
 	bool autocfg = false;
 	unsigned int dw_params;
 	unsigned int i;
 	int err;
 
-	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
-	if (!dw)
-		return -ENOMEM;
-
 	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
 	if (!dw->pdata)
 		return -ENOMEM;
 
 	dw->regs = chip->regs;
-	chip->dw = dw;
 
 	pm_runtime_get_sync(chip->dev);
 
@@ -1227,8 +1089,6 @@ int dw_dma_probe(struct dw_dma_chip *chip)
 		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
 
 		/* Fill platform data with the default values */
-		pdata->is_private = true;
-		pdata->is_memcpy = true;
 		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
 		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
 	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
@@ -1252,15 +1112,10 @@ int dw_dma_probe(struct dw_dma_chip *chip)
 	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
 
 	/* Force dma off, just in case */
-	dw_dma_off(dw);
-
-	idma32_fifo_partition(dw);
+	dw->disable(dw);
 
 	/* Device and instance ID for IRQ and DMA pool */
-	if (pdata->is_idma32)
-		snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
-	else
-		snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);
+	dw->set_device_name(dw, chip->id);
 
 	/* Create a pool of consistent memory blocks for hardware descriptors */
 	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
@@ -1340,10 +1195,8 @@ int dw_dma_probe(struct dw_dma_chip *chip)
 
 	/* Set capabilities */
 	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
-	if (pdata->is_private)
-		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
-	if (pdata->is_memcpy)
-		dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
 
 	dw->dma.dev = chip->dev;
 	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
@@ -1384,16 +1237,15 @@ err_pdata:
 	pm_runtime_put_sync_suspend(chip->dev);
 	return err;
 }
-EXPORT_SYMBOL_GPL(dw_dma_probe);
 
-int dw_dma_remove(struct dw_dma_chip *chip)
+int do_dma_remove(struct dw_dma_chip *chip)
 {
 	struct dw_dma *dw = chip->dw;
 	struct dw_dma_chan *dwc, *_dwc;
 
 	pm_runtime_get_sync(chip->dev);
 
-	dw_dma_off(dw);
+	do_dw_dma_off(dw);
 	dma_async_device_unregister(&dw->dma);
 
 	free_irq(chip->irq, dw);
@@ -1408,27 +1260,24 @@ int dw_dma_remove(struct dw_dma_chip *chip)
 	pm_runtime_put_sync_suspend(chip->dev);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dw_dma_remove);
 
-int dw_dma_disable(struct dw_dma_chip *chip)
+int do_dw_dma_disable(struct dw_dma_chip *chip)
 {
 	struct dw_dma *dw = chip->dw;
 
-	dw_dma_off(dw);
+	dw->disable(dw);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dw_dma_disable);
+EXPORT_SYMBOL_GPL(do_dw_dma_disable);
 
-int dw_dma_enable(struct dw_dma_chip *chip)
+int do_dw_dma_enable(struct dw_dma_chip *chip)
 {
 	struct dw_dma *dw = chip->dw;
 
-	idma32_fifo_partition(dw);
-
-	dw_dma_on(dw);
+	dw->enable(dw);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dw_dma_enable);
+EXPORT_SYMBOL_GPL(do_dw_dma_enable);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
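
The core.c changes above replace every if (dw->pdata->is_idma32) branch with an indirect call through per-controller callbacks hung off struct dw_dma; dw.c and idma32.c below supply the two implementations. The actual field declarations live in the regs.h hunk (listed in the diffstat but not shown here); a condensed, illustrative sketch of the callback set the core now dispatches through (field names and signatures inferred from the calls and definitions in this patch, not the verbatim declaration):

struct dw_dma_ops_sketch {
	/* Channel operations */
	void	(*initialize_chan)(struct dw_dma_chan *dwc);
	void	(*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
	void	(*resume_chan)(struct dw_dma_chan *dwc, bool drain);
	u32	(*prepare_ctllo)(struct dw_dma_chan *dwc);
	void	(*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
	u32	(*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
			       unsigned int width, size_t *len);
	size_t	(*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);

	/* Device operations */
	void	(*set_device_name)(struct dw_dma *dw, int id);
	void	(*disable)(struct dw_dma *dw);
	void	(*enable)(struct dw_dma *dw);
};

Each flavor's probe wrapper fills these in before handing off to the shared do_dma_probe(), so the core never needs to know which controller it is driving.
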
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
new file mode 100644
index 000000000000..7a085b3c1854
--- /dev/null
+++ b/drivers/dma/dw/dw.c
@@ -0,0 +1,138 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2007-2008 Atmel Corporation
3// Copyright (C) 2010-2011 ST Microelectronics
4// Copyright (C) 2013,2018 Intel Corporation
5
6#include <linux/bitops.h>
7#include <linux/dmaengine.h>
8#include <linux/errno.h>
9#include <linux/slab.h>
10#include <linux/types.h>
11
12#include "internal.h"
13
14static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
15{
16 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
17 u32 cfghi = DWC_CFGH_FIFO_MODE;
18 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
19 bool hs_polarity = dwc->dws.hs_polarity;
20
21 cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
22 cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
23 cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
24
25 /* Set polarity of handshake interface */
26 cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
27
28 channel_writel(dwc, CFG_LO, cfglo);
29 channel_writel(dwc, CFG_HI, cfghi);
30}
31
32static void dw_dma_suspend_chan(struct dw_dma_chan *dwc, bool drain)
33{
34 u32 cfglo = channel_readl(dwc, CFG_LO);
35
36 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
37}
38
39static void dw_dma_resume_chan(struct dw_dma_chan *dwc, bool drain)
40{
41 u32 cfglo = channel_readl(dwc, CFG_LO);
42
43 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
44}
45
46static u32 dw_dma_bytes2block(struct dw_dma_chan *dwc,
47 size_t bytes, unsigned int width, size_t *len)
48{
49 u32 block;
50
51 if ((bytes >> width) > dwc->block_size) {
52 block = dwc->block_size;
53 *len = dwc->block_size << width;
54 } else {
55 block = bytes >> width;
56 *len = bytes;
57 }
58
59 return block;
60}
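
The two bytes2block callbacks differ only in units: the DW controller counts a block in items of (1 << width) bytes, while the iDMA 32-bit variant below counts raw bytes. A minimal userspace sketch of the DW arithmetic, with an illustrative 4095-item block size (not a claim about any specific part):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Model of dw_dma_bytes2block(): clamp one request to the HW
	 * block size, counted in items of (1 << width) bytes.
	 */
	static uint32_t bytes2block(size_t bytes, unsigned int width,
				    uint32_t block_size, size_t *len)
	{
		uint32_t block;

		if ((bytes >> width) > block_size) {
			block = block_size;	/* clamp to the HW maximum */
			*len = (size_t)block_size << width;
		} else {
			block = bytes >> width;	/* whole items only */
			*len = bytes;
		}
		return block;
	}

	int main(void)
	{
		size_t len;

		/* 32-bit items (width = 2): 20000 bytes clamps to
		 * 4095 items, i.e. 16380 bytes consumed this round.
		 */
		assert(bytes2block(20000, 2, 4095, &len) == 4095);
		assert(len == 16380);

		/* A short transfer fits in one block: 256 bytes = 64 items. */
		assert(bytes2block(256, 2, 4095, &len) == 64 && len == 256);
		return 0;
	}

The descriptor-building loop in the core advances by *len and calls this again until the whole request is covered.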
61
62static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
63{
64 return DWC_CTLH_BLOCK_TS(block) << width;
65}
66
67static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
68{
69 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
70 bool is_slave = is_slave_direction(dwc->direction);
71 u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16;
72 u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16;
73 u8 p_master = dwc->dws.p_master;
74 u8 m_master = dwc->dws.m_master;
75 u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
76 u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master;
77
78 return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
79 DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) |
80 DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms);
81}
82
83static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
84{
85 /*
86 * Encode the burst size for the dw_dmac register field. The values map as:
87 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
88 */
89 *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0;
90}
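
The encoding leans on fls(): for power-of-two bursts the register wants 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, which is fls(x) - 2 for x > 1. A self-contained check, with __builtin_clz() standing in for the kernel's fls():

	#include <assert.h>

	/* Userspace stand-in for the kernel's fls(): 1-based index of
	 * the highest set bit, with fls(0) == 0.
	 */
	static int fls_approx(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	static unsigned int encode_maxburst(unsigned int maxburst)
	{
		return maxburst > 1 ? fls_approx(maxburst) - 2 : 0;
	}

	int main(void)
	{
		assert(encode_maxburst(1)  == 0);	/* <= 1 takes the 0 path */
		assert(encode_maxburst(4)  == 1);	/* fls(4)  = 3 -> 1 */
		assert(encode_maxburst(8)  == 2);	/* fls(8)  = 4 -> 2 */
		assert(encode_maxburst(16) == 3);	/* fls(16) = 5 -> 3 */
		return 0;
	}

The iDMA 32-bit flavor later in this patch uses fls(x) - 1 instead, which lands directly on the idma32_msize enum (2 -> IDMA32_MSIZE_2, 8 -> IDMA32_MSIZE_8, and so on).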
91
92static void dw_dma_set_device_name(struct dw_dma *dw, int id)
93{
94 snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id);
95}
96
97static void dw_dma_disable(struct dw_dma *dw)
98{
99 do_dw_dma_off(dw);
100}
101
102static void dw_dma_enable(struct dw_dma *dw)
103{
104 do_dw_dma_on(dw);
105}
106
107int dw_dma_probe(struct dw_dma_chip *chip)
108{
109 struct dw_dma *dw;
110
111 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
112 if (!dw)
113 return -ENOMEM;
114
115 /* Channel operations */
116 dw->initialize_chan = dw_dma_initialize_chan;
117 dw->suspend_chan = dw_dma_suspend_chan;
118 dw->resume_chan = dw_dma_resume_chan;
119 dw->prepare_ctllo = dw_dma_prepare_ctllo;
120 dw->encode_maxburst = dw_dma_encode_maxburst;
121 dw->bytes2block = dw_dma_bytes2block;
122 dw->block2bytes = dw_dma_block2bytes;
123
124 /* Device operations */
125 dw->set_device_name = dw_dma_set_device_name;
126 dw->disable = dw_dma_disable;
127 dw->enable = dw_dma_enable;
128
129 chip->dw = dw;
130 return do_dma_probe(chip);
131}
132EXPORT_SYMBOL_GPL(dw_dma_probe);
133
134int dw_dma_remove(struct dw_dma_chip *chip)
135{
136 return do_dma_remove(chip);
137}
138EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
new file mode 100644
index 000000000000..f00657308811
--- /dev/null
+++ b/drivers/dma/dw/idma32.c
@@ -0,0 +1,160 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2013,2018 Intel Corporation
3
4#include <linux/bitops.h>
5#include <linux/dmaengine.h>
6#include <linux/errno.h>
7#include <linux/slab.h>
8#include <linux/types.h>
9
10#include "internal.h"
11
12static void idma32_initialize_chan(struct dw_dma_chan *dwc)
13{
14 u32 cfghi = 0;
15 u32 cfglo = 0;
16
17 /* Set default burst alignment */
18 cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
19
20 /* Low 4 bits of the request lines */
21 cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
22 cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
23
24 /* Request line extension (2 bits) */
25 cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
26 cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
27
28 channel_writel(dwc, CFG_LO, cfglo);
29 channel_writel(dwc, CFG_HI, cfghi);
30}
31
32static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain)
33{
34 u32 cfglo = channel_readl(dwc, CFG_LO);
35
36 if (drain)
37 cfglo |= IDMA32C_CFGL_CH_DRAIN;
38
39 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
40}
41
42static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain)
43{
44 u32 cfglo = channel_readl(dwc, CFG_LO);
45
46 if (drain)
47 cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
48
49 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
50}
51
52static u32 idma32_bytes2block(struct dw_dma_chan *dwc,
53 size_t bytes, unsigned int width, size_t *len)
54{
55 u32 block;
56
57 if (bytes > dwc->block_size) {
58 block = dwc->block_size;
59 *len = dwc->block_size;
60 } else {
61 block = bytes;
62 *len = bytes;
63 }
64
65 return block;
66}
67
68static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
69{
70 return IDMA32C_CTLH_BLOCK_TS(block);
71}
72
73static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
74{
75 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
76 bool is_slave = is_slave_direction(dwc->direction);
77 u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8;
78 u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8;
79
80 return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
81 DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
82}
83
84static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
85{
86 *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
87}
88
89static void idma32_set_device_name(struct dw_dma *dw, int id)
90{
91 snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
92}
93
94/*
95 * Program the FIFO size for the channels.
96 *
97 * By default, the full FIFO (512 bytes) is assigned to channel 0. Here we
98 * slice the FIFO into equal parts among the channels.
99 */
100static void idma32_fifo_partition(struct dw_dma *dw)
101{
102 u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
103 IDMA32C_FP_UPDATE;
104 u64 fifo_partition = 0;
105
106 /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
107 fifo_partition |= value << 0;
108
109 /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
110 fifo_partition |= value << 32;
111
112 /* Program FIFO Partition registers - 64 bytes per channel */
113 idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
114 idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
115}
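
How the 64-bit register image works out: each write covers two channel pairs, and the same 64/64 split is replicated into both 32-bit halves and both FIFO_PARTITION registers, giving every one of the eight channels 512 / 8 = 64 bytes. A sketch with illustrative stand-ins for the IDMA32C_FP_* field offsets (the real shifts live in regs.h):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative field positions, not authoritative hardware ones. */
	#define FP_PSIZE_CH0(x)	((uint64_t)(x) << 0)
	#define FP_PSIZE_CH1(x)	((uint64_t)(x) << 13)
	#define FP_UPDATE	((uint64_t)1 << 26)

	int main(void)
	{
		/* 64 bytes for each of the two channels in one half. */
		uint64_t value = FP_PSIZE_CH0(64) | FP_PSIZE_CH1(64) | FP_UPDATE;
		uint64_t fifo_partition = 0;

		fifo_partition |= value << 0;	/* channels 0..1 (or 4..5) */
		fifo_partition |= value << 32;	/* channels 2..3 (or 6..7) */

		/* Both halves carry the identical 64/64 split. */
		assert((uint32_t)fifo_partition == (uint32_t)(fifo_partition >> 32));
		printf("FIFO_PARTITIONx = %#llx\n",
		       (unsigned long long)fifo_partition);
		return 0;
	}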
116
117static void idma32_disable(struct dw_dma *dw)
118{
119 do_dw_dma_off(dw);
120 idma32_fifo_partition(dw);
121}
122
123static void idma32_enable(struct dw_dma *dw)
124{
125 idma32_fifo_partition(dw);
126 do_dw_dma_on(dw);
127}
128
129int idma32_dma_probe(struct dw_dma_chip *chip)
130{
131 struct dw_dma *dw;
132
133 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
134 if (!dw)
135 return -ENOMEM;
136
137 /* Channel operations */
138 dw->initialize_chan = idma32_initialize_chan;
139 dw->suspend_chan = idma32_suspend_chan;
140 dw->resume_chan = idma32_resume_chan;
141 dw->prepare_ctllo = idma32_prepare_ctllo;
142 dw->encode_maxburst = idma32_encode_maxburst;
143 dw->bytes2block = idma32_bytes2block;
144 dw->block2bytes = idma32_block2bytes;
145
146 /* Device operations */
147 dw->set_device_name = idma32_set_device_name;
148 dw->disable = idma32_disable;
149 dw->enable = idma32_enable;
150
151 chip->dw = dw;
152 return do_dma_probe(chip);
153}
154EXPORT_SYMBOL_GPL(idma32_dma_probe);
155
156int idma32_dma_remove(struct dw_dma_chip *chip)
157{
158 return do_dma_remove(chip);
159}
160EXPORT_SYMBOL_GPL(idma32_dma_remove);
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 41439732ff6b..1dd7a4e6dd23 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -1,11 +1,8 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Driver for the Synopsys DesignWare DMA Controller 3 * Driver for the Synopsys DesignWare DMA Controller
3 * 4 *
4 * Copyright (C) 2013 Intel Corporation 5 * Copyright (C) 2013 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */ 6 */
10 7
11#ifndef _DMA_DW_INTERNAL_H 8#ifndef _DMA_DW_INTERNAL_H
@@ -15,8 +12,14 @@
15 12
16#include "regs.h" 13#include "regs.h"
17 14
18int dw_dma_disable(struct dw_dma_chip *chip); 15int do_dma_probe(struct dw_dma_chip *chip);
19int dw_dma_enable(struct dw_dma_chip *chip); 16int do_dma_remove(struct dw_dma_chip *chip);
17
18void do_dw_dma_on(struct dw_dma *dw);
19void do_dw_dma_off(struct dw_dma *dw);
20
21int do_dw_dma_disable(struct dw_dma_chip *chip);
22int do_dw_dma_enable(struct dw_dma_chip *chip);
20 23
21extern bool dw_dma_filter(struct dma_chan *chan, void *param); 24extern bool dw_dma_filter(struct dma_chan *chan, void *param);
22 25
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 7778ed705a1a..e79a75db0852 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * PCI driver for the Synopsys DesignWare DMA Controller 3 * PCI driver for the Synopsys DesignWare DMA Controller
3 * 4 *
4 * Copyright (C) 2013 Intel Corporation 5 * Copyright (C) 2013 Intel Corporation
5 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 6 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */ 7 */
11 8
12#include <linux/module.h> 9#include <linux/module.h>
@@ -15,21 +12,33 @@
15 12
16#include "internal.h" 13#include "internal.h"
17 14
18static struct dw_dma_platform_data mrfld_pdata = { 15struct dw_dma_pci_data {
16 const struct dw_dma_platform_data *pdata;
17 int (*probe)(struct dw_dma_chip *chip);
18};
19
20static const struct dw_dma_pci_data dw_pci_data = {
21 .probe = dw_dma_probe,
22};
23
24static const struct dw_dma_platform_data idma32_pdata = {
19 .nr_channels = 8, 25 .nr_channels = 8,
20 .is_private = true,
21 .is_memcpy = true,
22 .is_idma32 = true,
23 .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, 26 .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
24 .chan_priority = CHAN_PRIORITY_ASCENDING, 27 .chan_priority = CHAN_PRIORITY_ASCENDING,
25 .block_size = 131071, 28 .block_size = 131071,
26 .nr_masters = 1, 29 .nr_masters = 1,
27 .data_width = {4}, 30 .data_width = {4},
31 .multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
32};
33
34static const struct dw_dma_pci_data idma32_pci_data = {
35 .pdata = &idma32_pdata,
36 .probe = idma32_dma_probe,
28}; 37};
29 38
30static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 39static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
31{ 40{
32 const struct dw_dma_platform_data *pdata = (void *)pid->driver_data; 41 const struct dw_dma_pci_data *data = (void *)pid->driver_data;
33 struct dw_dma_chip *chip; 42 struct dw_dma_chip *chip;
34 int ret; 43 int ret;
35 44
@@ -62,9 +71,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
62 chip->id = pdev->devfn; 71 chip->id = pdev->devfn;
63 chip->regs = pcim_iomap_table(pdev)[0]; 72 chip->regs = pcim_iomap_table(pdev)[0];
64 chip->irq = pdev->irq; 73 chip->irq = pdev->irq;
65 chip->pdata = pdata; 74 chip->pdata = data->pdata;
66 75
67 ret = dw_dma_probe(chip); 76 ret = data->probe(chip);
68 if (ret) 77 if (ret)
69 return ret; 78 return ret;
70 79
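
Stashing a small struct in driver_data, with the platform data and a probe callback side by side, lets one pci_driver serve both controller flavors without ID checks in the probe path. A condensed model of the dispatch (kernel types reduced to what the pattern needs; device IDs taken from the table below):

	#include <stdio.h>

	typedef unsigned long kernel_ulong_t;

	struct pci_data {
		const char *flavor;
		int (*probe)(void);
	};

	static int dw_probe(void)     { puts("probing DW DMA");      return 0; }
	static int idma32_probe(void) { puts("probing iDMA 32-bit"); return 0; }

	static const struct pci_data dw_data     = { "dw",     dw_probe };
	static const struct pci_data idma32_data = { "idma32", idma32_probe };

	struct pci_id { unsigned int device; kernel_ulong_t driver_data; };

	static const struct pci_id id_table[] = {
		{ 0x0f06, (kernel_ulong_t)&dw_data },		/* BayTrail */
		{ 0x11a2, (kernel_ulong_t)&idma32_data },	/* Merrifield */
	};

	int main(void)
	{
		for (unsigned int i = 0; i < 2; i++) {
			const struct pci_data *data =
				(const struct pci_data *)id_table[i].driver_data;

			data->probe();	/* dispatch, no if/else on IDs */
		}
		return 0;
	}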
@@ -90,7 +99,7 @@ static int dw_pci_suspend_late(struct device *dev)
90 struct pci_dev *pci = to_pci_dev(dev); 99 struct pci_dev *pci = to_pci_dev(dev);
91 struct dw_dma_chip *chip = pci_get_drvdata(pci); 100 struct dw_dma_chip *chip = pci_get_drvdata(pci);
92 101
93 return dw_dma_disable(chip); 102 return do_dw_dma_disable(chip);
94}; 103};
95 104
96static int dw_pci_resume_early(struct device *dev) 105static int dw_pci_resume_early(struct device *dev)
@@ -98,7 +107,7 @@ static int dw_pci_resume_early(struct device *dev)
98 struct pci_dev *pci = to_pci_dev(dev); 107 struct pci_dev *pci = to_pci_dev(dev);
99 struct dw_dma_chip *chip = pci_get_drvdata(pci); 108 struct dw_dma_chip *chip = pci_get_drvdata(pci);
100 109
101 return dw_dma_enable(chip); 110 return do_dw_dma_enable(chip);
102}; 111};
103 112
104#endif /* CONFIG_PM_SLEEP */ 113#endif /* CONFIG_PM_SLEEP */
@@ -109,24 +118,24 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
109 118
110static const struct pci_device_id dw_pci_id_table[] = { 119static const struct pci_device_id dw_pci_id_table[] = {
111 /* Medfield (GPDMA) */ 120 /* Medfield (GPDMA) */
112 { PCI_VDEVICE(INTEL, 0x0827) }, 121 { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data },
113 122
114 /* BayTrail */ 123 /* BayTrail */
115 { PCI_VDEVICE(INTEL, 0x0f06) }, 124 { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data },
116 { PCI_VDEVICE(INTEL, 0x0f40) }, 125 { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data },
117 126
118 /* Merrifield iDMA 32-bit (GPDMA) */ 127 /* Merrifield */
119 { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata }, 128 { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data },
120 129
121 /* Braswell */ 130 /* Braswell */
122 { PCI_VDEVICE(INTEL, 0x2286) }, 131 { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
123 { PCI_VDEVICE(INTEL, 0x22c0) }, 132 { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
124 133
125 /* Haswell */ 134 /* Haswell */
126 { PCI_VDEVICE(INTEL, 0x9c60) }, 135 { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
127 136
128 /* Broadwell */ 137 /* Broadwell */
129 { PCI_VDEVICE(INTEL, 0x9ce0) }, 138 { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data },
130 139
131 { } 140 { }
132}; 141};
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 31ff8113c3de..382dfd9e9600 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Platform driver for the Synopsys DesignWare DMA Controller 3 * Platform driver for the Synopsys DesignWare DMA Controller
3 * 4 *
@@ -6,10 +7,6 @@
6 * Copyright (C) 2013 Intel Corporation 7 * Copyright (C) 2013 Intel Corporation
7 * 8 *
8 * Some parts of this driver are derived from the original dw_dmac. 9 * Some parts of this driver are derived from the original dw_dmac.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */ 10 */
14 11
15#include <linux/module.h> 12#include <linux/module.h>
@@ -128,15 +125,6 @@ dw_dma_parse_dt(struct platform_device *pdev)
128 pdata->nr_masters = nr_masters; 125 pdata->nr_masters = nr_masters;
129 pdata->nr_channels = nr_channels; 126 pdata->nr_channels = nr_channels;
130 127
131 if (of_property_read_bool(np, "is_private"))
132 pdata->is_private = true;
133
134 /*
135 * All known devices, which use DT for configuration, support
136 * memory-to-memory transfers. So enable it by default.
137 */
138 pdata->is_memcpy = true;
139
140 if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) 128 if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
141 pdata->chan_allocation_order = (unsigned char)tmp; 129 pdata->chan_allocation_order = (unsigned char)tmp;
142 130
@@ -264,7 +252,7 @@ static void dw_shutdown(struct platform_device *pdev)
264 struct dw_dma_chip *chip = platform_get_drvdata(pdev); 252 struct dw_dma_chip *chip = platform_get_drvdata(pdev);
265 253
266 /* 254 /*
267 * We have to call dw_dma_disable() to stop any ongoing transfer. On 255 * We have to call do_dw_dma_disable() to stop any ongoing transfer. On
268 * some platforms we can't do that since DMA device is powered off. 256 * some platforms we can't do that since DMA device is powered off.
269 * Moreover we have no possibility to check if the platform is affected 257 * Moreover we have no possibility to check if the platform is affected
270 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put() 258 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
@@ -273,7 +261,7 @@ static void dw_shutdown(struct platform_device *pdev)
273 * used by the driver. 261 * used by the driver.
274 */ 262 */
275 pm_runtime_get_sync(chip->dev); 263 pm_runtime_get_sync(chip->dev);
276 dw_dma_disable(chip); 264 do_dw_dma_disable(chip);
277 pm_runtime_put_sync_suspend(chip->dev); 265 pm_runtime_put_sync_suspend(chip->dev);
278 266
279 clk_disable_unprepare(chip->clk); 267 clk_disable_unprepare(chip->clk);
@@ -303,7 +291,7 @@ static int dw_suspend_late(struct device *dev)
303{ 291{
304 struct dw_dma_chip *chip = dev_get_drvdata(dev); 292 struct dw_dma_chip *chip = dev_get_drvdata(dev);
305 293
306 dw_dma_disable(chip); 294 do_dw_dma_disable(chip);
307 clk_disable_unprepare(chip->clk); 295 clk_disable_unprepare(chip->clk);
308 296
309 return 0; 297 return 0;
@@ -318,7 +306,7 @@ static int dw_resume_early(struct device *dev)
318 if (ret) 306 if (ret)
319 return ret; 307 return ret;
320 308
321 return dw_dma_enable(chip); 309 return do_dw_dma_enable(chip);
322} 310}
323 311
324#endif /* CONFIG_PM_SLEEP */ 312#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 646c9c960c07..3fce66ecee7a 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -1,13 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Driver for the Synopsys DesignWare AHB DMA Controller 3 * Driver for the Synopsys DesignWare AHB DMA Controller
3 * 4 *
4 * Copyright (C) 2005-2007 Atmel Corporation 5 * Copyright (C) 2005-2007 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics 6 * Copyright (C) 2010-2011 ST Microelectronics
6 * Copyright (C) 2016 Intel Corporation 7 * Copyright (C) 2016 Intel Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#include <linux/bitops.h> 10#include <linux/bitops.h>
@@ -222,6 +219,16 @@ enum dw_dma_msize {
222 219
223/* iDMA 32-bit support */ 220/* iDMA 32-bit support */
224 221
222/* burst sizes */
223enum idma32_msize {
224 IDMA32_MSIZE_1,
225 IDMA32_MSIZE_2,
226 IDMA32_MSIZE_4,
227 IDMA32_MSIZE_8,
228 IDMA32_MSIZE_16,
229 IDMA32_MSIZE_32,
230};
231
225/* Bitfields in CTL_HI */ 232/* Bitfields in CTL_HI */
226#define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0) 233#define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0)
227#define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK) 234#define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK)
@@ -312,6 +319,21 @@ struct dw_dma {
312 u8 all_chan_mask; 319 u8 all_chan_mask;
313 u8 in_use; 320 u8 in_use;
314 321
322 /* Channel operations */
323 void (*initialize_chan)(struct dw_dma_chan *dwc);
324 void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
325 void (*resume_chan)(struct dw_dma_chan *dwc, bool drain);
326 u32 (*prepare_ctllo)(struct dw_dma_chan *dwc);
327 void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
328 u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
329 unsigned int width, size_t *len);
330 size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
331
332 /* Device operations */
333 void (*set_device_name)(struct dw_dma *dw, int id);
334 void (*disable)(struct dw_dma *dw);
335 void (*enable)(struct dw_dma *dw);
336
315 /* platform data */ 337 /* platform data */
316 struct dw_dma_platform_data *pdata; 338 struct dw_dma_platform_data *pdata;
317}; 339};
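
With these callbacks in struct dw_dma, the shared core stops testing an is_idma32-style flag and always dispatches through whatever dw_dma_probe() or idma32_dma_probe() installed. A condensed sketch of the pattern (names shortened, not the driver's actual types):

	#include <stdio.h>

	struct chip;	/* one core, two personalities */

	struct chip_ops {
		void (*enable)(struct chip *c);
		void (*set_name)(struct chip *c, int id);
	};

	struct chip {
		const struct chip_ops *ops;
		char name[16];
	};

	static void dw_enable(struct chip *c)
	{
		printf("%s: on\n", c->name);
	}

	static void idma32_enable(struct chip *c)
	{
		printf("%s: FIFO partitioned, on\n", c->name);
	}

	static void dw_name(struct chip *c, int id)
	{
		snprintf(c->name, sizeof(c->name), "dw:dmac%d", id);
	}

	static void idma32_name(struct chip *c, int id)
	{
		snprintf(c->name, sizeof(c->name), "idma32:dmac%d", id);
	}

	static const struct chip_ops dw_ops     = { dw_enable, dw_name };
	static const struct chip_ops idma32_ops = { idma32_enable, idma32_name };

	/* The "core": flag-free, works through the installed ops. */
	static void core_start(struct chip *c, int id)
	{
		c->ops->set_name(c, id);
		c->ops->enable(c);
	}

	int main(void)
	{
		struct chip a = { .ops = &dw_ops };
		struct chip b = { .ops = &idma32_ops };

		core_start(&a, 0);
		core_start(&b, 0);
		return 0;
	}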
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..237a9c165072 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -440,6 +440,8 @@ struct sdma_engine {
440 unsigned int irq; 440 unsigned int irq;
441 dma_addr_t bd0_phys; 441 dma_addr_t bd0_phys;
442 struct sdma_buffer_descriptor *bd0; 442 struct sdma_buffer_descriptor *bd0;
443 /* Clock ratio for AHB:SDMA core: 1:1 is 1, 2:1 is 0 */
444 bool clk_ratio;
443}; 445};
444 446
445static int sdma_config_write(struct dma_chan *chan, 447static int sdma_config_write(struct dma_chan *chan,
@@ -662,8 +664,11 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
662 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); 664 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
663 665
664 /* Set bits of CONFIG register with dynamic context switching */ 666 /* Set bits of CONFIG register with dynamic context switching */
665 if (readl(sdma->regs + SDMA_H_CONFIG) == 0) 667 reg = readl(sdma->regs + SDMA_H_CONFIG);
666 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); 668 if ((reg & SDMA_H_CONFIG_CSM) == 0) {
669 reg |= SDMA_H_CONFIG_CSM;
670 writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
671 }
667 672
668 return ret; 673 return ret;
669} 674}
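
The old check wrote the CSM field only when the whole CONFIG register read back as zero, so the write was skipped once any other bit (such as ACR below) was already set. The fix is a plain read-modify-write on the CSM field alone; modeled in isolation, with the register as a variable and CSM standing in for the driver's SDMA_H_CONFIG_CSM value:

	#include <assert.h>
	#include <stdint.h>

	#define CSM	0x3		/* dynamic context-switch mode */
	#define ACR	(1 << 4)	/* some other CONFIG bit */

	static uint32_t h_config = ACR;	/* pretend ACR is already set */

	static void enable_csm(void)
	{
		uint32_t reg = h_config;		/* readl() */

		if ((reg & CSM) == 0)
			h_config = reg | CSM;		/* writel_relaxed() */
	}

	int main(void)
	{
		enable_csm();
		assert(h_config == (ACR | CSM));	/* ACR survives */
		return 0;
	}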
@@ -1839,6 +1844,9 @@ static int sdma_init(struct sdma_engine *sdma)
1839 if (ret) 1844 if (ret)
1840 goto disable_clk_ipg; 1845 goto disable_clk_ipg;
1841 1846
1847 if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
1848 sdma->clk_ratio = 1;
1849
1842 /* Be sure SDMA has not started yet */ 1850 /* Be sure SDMA has not started yet */
1843 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); 1851 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1844 1852
@@ -1879,8 +1887,10 @@ static int sdma_init(struct sdma_engine *sdma)
1879 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); 1887 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1880 1888
1881 /* Set bits of CONFIG register but with static context switching */ 1889 /* Set bits of CONFIG register but with static context switching */
1882 /* FIXME: Check whether to set ACR bit depending on clock ratios */ 1890 if (sdma->clk_ratio)
1883 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); 1891 writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
1892 else
1893 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1884 1894
1885 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1895 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1886 1896
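
The ACR bit now mirrors the clock ratio measured at init: equal AHB and IPG rates mean the SDMA core runs 1:1 with the AHB bus and ACR must be set, while the 2:1 case leaves it clear, replacing the old hardcoded zero and its FIXME. In isolation (the bit value stands in for the driver's SDMA_H_CONFIG_ACR; the rates are made-up examples):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define CONFIG_ACR	(1 << 4)	/* stand-in for SDMA_H_CONFIG_ACR */

	/* Choose the CONFIG image from the measured clock rates, as
	 * sdma_init() now does instead of always writing 0.
	 */
	static uint32_t sdma_config_for(unsigned long ahb_hz, unsigned long ipg_hz)
	{
		bool clk_ratio = (ahb_hz == ipg_hz);	/* 1:1 -> true */

		return clk_ratio ? CONFIG_ACR : 0;
	}

	int main(void)
	{
		assert(sdma_config_for(66000000, 66000000) == CONFIG_ACR);
		assert(sdma_config_for(132000000, 66000000) == 0);
		return 0;
	}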
@@ -1903,11 +1913,16 @@ disable_clk_ipg:
1903static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) 1913static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1904{ 1914{
1905 struct sdma_channel *sdmac = to_sdma_chan(chan); 1915 struct sdma_channel *sdmac = to_sdma_chan(chan);
1916 struct sdma_engine *sdma = sdmac->sdma;
1906 struct imx_dma_data *data = fn_param; 1917 struct imx_dma_data *data = fn_param;
1907 1918
1908 if (!imx_dma_is_general_purpose(chan)) 1919 if (!imx_dma_is_general_purpose(chan))
1909 return false; 1920 return false;
1910 1921
1922 /* return false if it's not the right device */
1923 if (sdma->dev->of_node != data->of_node)
1924 return false;
1925
1911 sdmac->data = *data; 1926 sdmac->data = *data;
1912 chan->private = &sdmac->data; 1927 chan->private = &sdmac->data;
1913 1928
@@ -1935,6 +1950,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1935 * be set to sdmac->event_id1. 1950 * be set to sdmac->event_id1.
1936 */ 1951 */
1937 data.dma_request2 = 0; 1952 data.dma_request2 = 0;
1953 data.of_node = ofdma->of_node;
1938 1954
1939 return dma_request_channel(mask, sdma_filter_fn, &data); 1955 return dma_request_channel(mask, sdma_filter_fn, &data);
1940} 1956}
@@ -2097,6 +2113,7 @@ static int sdma_probe(struct platform_device *pdev)
2097 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; 2113 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2098 sdma->dma_device.device_issue_pending = sdma_issue_pending; 2114 sdma->dma_device.device_issue_pending = sdma_issue_pending;
2099 sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 2115 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
2116 sdma->dma_device.copy_align = 2;
2100 dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); 2117 dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
2101 2118
2102 platform_set_drvdata(pdev, sdma); 2119 platform_set_drvdata(pdev, sdma);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 23fb2fa04000..f373a139e0c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -372,6 +372,7 @@ struct ioat_ring_ent **
372ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) 372ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
373{ 373{
374 struct ioatdma_chan *ioat_chan = to_ioat_chan(c); 374 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
375 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
375 struct ioat_ring_ent **ring; 376 struct ioat_ring_ent **ring;
376 int total_descs = 1 << order; 377 int total_descs = 1 << order;
377 int i, chunks; 378 int i, chunks;
@@ -437,6 +438,17 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
437 } 438 }
438 ring[i]->hw->next = ring[0]->txd.phys; 439 ring[i]->hw->next = ring[0]->txd.phys;
439 440
441 /* Set up descriptor pre-fetching for v3.4 */
442 if (ioat_dma->cap & IOAT_CAP_DPS) {
443 u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
444
445 if (chunks == 1)
446 drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
447
448 writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
449
450 }
451
440 return ring; 452 return ring;
441} 453}
442 454
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 1ab42ec2b7ff..aaafd0e882b5 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -27,7 +27,7 @@
27#include "registers.h" 27#include "registers.h"
28#include "hw.h" 28#include "hw.h"
29 29
30#define IOAT_DMA_VERSION "4.00" 30#define IOAT_DMA_VERSION "5.00"
31 31
32#define IOAT_DMA_DCA_ANY_CPU ~0 32#define IOAT_DMA_DCA_ANY_CPU ~0
33 33
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index abcc51b343ce..781c94de8e81 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -66,11 +66,14 @@
66 66
67#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 67#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
68 68
69#define PCI_DEVICE_ID_INTEL_IOAT_ICX 0x0b00
70
69#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 71#define IOAT_VER_1_2 0x12 /* Version 1.2 */
70#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 72#define IOAT_VER_2_0 0x20 /* Version 2.0 */
71#define IOAT_VER_3_0 0x30 /* Version 3.0 */ 73#define IOAT_VER_3_0 0x30 /* Version 3.0 */
72#define IOAT_VER_3_2 0x32 /* Version 3.2 */ 74#define IOAT_VER_3_2 0x32 /* Version 3.2 */
73#define IOAT_VER_3_3 0x33 /* Version 3.3 */ 75#define IOAT_VER_3_3 0x33 /* Version 3.3 */
76#define IOAT_VER_3_4 0x34 /* Version 3.4 */
74 77
75 78
76int system_has_dca_enabled(struct pci_dev *pdev); 79int system_has_dca_enabled(struct pci_dev *pdev);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2d810dfcdc48..d41dc9a9ff68 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -119,6 +119,9 @@ static const struct pci_device_id ioat_pci_tbl[] = {
119 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, 119 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
120 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, 120 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
121 121
122 /* I/OAT v3.4 platforms */
123 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },
124
122 { 0, } 125 { 0, }
123}; 126};
124MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); 127MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -135,10 +138,10 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
135static int ioat_dca_enabled = 1; 138static int ioat_dca_enabled = 1;
136module_param(ioat_dca_enabled, int, 0644); 139module_param(ioat_dca_enabled, int, 0644);
137MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); 140MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
138int ioat_pending_level = 4; 141int ioat_pending_level = 7;
139module_param(ioat_pending_level, int, 0644); 142module_param(ioat_pending_level, int, 0644);
140MODULE_PARM_DESC(ioat_pending_level, 143MODULE_PARM_DESC(ioat_pending_level,
141 "high-water mark for pushing ioat descriptors (default: 4)"); 144 "high-water mark for pushing ioat descriptors (default: 7)");
142static char ioat_interrupt_style[32] = "msix"; 145static char ioat_interrupt_style[32] = "msix";
143module_param_string(ioat_interrupt_style, ioat_interrupt_style, 146module_param_string(ioat_interrupt_style, ioat_interrupt_style,
144 sizeof(ioat_interrupt_style), 0644); 147 sizeof(ioat_interrupt_style), 0644);
@@ -635,6 +638,11 @@ static void ioat_free_chan_resources(struct dma_chan *c)
635 ioat_stop(ioat_chan); 638 ioat_stop(ioat_chan);
636 ioat_reset_hw(ioat_chan); 639 ioat_reset_hw(ioat_chan);
637 640
641 /* Put LTR into idle state */
642 if (ioat_dma->version >= IOAT_VER_3_4)
643 writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
644 ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
645
638 spin_lock_bh(&ioat_chan->cleanup_lock); 646 spin_lock_bh(&ioat_chan->cleanup_lock);
639 spin_lock_bh(&ioat_chan->prep_lock); 647 spin_lock_bh(&ioat_chan->prep_lock);
640 descs = ioat_ring_space(ioat_chan); 648 descs = ioat_ring_space(ioat_chan);
@@ -724,6 +732,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
724 spin_unlock_bh(&ioat_chan->prep_lock); 732 spin_unlock_bh(&ioat_chan->prep_lock);
725 spin_unlock_bh(&ioat_chan->cleanup_lock); 733 spin_unlock_bh(&ioat_chan->cleanup_lock);
726 734
735 /* Setting up LTR values for 3.4 or later */
736 if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
737 u32 lat_val;
738
739 lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
740 IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
741 IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
742 writel(lat_val, ioat_chan->reg_base +
743 IOAT_CHAN_LTR_ACTIVE_OFFSET);
744
745 lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
746 IOAT_CHAN_LTR_IDLE_SNLATSCALE |
747 IOAT_CHAN_LTR_IDLE_SNREQMNT;
748 writel(lat_val, ioat_chan->reg_base +
749 IOAT_CHAN_LTR_IDLE_OFFSET);
750
751 /* Select to active */
752 writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
753 ioat_chan->reg_base +
754 IOAT_CHAN_LTR_SWSEL_OFFSET);
755 }
756
727 ioat_start_null_desc(ioat_chan); 757 ioat_start_null_desc(ioat_chan);
728 758
729 /* check that we got off the ground */ 759 /* check that we got off the ground */
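
The v3.4 LTR handling keeps two latency-tolerance images per channel, programmed once at allocation, and then just flips the one-byte SWSEL register between them: active while the channel holds resources, idle after they are freed. The write sequence, replayed against a toy MMIO window (offsets and values from the registers.h hunk below):

	#include <assert.h>
	#include <stdint.h>

	#define LTR_SWSEL_OFFSET	0xBC
	#define LTR_SWSEL_ACTIVE	0x0
	#define LTR_SWSEL_IDLE		0x1
	#define LTR_ACTIVE_OFFSET	0xC0
	#define LTR_IDLE_OFFSET		0xC4
	#define LTR_SNLATSCALE		0x0800	/* 1 us scale */
	#define LTR_SNREQMNT		0x8000	/* snoop requirement enable */

	static uint8_t regs[0x100];	/* toy per-channel MMIO window */

	static void writel(uint32_t v, unsigned int off)
	{
		*(uint32_t *)&regs[off] = v;
	}

	static void writeb(uint8_t v, unsigned int off)
	{
		regs[off] = v;
	}

	int main(void)
	{
		/* alloc_chan_resources(): 0 us active, 600 us idle. */
		writel(0x0000 | LTR_SNLATSCALE | LTR_SNREQMNT, LTR_ACTIVE_OFFSET);
		writel(0x0258 | LTR_SNLATSCALE | LTR_SNREQMNT, LTR_IDLE_OFFSET);
		writeb(LTR_SWSEL_ACTIVE, LTR_SWSEL_OFFSET);

		/* free_chan_resources(): fall back to the idle tolerance. */
		writeb(LTR_SWSEL_IDLE, LTR_SWSEL_OFFSET);
		assert(regs[LTR_SWSEL_OFFSET] == LTR_SWSEL_IDLE);
		return 0;
	}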
@@ -1185,6 +1215,10 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
1185 if (err) 1215 if (err)
1186 return err; 1216 return err;
1187 1217
1218 if (ioat_dma->cap & IOAT_CAP_DPS)
1219 writeb(ioat_pending_level + 1,
1220 ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
1221
1188 return 0; 1222 return 0;
1189} 1223}
1190 1224
@@ -1350,6 +1384,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1350 pci_set_drvdata(pdev, device); 1384 pci_set_drvdata(pdev, device);
1351 1385
1352 device->version = readb(device->reg_base + IOAT_VER_OFFSET); 1386 device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1387 if (device->version >= IOAT_VER_3_4)
1388 ioat_dca_enabled = 0;
1353 if (device->version >= IOAT_VER_3_0) { 1389 if (device->version >= IOAT_VER_3_0) {
1354 if (is_skx_ioat(pdev)) 1390 if (is_skx_ioat(pdev))
1355 device->version = IOAT_VER_3_2; 1391 device->version = IOAT_VER_3_2;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 2f3bbc88ff2a..99c1c24d465d 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -84,6 +84,9 @@
84#define IOAT_CAP_PQ 0x00000200 84#define IOAT_CAP_PQ 0x00000200
85#define IOAT_CAP_DWBES 0x00002000 85#define IOAT_CAP_DWBES 0x00002000
86#define IOAT_CAP_RAID16SS 0x00020000 86#define IOAT_CAP_RAID16SS 0x00020000
87#define IOAT_CAP_DPS 0x00800000
88
89#define IOAT_PREFETCH_LIMIT_OFFSET 0x4C /* CHWPREFLMT */
87 90
88#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ 91#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
89 92
@@ -243,4 +246,25 @@
243 246
244#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ 247#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
245 248
249#define IOAT_CHAN_DRSCTL_OFFSET 0xB6
250#define IOAT_CHAN_DRSZ_4KB 0x0000
251#define IOAT_CHAN_DRSZ_8KB 0x0001
252#define IOAT_CHAN_DRSZ_2MB 0x0009
253#define IOAT_CHAN_DRS_EN 0x0100
254#define IOAT_CHAN_DRS_AUTOWRAP 0x0200
255
256#define IOAT_CHAN_LTR_SWSEL_OFFSET 0xBC
257#define IOAT_CHAN_LTR_SWSEL_ACTIVE 0x0
258#define IOAT_CHAN_LTR_SWSEL_IDLE 0x1
259
260#define IOAT_CHAN_LTR_ACTIVE_OFFSET 0xC0
261#define IOAT_CHAN_LTR_ACTIVE_SNVAL 0x0000 /* 0 us */
262#define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE 0x0800 /* 1 us scale */
263#define IOAT_CHAN_LTR_ACTIVE_SNREQMNT 0x8000 /* snoop req enable */
264
265#define IOAT_CHAN_LTR_IDLE_OFFSET 0xC4
266#define IOAT_CHAN_LTR_IDLE_SNVAL 0x0258 /* 600 us */
267#define IOAT_CHAN_LTR_IDLE_SNLATSCALE 0x0800 /* 1 us scale */
268#define IOAT_CHAN_LTR_IDLE_SNREQMNT 0x8000 /* snoop req enable */
269
246#endif /* _IOAT_REGISTERS_H_ */ 270#endif /* _IOAT_REGISTERS_H_ */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7f595355fb79..fe4a7c71fede 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1059,6 +1059,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
1059 mv_chan->op_in_desc = XOR_MODE_IN_DESC; 1059 mv_chan->op_in_desc = XOR_MODE_IN_DESC;
1060 1060
1061 dma_dev = &mv_chan->dmadev; 1061 dma_dev = &mv_chan->dmadev;
1062 dma_dev->dev = &pdev->dev;
1062 mv_chan->xordev = xordev; 1063 mv_chan->xordev = xordev;
1063 1064
1064 /* 1065 /*
@@ -1091,7 +1092,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
1091 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1092 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1092 dma_dev->device_tx_status = mv_xor_status; 1093 dma_dev->device_tx_status = mv_xor_status;
1093 dma_dev->device_issue_pending = mv_xor_issue_pending; 1094 dma_dev->device_issue_pending = mv_xor_issue_pending;
1094 dma_dev->dev = &pdev->dev;
1095 1095
1096 /* set prep routines based on capability */ 1096 /* set prep routines based on capability */
1097 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) 1097 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cff1b143fff5..eec79fdf27a5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2267,7 +2267,6 @@ static int pl330_terminate_all(struct dma_chan *chan)
2267 struct dma_pl330_desc *desc; 2267 struct dma_pl330_desc *desc;
2268 unsigned long flags; 2268 unsigned long flags;
2269 struct pl330_dmac *pl330 = pch->dmac; 2269 struct pl330_dmac *pl330 = pch->dmac;
2270 LIST_HEAD(list);
2271 bool power_down = false; 2270 bool power_down = false;
2272 2271
2273 pm_runtime_get_sync(pl330->ddma.dev); 2272 pm_runtime_get_sync(pl330->ddma.dev);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 1617715aa6e0..cb860cb53c27 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -636,8 +636,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
636 num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); 636 num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
637 637
638 /* allocate enough room to accommodate the number of entries */ 638 /* allocate enough room to accommodate the number of entries */
639 async_desc = kzalloc(sizeof(*async_desc) + 639 async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
640 (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); 640 GFP_NOWAIT);
641 641
642 if (!async_desc) 642 if (!async_desc)
643 goto err_out; 643 goto err_out;
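
struct_size() computes sizeof(*p) plus n trailing flexible-array elements, and unlike the open-coded multiply-and-add it saturates on overflow so a huge count cannot wrap into a short allocation. A userspace approximation of the pattern (reduced descriptor layout, and without the kernel's overflow saturation):

	#include <stdio.h>
	#include <stdlib.h>

	/* Reduced model of the driver's HW descriptor. */
	struct bam_desc_hw { unsigned int addr, size, flags; };

	struct bam_async_desc {
		unsigned int num_desc;
		struct bam_desc_hw desc[];	/* flexible array member */
	};

	/* Simplified struct_size(); the kernel macro also checks overflow. */
	#define struct_size_approx(p, member, n) \
		(sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

	int main(void)
	{
		struct bam_async_desc *async_desc;
		unsigned int num_alloc = 8;

		async_desc = calloc(1, struct_size_approx(async_desc, desc,
							  num_alloc));
		if (!async_desc)
			return 1;

		async_desc->num_desc = num_alloc;
		printf("allocated %zu bytes\n",
		       struct_size_approx(async_desc, desc, num_alloc));
		free(async_desc);
		return 0;
	}

The same conversion is applied to st_fdma and timb_dma further down.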
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 43d4b00b8138..411f91fde734 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
138 desc = &mdesc->desc; 138 desc = &mdesc->desc;
139 last_cookie = desc->cookie; 139 last_cookie = desc->cookie;
140 140
141 llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
142
141 spin_lock_irqsave(&mchan->lock, irqflags); 143 spin_lock_irqsave(&mchan->lock, irqflags);
144 if (llstat == DMA_COMPLETE) {
145 mchan->last_success = last_cookie;
146 result.result = DMA_TRANS_NOERROR;
147 } else {
148 result.result = DMA_TRANS_ABORTED;
149 }
150
142 dma_cookie_complete(desc); 151 dma_cookie_complete(desc);
143 spin_unlock_irqrestore(&mchan->lock, irqflags); 152 spin_unlock_irqrestore(&mchan->lock, irqflags);
144 153
145 llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
146 dmaengine_desc_get_callback(desc, &cb); 154 dmaengine_desc_get_callback(desc, &cb);
147 155
148 dma_run_dependencies(desc); 156 dma_run_dependencies(desc);
149 157
150 spin_lock_irqsave(&mchan->lock, irqflags); 158 spin_lock_irqsave(&mchan->lock, irqflags);
151 list_move(&mdesc->node, &mchan->free); 159 list_move(&mdesc->node, &mchan->free);
152
153 if (llstat == DMA_COMPLETE) {
154 mchan->last_success = last_cookie;
155 result.result = DMA_TRANS_NOERROR;
156 } else
157 result.result = DMA_TRANS_ABORTED;
158
159 spin_unlock_irqrestore(&mchan->lock, irqflags); 160 spin_unlock_irqrestore(&mchan->lock, irqflags);
160 161
161 dmaengine_desc_callback_invoke(&cb, &result); 162 dmaengine_desc_callback_invoke(&cb, &result);
@@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
415 if (!mdesc) 416 if (!mdesc)
416 return NULL; 417 return NULL;
417 418
419 mdesc->desc.flags = flags;
418 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 420 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
419 src, dest, len, flags, 421 src, dest, len, flags,
420 HIDMA_TRE_MEMCPY); 422 HIDMA_TRE_MEMCPY);
@@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
447 if (!mdesc) 449 if (!mdesc)
448 return NULL; 450 return NULL;
449 451
452 mdesc->desc.flags = flags;
450 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 453 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
451 value, dest, len, flags, 454 value, dest, len, flags,
452 HIDMA_TRE_MEMSET); 455 HIDMA_TRE_MEMSET);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index d64edeb6771a..681de12f4c67 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -423,9 +423,8 @@ static int __init hidma_mgmt_init(void)
423 hidma_mgmt_of_populate_channels(child); 423 hidma_mgmt_of_populate_channels(child);
424 } 424 }
425#endif 425#endif
426 platform_driver_register(&hidma_mgmt_driver); 426 return platform_driver_register(&hidma_mgmt_driver);
427 427
428 return 0;
429} 428}
430module_init(hidma_mgmt_init); 429module_init(hidma_mgmt_init);
431MODULE_LICENSE("GPL v2"); 430MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 784d5f1a473b..3fae23768b47 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -705,7 +705,6 @@ static int sa11x0_dma_device_pause(struct dma_chan *chan)
705 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 705 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
706 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 706 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
707 struct sa11x0_dma_phy *p; 707 struct sa11x0_dma_phy *p;
708 LIST_HEAD(head);
709 unsigned long flags; 708 unsigned long flags;
710 709
711 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 710 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
@@ -732,7 +731,6 @@ static int sa11x0_dma_device_resume(struct dma_chan *chan)
732 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 731 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
733 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 732 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
734 struct sa11x0_dma_phy *p; 733 struct sa11x0_dma_phy *p;
735 LIST_HEAD(head);
736 unsigned long flags; 734 unsigned long flags;
737 735
738 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 736 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index e2f016700fcc..48431e2da987 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -580,15 +580,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
580 580
581static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) 581static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
582{ 582{
583 struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); 583 return pm_runtime_get_sync(chan->device->dev);
584 int ret;
585
586 ret = pm_runtime_get_sync(chan->device->dev);
587 if (ret < 0)
588 return ret;
589
590 schan->dev_id = SPRD_DMA_SOFTWARE_UID;
591 return 0;
592} 584}
593 585
594static void sprd_dma_free_chan_resources(struct dma_chan *chan) 586static void sprd_dma_free_chan_resources(struct dma_chan *chan)
@@ -1021,13 +1013,10 @@ static void sprd_dma_free_desc(struct virt_dma_desc *vd)
1021static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) 1013static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
1022{ 1014{
1023 struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); 1015 struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
1024 struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); 1016 u32 slave_id = *(u32 *)param;
1025 u32 req = *(u32 *)param;
1026 1017
1027 if (req < sdev->total_chns) 1018 schan->dev_id = slave_id;
1028 return req == schan->chn_num + 1; 1019 return true;
1029 else
1030 return false;
1031} 1020}
1032 1021
1033static int sprd_dma_probe(struct platform_device *pdev) 1022static int sprd_dma_probe(struct platform_device *pdev)
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 07c20aa2e955..bc7a1de3f29b 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -243,8 +243,7 @@ static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
243 struct st_fdma_desc *fdesc; 243 struct st_fdma_desc *fdesc;
244 int i; 244 int i;
245 245
246 fdesc = kzalloc(sizeof(*fdesc) + 246 fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
247 sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
248 if (!fdesc) 247 if (!fdesc)
249 return NULL; 248 return NULL;
250 249
@@ -294,8 +293,6 @@ static void st_fdma_free_chan_res(struct dma_chan *chan)
294 struct rproc *rproc = fchan->fdev->slim_rproc->rproc; 293 struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
295 unsigned long flags; 294 unsigned long flags;
296 295
297 LIST_HEAD(head);
298
299 dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n", 296 dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
300 __func__, fchan->vchan.chan.chan_id); 297 __func__, fchan->vchan.chan.chan_id);
301 298
@@ -626,7 +623,6 @@ static void st_fdma_issue_pending(struct dma_chan *chan)
626static int st_fdma_pause(struct dma_chan *chan) 623static int st_fdma_pause(struct dma_chan *chan)
627{ 624{
628 unsigned long flags; 625 unsigned long flags;
629 LIST_HEAD(head);
630 struct st_fdma_chan *fchan = to_st_fdma_chan(chan); 626 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
631 int ch_id = fchan->vchan.chan.chan_id; 627 int ch_id = fchan->vchan.chan.chan_id;
632 unsigned long cmd = FDMA_CMD_PAUSE(ch_id); 628 unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index fc0f9c8766a8..afbb1c95b721 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -643,8 +643,8 @@ static int td_probe(struct platform_device *pdev)
643 DRIVER_NAME)) 643 DRIVER_NAME))
644 return -EBUSY; 644 return -EBUSY;
645 645
646 td = kzalloc(sizeof(struct timb_dma) + 646 td = kzalloc(struct_size(td, channels, pdata->nr_channels),
647 sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL); 647 GFP_KERNEL);
648 if (!td) { 648 if (!td) {
649 err = -ENOMEM; 649 err = -ENOMEM;
650 goto err_release_region; 650 goto err_release_region;
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 98dbc796353f..53ca9ba6ab4b 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -153,7 +153,6 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
153#ifdef CONFIG_SERIAL_8250_DMA 153#ifdef CONFIG_SERIAL_8250_DMA
154static const struct dw_dma_platform_data qrk_serial_dma_pdata = { 154static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
155 .nr_channels = 2, 155 .nr_channels = 2,
156 .is_private = true,
157 .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, 156 .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
158 .chan_priority = CHAN_PRIORITY_ASCENDING, 157 .chan_priority = CHAN_PRIORITY_ASCENDING,
159 .block_size = 4095, 158 .block_size = 4095,