author     Vinod Koul <vkoul@kernel.org>    2019-03-12 02:33:42 -0400
committer  Vinod Koul <vkoul@kernel.org>    2019-03-12 02:33:42 -0400
commit     5c196f5efa48dbde12f9e47f30dbecfada450f45 (patch)
tree       e2737d06cc2ff331a7372e9e41be3a5b04669c6c /drivers/dma/bcm2835-dma.c
parent     a3711d49beb0768a4df852914676d13fd534a84d (diff)
parent     37c22cabf225e89de3e98a6ed02a6c55fcb53fde (diff)
Merge branch 'topic/brcm' into for-linus
Diffstat (limited to 'drivers/dma/bcm2835-dma.c')
-rw-r--r--  drivers/dma/bcm2835-dma.c  92
1 file changed, 31 insertions(+), 61 deletions(-)
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 7beec403c2c9..ec8a291d62ba 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -2,9 +2,6 @@
 /*
  * BCM2835 DMA engine support
  *
- * This driver only supports cyclic DMA transfers
- * as needed for the I2S module.
- *
  * Author: Florian Meier <florian.meier@koalo.de>
  * Copyright 2013
  *
@@ -42,7 +39,6 @@
 
 struct bcm2835_dmadev {
 	struct dma_device ddev;
-	spinlock_t lock;
 	void __iomem *base;
 	struct device_dma_parameters dma_parms;
 };
@@ -64,7 +60,6 @@ struct bcm2835_cb_entry {
 
 struct bcm2835_chan {
 	struct virt_dma_chan vc;
-	struct list_head node;
 
 	struct dma_slave_config cfg;
 	unsigned int dreq;
@@ -405,39 +400,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
 	}
 }
 
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static void bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-	unsigned long cs;
+	void __iomem *chan_base = c->chan_base;
 	long int timeout = 10000;
 
-	cs = readl(chan_base + BCM2835_DMA_CS);
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
+	/*
+	 * A zero control block address means the channel is idle.
+	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
+	 */
+	if (!readl(chan_base + BCM2835_DMA_ADDR))
+		return;
 
 	/* Write 0 to the active bit - Pause the DMA */
 	writel(0, chan_base + BCM2835_DMA_CS);
 
 	/* Wait for any current AXI transfer to complete */
-	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+	while ((readl(chan_base + BCM2835_DMA_CS) &
+		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
 		cpu_relax();
-		cs = readl(chan_base + BCM2835_DMA_CS);
-	}
 
-	/* We'll un-pause when we set of our next DMA */
+	/* Peripheral might be stuck and fail to signal AXI write responses */
 	if (!timeout)
-		return -ETIMEDOUT;
-
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
+		dev_err(c->vc.chan.device->dev,
+			"failed to complete outstanding writes\n");
 
-	/* Terminate the control block chain */
-	writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-	/* Abort the whole DMA */
-	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-	       chan_base + BCM2835_DMA_CS);
-
-	return 0;
+	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 }
 
 static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
@@ -475,8 +463,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Acknowledge interrupt */
-	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+	/*
+	 * Clear the INT flag to receive further interrupts. Keep the channel
+	 * active in case the descriptor is cyclic or in case the client has
+	 * already terminated the descriptor and issued a new one. (May happen
+	 * if this IRQ handler is threaded.) If the channel is finished, it
+	 * will remain idle despite the ACTIVE flag being set.
+	 */
+	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+	       c->chan_base + BCM2835_DMA_CS);
 
 	d = c->desc;
 
@@ -484,11 +479,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 	if (d->cyclic) {
 		/* call the cyclic callback */
 		vchan_cyclic_callback(&d->vd);
-
-		/* Keep the DMA engine running */
-		writel(BCM2835_DMA_ACTIVE,
-		       c->chan_base + BCM2835_DMA_CS);
-	} else {
+	} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
 		vchan_cookie_complete(&c->desc->vd);
 		bcm2835_dma_start_desc(c);
 	}
@@ -506,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
 
+	/*
+	 * Control blocks are 256 bit in length and must start at a 256 bit
+	 * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
+	 */
 	c->cb_pool = dma_pool_create(dev_name(dev), dev,
-				     sizeof(struct bcm2835_dma_cb), 0, 0);
+				     sizeof(struct bcm2835_dma_cb), 32, 0);
 	if (!c->cb_pool) {
 		dev_err(dev, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
@@ -776,39 +771,16 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
 static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
-	int timeout = 10000;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Prevent this channel being scheduled */
-	spin_lock(&d->lock);
-	list_del_init(&c->node);
-	spin_unlock(&d->lock);
-
-	/*
-	 * Stop DMA activity: we assume the callback will not be called
-	 * after bcm_dma_abort() returns (even if it does, it will see
-	 * c->desc is NULL and exit.)
-	 */
+	/* stop DMA activity */
 	if (c->desc) {
 		vchan_terminate_vdesc(&c->desc->vd);
 		c->desc = NULL;
-		bcm2835_dma_abort(c->chan_base);
-
-		/* Wait for stopping */
-		while (--timeout) {
-			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
-			      BCM2835_DMA_ACTIVE))
-				break;
-
-			cpu_relax();
-		}
-
-		if (!timeout)
-			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+		bcm2835_dma_abort(c);
 	}
 
 	vchan_get_all_descriptors(&c->vc, &head);
@@ -836,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
 
 	c->vc.desc_free = bcm2835_dma_desc_free;
 	vchan_init(&c->vc, &d->ddev);
-	INIT_LIST_HEAD(&c->node);
 
 	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
 	c->ch = chan_id;
@@ -939,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
-	spin_lock_init(&od->lock);
 
 	platform_set_drvdata(pdev, od);
 
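
One of the changes merged here rewrites the abort path: a channel is treated as idle when its control block address register reads zero, otherwise it is paused, outstanding AXI writes are drained, and the channel is reset. The following is a minimal standalone C sketch of that sequence, for illustration only. It is not kernel code: readl()/writel() are replaced by plain array accesses, and the register offsets and bit masks are assumed placeholder values; the authoritative definitions live in drivers/dma/bcm2835-dma.c.

/*
 * Illustrative simulation of the new abort flow (idle check, pause,
 * drain outstanding writes, reset). Offsets and bits are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_CS                  0x00u        /* control/status (assumed offset) */
#define DMA_ADDR                0x04u        /* current control block address (assumed offset) */
#define DMA_WAITING_FOR_WRITES  (1u << 6)    /* assumed bit position */
#define DMA_RESET               (1u << 31)   /* assumed bit position */

/* Fake MMIO: a small array standing in for one channel's register block. */
static uint32_t regs[16];
static uint32_t rd(unsigned int off)              { return regs[off / 4]; }
static void     wr(unsigned int off, uint32_t v)  { regs[off / 4] = v; }

static void dma_abort(void)
{
	long timeout = 10000;

	/* A zero control block address means the channel is idle. */
	if (!rd(DMA_ADDR))
		return;

	/* Pause the channel by writing 0 to the CS register. */
	wr(DMA_CS, 0);

	/* Wait for any outstanding AXI writes to drain. */
	while ((rd(DMA_CS) & DMA_WAITING_FOR_WRITES) && --timeout)
		;	/* cpu_relax() in the real driver */

	if (!timeout)
		fprintf(stderr, "failed to complete outstanding writes\n");

	/* Finally reset the channel. */
	wr(DMA_CS, DMA_RESET);
}

int main(void)
{
	wr(DMA_ADDR, 0xdead0000u);	/* pretend a control block is loaded */
	dma_abort();
	printf("CS after abort: 0x%08x\n", rd(DMA_CS));
	return 0;
}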