diff options
author | Thomas Abraham <thomas.abraham@linaro.org> | 2011-10-24 05:43:11 -0400 |
---|---|---|
committer | Kukjin Kim <kgene.kim@samsung.com> | 2011-12-22 20:07:03 -0500 |
commit | cd072515215ccc37051cadc516ce28545257be41 (patch) | |
tree | f67c1ed0a2a988181c20f545f0ad1732b2444a90 /drivers | |
parent | 3e2ec13a8185183cd7ff237dadc948a0f9f7398f (diff) |
DMA: PL330: Infer transfer direction from transfer request instead of platform data
The transfer direction for a channel can be inferred from the transfer
request and the need for specifying transfer direction in platform data
can be eliminated. So the structure definition 'struct dma_pl330_peri'
is no longer required.
The channel's private data is set to point to a channel id specified in
the platform data (instead of an instance of type 'struct dma_pl330_peri').
The filter function is correspondingly modified to match the channel id.
With the 'struct dma_pl330_peri' removed from platform data, the dma
controller transfer capabilities cannot be inferred any more. Hence,
the dma controller capabilities are specified using platform data.
Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
Acked-by: Boojin Kim <boojin.kim@samsung.com>
Signed-off-by: Thomas Abraham <thomas.abraham@linaro.org>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/dma/pl330.c | 65 |
1 files changed, 16 insertions, 49 deletions
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 0c434dca4bf2..317aaeaa6f66 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -272,13 +272,13 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err) | |||
272 | 272 | ||
273 | bool pl330_filter(struct dma_chan *chan, void *param) | 273 | bool pl330_filter(struct dma_chan *chan, void *param) |
274 | { | 274 | { |
275 | struct dma_pl330_peri *peri; | 275 | u8 *peri_id; |
276 | 276 | ||
277 | if (chan->device->dev->driver != &pl330_driver.drv) | 277 | if (chan->device->dev->driver != &pl330_driver.drv) |
278 | return false; | 278 | return false; |
279 | 279 | ||
280 | peri = chan->private; | 280 | peri_id = chan->private; |
281 | return peri->peri_id == (unsigned)param; | 281 | return *peri_id == (unsigned)param; |
282 | } | 282 | } |
283 | EXPORT_SYMBOL(pl330_filter); | 283 | EXPORT_SYMBOL(pl330_filter); |
284 | 284 | ||
@@ -512,7 +512,7 @@ pluck_desc(struct dma_pl330_dmac *pdmac) | |||
512 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | 512 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) |
513 | { | 513 | { |
514 | struct dma_pl330_dmac *pdmac = pch->dmac; | 514 | struct dma_pl330_dmac *pdmac = pch->dmac; |
515 | struct dma_pl330_peri *peri = pch->chan.private; | 515 | u8 *peri_id = pch->chan.private; |
516 | struct dma_pl330_desc *desc; | 516 | struct dma_pl330_desc *desc; |
517 | 517 | ||
518 | /* Pluck one desc from the pool of DMAC */ | 518 | /* Pluck one desc from the pool of DMAC */ |
@@ -537,13 +537,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
537 | desc->txd.cookie = 0; | 537 | desc->txd.cookie = 0; |
538 | async_tx_ack(&desc->txd); | 538 | async_tx_ack(&desc->txd); |
539 | 539 | ||
540 | if (peri) { | 540 | desc->req.peri = peri_id ? pch->chan.chan_id : 0; |
541 | desc->req.rqtype = peri->rqtype; | ||
542 | desc->req.peri = pch->chan.chan_id; | ||
543 | } else { | ||
544 | desc->req.rqtype = MEMTOMEM; | ||
545 | desc->req.peri = 0; | ||
546 | } | ||
547 | 541 | ||
548 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 542 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
549 | 543 | ||
@@ -630,12 +624,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
630 | case DMA_TO_DEVICE: | 624 | case DMA_TO_DEVICE: |
631 | desc->rqcfg.src_inc = 1; | 625 | desc->rqcfg.src_inc = 1; |
632 | desc->rqcfg.dst_inc = 0; | 626 | desc->rqcfg.dst_inc = 0; |
627 | desc->req.rqtype = MEMTODEV; | ||
633 | src = dma_addr; | 628 | src = dma_addr; |
634 | dst = pch->fifo_addr; | 629 | dst = pch->fifo_addr; |
635 | break; | 630 | break; |
636 | case DMA_FROM_DEVICE: | 631 | case DMA_FROM_DEVICE: |
637 | desc->rqcfg.src_inc = 0; | 632 | desc->rqcfg.src_inc = 0; |
638 | desc->rqcfg.dst_inc = 1; | 633 | desc->rqcfg.dst_inc = 1; |
634 | desc->req.rqtype = DEVTOMEM; | ||
639 | src = pch->fifo_addr; | 635 | src = pch->fifo_addr; |
640 | dst = dma_addr; | 636 | dst = dma_addr; |
641 | break; | 637 | break; |
@@ -661,16 +657,12 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
661 | { | 657 | { |
662 | struct dma_pl330_desc *desc; | 658 | struct dma_pl330_desc *desc; |
663 | struct dma_pl330_chan *pch = to_pchan(chan); | 659 | struct dma_pl330_chan *pch = to_pchan(chan); |
664 | struct dma_pl330_peri *peri = chan->private; | ||
665 | struct pl330_info *pi; | 660 | struct pl330_info *pi; |
666 | int burst; | 661 | int burst; |
667 | 662 | ||
668 | if (unlikely(!pch || !len)) | 663 | if (unlikely(!pch || !len)) |
669 | return NULL; | 664 | return NULL; |
670 | 665 | ||
671 | if (peri && peri->rqtype != MEMTOMEM) | ||
672 | return NULL; | ||
673 | |||
674 | pi = &pch->dmac->pif; | 666 | pi = &pch->dmac->pif; |
675 | 667 | ||
676 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); | 668 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); |
@@ -679,6 +671,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
679 | 671 | ||
680 | desc->rqcfg.src_inc = 1; | 672 | desc->rqcfg.src_inc = 1; |
681 | desc->rqcfg.dst_inc = 1; | 673 | desc->rqcfg.dst_inc = 1; |
674 | desc->req.rqtype = MEMTOMEM; | ||
682 | 675 | ||
683 | /* Select max possible burst size */ | 676 | /* Select max possible burst size */ |
684 | burst = pi->pcfg.data_bus_width / 8; | 677 | burst = pi->pcfg.data_bus_width / 8; |
@@ -707,24 +700,13 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
707 | { | 700 | { |
708 | struct dma_pl330_desc *first, *desc = NULL; | 701 | struct dma_pl330_desc *first, *desc = NULL; |
709 | struct dma_pl330_chan *pch = to_pchan(chan); | 702 | struct dma_pl330_chan *pch = to_pchan(chan); |
710 | struct dma_pl330_peri *peri = chan->private; | ||
711 | struct scatterlist *sg; | 703 | struct scatterlist *sg; |
712 | unsigned long flags; | 704 | unsigned long flags; |
713 | int i; | 705 | int i; |
714 | dma_addr_t addr; | 706 | dma_addr_t addr; |
715 | 707 | ||
716 | if (unlikely(!pch || !sgl || !sg_len || !peri)) | 708 | if (unlikely(!pch || !sgl || !sg_len)) |
717 | return NULL; | ||
718 | |||
719 | /* Make sure the direction is consistent */ | ||
720 | if ((direction == DMA_TO_DEVICE && | ||
721 | peri->rqtype != MEMTODEV) || | ||
722 | (direction == DMA_FROM_DEVICE && | ||
723 | peri->rqtype != DEVTOMEM)) { | ||
724 | dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n", | ||
725 | __func__, __LINE__); | ||
726 | return NULL; | 709 | return NULL; |
727 | } | ||
728 | 710 | ||
729 | addr = pch->fifo_addr; | 711 | addr = pch->fifo_addr; |
730 | 712 | ||
@@ -765,11 +747,13 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
765 | if (direction == DMA_TO_DEVICE) { | 747 | if (direction == DMA_TO_DEVICE) { |
766 | desc->rqcfg.src_inc = 1; | 748 | desc->rqcfg.src_inc = 1; |
767 | desc->rqcfg.dst_inc = 0; | 749 | desc->rqcfg.dst_inc = 0; |
750 | desc->req.rqtype = MEMTODEV; | ||
768 | fill_px(&desc->px, | 751 | fill_px(&desc->px, |
769 | addr, sg_dma_address(sg), sg_dma_len(sg)); | 752 | addr, sg_dma_address(sg), sg_dma_len(sg)); |
770 | } else { | 753 | } else { |
771 | desc->rqcfg.src_inc = 0; | 754 | desc->rqcfg.src_inc = 0; |
772 | desc->rqcfg.dst_inc = 1; | 755 | desc->rqcfg.dst_inc = 1; |
756 | desc->req.rqtype = DEVTOMEM; | ||
773 | fill_px(&desc->px, | 757 | fill_px(&desc->px, |
774 | sg_dma_address(sg), addr, sg_dma_len(sg)); | 758 | sg_dma_address(sg), addr, sg_dma_len(sg)); |
775 | } | 759 | } |
@@ -876,28 +860,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
876 | 860 | ||
877 | for (i = 0; i < num_chan; i++) { | 861 | for (i = 0; i < num_chan; i++) { |
878 | pch = &pdmac->peripherals[i]; | 862 | pch = &pdmac->peripherals[i]; |
879 | if (pdat) { | 863 | pch->chan.private = pdat ? &pdat->peri_id[i] : NULL; |
880 | struct dma_pl330_peri *peri = &pdat->peri[i]; | ||
881 | |||
882 | switch (peri->rqtype) { | ||
883 | case MEMTOMEM: | ||
884 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
885 | break; | ||
886 | case MEMTODEV: | ||
887 | case DEVTOMEM: | ||
888 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
889 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); | ||
890 | break; | ||
891 | default: | ||
892 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
893 | continue; | ||
894 | } | ||
895 | pch->chan.private = peri; | ||
896 | } else { | ||
897 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
898 | pch->chan.private = NULL; | ||
899 | } | ||
900 | |||
901 | INIT_LIST_HEAD(&pch->work_list); | 864 | INIT_LIST_HEAD(&pch->work_list); |
902 | spin_lock_init(&pch->lock); | 865 | spin_lock_init(&pch->lock); |
903 | pch->pl330_chid = NULL; | 866 | pch->pl330_chid = NULL; |
@@ -909,6 +872,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
909 | } | 872 | } |
910 | 873 | ||
911 | pd->dev = &adev->dev; | 874 | pd->dev = &adev->dev; |
875 | if (pdat) | ||
876 | pd->cap_mask = pdat->cap_mask; | ||
877 | else | ||
878 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
912 | 879 | ||
913 | pd->device_alloc_chan_resources = pl330_alloc_chan_resources; | 880 | pd->device_alloc_chan_resources = pl330_alloc_chan_resources; |
914 | pd->device_free_chan_resources = pl330_free_chan_resources; | 881 | pd->device_free_chan_resources = pl330_free_chan_resources; |