author		Vinod Koul <vinod.koul@intel.com>	2013-10-30 03:37:18 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2013-10-30 03:37:18 -0400
commit		b967aecf1714c10d1e6c045e43b6385884f1ca77 (patch)
tree		a6aebedea9c5ddd1bbaf42e913be01e17f98c735 /drivers
parent		959f58544b7f20c92d5eb43d1232c96c15c01bfb (diff)
parent		2abd5f1b97fce6e197be01d67a9567c7793c80d3 (diff)
Merge branch 'for-linus' into next
Conflicts:
	drivers/dma/edma.c

Moved the memory leak fix post merge

Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers')
 drivers/dma/amba-pl08x.c      |   3
 drivers/dma/coh901318.c       |   2
 drivers/dma/cppi41.c          |  94
 drivers/dma/edma.c            | 211
 drivers/dma/k3dma.c           |   2
 drivers/dma/mmp_pdma.c        |   7
 drivers/dma/mmp_tdma.c        |  34
 drivers/dma/pl330.c           |  25
 drivers/dma/sh/shdmac.c       |   4
 drivers/dma/ste_dma40.c       |   5
 drivers/dma/tegra20-apb-dma.c |   2
 11 files changed, 254 insertions(+), 135 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fce46c5bf1c7..9b5025777ac8 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2133,8 +2133,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
 	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
 
-	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
-			  DRIVER_NAME, pl08x);
+	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
 	if (ret) {
 		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
 			__func__, adev->irq[0]);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 31011d2a26fc..3025b9107af2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	if (irq < 0)
 		return irq;
 
-	err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+	err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
 			       "coh901318", base);
 	if (err)
 		return err;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 7c82b92f9b16..167c0223ae9e 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -674,14 +674,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
 	}
 }
 
-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
 {
 	struct cppi41_channel *cchan;
 	int i;
 	int ret;
 	u32 n_chans;
 
-	ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+	ret = of_property_read_u32(dev->of_node, "#dma-channels",
 			&n_chans);
 	if (ret)
 		return ret;
@@ -719,7 +719,7 @@ err:
 	return -ENOMEM;
 }
 
-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
 {
 	unsigned int mem_decs;
 	int i;
@@ -731,7 +731,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
 		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
 		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
 
-		dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+		dma_free_coherent(dev, mem_decs, cdd->cd,
 				cdd->descs_phys);
 	}
 }
@@ -741,19 +741,19 @@ static void disable_sched(struct cppi41_dd *cdd)
 	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
 	disable_sched(cdd);
 
-	purge_descs(pdev, cdd);
+	purge_descs(dev, cdd);
 
 	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
 	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-	dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+	dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
 			cdd->scratch_phys);
 }
 
-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
 {
 	unsigned int desc_size;
 	unsigned int mem_decs;
@@ -777,7 +777,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
 		reg |= ilog2(ALLOC_DECS_NUM) - 5;
 
 		BUILD_BUG_ON(DESCS_AREAS != 1);
-		cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+		cdd->cd = dma_alloc_coherent(dev, mem_decs,
 				&cdd->descs_phys, GFP_KERNEL);
 		if (!cdd->cd)
 			return -ENOMEM;
@@ -813,12 +813,12 @@ static void init_sched(struct cppi41_dd *cdd)
 	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
 	int ret;
 
 	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
-	cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
 			&cdd->scratch_phys, GFP_KERNEL);
 	if (!cdd->qmgr_scratch)
 		return -ENOMEM;
@@ -827,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
 	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
 	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
 
-	ret = init_descs(pdev, cdd);
+	ret = init_descs(dev, cdd);
 	if (ret)
 		goto err_td;
 
@@ -835,7 +835,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
 	init_sched(cdd);
 	return 0;
 err_td:
-	deinit_cpii41(pdev, cdd);
+	deinit_cppi41(dev, cdd);
 	return ret;
 }
 
@@ -914,11 +914,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
 
-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
 {
 	const struct of_device_id *of_id;
 
-	of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+	of_id = of_match_node(cppi41_dma_ids, dev->of_node);
 	if (!of_id)
 		return NULL;
 	return of_id->data;
@@ -927,11 +927,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
 static int cppi41_dma_probe(struct platform_device *pdev)
 {
 	struct cppi41_dd *cdd;
+	struct device *dev = &pdev->dev;
 	const struct cppi_glue_infos *glue_info;
 	int irq;
 	int ret;
 
-	glue_info = get_glue_info(pdev);
+	glue_info = get_glue_info(dev);
 	if (!glue_info)
 		return -EINVAL;
 
@@ -946,14 +947,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
 	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
 	cdd->ddev.device_control = cppi41_dma_control;
-	cdd->ddev.dev = &pdev->dev;
+	cdd->ddev.dev = dev;
 	INIT_LIST_HEAD(&cdd->ddev.channels);
 	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
 
-	cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
-	cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
-	cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
-	cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+	cdd->usbss_mem = of_iomap(dev->of_node, 0);
+	cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+	cdd->sched_mem = of_iomap(dev->of_node, 2);
+	cdd->qmgr_mem = of_iomap(dev->of_node, 3);
 
 	if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
 			!cdd->qmgr_mem) {
@@ -961,8 +962,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 		goto err_remap;
 	}
 
-	pm_runtime_enable(&pdev->dev);
-	ret = pm_runtime_get_sync(&pdev->dev);
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
 	if (ret)
 		goto err_get_sync;
 
@@ -970,22 +971,22 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->queues_tx = glue_info->queues_tx;
 	cdd->td_queue = glue_info->td_queue;
 
-	ret = init_cppi41(pdev, cdd);
+	ret = init_cppi41(dev, cdd);
 	if (ret)
 		goto err_init_cppi;
 
-	ret = cppi41_add_chans(pdev, cdd);
+	ret = cppi41_add_chans(dev, cdd);
 	if (ret)
 		goto err_chans;
 
-	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	irq = irq_of_parse_and_map(dev->of_node, 0);
 	if (!irq)
 		goto err_irq;
 
 	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
 	ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
-			dev_name(&pdev->dev), cdd);
+			dev_name(dev), cdd);
 	if (ret)
 		goto err_irq;
 	cdd->irq = irq;
@@ -994,7 +995,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_dma_reg;
 
-	ret = of_dma_controller_register(pdev->dev.of_node,
+	ret = of_dma_controller_register(dev->of_node,
 			cppi41_dma_xlate, &cpp41_dma_info);
 	if (ret)
 		goto err_of;
@@ -1009,11 +1010,11 @@ err_irq:
 	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
 	cleanup_chans(cdd);
 err_chans:
-	deinit_cpii41(pdev, cdd);
+	deinit_cppi41(dev, cdd);
 err_init_cppi:
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_put(dev);
 err_get_sync:
-	pm_runtime_disable(&pdev->dev);
+	pm_runtime_disable(dev);
 	iounmap(cdd->usbss_mem);
 	iounmap(cdd->ctrl_mem);
 	iounmap(cdd->sched_mem);
@@ -1033,7 +1034,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
 	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
 	free_irq(cdd->irq, cdd);
 	cleanup_chans(cdd);
-	deinit_cpii41(pdev, cdd);
+	deinit_cppi41(&pdev->dev, cdd);
 	iounmap(cdd->usbss_mem);
 	iounmap(cdd->ctrl_mem);
 	iounmap(cdd->sched_mem);
@@ -1044,12 +1045,41 @@ static int cppi41_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+	disable_sched(cdd);
+
+	return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < DESCS_AREAS; i++)
+		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+	init_sched(cdd);
+	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
 static struct platform_driver cpp41_dma_driver = {
 	.probe = cppi41_dma_probe,
 	.remove = cppi41_dma_remove,
 	.driver = {
 		.name = "cppi41-dma-engine",
 		.owner = THIS_MODULE,
+		.pm = &cppi41_pm_ops,
 		.of_match_table = of_match_ptr(cppi41_dma_ids),
 	},
 };
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 10b577fcf48d..8c612415867e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -46,8 +46,14 @@
 #define EDMA_CHANS	64
 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */
 
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG		16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots
+ * Also note that MAX_NR_SG should be atleast the no.of periods
+ * that are required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires atleast 17 slots, so we setup the default to 20.
+ */
+#define MAX_NR_SG		20
 #define EDMA_MAX_SLOTS		MAX_NR_SG
 #define EDMA_DESCRIPTORS	16
 
@@ -250,6 +256,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	return ret;
 }
 
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel who's PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and setup.
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: How much is the dev_width
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+	enum dma_slave_buswidth dev_width, unsigned int dma_length,
+	enum dma_transfer_direction direction)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	int acnt, bcnt, ccnt, cidx;
+	int src_bidx, dst_bidx, src_cidx, dst_cidx;
+	int absync;
+
+	acnt = dev_width;
+	/*
+	 * If the maxburst is equal to the fifo width, use
+	 * A-synced transfers. This allows for large contiguous
+	 * buffer transfers using only one PaRAM set.
+	 */
+	if (burst == 1) {
+		/*
+		 * For the A-sync case, bcnt and ccnt are the remainder
+		 * and quotient respectively of the division of:
+		 * (dma_length / acnt) by (SZ_64K -1). This is so
+		 * that in case bcnt over flows, we have ccnt to use.
+		 * Note: In A-sync tranfer only, bcntrld is used, but it
+		 * only applies for sg_dma_len(sg) >= SZ_64K.
+		 * In this case, the best way adopted is- bccnt for the
+		 * first frame will be the remainder below. Then for
+		 * every successive frame, bcnt will be SZ_64K-1. This
+		 * is assured as bcntrld = 0xffff in end of function.
+		 */
+		absync = false;
+		ccnt = dma_length / acnt / (SZ_64K - 1);
+		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+		/*
+		 * If bcnt is non-zero, we have a remainder and hence an
+		 * extra frame to transfer, so increment ccnt.
+		 */
+		if (bcnt)
+			ccnt++;
+		else
+			bcnt = SZ_64K - 1;
+		cidx = acnt;
+	} else {
+		/*
+		 * If maxburst is greater than the fifo address_width,
+		 * use AB-synced transfers where A count is the fifo
+		 * address_width and B count is the maxburst. In this
+		 * case, we are limited to transfers of C count frames
+		 * of (address_width * maxburst) where C count is limited
+		 * to SZ_64K-1. This places an upper bound on the length
+		 * of an SG segment that can be handled.
+		 */
+		absync = true;
+		bcnt = burst;
+		ccnt = dma_length / (acnt * bcnt);
+		if (ccnt > (SZ_64K - 1)) {
+			dev_err(dev, "Exceeded max SG segment size\n");
+			return -EINVAL;
+		}
+		cidx = acnt * bcnt;
+	}
+
+	if (direction == DMA_MEM_TO_DEV) {
+		src_bidx = acnt;
+		src_cidx = cidx;
+		dst_bidx = 0;
+		dst_cidx = 0;
+	} else if (direction == DMA_DEV_TO_MEM) {
+		src_bidx = 0;
+		src_cidx = 0;
+		dst_bidx = acnt;
+		dst_cidx = cidx;
+	} else {
+		dev_err(dev, "%s: direction not implemented yet\n", __func__);
+		return -EINVAL;
+	}
+
+	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	/* Configure A or AB synchronized transfers */
+	if (absync)
+		pset->opt |= SYNCDIM;
+
+	pset->src = src_addr;
+	pset->dst = dst_addr;
+
+	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+	pset->a_b_cnt = bcnt << 16 | acnt;
+	pset->ccnt = ccnt;
+	/*
+	 * Only time when (bcntrld) auto reload is required is for
+	 * A-sync case, and in this case, a requirement of reload value
+	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
+	 * and then later will be populated by edma_execute.
+	 */
+	pset->link_bcntrld = 0xffffffff;
+	return absync;
+}
+
 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +375,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
 	struct edma_desc *edesc;
-	dma_addr_t dev_addr;
+	dma_addr_t src_addr = 0, dst_addr = 0;
 	enum dma_slave_buswidth dev_width;
 	u32 burst;
 	struct scatterlist *sg;
-	int acnt, bcnt, ccnt, src, dst, cidx;
-	int src_bidx, dst_bidx, src_cidx, dst_cidx;
-	int i, nslots;
+	int i, nslots, ret;
 
 	if (unlikely(!echan || !sgl || !sg_len))
 		return NULL;
 
 	if (direction == DMA_DEV_TO_MEM) {
-		dev_addr = echan->cfg.src_addr;
+		src_addr = echan->cfg.src_addr;
 		dev_width = echan->cfg.src_addr_width;
 		burst = echan->cfg.src_maxburst;
 	} else if (direction == DMA_MEM_TO_DEV) {
-		dev_addr = echan->cfg.dst_addr;
+		dst_addr = echan->cfg.dst_addr;
 		dev_width = echan->cfg.dst_addr_width;
 		burst = echan->cfg.dst_maxburst;
 	} else {
@@ -315,64 +430,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
 	/* Configure PaRAM sets for each SG */
 	for_each_sg(sgl, sg, sg_len, i) {
-
-		acnt = dev_width;
-
-		/*
-		 * If the maxburst is equal to the fifo width, use
-		 * A-synced transfers. This allows for large contiguous
-		 * buffer transfers using only one PaRAM set.
-		 */
-		if (burst == 1) {
-			edesc->absync = false;
-			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
-			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
-			if (bcnt)
-				ccnt++;
-			else
-				bcnt = SZ_64K - 1;
-			cidx = acnt;
-		/*
-		 * If maxburst is greater than the fifo address_width,
-		 * use AB-synced transfers where A count is the fifo
-		 * address_width and B count is the maxburst. In this
-		 * case, we are limited to transfers of C count frames
-		 * of (address_width * maxburst) where C count is limited
-		 * to SZ_64K-1. This places an upper bound on the length
-		 * of an SG segment that can be handled.
-		 */
-		} else {
-			edesc->absync = true;
-			bcnt = burst;
-			ccnt = sg_dma_len(sg) / (acnt * bcnt);
-			if (ccnt > (SZ_64K - 1)) {
-				dev_err(dev, "Exceeded max SG segment size\n");
-				kfree(edesc);
-				return NULL;
-			}
-			cidx = acnt * bcnt;
-		}
-
-		if (direction == DMA_MEM_TO_DEV) {
-			src = sg_dma_address(sg);
-			dst = dev_addr;
-			src_bidx = acnt;
-			src_cidx = cidx;
-			dst_bidx = 0;
-			dst_cidx = 0;
-		} else {
-			src = dev_addr;
-			dst = sg_dma_address(sg);
-			src_bidx = 0;
-			src_cidx = 0;
-			dst_bidx = acnt;
-			dst_cidx = cidx;
+		/* Get address for each SG */
+		if (direction == DMA_DEV_TO_MEM)
+			dst_addr = sg_dma_address(sg);
+		else
+			src_addr = sg_dma_address(sg);
+
+		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+				       dst_addr, burst, dev_width,
+				       sg_dma_len(sg), direction);
+		if (ret < 0) {
+			kfree(edesc);
+			return NULL;
 		}
 
-		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-		/* Configure A or AB synchronized transfers */
-		if (edesc->absync)
-			edesc->pset[i].opt |= SYNCDIM;
+		edesc->absync = ret;
 
 		/* If this is the last in a current SG set of transactions,
 		   enable interrupts so that next set is processed */
@@ -382,17 +454,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		/* If this is the last set, enable completion interrupt flag */
 		if (i == sg_len - 1)
 			edesc->pset[i].opt |= TCINTEN;
-
-		edesc->pset[i].src = src;
-		edesc->pset[i].dst = dst;
-
-		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
-		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
-		edesc->pset[i].ccnt = ccnt;
-		edesc->pset[i].link_bcntrld = 0xffffffff;
-
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a2c330f5f952..da430623fbc4 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
 
 	irq = platform_get_irq(op, 0);
 	ret = devm_request_irq(&op->dev, irq,
-			k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+			k3_dma_int_handler, 0, DRIVER_NAME, d);
 	if (ret)
 		return ret;
 
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index ff8d7827f8cb..dcb1e05149a7 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
 		 * move the descriptors to a temporary list so we can drop
 		 * the lock during the entire cleanup operation
 		 */
-		list_del(&desc->node);
-		list_add(&desc->node, &chain_cleanup);
+		list_move(&desc->node, &chain_cleanup);
 
 		/*
 		 * Look for the first list entry which has the ENDIRQEN flag
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
 
 	if (irq) {
 		ret = devm_request_irq(pdev->dev, irq,
-			mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+			mmp_pdma_chan_handler, 0, "pdma", phy);
 		if (ret) {
 			dev_err(pdev->dev, "channel request irq fail!\n");
 			return ret;
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
 		/* all chan share one irq, demux inside */
 		irq = platform_get_irq(op, 0);
 		ret = devm_request_irq(pdev->dev, irq,
-			mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+			mmp_pdma_int_handler, 0, "pdma", pdev);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 38cb517fb2eb..8f3e865053d4 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -62,6 +62,11 @@
 #define TDCR_BURSTSZ_16B	(0x3 << 6)
 #define TDCR_BURSTSZ_32B	(0x6 << 6)
 #define TDCR_BURSTSZ_64B	(0x7 << 6)
+#define TDCR_BURSTSZ_SQU_1B	(0x5 << 6)
+#define TDCR_BURSTSZ_SQU_2B	(0x6 << 6)
+#define TDCR_BURSTSZ_SQU_4B	(0x0 << 6)
+#define TDCR_BURSTSZ_SQU_8B	(0x1 << 6)
+#define TDCR_BURSTSZ_SQU_16B	(0x3 << 6)
 #define TDCR_BURSTSZ_SQU_32B	(0x7 << 6)
 #define TDCR_BURSTSZ_128B	(0x5 << 6)
 #define TDCR_DSTDIR_MSK	(0x3 << 4)	/* Dst Direction */
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
 			return -EINVAL;
 		}
 	} else if (tdmac->type == PXA910_SQU) {
-		tdcr |= TDCR_BURSTSZ_SQU_32B;
 		tdcr |= TDCR_SSPMOD;
+
+		switch (tdmac->burst_sz) {
+		case 1:
+			tdcr |= TDCR_BURSTSZ_SQU_1B;
+			break;
+		case 2:
+			tdcr |= TDCR_BURSTSZ_SQU_2B;
+			break;
+		case 4:
+			tdcr |= TDCR_BURSTSZ_SQU_4B;
+			break;
+		case 8:
+			tdcr |= TDCR_BURSTSZ_SQU_8B;
+			break;
+		case 16:
+			tdcr |= TDCR_BURSTSZ_SQU_16B;
+			break;
+		case 32:
+			tdcr |= TDCR_BURSTSZ_SQU_32B;
+			break;
+		default:
+			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+			return -EINVAL;
+		}
 	}
 
 	writel(tdcr, tdmac->reg_base + TDCR);
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
 
 	if (tdmac->irq) {
 		ret = devm_request_irq(tdmac->dev, tdmac->irq,
-			mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+			mmp_tdma_chan_handler, 0, "tdma", tdmac);
 		if (ret)
 			return ret;
 	}
@@ -559,7 +587,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 	if (irq_num != chan_num) {
 		irq = platform_get_irq(pdev, 0);
 		ret = devm_request_irq(&pdev->dev, irq,
-			mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+			mmp_tdma_int_handler, 0, "tdma", tdev);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a562d24d20bf..96de393aaf4f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2922,16 +2922,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
 	amba_set_drvdata(adev, pdmac);
 
-	irq = adev->irq[0];
-	ret = request_irq(irq, pl330_irq_handler, 0,
-			dev_name(&adev->dev), pi);
-	if (ret)
-		return ret;
+	for (i = 0; i <= AMBA_NR_IRQS; i++) {
+		irq = adev->irq[i];
+		if (irq) {
+			ret = devm_request_irq(&adev->dev, irq,
+					       pl330_irq_handler, 0,
+					       dev_name(&adev->dev), pi);
+			if (ret)
+				return ret;
+		} else {
+			break;
+		}
+	}
 
 	pi->pcfg.periph_id = adev->periphid;
 	ret = pl330_add(pi);
 	if (ret)
-		goto probe_err1;
+		return ret;
 
 	INIT_LIST_HEAD(&pdmac->desc_pool);
 	spin_lock_init(&pdmac->pool_lock);
@@ -3044,8 +3051,6 @@ probe_err3:
 	}
 probe_err2:
 	pl330_del(pi);
-probe_err1:
-	free_irq(irq, pi);
 
 	return ret;
 }
@@ -3055,7 +3060,6 @@ static int pl330_remove(struct amba_device *adev)
 	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
 	struct dma_pl330_chan *pch, *_p;
 	struct pl330_info *pi;
-	int irq;
 
 	if (!pdmac)
 		return 0;
@@ -3082,9 +3086,6 @@ static int pl330_remove(struct amba_device *adev)
 
 	pl330_del(pi);
 
-	irq = adev->irq[0];
-	free_irq(irq, pi);
-
 	return 0;
 }
 
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1069e8869f20..0d765c0e21ec 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
 	const struct sh_dmae_pdata *pdata;
-	unsigned long irqflags = IRQF_DISABLED,
+	unsigned long irqflags = 0,
 		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
 	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
 	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
 					    IORESOURCE_IRQ_SHAREABLE)
 					chan_flag[irq_cnt] = IRQF_SHARED;
 				else
-					chan_flag[irq_cnt] = IRQF_DISABLED;
+					chan_flag[irq_cnt] = 0;
 				dev_dbg(&pdev->dev,
 					"Found IRQ %d for channel %d\n",
 					i, irq_cnt);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 82d2b97ad942..3d5e4ee94f5f 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/log2.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	    src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
 	    dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
 	    dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
-	    ((src_addr_width > 1) && (src_addr_width & 1)) ||
-	    ((dst_addr_width > 1) && (dst_addr_width & 1)))
+	    !is_power_of_2(src_addr_width) ||
+	    !is_power_of_2(dst_addr_width))
 		return -EINVAL;
 
 	cfg->src_info.data_width = src_addr_width;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 5d4986e5f5fa..67a6752bf863 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 	return &dma_desc->txd;
 }
 
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
 	unsigned long flags, void *context)