author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 21:11:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 21:11:04 -0400
commit     52d589a01d4545ce1dc5c3892bb8c7b55edfe714 (patch)
tree       864858dae5d01aae411497e926679cf92392b4f6 /drivers/dma
parent     0a582821d4f8edf41d9b56ae057ee2002fc275f0 (diff)
parent     6b997bab20448cfe85456e4789d5d9222ab6b830 (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"For dmaengine contributions we have:
- designware cleanup by Andy
- my series moving device_control users to dmaengine_xxx APIs for
later removal of device_control API
- minor fixes spread over drivers mainly mv_xor, pl330, mmp, imx-sdma
etc"
* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (60 commits)
serial: atmel: add missing dmaengine header
dmaengine: remove FSLDMA_EXTERNAL_START
dmaengine: freescale: remove FSLDMA_EXTERNAL_START control method
carma-fpga: move to fsl_dma_external_start()
carma-fpga: use dmaengine_xxx() API
dmaengine: freescale: add and export fsl_dma_external_start()
dmaengine: add dmaengine_prep_dma_sg() helper
video: mx3fb: use dmaengine_terminate_all() API
serial: sh-sci: use dmaengine_terminate_all() API
net: ks8842: use dmaengine_terminate_all() API
mtd: sh_flctl: use dmaengine_terminate_all() API
mtd: fsmc_nand: use dmaengine_terminate_all() API
V4L2: mx3_camer: use dmaengine_pause() API
dmaengine: coh901318: use dmaengine_terminate_all() API
pata_arasan_cf: use dmaengine_terminate_all() API
dmaengine: edma: check for echan->edesc => NULL in edma_dma_pause()
dmaengine: dw: export probe()/remove() and Co to users
dmaengine: dw: enable and disable controller when needed
dmaengine: dw: always export dw_dma_{en,dis}able
dmaengine: dw: introduce dw_dma_on() helper
...
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig              |   2
-rw-r--r--	drivers/dma/coh901318.c          |   2
-rw-r--r--	drivers/dma/cppi41.c             |  16
-rw-r--r--	drivers/dma/dw/core.c            | 147
-rw-r--r--	drivers/dma/dw/internal.h        |  61
-rw-r--r--	drivers/dma/dw/pci.c             |   8
-rw-r--r--	drivers/dma/dw/platform.c        |  92
-rw-r--r--	drivers/dma/dw/regs.h            |  41
-rw-r--r--	drivers/dma/edma.c               |   2
-rw-r--r--	drivers/dma/fsldma.c             |  25
-rw-r--r--	drivers/dma/imx-sdma.c           |  35
-rw-r--r--	drivers/dma/mmp_tdma.c           |  19
-rw-r--r--	drivers/dma/mv_xor.c             | 268
-rw-r--r--	drivers/dma/mv_xor.h             |  62
-rw-r--r--	drivers/dma/pl330.c              |  19
-rw-r--r--	drivers/dma/sh/rcar-audmapp.c    |  15
-rw-r--r--	drivers/dma/sun6i-dma.c          |  23
-rw-r--r--	drivers/dma/xilinx/xilinx_vdma.c |   1
18 files changed, 359 insertions, 479 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a016490c95ae..de469821bc1b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -270,7 +270,7 @@ config IMX_SDMA
 	select DMA_ENGINE
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
-	  Freescale i.MX25/31/35/51/53 chips.
+	  Freescale i.MX25/31/35/51/53/6 chips.
 
 config IMX_DMA
 	tristate "i.MX DMA support"
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 3c6716e0b78e..e88588d8ecd3 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2156,7 +2156,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 }
 
 
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 8f8b0b608875..a58eec3b2cad 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -938,7 +938,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	if (!glue_info)
 		return -EINVAL;
 
-	cdd = kzalloc(sizeof(*cdd), GFP_KERNEL);
+	cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
 	if (!cdd)
 		return -ENOMEM;
 
@@ -959,10 +959,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->qmgr_mem = of_iomap(dev->of_node, 3);
 
 	if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
-			!cdd->qmgr_mem) {
-		ret = -ENXIO;
-		goto err_remap;
-	}
+			!cdd->qmgr_mem)
+		return -ENXIO;
 
 	pm_runtime_enable(dev);
 	ret = pm_runtime_get_sync(dev);
@@ -989,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 
 	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
-	ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
+	ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
 			dev_name(dev), cdd);
 	if (ret)
 		goto err_irq;
@@ -1009,7 +1007,6 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 err_of:
 	dma_async_device_unregister(&cdd->ddev);
 err_dma_reg:
-	free_irq(irq, cdd);
 err_irq:
 	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
 	cleanup_chans(cdd);
@@ -1023,8 +1020,6 @@ err_get_sync:
 	iounmap(cdd->ctrl_mem);
 	iounmap(cdd->sched_mem);
 	iounmap(cdd->qmgr_mem);
-err_remap:
-	kfree(cdd);
 	return ret;
 }
 
@@ -1036,7 +1031,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&cdd->ddev);
 
 	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
-	free_irq(cdd->irq, cdd);
+	devm_free_irq(&pdev->dev, cdd->irq, cdd);
 	cleanup_chans(cdd);
 	deinit_cppi41(&pdev->dev, cdd);
 	iounmap(cdd->usbss_mem);
@@ -1045,7 +1040,6 @@ static int cppi41_dma_remove(struct platform_device *pdev)
 	iounmap(cdd->qmgr_mem);
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	kfree(cdd);
 	return 0;
 }
 
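The cppi41 hunks above swap manual allocation and IRQ management for their devm_-managed counterparts, which is why the err_remap label and the kfree()/free_irq() unwind steps disappear. A minimal sketch of the pattern, using a hypothetical demo driver (demo_priv and demo_isr are illustrative, not from the patch):

	static int demo_probe(struct platform_device *pdev)
	{
		struct demo_priv *p;
		int irq, ret;

		/* freed automatically on probe failure or unbind */
		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		/* released automatically as well: no free_irq() on error */
		ret = devm_request_irq(&pdev->dev, irq, demo_isr, 0,
				       dev_name(&pdev->dev), p);
		if (ret)
			return ret;

		return 0;
	}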
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1af731b83b3f..244722170410 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/bitops.h>
-#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -37,24 +36,6 @@
  * support descriptor writeback.
  */
 
-static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
-{
-	return dwc->request_line == (typeof(dwc->request_line))~0;
-}
-
-static inline void dwc_set_masters(struct dw_dma_chan *dwc)
-{
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
-	unsigned char mmax = dw->nr_masters - 1;
-
-	if (!is_request_line_unset(dwc))
-		return;
-
-	dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
-	dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
-}
-
 #define DWC_DEFAULT_CTLLO(_chan) ({				\
 	struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
 	struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
@@ -155,13 +136,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 		 */
 		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
 
-		cfghi = dws->cfg_hi;
-		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
 	} else {
-		if (dwc->direction == DMA_MEM_TO_DEV)
-			cfghi = DWC_CFGH_DST_PER(dwc->request_line);
-		else if (dwc->direction == DMA_DEV_TO_MEM)
-			cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 	}
 
 	channel_writel(dwc, CFG_LO, cfglo);
@@ -939,6 +918,26 @@ err_desc_get:
 	return NULL;
 }
 
+bool dw_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_dma_slave *dws = param;
+
+	if (!dws || dws->dma_dev != chan->device->dev)
+		return false;
+
+	/* We have to copy data since dws can be temporary storage */
+
+	dwc->src_id = dws->src_id;
+	dwc->dst_id = dws->dst_id;
+
+	dwc->src_master = dws->src_master;
+	dwc->dst_master = dws->dst_master;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(dw_dma_filter);
+
 /*
  * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
@@ -967,10 +966,6 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
 	dwc->direction = sconfig->direction;
 
-	/* Take the request line from slave_id member */
-	if (is_request_line_unset(dwc))
-		dwc->request_line = sconfig->slave_id;
-
 	convert_burst(&dwc->dma_sconfig.src_maxburst);
 	convert_burst(&dwc->dma_sconfig.dst_maxburst);
 
@@ -1099,6 +1094,31 @@ static void dwc_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
+/*----------------------------------------------------------------------*/
+
+static void dw_dma_off(struct dw_dma *dw)
+{
+	int i;
+
+	dma_writel(dw, CFG, 0);
+
+	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+		cpu_relax();
+
+	for (i = 0; i < dw->dma.chancnt; i++)
+		dw->chan[i].initialized = false;
+}
+
+static void dw_dma_on(struct dw_dma *dw)
+{
+	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+}
+
 static int dwc_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -1123,7 +1143,10 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
-	dwc_set_masters(dwc);
+	/* Enable controller here if needed */
+	if (!dw->in_use)
+		dw_dma_on(dw);
+	dw->in_use |= dwc->mask;
 
 	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
@@ -1182,7 +1205,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
 	dwc->initialized = false;
-	dwc->request_line = ~0;
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1190,6 +1212,11 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+	/* Disable controller in case it was a last user */
+	dw->in_use &= ~dwc->mask;
+	if (!dw->in_use)
+		dw_dma_off(dw);
+
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
 		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
 		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
@@ -1460,24 +1487,6 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
 
 /*----------------------------------------------------------------------*/
 
-static void dw_dma_off(struct dw_dma *dw)
-{
-	int i;
-
-	dma_writel(dw, CFG, 0);
-
-	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
-
-	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
-		cpu_relax();
-
-	for (i = 0; i < dw->dma.chancnt; i++)
-		dw->chan[i].initialized = false;
-}
-
 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 {
 	struct dw_dma *dw;
@@ -1495,13 +1504,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	dw->regs = chip->regs;
 	chip->dw = dw;
 
-	dw->clk = devm_clk_get(chip->dev, "hclk");
-	if (IS_ERR(dw->clk))
-		return PTR_ERR(dw->clk);
-	err = clk_prepare_enable(dw->clk);
-	if (err)
-		return err;
-
 	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
 	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
 
@@ -1604,7 +1606,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 
 		dwc->direction = DMA_TRANS_NONE;
-		dwc->request_line = ~0;
 
 		/* Hardware configuration */
 		if (autocfg) {
@@ -1659,8 +1660,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	dw->dma.device_tx_status = dwc_tx_status;
 	dw->dma.device_issue_pending = dwc_issue_pending;
 
-	dma_writel(dw, CFG, DW_CFG_DMA_EN);
-
 	err = dma_async_device_register(&dw->dma);
 	if (err)
 		goto err_dma_register;
@@ -1673,7 +1672,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 err_dma_register:
 	free_irq(chip->irq, dw);
 err_pdata:
-	clk_disable_unprepare(dw->clk);
 	return err;
 }
 EXPORT_SYMBOL_GPL(dw_dma_probe);
@@ -1695,46 +1693,27 @@ int dw_dma_remove(struct dw_dma_chip *chip)
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 	}
 
-	clk_disable_unprepare(dw->clk);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dw_dma_remove);
 
-void dw_dma_shutdown(struct dw_dma_chip *chip)
-{
-	struct dw_dma *dw = chip->dw;
-
-	dw_dma_off(dw);
-	clk_disable_unprepare(dw->clk);
-}
-EXPORT_SYMBOL_GPL(dw_dma_shutdown);
-
-#ifdef CONFIG_PM_SLEEP
-
-int dw_dma_suspend(struct dw_dma_chip *chip)
+int dw_dma_disable(struct dw_dma_chip *chip)
 {
 	struct dw_dma *dw = chip->dw;
 
 	dw_dma_off(dw);
-	clk_disable_unprepare(dw->clk);
-
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dw_dma_suspend);
+EXPORT_SYMBOL_GPL(dw_dma_disable);
 
-int dw_dma_resume(struct dw_dma_chip *chip)
+int dw_dma_enable(struct dw_dma_chip *chip)
 {
 	struct dw_dma *dw = chip->dw;
 
-	clk_prepare_enable(dw->clk);
-	dma_writel(dw, CFG, DW_CFG_DMA_EN);
-
+	dw_dma_on(dw);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dw_dma_resume);
-
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SYMBOL_GPL(dw_dma_enable);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
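The newly exported dw_dma_filter() lets any client hand a filled-in struct dw_dma_slave to dma_request_channel(), which is exactly what the reworked OF/ACPI glue in platform.c below does. A hedged client-side sketch (the field values are illustrative, not from the patch):

	struct dw_dma_slave slave = {
		.dma_dev    = dma_dev,	/* struct device of the DMA controller */
		.src_id     = 0,	/* handshake interface ids */
		.dst_id     = 0,
		.src_master = 1,
		.dst_master = 0,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, dw_dma_filter, &slave);

Because the filter copies the ids and masters into the channel, the slave structure can be temporary (stack) storage, as the comment in the hunk notes.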
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 32667f9e0dda..41439732ff6b 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -8,63 +8,16 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef _DW_DMAC_INTERNAL_H
-#define _DW_DMAC_INTERNAL_H
+#ifndef _DMA_DW_INTERNAL_H
+#define _DMA_DW_INTERNAL_H
 
-#include <linux/device.h>
-#include <linux/dw_dmac.h>
+#include <linux/dma/dw.h>
 
 #include "regs.h"
 
-/**
- * struct dw_dma_chip - representation of DesignWare DMA controller hardware
- * @dev:	struct device of the DMA controller
- * @irq:	irq line
- * @regs:	memory mapped I/O space
- * @dw:		struct dw_dma that is filed by dw_dma_probe()
- */
-struct dw_dma_chip {
-	struct device	*dev;
-	int		irq;
-	void __iomem	*regs;
-	struct dw_dma	*dw;
-};
-
-/* Export to the platform drivers */
-int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
-int dw_dma_remove(struct dw_dma_chip *chip);
-
-void dw_dma_shutdown(struct dw_dma_chip *chip);
-
-#ifdef CONFIG_PM_SLEEP
-
-int dw_dma_suspend(struct dw_dma_chip *chip);
-int dw_dma_resume(struct dw_dma_chip *chip);
-
-#endif /* CONFIG_PM_SLEEP */
+int dw_dma_disable(struct dw_dma_chip *chip);
+int dw_dma_enable(struct dw_dma_chip *chip);
 
-/**
- * dwc_get_dms - get destination master
- * @slave:	pointer to the custom slave configuration
- *
- * Returns destination master in the custom slave configuration if defined, or
- * default value otherwise.
- */
-static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
-{
-	return slave ? slave->dst_master : 0;
-}
-
-/**
- * dwc_get_sms - get source master
- * @slave:	pointer to the custom slave configuration
- *
- * Returns source master in the custom slave configuration if defined, or
- * default value otherwise.
- */
-static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
-{
-	return slave ? slave->src_master : 1;
-}
+extern bool dw_dma_filter(struct dma_chan *chan, void *param);
 
-#endif /* _DW_DMAC_INTERNAL_H */
+#endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 39e30c3c7a9d..b144706b3d85 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -82,7 +82,7 @@ static int dw_pci_suspend_late(struct device *dev)
 	struct pci_dev *pci = to_pci_dev(dev);
 	struct dw_dma_chip *chip = pci_get_drvdata(pci);
 
-	return dw_dma_suspend(chip);
+	return dw_dma_disable(chip);
 };
 
 static int dw_pci_resume_early(struct device *dev)
@@ -90,7 +90,7 @@ static int dw_pci_resume_early(struct device *dev)
 	struct pci_dev *pci = to_pci_dev(dev);
 	struct dw_dma_chip *chip = pci_get_drvdata(pci);
 
-	return dw_dma_resume(chip);
+	return dw_dma_enable(chip);
 };
 
 #endif /* CONFIG_PM_SLEEP */
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
 	{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
 
+	/* Braswell */
+	{ PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_pdata },
+	{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_pdata },
+
 	/* Haswell */
 	{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
 	{ }
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index c5b339af6be5..a630161473a4 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -25,72 +25,49 @@
 
 #include "internal.h"
 
-struct dw_dma_of_filter_args {
-	struct dw_dma *dw;
-	unsigned int req;
-	unsigned int src;
-	unsigned int dst;
-};
-
-static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma_of_filter_args *fargs = param;
-
-	/* Ensure the device matches our channel */
-	if (chan->device != &fargs->dw->dma)
-		return false;
-
-	dwc->request_line = fargs->req;
-	dwc->src_master = fargs->src;
-	dwc->dst_master = fargs->dst;
-
-	return true;
-}
-
 static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
 					struct of_dma *ofdma)
 {
 	struct dw_dma *dw = ofdma->of_dma_data;
-	struct dw_dma_of_filter_args fargs = {
-		.dw = dw,
+	struct dw_dma_slave slave = {
+		.dma_dev = dw->dma.dev,
 	};
 	dma_cap_mask_t cap;
 
 	if (dma_spec->args_count != 3)
 		return NULL;
 
-	fargs.req = dma_spec->args[0];
-	fargs.src = dma_spec->args[1];
-	fargs.dst = dma_spec->args[2];
+	slave.src_id = dma_spec->args[0];
+	slave.dst_id = dma_spec->args[0];
+	slave.src_master = dma_spec->args[1];
+	slave.dst_master = dma_spec->args[2];
 
-	if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
-		    fargs.src >= dw->nr_masters ||
-		    fargs.dst >= dw->nr_masters))
+	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
+		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
+		    slave.src_master >= dw->nr_masters ||
+		    slave.dst_master >= dw->nr_masters))
 		return NULL;
 
 	dma_cap_zero(cap);
 	dma_cap_set(DMA_SLAVE, cap);
 
 	/* TODO: there should be a simpler way to do this */
-	return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+	return dma_request_channel(cap, dw_dma_filter, &slave);
 }
 
 #ifdef CONFIG_ACPI
 static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
 {
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct acpi_dma_spec *dma_spec = param;
+	struct dw_dma_slave slave = {
+		.dma_dev = dma_spec->dev,
+		.src_id = dma_spec->slave_id,
+		.dst_id = dma_spec->slave_id,
+		.src_master = 1,
+		.dst_master = 0,
+	};
 
-	if (chan->device->dev != dma_spec->dev ||
-	    chan->chan_id != dma_spec->chan_id)
-		return false;
-
-	dwc->request_line = dma_spec->slave_id;
-	dwc->src_master = dwc_get_sms(NULL);
-	dwc->dst_master = dwc_get_dms(NULL);
-
-	return true;
+	return dw_dma_filter(chan, &slave);
 }
 
 static void dw_dma_acpi_controller_register(struct dw_dma *dw)
@@ -201,10 +178,17 @@ static int dw_probe(struct platform_device *pdev)
 
 	chip->dev = dev;
 
-	err = dw_dma_probe(chip, pdata);
+	chip->clk = devm_clk_get(chip->dev, "hclk");
+	if (IS_ERR(chip->clk))
+		return PTR_ERR(chip->clk);
+	err = clk_prepare_enable(chip->clk);
 	if (err)
 		return err;
 
+	err = dw_dma_probe(chip, pdata);
+	if (err)
+		goto err_dw_dma_probe;
+
 	platform_set_drvdata(pdev, chip);
 
 	if (pdev->dev.of_node) {
@@ -219,6 +203,10 @@ static int dw_probe(struct platform_device *pdev)
 		dw_dma_acpi_controller_register(chip->dw);
 
 	return 0;
+
+err_dw_dma_probe:
+	clk_disable_unprepare(chip->clk);
+	return err;
 }
 
 static int dw_remove(struct platform_device *pdev)
@@ -228,14 +216,18 @@ static int dw_remove(struct platform_device *pdev)
 	if (pdev->dev.of_node)
 		of_dma_controller_free(pdev->dev.of_node);
 
-	return dw_dma_remove(chip);
+	dw_dma_remove(chip);
+	clk_disable_unprepare(chip->clk);
+
+	return 0;
 }
 
 static void dw_shutdown(struct platform_device *pdev)
 {
 	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
 
-	dw_dma_shutdown(chip);
+	dw_dma_disable(chip);
+	clk_disable_unprepare(chip->clk);
 }
 
 #ifdef CONFIG_OF
@@ -261,7 +253,10 @@ static int dw_suspend_late(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
 
-	return dw_dma_suspend(chip);
+	dw_dma_disable(chip);
+	clk_disable_unprepare(chip->clk);
+
+	return 0;
 }
 
 static int dw_resume_early(struct device *dev)
@@ -269,7 +264,8 @@ static int dw_resume_early(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
 
-	return dw_dma_resume(chip);
+	clk_prepare_enable(chip->clk);
+	return dw_dma_enable(chip);
 }
 
 #endif /* CONFIG_PM_SLEEP */
@@ -281,7 +277,7 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
 static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
-	.shutdown	= dw_shutdown,
+	.shutdown       = dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index bb98d3e91e8b..848e232f7cc7 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -11,7 +11,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
-#include <linux/dw_dmac.h>
 
 #define DW_DMA_MAX_NR_CHANNELS	8
 #define DW_DMA_MAX_NR_REQUESTS	16
@@ -132,6 +131,18 @@ struct dw_dma_regs {
 /* Bitfields in DWC_PARAMS */
 #define DWC_PARAMS_MBLK_EN	11		/* multi block transfer */
 
+/* bursts size */
+enum dw_dma_msize {
+	DW_DMA_MSIZE_1,
+	DW_DMA_MSIZE_4,
+	DW_DMA_MSIZE_8,
+	DW_DMA_MSIZE_16,
+	DW_DMA_MSIZE_32,
+	DW_DMA_MSIZE_64,
+	DW_DMA_MSIZE_128,
+	DW_DMA_MSIZE_256,
+};
+
 /* Bitfields in CTL_LO */
 #define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
 #define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
@@ -161,20 +172,35 @@ struct dw_dma_regs {
 #define DWC_CTLH_DONE		0x00001000
 #define DWC_CTLH_BLOCK_TS_MASK	0x00000fff
 
-/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+/* Bitfields in CFG_LO */
 #define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
 #define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
 #define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
 #define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */
 #define DWC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
 #define DWC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
+#define DWC_CFGL_LOCK_CH_XFER	(0 << 12)	/* scope of LOCK_CH */
+#define DWC_CFGL_LOCK_CH_BLOCK	(1 << 12)
+#define DWC_CFGL_LOCK_CH_XACT	(2 << 12)
+#define DWC_CFGL_LOCK_BUS_XFER	(0 << 14)	/* scope of LOCK_BUS */
+#define DWC_CFGL_LOCK_BUS_BLOCK	(1 << 14)
+#define DWC_CFGL_LOCK_BUS_XACT	(2 << 14)
+#define DWC_CFGL_LOCK_CH	(1 << 15)	/* channel lockout */
+#define DWC_CFGL_LOCK_BUS	(1 << 16)	/* busmaster lockout */
+#define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
+#define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 #define DWC_CFGL_MAX_BURST(x)	((x) << 20)
 #define DWC_CFGL_RELOAD_SAR	(1 << 30)
 #define DWC_CFGL_RELOAD_DAR	(1 << 31)
 
-/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+/* Bitfields in CFG_HI */
+#define DWC_CFGH_FCMODE		(1 << 0)
+#define DWC_CFGH_FIFO_MODE	(1 << 1)
+#define DWC_CFGH_PROTCTL(x)	((x) << 2)
 #define DWC_CFGH_DS_UPD_EN	(1 << 5)
 #define DWC_CFGH_SS_UPD_EN	(1 << 6)
+#define DWC_CFGH_SRC_PER(x)	((x) << 7)
+#define DWC_CFGH_DST_PER(x)	((x) << 11)
 
 /* Bitfields in SGR */
 #define DWC_SGR_SGI(x)		((x) << 0)
@@ -221,9 +247,10 @@ struct dw_dma_chan {
 	bool			nollp;
 
 	/* custom slave configuration */
-	unsigned int		request_line;
-	unsigned char		src_master;
-	unsigned char		dst_master;
+	u8			src_id;
+	u8			dst_id;
+	u8			src_master;
+	u8			dst_master;
 
 	/* configuration passed via DMA_SLAVE_CONFIG */
 	struct dma_slave_config	dma_sconfig;
@@ -250,11 +277,11 @@ struct dw_dma {
 	void __iomem		*regs;
 	struct dma_pool		*desc_pool;
 	struct tasklet_struct	tasklet;
-	struct clk		*clk;
 
 	/* channels */
 	struct dw_dma_chan	*chan;
 	u8			all_chan_mask;
+	u8			in_use;
 
 	/* hardware configuration */
 	unsigned char		nr_masters;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 7b65633f495e..123f578d6dd3 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -288,7 +288,7 @@ static int edma_slave_config(struct edma_chan *echan,
 static int edma_dma_pause(struct edma_chan *echan)
 {
 	/* Pause/Resume only allowed with cyclic mode */
-	if (!echan->edesc->cyclic)
+	if (!echan->edesc || !echan->edesc->cyclic)
 		return -EINVAL;
 
 	edma_pause(echan->ch_num);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index d5d6885ab341..994bcb2c6b92 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -36,7 +36,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-
+#include <linux/fsldma.h>
 #include "dmaengine.h"
 #include "fsldma.h"
 
@@ -367,6 +367,20 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
 		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
 }
 
+int fsl_dma_external_start(struct dma_chan *dchan, int enable)
+{
+	struct fsldma_chan *chan;
+
+	if (!dchan)
+		return -EINVAL;
+
+	chan = to_fsl_chan(dchan);
+
+	fsl_chan_toggle_ext_start(chan, enable);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_dma_external_start);
+
 static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
 {
 	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
@@ -998,15 +1012,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 		chan->set_request_count(chan, size);
 		return 0;
 
-	case FSLDMA_EXTERNAL_START:
-
-		/* make sure the channel supports external start */
-		if (!chan->toggle_ext_start)
-			return -ENXIO;
-
-		chan->toggle_ext_start(chan, arg);
-		return 0;
-
 	default:
 		return -ENXIO;
 	}
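fsl_dma_external_start() replaces the FSLDMA_EXTERNAL_START device_control command with a direct, type-checked call; per the shortlog, carma-fpga is the converted user. A hedged sketch of a call site (chan and dev are assumed to exist in the caller):

	#include <linux/fsldma.h>

	/* enable externally started transfers on this channel */
	ret = fsl_dma_external_start(chan, 1);	/* 1 = enable, 0 = disable */
	if (ret)
		dev_err(dev, "unable to enable external start\n");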
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f7626e37d0b8..88afc48c2ca7 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1334,7 +1334,7 @@ err_firmware:
 	release_firmware(fw);
 }
 
-static int __init sdma_get_firmware(struct sdma_engine *sdma,
+static int sdma_get_firmware(struct sdma_engine *sdma,
 		const char *fw_name)
 {
 	int ret;
@@ -1448,7 +1448,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
 	return dma_request_channel(mask, sdma_filter_fn, &data);
 }
 
-static int __init sdma_probe(struct platform_device *pdev)
+static int sdma_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id =
 			of_match_device(sdma_dt_ids, &pdev->dev);
@@ -1603,6 +1603,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
 	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
+	platform_set_drvdata(pdev, sdma);
+
 	ret = dma_async_device_register(&sdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
@@ -1640,7 +1642,27 @@ err_irq:
 
 static int sdma_remove(struct platform_device *pdev)
 {
-	return -EBUSY;
+	struct sdma_engine *sdma = platform_get_drvdata(pdev);
+	struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	int irq = platform_get_irq(pdev, 0);
+	int i;
+
+	dma_async_device_unregister(&sdma->dma_device);
+	kfree(sdma->script_addrs);
+	free_irq(irq, sdma);
+	iounmap(sdma->regs);
+	release_mem_region(iores->start, resource_size(iores));
+	/* Kill the tasklet */
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+		struct sdma_channel *sdmac = &sdma->channel[i];
+
+		tasklet_kill(&sdmac->tasklet);
+	}
+	kfree(sdma);
+
+	platform_set_drvdata(pdev, NULL);
+	dev_info(&pdev->dev, "Removed...\n");
+	return 0;
 }
 
 static struct platform_driver sdma_driver = {
@@ -1650,13 +1672,10 @@ static struct platform_driver sdma_driver = {
 	},
 	.id_table = sdma_devtypes,
 	.remove = sdma_remove,
+	.probe = sdma_probe,
 };
 
-static int __init sdma_module_init(void)
-{
-	return platform_driver_probe(&sdma_driver, sdma_probe);
-}
-module_init(sdma_module_init);
+module_platform_driver(sdma_driver);
 
 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
 MODULE_DESCRIPTION("i.MX SDMA driver");
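Dropping platform_driver_probe() in favour of a .probe member plus module_platform_driver() is what makes sdma_remove() meaningful: the device can now be unbound and rebound, and probe may run after module init, which is also why the __init annotations had to go. module_platform_driver() is the standard helper from <linux/platform_device.h>; it expands to roughly:

	/* rough expansion of module_platform_driver(sdma_driver) */
	static int __init sdma_driver_init(void)
	{
		return platform_driver_register(&sdma_driver);
	}
	module_init(sdma_driver_init);

	static void __exit sdma_driver_exit(void)
	{
		platform_driver_unregister(&sdma_driver);
	}
	module_exit(sdma_driver_exit);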
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 6ad30e2c5038..c6bd015b7165 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -148,10 +148,16 @@ static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
 					tdmac->reg_base + TDCR);
 }
 
+static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable)
+{
+	if (enable)
+		writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
+	else
+		writel(0, tdmac->reg_base + TDIMR);
+}
+
 static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
 {
-	/* enable irq */
-	writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
 	/* enable dma chan */
 	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
 					tdmac->reg_base + TDCR);
@@ -163,9 +169,6 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
 	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
 					tdmac->reg_base + TDCR);
 
-	/* disable irq */
-	writel(0, tdmac->reg_base + TDIMR);
-
 	tdmac->status = DMA_COMPLETE;
 }
 
@@ -434,6 +437,10 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
 		i++;
 	}
 
+	/* enable interrupt */
+	if (flags & DMA_PREP_INTERRUPT)
+		mmp_tdma_enable_irq(tdmac, true);
+
 	tdmac->buf_len = buf_len;
 	tdmac->period_len = period_len;
 	tdmac->pos = 0;
@@ -455,6 +462,8 @@ static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
 		mmp_tdma_disable_chan(tdmac);
+		/* disable interrupt */
+		mmp_tdma_enable_irq(tdmac, false);
 		break;
 	case DMA_PAUSE:
 		mmp_tdma_pause_chan(tdmac);
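With the interrupt enable moved into prep and gated on DMA_PREP_INTERRUPT, a client that never asks for completion callbacks no longer pays one interrupt per period. A hedged client sketch (buf_phys, buf_len, period_len and period_done are illustrative names, not from the patch):

	struct dma_async_tx_descriptor *desc;

	/* only pass DMA_PREP_INTERRUPT if a per-period callback is wanted */
	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = period_done;	/* invoked each period */
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}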
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7938272f2edf..a63837ca1410 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -45,19 +45,18 @@ static void mv_xor_issue_pending(struct dma_chan *chan); | |||
45 | #define mv_chan_to_devp(chan) \ | 45 | #define mv_chan_to_devp(chan) \ |
46 | ((chan)->dmadev.dev) | 46 | ((chan)->dmadev.dev) |
47 | 47 | ||
48 | static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) | 48 | static void mv_desc_init(struct mv_xor_desc_slot *desc, |
49 | dma_addr_t addr, u32 byte_count, | ||
50 | enum dma_ctrl_flags flags) | ||
49 | { | 51 | { |
50 | struct mv_xor_desc *hw_desc = desc->hw_desc; | 52 | struct mv_xor_desc *hw_desc = desc->hw_desc; |
51 | 53 | ||
52 | hw_desc->status = (1 << 31); | 54 | hw_desc->status = XOR_DESC_DMA_OWNED; |
53 | hw_desc->phy_next_desc = 0; | 55 | hw_desc->phy_next_desc = 0; |
54 | hw_desc->desc_command = (1 << 31); | 56 | /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */ |
55 | } | 57 | hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ? |
56 | 58 | XOR_DESC_EOD_INT_EN : 0; | |
57 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | 59 | hw_desc->phy_dest_addr = addr; |
58 | u32 byte_count) | ||
59 | { | ||
60 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
61 | hw_desc->byte_count = byte_count; | 60 | hw_desc->byte_count = byte_count; |
62 | } | 61 | } |
63 | 62 | ||
@@ -75,20 +74,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) | |||
75 | hw_desc->phy_next_desc = 0; | 74 | hw_desc->phy_next_desc = 0; |
76 | } | 75 | } |
77 | 76 | ||
78 | static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, | ||
79 | dma_addr_t addr) | ||
80 | { | ||
81 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
82 | hw_desc->phy_dest_addr = addr; | ||
83 | } | ||
84 | |||
85 | static int mv_chan_memset_slot_count(size_t len) | ||
86 | { | ||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) | ||
91 | |||
92 | static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, | 77 | static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, |
93 | int index, dma_addr_t addr) | 78 | int index, dma_addr_t addr) |
94 | { | 79 | { |
@@ -123,17 +108,12 @@ static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) | |||
123 | return intr_cause; | 108 | return intr_cause; |
124 | } | 109 | } |
125 | 110 | ||
126 | static int mv_is_err_intr(u32 intr_cause) | ||
127 | { | ||
128 | if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) | ||
129 | return 1; | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) | 111 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) |
135 | { | 112 | { |
136 | u32 val = ~(1 << (chan->idx * 16)); | 113 | u32 val; |
114 | |||
115 | val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED; | ||
116 | val = ~(val << (chan->idx * 16)); | ||
137 | dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); | 117 | dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); |
138 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); | 118 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); |
139 | } | 119 | } |
@@ -144,17 +124,6 @@ static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) | |||
144 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); | 124 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); |
145 | } | 125 | } |
146 | 126 | ||
147 | static int mv_can_chain(struct mv_xor_desc_slot *desc) | ||
148 | { | ||
149 | struct mv_xor_desc_slot *chain_old_tail = list_entry( | ||
150 | desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); | ||
151 | |||
152 | if (chain_old_tail->type != desc->type) | ||
153 | return 0; | ||
154 | |||
155 | return 1; | ||
156 | } | ||
157 | |||
158 | static void mv_set_mode(struct mv_xor_chan *chan, | 127 | static void mv_set_mode(struct mv_xor_chan *chan, |
159 | enum dma_transaction_type type) | 128 | enum dma_transaction_type type) |
160 | { | 129 | { |
@@ -206,11 +175,6 @@ static char mv_chan_is_busy(struct mv_xor_chan *chan) | |||
206 | return (state == 1) ? 1 : 0; | 175 | return (state == 1) ? 1 : 0; |
207 | } | 176 | } |
208 | 177 | ||
209 | static int mv_chan_xor_slot_count(size_t len, int src_cnt) | ||
210 | { | ||
211 | return 1; | ||
212 | } | ||
213 | |||
214 | /** | 178 | /** |
215 | * mv_xor_free_slots - flags descriptor slots for reuse | 179 | * mv_xor_free_slots - flags descriptor slots for reuse |
216 | * @slot: Slot to free | 180 | * @slot: Slot to free |
@@ -222,7 +186,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, | |||
222 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", | 186 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", |
223 | __func__, __LINE__, slot); | 187 | __func__, __LINE__, slot); |
224 | 188 | ||
225 | slot->slots_per_op = 0; | 189 | slot->slot_used = 0; |
226 | 190 | ||
227 | } | 191 | } |
228 | 192 | ||
@@ -236,13 +200,11 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, | |||
236 | { | 200 | { |
237 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", | 201 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", |
238 | __func__, __LINE__, sw_desc); | 202 | __func__, __LINE__, sw_desc); |
239 | if (sw_desc->type != mv_chan->current_type) | ||
240 | mv_set_mode(mv_chan, sw_desc->type); | ||
241 | 203 | ||
242 | /* set the hardware chain */ | 204 | /* set the hardware chain */ |
243 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); | 205 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); |
244 | 206 | ||
245 | mv_chan->pending += sw_desc->slot_cnt; | 207 | mv_chan->pending++; |
246 | mv_xor_issue_pending(&mv_chan->dmachan); | 208 | mv_xor_issue_pending(&mv_chan->dmachan); |
247 | } | 209 | } |
248 | 210 | ||
@@ -263,8 +225,6 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
263 | desc->async_tx.callback_param); | 225 | desc->async_tx.callback_param); |
264 | 226 | ||
265 | dma_descriptor_unmap(&desc->async_tx); | 227 | dma_descriptor_unmap(&desc->async_tx); |
266 | if (desc->group_head) | ||
267 | desc->group_head = NULL; | ||
268 | } | 228 | } |
269 | 229 | ||
270 | /* run dependent operations */ | 230 | /* run dependent operations */ |
@@ -377,19 +337,16 @@ static void mv_xor_tasklet(unsigned long data) | |||
377 | } | 337 | } |
378 | 338 | ||
379 | static struct mv_xor_desc_slot * | 339 | static struct mv_xor_desc_slot * |
380 | mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, | 340 | mv_xor_alloc_slot(struct mv_xor_chan *mv_chan) |
381 | int slots_per_op) | ||
382 | { | 341 | { |
383 | struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; | 342 | struct mv_xor_desc_slot *iter, *_iter; |
384 | LIST_HEAD(chain); | 343 | int retry = 0; |
385 | int slots_found, retry = 0; | ||
386 | 344 | ||
387 | /* start search from the last allocated descrtiptor | 345 | /* start search from the last allocated descrtiptor |
388 | * if a contiguous allocation can not be found start searching | 346 | * if a contiguous allocation can not be found start searching |
389 | * from the beginning of the list | 347 | * from the beginning of the list |
390 | */ | 348 | */ |
391 | retry: | 349 | retry: |
392 | slots_found = 0; | ||
393 | if (retry == 0) | 350 | if (retry == 0) |
394 | iter = mv_chan->last_used; | 351 | iter = mv_chan->last_used; |
395 | else | 352 | else |
@@ -399,55 +356,29 @@ retry: | |||
399 | 356 | ||
400 | list_for_each_entry_safe_continue( | 357 | list_for_each_entry_safe_continue( |
401 | iter, _iter, &mv_chan->all_slots, slot_node) { | 358 | iter, _iter, &mv_chan->all_slots, slot_node) { |
359 | |||
402 | prefetch(_iter); | 360 | prefetch(_iter); |
403 | prefetch(&_iter->async_tx); | 361 | prefetch(&_iter->async_tx); |
404 | if (iter->slots_per_op) { | 362 | if (iter->slot_used) { |
405 | /* give up after finding the first busy slot | 363 | /* give up after finding the first busy slot |
406 | * on the second pass through the list | 364 | * on the second pass through the list |
407 | */ | 365 | */ |
408 | if (retry) | 366 | if (retry) |
409 | break; | 367 | break; |
410 | |||
411 | slots_found = 0; | ||
412 | continue; | 368 | continue; |
413 | } | 369 | } |
414 | 370 | ||
415 | /* start the allocation if the slot is correctly aligned */ | 371 | /* pre-ack descriptor */ |
416 | if (!slots_found++) | 372 | async_tx_ack(&iter->async_tx); |
417 | alloc_start = iter; | 373 | |
418 | 374 | iter->slot_used = 1; | |
419 | if (slots_found == num_slots) { | 375 | INIT_LIST_HEAD(&iter->chain_node); |
420 | struct mv_xor_desc_slot *alloc_tail = NULL; | 376 | iter->async_tx.cookie = -EBUSY; |
421 | struct mv_xor_desc_slot *last_used = NULL; | 377 | mv_chan->last_used = iter; |
422 | iter = alloc_start; | 378 | mv_desc_clear_next_desc(iter); |
423 | while (num_slots) { | 379 | |
424 | int i; | 380 | return iter; |
425 | 381 | ||
426 | /* pre-ack all but the last descriptor */ | ||
427 | async_tx_ack(&iter->async_tx); | ||
428 | |||
429 | list_add_tail(&iter->chain_node, &chain); | ||
430 | alloc_tail = iter; | ||
431 | iter->async_tx.cookie = 0; | ||
432 | iter->slot_cnt = num_slots; | ||
433 | iter->xor_check_result = NULL; | ||
434 | for (i = 0; i < slots_per_op; i++) { | ||
435 | iter->slots_per_op = slots_per_op - i; | ||
436 | last_used = iter; | ||
437 | iter = list_entry(iter->slot_node.next, | ||
438 | struct mv_xor_desc_slot, | ||
439 | slot_node); | ||
440 | } | ||
441 | num_slots -= slots_per_op; | ||
442 | } | ||
443 | alloc_tail->group_head = alloc_start; | ||
444 | alloc_tail->async_tx.cookie = -EBUSY; | ||
445 | list_splice(&chain, &alloc_tail->tx_list); | ||
446 | mv_chan->last_used = last_used; | ||
447 | mv_desc_clear_next_desc(alloc_start); | ||
448 | mv_desc_clear_next_desc(alloc_tail); | ||
449 | return alloc_tail; | ||
450 | } | ||
451 | } | 382 | } |
452 | if (!retry++) | 383 | if (!retry++) |
453 | goto retry; | 384 | goto retry; |
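For readability, here is the allocator as it reads after this patch, reassembled as a sketch from the hunks above. Lines the diff elides (the list-head reset in the else branch and the failure tail) are approximated from context, not verbatim. The all_slots pool it walks holds MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE descriptors, i.e. 64 slots with 4 KiB pages.

	static struct mv_xor_desc_slot *
	mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
	{
		struct mv_xor_desc_slot *iter, *_iter;
		int retry = 0;

		/* start search from the last allocated descriptor
		 * if a contiguous allocation cannot be found, start searching
		 * from the beginning of the list
		 */
	retry:
		if (retry == 0)
			iter = mv_chan->last_used;
		else	/* approximated: restart from the list head */
			iter = list_entry(&mv_chan->all_slots,
					  struct mv_xor_desc_slot, slot_node);

		list_for_each_entry_safe_continue(
			iter, _iter, &mv_chan->all_slots, slot_node) {

			prefetch(_iter);
			prefetch(&_iter->async_tx);
			if (iter->slot_used) {
				/* give up after finding the first busy slot
				 * on the second pass through the list
				 */
				if (retry)
					break;
				continue;
			}

			/* pre-ack descriptor */
			async_tx_ack(&iter->async_tx);

			iter->slot_used = 1;
			INIT_LIST_HEAD(&iter->chain_node);
			iter->async_tx.cookie = -EBUSY;
			mv_chan->last_used = iter;
			mv_desc_clear_next_desc(iter);

			return iter;
		}
		if (!retry++)
			goto retry;

		/* approximated failure tail: let the tasklet reclaim slots */
		tasklet_schedule(&mv_chan->irq_tasklet);

		return NULL;
	}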
@@ -464,7 +395,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
464 | { | 395 | { |
465 | struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); | 396 | struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); |
466 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); | 397 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); |
467 | struct mv_xor_desc_slot *grp_start, *old_chain_tail; | 398 | struct mv_xor_desc_slot *old_chain_tail; |
468 | dma_cookie_t cookie; | 399 | dma_cookie_t cookie; |
469 | int new_hw_chain = 1; | 400 | int new_hw_chain = 1; |
470 | 401 | ||
@@ -472,30 +403,24 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
472 | "%s sw_desc %p: async_tx %p\n", | 403 | "%s sw_desc %p: async_tx %p\n", |
473 | __func__, sw_desc, &sw_desc->async_tx); | 404 | __func__, sw_desc, &sw_desc->async_tx); |
474 | 405 | ||
475 | grp_start = sw_desc->group_head; | ||
476 | |||
477 | spin_lock_bh(&mv_chan->lock); | 406 | spin_lock_bh(&mv_chan->lock); |
478 | cookie = dma_cookie_assign(tx); | 407 | cookie = dma_cookie_assign(tx); |
479 | 408 | ||
480 | if (list_empty(&mv_chan->chain)) | 409 | if (list_empty(&mv_chan->chain)) |
481 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); | 410 | list_add_tail(&sw_desc->chain_node, &mv_chan->chain); |
482 | else { | 411 | else { |
483 | new_hw_chain = 0; | 412 | new_hw_chain = 0; |
484 | 413 | ||
485 | old_chain_tail = list_entry(mv_chan->chain.prev, | 414 | old_chain_tail = list_entry(mv_chan->chain.prev, |
486 | struct mv_xor_desc_slot, | 415 | struct mv_xor_desc_slot, |
487 | chain_node); | 416 | chain_node); |
488 | list_splice_init(&grp_start->tx_list, | 417 | list_add_tail(&sw_desc->chain_node, &mv_chan->chain); |
489 | &old_chain_tail->chain_node); | ||
490 | |||
491 | if (!mv_can_chain(grp_start)) | ||
492 | goto submit_done; | ||
493 | 418 | ||
494 | dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", | 419 | dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", |
495 | &old_chain_tail->async_tx.phys); | 420 | &old_chain_tail->async_tx.phys); |
496 | 421 | ||
497 | /* fix up the hardware chain */ | 422 | /* fix up the hardware chain */ |
498 | mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); | 423 | mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys); |
499 | 424 | ||
500 | /* if the channel is not busy */ | 425 | /* if the channel is not busy */ |
501 | if (!mv_chan_is_busy(mv_chan)) { | 426 | if (!mv_chan_is_busy(mv_chan)) { |
@@ -510,9 +435,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
510 | } | 435 | } |
511 | 436 | ||
512 | if (new_hw_chain) | 437 | if (new_hw_chain) |
513 | mv_xor_start_new_chain(mv_chan, grp_start); | 438 | mv_xor_start_new_chain(mv_chan, sw_desc); |
514 | 439 | ||
515 | submit_done: | ||
516 | spin_unlock_bh(&mv_chan->lock); | 440 | spin_unlock_bh(&mv_chan->lock); |
517 | 441 | ||
518 | return cookie; | 442 | return cookie; |
@@ -533,8 +457,9 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
533 | while (idx < num_descs_in_pool) { | 457 | while (idx < num_descs_in_pool) { |
534 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | 458 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); |
535 | if (!slot) { | 459 | if (!slot) { |
536 | printk(KERN_INFO "MV XOR Channel only initialized" | 460 | dev_info(mv_chan_to_devp(mv_chan), |
537 | " %d descriptor slots", idx); | 461 | "channel only initialized %d descriptor slots", |
462 | idx); | ||
538 | break; | 463 | break; |
539 | } | 464 | } |
540 | virt_desc = mv_chan->dma_desc_pool_virt; | 465 | virt_desc = mv_chan->dma_desc_pool_virt; |
@@ -544,7 +469,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
544 | slot->async_tx.tx_submit = mv_xor_tx_submit; | 469 | slot->async_tx.tx_submit = mv_xor_tx_submit; |
545 | INIT_LIST_HEAD(&slot->chain_node); | 470 | INIT_LIST_HEAD(&slot->chain_node); |
546 | INIT_LIST_HEAD(&slot->slot_node); | 471 | INIT_LIST_HEAD(&slot->slot_node); |
547 | INIT_LIST_HEAD(&slot->tx_list); | ||
548 | dma_desc = mv_chan->dma_desc_pool; | 472 | dma_desc = mv_chan->dma_desc_pool; |
549 | slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; | 473 | slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; |
550 | slot->idx = idx++; | 474 | slot->idx = idx++; |
@@ -568,51 +492,11 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
568 | } | 492 | } |
569 | 493 | ||
570 | static struct dma_async_tx_descriptor * | 494 | static struct dma_async_tx_descriptor * |
571 | mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
572 | size_t len, unsigned long flags) | ||
573 | { | ||
574 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
575 | struct mv_xor_desc_slot *sw_desc, *grp_start; | ||
576 | int slot_cnt; | ||
577 | |||
578 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
579 | "%s dest: %pad src %pad len: %u flags: %ld\n", | ||
580 | __func__, &dest, &src, len, flags); | ||
581 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | ||
582 | return NULL; | ||
583 | |||
584 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); | ||
585 | |||
586 | spin_lock_bh(&mv_chan->lock); | ||
587 | slot_cnt = mv_chan_memcpy_slot_count(len); | ||
588 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
589 | if (sw_desc) { | ||
590 | sw_desc->type = DMA_MEMCPY; | ||
591 | sw_desc->async_tx.flags = flags; | ||
592 | grp_start = sw_desc->group_head; | ||
593 | mv_desc_init(grp_start, flags); | ||
594 | mv_desc_set_byte_count(grp_start, len); | ||
595 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
596 | mv_desc_set_src_addr(grp_start, 0, src); | ||
597 | sw_desc->unmap_src_cnt = 1; | ||
598 | sw_desc->unmap_len = len; | ||
599 | } | ||
600 | spin_unlock_bh(&mv_chan->lock); | ||
601 | |||
602 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
603 | "%s sw_desc %p async_tx %p\n", | ||
604 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); | ||
605 | |||
606 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
607 | } | ||
608 | |||
609 | static struct dma_async_tx_descriptor * | ||
610 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 495 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
611 | unsigned int src_cnt, size_t len, unsigned long flags) | 496 | unsigned int src_cnt, size_t len, unsigned long flags) |
612 | { | 497 | { |
613 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 498 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
614 | struct mv_xor_desc_slot *sw_desc, *grp_start; | 499 | struct mv_xor_desc_slot *sw_desc; |
615 | int slot_cnt; | ||
616 | 500 | ||
617 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 501 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
618 | return NULL; | 502 | return NULL; |
@@ -624,20 +508,13 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
624 | __func__, src_cnt, len, &dest, flags); | 508 | __func__, src_cnt, len, &dest, flags); |
625 | 509 | ||
626 | spin_lock_bh(&mv_chan->lock); | 510 | spin_lock_bh(&mv_chan->lock); |
627 | slot_cnt = mv_chan_xor_slot_count(len, src_cnt); | 511 | sw_desc = mv_xor_alloc_slot(mv_chan); |
628 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
629 | if (sw_desc) { | 512 | if (sw_desc) { |
630 | sw_desc->type = DMA_XOR; | 513 | sw_desc->type = DMA_XOR; |
631 | sw_desc->async_tx.flags = flags; | 514 | sw_desc->async_tx.flags = flags; |
632 | grp_start = sw_desc->group_head; | 515 | mv_desc_init(sw_desc, dest, len, flags); |
633 | mv_desc_init(grp_start, flags); | ||
634 | /* the byte count field is the same as in memcpy desc*/ | ||
635 | mv_desc_set_byte_count(grp_start, len); | ||
636 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
637 | sw_desc->unmap_src_cnt = src_cnt; | ||
638 | sw_desc->unmap_len = len; | ||
639 | while (src_cnt--) | 516 | while (src_cnt--) |
640 | mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); | 517 | mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]); |
641 | } | 518 | } |
642 | spin_unlock_bh(&mv_chan->lock); | 519 | spin_unlock_bh(&mv_chan->lock); |
643 | dev_dbg(mv_chan_to_devp(mv_chan), | 520 | dev_dbg(mv_chan_to_devp(mv_chan), |
@@ -646,6 +523,35 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
646 | return sw_desc ? &sw_desc->async_tx : NULL; | 523 | return sw_desc ? &sw_desc->async_tx : NULL; |
647 | } | 524 | } |
648 | 525 | ||
526 | static struct dma_async_tx_descriptor * | ||
527 | mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
528 | size_t len, unsigned long flags) | ||
529 | { | ||
530 | /* | ||
531 | * A MEMCPY operation is identical to an XOR operation with only | ||
532 | * a single source address. | ||
533 | */ | ||
534 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); | ||
535 | } | ||
536 | |||
537 | static struct dma_async_tx_descriptor * | ||
538 | mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | ||
539 | { | ||
540 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
541 | dma_addr_t src, dest; | ||
542 | size_t len; | ||
543 | |||
544 | src = mv_chan->dummy_src_addr; | ||
545 | dest = mv_chan->dummy_dst_addr; | ||
546 | len = MV_XOR_MIN_BYTE_COUNT; | ||
547 | |||
548 | /* | ||
549 | * We implement the DMA_INTERRUPT operation as a minimum sized | ||
550 | * XOR operation with a single dummy source address. | ||
551 | */ | ||
552 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); | ||
553 | } | ||
554 | |||
649 | static void mv_xor_free_chan_resources(struct dma_chan *chan) | 555 | static void mv_xor_free_chan_resources(struct dma_chan *chan) |
650 | { | 556 | { |
651 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 557 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
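Since both new prep routines funnel into mv_xor_prep_dma_xor(), a dmaengine client simply sees ordinary DMA_MEMCPY and DMA_INTERRUPT capabilities. A minimal usage sketch follows; it is not part of this patch, example_memcpy() and its error handling are illustrative assumptions, and transfers shorter than MV_XOR_MIN_BYTE_COUNT (128 bytes) are rejected by the prep routine.

	#include <linux/dmaengine.h>

	/* Illustrative helper: copy len bytes through the XOR engine's
	 * memcpy capability; len must be >= MV_XOR_MIN_BYTE_COUNT. */
	static int example_memcpy(struct dma_chan *chan,
				  dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							  DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);

		/* dma_sync_wait() is the busy-wait helper from dmaengine.h;
		 * real clients would normally use a completion callback */
		return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
	}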
@@ -733,18 +639,16 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan) | |||
733 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, | 639 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, |
734 | u32 intr_cause) | 640 | u32 intr_cause) |
735 | { | 641 | { |
736 | if (intr_cause & (1 << 4)) { | 642 | if (intr_cause & XOR_INT_ERR_DECODE) { |
737 | dev_dbg(mv_chan_to_devp(chan), | 643 | dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n"); |
738 | "ignore this error\n"); | 644 | return; |
739 | return; | ||
740 | } | 645 | } |
741 | 646 | ||
742 | dev_err(mv_chan_to_devp(chan), | 647 | dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n", |
743 | "error on chan %d. intr cause 0x%08x\n", | ||
744 | chan->idx, intr_cause); | 648 | chan->idx, intr_cause); |
745 | 649 | ||
746 | mv_dump_xor_regs(chan); | 650 | mv_dump_xor_regs(chan); |
747 | BUG(); | 651 | WARN_ON(1); |
748 | } | 652 | } |
749 | 653 | ||
750 | static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) | 654 | static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) |
@@ -754,7 +658,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) | |||
754 | 658 | ||
755 | dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); | 659 | dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); |
756 | 660 | ||
757 | if (mv_is_err_intr(intr_cause)) | 661 | if (intr_cause & XOR_INTR_ERRORS) |
758 | mv_xor_err_interrupt_handler(chan, intr_cause); | 662 | mv_xor_err_interrupt_handler(chan, intr_cause); |
759 | 663 | ||
760 | tasklet_schedule(&chan->irq_tasklet); | 664 | tasklet_schedule(&chan->irq_tasklet); |
@@ -1041,6 +945,10 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) | |||
1041 | 945 | ||
1042 | dma_free_coherent(dev, MV_XOR_POOL_SIZE, | 946 | dma_free_coherent(dev, MV_XOR_POOL_SIZE, |
1043 | mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); | 947 | mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); |
948 | dma_unmap_single(dev, mv_chan->dummy_src_addr, | ||
949 | MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); | ||
950 | dma_unmap_single(dev, mv_chan->dummy_dst_addr, | ||
951 | MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); | ||
1044 | 952 | ||
1045 | list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, | 953 | list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, |
1046 | device_node) { | 954 | device_node) { |
@@ -1070,6 +978,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1070 | 978 | ||
1071 | dma_dev = &mv_chan->dmadev; | 979 | dma_dev = &mv_chan->dmadev; |
1072 | 980 | ||
981 | /* | ||
982 | * These source and destination dummy buffers are used to implement | ||
983 | * a DMA_INTERRUPT operation as a minimum-sized XOR operation. | ||
984 | * Hence, we only need to map the buffers at initialization-time. | ||
985 | */ | ||
986 | mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev, | ||
987 | mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); | ||
988 | mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev, | ||
989 | mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); | ||
990 | |||
1073 | /* allocate coherent memory for hardware descriptors | 991 | /* allocate coherent memory for hardware descriptors |
1074 | * note: writecombine gives slightly better performance, but | 992 | * note: writecombine gives slightly better performance, but |
1075 | * requires that we explicitly flush the writes | 993 | * requires that we explicitly flush the writes |
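One caveat with the new one-time mappings: dma_map_single() can fail, and the hunk above does not check for that. A more defensive variant would read as below (a sketch only; the ERR_PTR()-based unwinding is an assumption about how mv_xor_channel_add() reports failure).

	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
		return ERR_PTR(-ENOMEM);

	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
		dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
				 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
		return ERR_PTR(-ENOMEM);
	}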
@@ -1094,6 +1012,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1094 | dma_dev->dev = &pdev->dev; | 1012 | dma_dev->dev = &pdev->dev; |
1095 | 1013 | ||
1096 | /* set prep routines based on capability */ | 1014 | /* set prep routines based on capability */ |
1015 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) | ||
1016 | dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; | ||
1097 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | 1017 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) |
1098 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; | 1018 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; |
1099 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1019 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
@@ -1116,7 +1036,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1116 | 1036 | ||
1117 | mv_chan_unmask_interrupts(mv_chan); | 1037 | mv_chan_unmask_interrupts(mv_chan); |
1118 | 1038 | ||
1119 | mv_set_mode(mv_chan, DMA_MEMCPY); | 1039 | mv_set_mode(mv_chan, DMA_XOR); |
1120 | 1040 | ||
1121 | spin_lock_init(&mv_chan->lock); | 1041 | spin_lock_init(&mv_chan->lock); |
1122 | INIT_LIST_HEAD(&mv_chan->chain); | 1042 | INIT_LIST_HEAD(&mv_chan->chain); |
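Because memcpy and interrupt descriptors are now just XOR descriptors, the channel mode can be programmed once here instead of per descriptor; the old per-transaction mv_set_mode() call in mv_xor_start_new_chain() (first hunk above) is gone. A sketch of what that one-time switch amounts to, assuming the mode field occupies the low bits of XOR_CONFIG as in the unmodified mv_set_mode():

	u32 config = readl_relaxed(XOR_CONFIG(mv_chan));

	config &= ~0x7;				/* operation mode field */
	config |= XOR_OPERATION_MODE_XOR;	/* 0, per mv_xor.h */
	writel_relaxed(config, XOR_CONFIG(mv_chan));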
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index d0749229c875..78edc7e44569 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -23,17 +23,22 @@ | |||
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | 25 | ||
26 | #define USE_TIMER | ||
27 | #define MV_XOR_POOL_SIZE PAGE_SIZE | 26 | #define MV_XOR_POOL_SIZE PAGE_SIZE |
28 | #define MV_XOR_SLOT_SIZE 64 | 27 | #define MV_XOR_SLOT_SIZE 64 |
29 | #define MV_XOR_THRESHOLD 1 | 28 | #define MV_XOR_THRESHOLD 1 |
30 | #define MV_XOR_MAX_CHANNELS 2 | 29 | #define MV_XOR_MAX_CHANNELS 2 |
31 | 30 | ||
31 | #define MV_XOR_MIN_BYTE_COUNT SZ_128 | ||
32 | #define MV_XOR_MAX_BYTE_COUNT (SZ_16M - 1) | ||
33 | |||
32 | /* Values for the XOR_CONFIG register */ | 34 | /* Values for the XOR_CONFIG register */ |
33 | #define XOR_OPERATION_MODE_XOR 0 | 35 | #define XOR_OPERATION_MODE_XOR 0 |
34 | #define XOR_OPERATION_MODE_MEMCPY 2 | 36 | #define XOR_OPERATION_MODE_MEMCPY 2 |
35 | #define XOR_DESCRIPTOR_SWAP BIT(14) | 37 | #define XOR_DESCRIPTOR_SWAP BIT(14) |
36 | 38 | ||
39 | #define XOR_DESC_DMA_OWNED BIT(31) | ||
40 | #define XOR_DESC_EOD_INT_EN BIT(31) | ||
41 | |||
37 | #define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) | 42 | #define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) |
38 | #define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) | 43 | #define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) |
39 | #define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) | 44 | #define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) |
@@ -48,7 +53,24 @@ | |||
48 | #define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40) | 53 | #define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40) |
49 | #define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50) | 54 | #define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50) |
50 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) | 55 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) |
51 | #define XOR_INTR_MASK_VALUE 0x3F5 | 56 | |
57 | #define XOR_INT_END_OF_DESC BIT(0) | ||
58 | #define XOR_INT_END_OF_CHAIN BIT(1) | ||
59 | #define XOR_INT_STOPPED BIT(2) | ||
60 | #define XOR_INT_PAUSED BIT(3) | ||
61 | #define XOR_INT_ERR_DECODE BIT(4) | ||
62 | #define XOR_INT_ERR_RDPROT BIT(5) | ||
63 | #define XOR_INT_ERR_WRPROT BIT(6) | ||
64 | #define XOR_INT_ERR_OWN BIT(7) | ||
65 | #define XOR_INT_ERR_PAR BIT(8) | ||
66 | #define XOR_INT_ERR_MBUS BIT(9) | ||
67 | |||
68 | #define XOR_INTR_ERRORS (XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \ | ||
69 | XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN | \ | ||
70 | XOR_INT_ERR_PAR | XOR_INT_ERR_MBUS) | ||
71 | |||
72 | #define XOR_INTR_MASK_VALUE (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \ | ||
73 | XOR_INT_STOPPED | XOR_INTR_ERRORS) | ||
52 | 74 | ||
53 | #define WINDOW_BASE(w) (0x50 + ((w) << 2)) | 75 | #define WINDOW_BASE(w) (0x50 + ((w) << 2)) |
54 | #define WINDOW_SIZE(w) (0x70 + ((w) << 2)) | 76 | #define WINDOW_SIZE(w) (0x70 + ((w) << 2)) |
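The magic 0x3F5 mask and the bare 1 << 4 test in the error handler are replaced by these named bits. A small sketch of how an error cause word can now be decoded readably; the helper itself is illustrative, not part of the patch, and assumes only the defines above.

	static const char *mv_xor_err_name(u32 intr_cause)
	{
		if (intr_cause & XOR_INT_ERR_DECODE)
			return "address decode";
		if (intr_cause & XOR_INT_ERR_RDPROT)
			return "read protection";
		if (intr_cause & XOR_INT_ERR_WRPROT)
			return "write protection";
		if (intr_cause & XOR_INT_ERR_OWN)
			return "descriptor ownership";
		if (intr_cause & XOR_INT_ERR_PAR)
			return "parity";
		if (intr_cause & XOR_INT_ERR_MBUS)
			return "mbus";
		return "none";
	}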
@@ -97,10 +119,9 @@ struct mv_xor_chan { | |||
97 | struct list_head all_slots; | 119 | struct list_head all_slots; |
98 | int slots_allocated; | 120 | int slots_allocated; |
99 | struct tasklet_struct irq_tasklet; | 121 | struct tasklet_struct irq_tasklet; |
100 | #ifdef USE_TIMER | 122 | char dummy_src[MV_XOR_MIN_BYTE_COUNT]; |
101 | unsigned long cleanup_time; | 123 | char dummy_dst[MV_XOR_MIN_BYTE_COUNT]; |
102 | u32 current_on_last_cleanup; | 124 | dma_addr_t dummy_src_addr, dummy_dst_addr; |
103 | #endif | ||
104 | }; | 125 | }; |
105 | 126 | ||
106 | /** | 127 | /** |
@@ -110,16 +131,10 @@ struct mv_xor_chan { | |||
110 | * @completed_node: node on the mv_xor_chan.completed_slots list | 131 | * @completed_node: node on the mv_xor_chan.completed_slots list |
111 | * @hw_desc: virtual address of the hardware descriptor chain | 132 | * @hw_desc: virtual address of the hardware descriptor chain |
112 | * @phys: hardware address of the hardware descriptor chain | 133 | * @phys: hardware address of the hardware descriptor chain |
113 | * @group_head: first operation in a transaction | 134 | * @slot_used: slot in use or not |
114 | * @slot_cnt: total slots used in an transaction (group of operations) | ||
115 | * @slots_per_op: number of slots per operation | ||
116 | * @idx: pool index | 135 | * @idx: pool index |
117 | * @unmap_src_cnt: number of xor sources | ||
118 | * @unmap_len: transaction bytecount | ||
119 | * @tx_list: list of slots that make up a multi-descriptor transaction | 136 | * @tx_list: list of slots that make up a multi-descriptor transaction |
120 | * @async_tx: support for the async_tx api | 137 | * @async_tx: support for the async_tx api |
121 | * @xor_check_result: result of zero sum | ||
122 | * @crc32_result: result crc calculation | ||
123 | */ | 138 | */ |
124 | struct mv_xor_desc_slot { | 139 | struct mv_xor_desc_slot { |
125 | struct list_head slot_node; | 140 | struct list_head slot_node; |
@@ -127,23 +142,9 @@ struct mv_xor_desc_slot { | |||
127 | struct list_head completed_node; | 142 | struct list_head completed_node; |
128 | enum dma_transaction_type type; | 143 | enum dma_transaction_type type; |
129 | void *hw_desc; | 144 | void *hw_desc; |
130 | struct mv_xor_desc_slot *group_head; | 145 | u16 slot_used; |
131 | u16 slot_cnt; | ||
132 | u16 slots_per_op; | ||
133 | u16 idx; | 146 | u16 idx; |
134 | u16 unmap_src_cnt; | ||
135 | u32 value; | ||
136 | size_t unmap_len; | ||
137 | struct list_head tx_list; | ||
138 | struct dma_async_tx_descriptor async_tx; | 147 | struct dma_async_tx_descriptor async_tx; |
139 | union { | ||
140 | u32 *xor_check_result; | ||
141 | u32 *crc32_result; | ||
142 | }; | ||
143 | #ifdef USE_TIMER | ||
144 | unsigned long arrival_time; | ||
145 | struct timer_list timeout; | ||
146 | #endif | ||
147 | }; | 148 | }; |
148 | 149 | ||
149 | /* | 150 | /* |
@@ -189,9 +190,4 @@ struct mv_xor_desc { | |||
189 | #define mv_hw_desc_slot_idx(hw_desc, idx) \ | 190 | #define mv_hw_desc_slot_idx(hw_desc, idx) \ |
190 | ((void *)(((unsigned long)hw_desc) + ((idx) << 5))) | 191 | ((void *)(((unsigned long)hw_desc) + ((idx) << 5))) |
191 | 192 | ||
192 | #define MV_XOR_MIN_BYTE_COUNT (128) | ||
193 | #define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1) | ||
194 | #define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT | ||
195 | |||
196 | |||
197 | #endif | 193 | #endif |
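Net effect on the software descriptor: all multi-slot bookkeeping collapses into a single busy flag. A sketch of the resulting struct, reassembled from the hunks above (the chain_node member sits in an elided context line and is inferred from its INIT_LIST_HEAD() use in mv_xor_alloc_chan_resources()):

	struct mv_xor_desc_slot {
		struct list_head		slot_node;
		struct list_head		chain_node;	/* inferred */
		struct list_head		completed_node;
		enum dma_transaction_type	type;
		void				*hw_desc;
		u16				slot_used;
		u16				idx;
		struct dma_async_tx_descriptor	async_tx;
	};

One leftover: the kernel-doc above still documents @tx_list even though the field is removed by this patch, so that entry is now stale and could go in a follow-up cleanup.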
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index d5149aacd2fe..4839bfa74a10 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -1367,17 +1367,10 @@ static int pl330_submit_req(struct pl330_thread *thrd, | |||
1367 | struct pl330_dmac *pl330 = thrd->dmac; | 1367 | struct pl330_dmac *pl330 = thrd->dmac; |
1368 | struct _xfer_spec xs; | 1368 | struct _xfer_spec xs; |
1369 | unsigned long flags; | 1369 | unsigned long flags; |
1370 | void __iomem *regs; | ||
1371 | unsigned idx; | 1370 | unsigned idx; |
1372 | u32 ccr; | 1371 | u32 ccr; |
1373 | int ret = 0; | 1372 | int ret = 0; |
1374 | 1373 | ||
1375 | /* No Req or Unacquired Channel or DMAC */ | ||
1376 | if (!desc || !thrd || thrd->free) | ||
1377 | return -EINVAL; | ||
1378 | |||
1379 | regs = thrd->dmac->base; | ||
1380 | |||
1381 | if (pl330->state == DYING | 1374 | if (pl330->state == DYING |
1382 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | 1375 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { |
1383 | dev_info(thrd->dmac->ddma.dev, "%s:%d\n", | 1376 | dev_info(thrd->dmac->ddma.dev, "%s:%d\n", |
@@ -2755,8 +2748,10 @@ probe_err3: | |||
2755 | list_del(&pch->chan.device_node); | 2748 | list_del(&pch->chan.device_node); |
2756 | 2749 | ||
2757 | /* Flush the channel */ | 2750 | /* Flush the channel */ |
2758 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | 2751 | if (pch->thread) { |
2759 | pl330_free_chan_resources(&pch->chan); | 2752 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); |
2753 | pl330_free_chan_resources(&pch->chan); | ||
2754 | } | ||
2760 | } | 2755 | } |
2761 | probe_err2: | 2756 | probe_err2: |
2762 | pl330_del(pl330); | 2757 | pl330_del(pl330); |
@@ -2782,8 +2777,10 @@ static int pl330_remove(struct amba_device *adev) | |||
2782 | list_del(&pch->chan.device_node); | 2777 | list_del(&pch->chan.device_node); |
2783 | 2778 | ||
2784 | /* Flush the channel */ | 2779 | /* Flush the channel */ |
2785 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | 2780 | if (pch->thread) { |
2786 | pl330_free_chan_resources(&pch->chan); | 2781 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); |
2782 | pl330_free_chan_resources(&pch->chan); | ||
2783 | } | ||
2787 | } | 2784 | } |
2788 | 2785 | ||
2789 | pl330_del(pl330); | 2786 | pl330_del(pl330); |
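In both unwind paths the flush is now gated on pch->thread, since channels whose thread was never allocated (the probe_err3 label unwinds a partially initialized device) would otherwise have pl330_control() and pl330_free_chan_resources() dereference a NULL thread. Sketch of the resulting teardown loop, with the loop construct approximated from context around the hunks above:

	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
				 chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
			pl330_free_chan_resources(&pch->chan);
		}
	}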
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index dabbf0aba2e9..80fd2aeb4870 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c | |||
@@ -117,7 +117,7 @@ static void audmapp_start_xfer(struct shdma_chan *schan, | |||
117 | audmapp_write(auchan, chcr, PDMACHCR); | 117 | audmapp_write(auchan, chcr, PDMACHCR); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id, | 120 | static int audmapp_get_config(struct audmapp_chan *auchan, int slave_id, |
121 | u32 *chcr, dma_addr_t *dst) | 121 | u32 *chcr, dma_addr_t *dst) |
122 | { | 122 | { |
123 | struct audmapp_device *audev = to_dev(auchan); | 123 | struct audmapp_device *audev = to_dev(auchan); |
@@ -131,20 +131,22 @@ static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id, | |||
131 | if (!pdata) { /* DT */ | 131 | if (!pdata) { /* DT */ |
132 | *chcr = ((u32)slave_id) << 16; | 132 | *chcr = ((u32)slave_id) << 16; |
133 | auchan->shdma_chan.slave_id = (slave_id) >> 8; | 133 | auchan->shdma_chan.slave_id = (slave_id) >> 8; |
134 | return; | 134 | return 0; |
135 | } | 135 | } |
136 | 136 | ||
137 | /* non-DT */ | 137 | /* non-DT */ |
138 | 138 | ||
139 | if (slave_id >= AUDMAPP_SLAVE_NUMBER) | 139 | if (slave_id >= AUDMAPP_SLAVE_NUMBER) |
140 | return; | 140 | return -ENXIO; |
141 | 141 | ||
142 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | 142 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) |
143 | if (cfg->slave_id == slave_id) { | 143 | if (cfg->slave_id == slave_id) { |
144 | *chcr = cfg->chcr; | 144 | *chcr = cfg->chcr; |
145 | *dst = cfg->dst; | 145 | *dst = cfg->dst; |
146 | break; | 146 | return 0; |
147 | } | 147 | } |
148 | |||
149 | return -ENXIO; | ||
148 | } | 150 | } |
149 | 151 | ||
150 | static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, | 152 | static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, |
@@ -153,8 +155,11 @@ static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, | |||
153 | struct audmapp_chan *auchan = to_chan(schan); | 155 | struct audmapp_chan *auchan = to_chan(schan); |
154 | u32 chcr; | 156 | u32 chcr; |
155 | dma_addr_t dst; | 157 | dma_addr_t dst; |
158 | int ret; | ||
156 | 159 | ||
157 | audmapp_get_config(auchan, slave_id, &chcr, &dst); | 160 | ret = audmapp_get_config(auchan, slave_id, &chcr, &dst); |
161 | if (ret < 0) | ||
162 | return ret; | ||
158 | 163 | ||
159 | if (try) | 164 | if (try) |
160 | return 0; | 165 | return 0; |
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 1f92a56fd2b6..3aa10b328254 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -862,7 +862,6 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
862 | { | 862 | { |
863 | struct sun6i_dma_dev *sdc; | 863 | struct sun6i_dma_dev *sdc; |
864 | struct resource *res; | 864 | struct resource *res; |
865 | struct clk *mux, *pll6; | ||
866 | int ret, i; | 865 | int ret, i; |
867 | 866 | ||
868 | sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); | 867 | sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); |
@@ -886,28 +885,6 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
886 | return PTR_ERR(sdc->clk); | 885 | return PTR_ERR(sdc->clk); |
887 | } | 886 | } |
888 | 887 | ||
889 | mux = clk_get(NULL, "ahb1_mux"); | ||
890 | if (IS_ERR(mux)) { | ||
891 | dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n"); | ||
892 | return PTR_ERR(mux); | ||
893 | } | ||
894 | |||
895 | pll6 = clk_get(NULL, "pll6"); | ||
896 | if (IS_ERR(pll6)) { | ||
897 | dev_err(&pdev->dev, "Couldn't get PLL6\n"); | ||
898 | clk_put(mux); | ||
899 | return PTR_ERR(pll6); | ||
900 | } | ||
901 | |||
902 | ret = clk_set_parent(mux, pll6); | ||
903 | clk_put(pll6); | ||
904 | clk_put(mux); | ||
905 | |||
906 | if (ret) { | ||
907 | dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n"); | ||
908 | return ret; | ||
909 | } | ||
910 | |||
911 | sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); | 888 | sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); |
912 | if (IS_ERR(sdc->rstc)) { | 889 | if (IS_ERR(sdc->rstc)) { |
913 | dev_err(&pdev->dev, "No reset controller specified\n"); | 890 | dev_err(&pdev->dev, "No reset controller specified\n"); |
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 42a13e8d4607..a6e64767186e 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c | |||
@@ -1365,7 +1365,6 @@ static const struct of_device_id xilinx_vdma_of_ids[] = { | |||
1365 | static struct platform_driver xilinx_vdma_driver = { | 1365 | static struct platform_driver xilinx_vdma_driver = { |
1366 | .driver = { | 1366 | .driver = { |
1367 | .name = "xilinx-vdma", | 1367 | .name = "xilinx-vdma", |
1368 | .owner = THIS_MODULE, | ||
1369 | .of_match_table = xilinx_vdma_of_ids, | 1368 | .of_match_table = xilinx_vdma_of_ids, |
1370 | }, | 1369 | }, |
1371 | .probe = xilinx_vdma_probe, | 1370 | .probe = xilinx_vdma_probe, |
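The xilinx_vdma change is part of a tree-wide cleanup: platform_driver_register() is now a macro expanding to __platform_driver_register(drv, THIS_MODULE), so the platform core fills in .owner itself and setting it in drivers is redundant. An illustrative sketch with hypothetical names (example_driver, example_of_ids, and the callbacks are not from this tree):

	static struct platform_driver example_driver = {
		.driver = {
			.name = "example",
			.of_match_table = example_of_ids,
			/* no .owner: platform_driver_register() supplies
			 * THIS_MODULE via __platform_driver_register() */
		},
		.probe = example_probe,
		.remove = example_remove,
	};
	module_platform_driver(example_driver);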