author    Linus Torvalds <torvalds@linux-foundation.org>  2012-01-17 21:40:24 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-01-17 21:40:24 -0500
commit    57f2685c16fa8e0cb86e4bc7c8ac33bfed943819 (patch)
tree      96a42fe632687c8486c250c4805bf1d4c9c34d19 /drivers/dma
parent    488a9d018256dc9f29e041c0360445b6d25eea9a (diff)
parent    e08b881a69d638175bfa99b5af4d72b731633ea7 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (53 commits)
ARM: mach-shmobile: specify CHCLR registers on SH7372
dma: shdma: fix runtime PM: clear channel buffers on reset
dma/imx-sdma: save irq flags when use spin_lock in sdma_tx_submit
dmaengine/ste_dma40: clear LNK on channel startup
dmaengine: intel_mid_dma: remove legacy pm interface
ASoC: mxs: correct 'direction' of device_prep_dma_cyclic
dmaengine: intel_mid_dma: error path fix
dmaengine: intel_mid_dma: locking and freeing fixes
mtd: gpmi-nand: move to dma_transfer_direction
mtd: fix compile error for gpmi-nand
mmc: mxs-mmc: fix the dma_transfer_direction migration
dmaengine: add DMA_TRANS_NONE to dma_transfer_direction
dma: mxs-dma: Don't use CLKGATE bits in CTRL0 to disable DMA channels
dma: mxs-dma: make mxs_dma_prep_slave_sg() multi user safe
dma: mxs-dma: Always leave mxs_dma_init() with the clock disabled.
dma: mxs-dma: fix a typo in comment
DMA: PL330: Remove pm_runtime_xxx calls from pl330 probe/remove
video i.MX IPU: Fix display connections
i.MX IPU DMA: Fix wrong burstsize settings
dmaengine/ste_dma40: allow fixed physical channel
...
Fix up conflicts in drivers/dma/{Kconfig,mxs-dma.c,pl330.c}
The conflicts looked pretty trivial, but I'll ask people to verify them.
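Most of the churn in the diff below is mechanical: the slave-dma API replaced enum dma_data_direction with a dedicated enum dma_transfer_direction, so every DMA_TO_DEVICE became DMA_MEM_TO_DEV and every DMA_FROM_DEVICE became DMA_DEV_TO_MEM. A minimal standalone sketch of the new enum's shape and the switch every slave driver carries; the enum mirrors <linux/dmaengine.h> as of this series, while pick_dev_addr() and its arguments are invented for illustration:

/* Mirrors enum dma_transfer_direction from <linux/dmaengine.h> in this
 * series; DMA_TRANS_NONE is added by one of the merged commits. */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,		/* memcpy-style transfers */
	DMA_MEM_TO_DEV,		/* formerly DMA_TO_DEVICE */
	DMA_DEV_TO_MEM,		/* formerly DMA_FROM_DEVICE */
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/* Hypothetical helper: the peripheral side of the transfer supplies
 * the FIFO address, the memory side supplies the buffer address. */
static int pick_dev_addr(enum dma_transfer_direction dir,
			 unsigned long mem, unsigned long fifo,
			 unsigned long *src, unsigned long *dst)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:	/* TX: memory is the source */
		*src = mem;
		*dst = fifo;
		return 0;
	case DMA_DEV_TO_MEM:	/* RX: memory is the destination */
		*src = fifo;
		*dst = mem;
		return 0;
	default:		/* slave transfers only */
		return -1;
	}
}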
Diffstat (limited to 'drivers/dma')
29 files changed, 1515 insertions, 393 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5a99bb3f255a..f1a274994bb1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
 
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
-	depends on SOC_IMX31 || SOC_IMX35
+	depends on ARCH_MXC
 	select DMA_ENGINE
 	default y
 	help
@@ -187,6 +187,13 @@ config TIMB_DMA
 	help
 	  Enable support for the Timberdale FPGA DMA engine.
 
+config SIRF_DMA
+	tristate "CSR SiRFprimaII DMA support"
+	depends on ARCH_PRIMA2
+	select DMA_ENGINE
+	help
+	  Enable support for the CSR SiRFprimaII DMA engine.
+
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	bool
 
@@ -201,26 +208,26 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
 	  Enable support for Intel EG20T PCH DMA engine.
 
-	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-	  Output Hub), ML7213 and ML7223.
-	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-	  for MP(Media Phone) use.
-	  ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-	  ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+	  This driver also can be used for LAPIS Semiconductor IOH(Input/
+	  Output Hub), ML7213, ML7223 and ML7831.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
-	depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
+	depends on ARCH_MXC
 	select DMA_ENGINE
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
-	  Freescale i.MX25/31/35/51 chips.
+	  Freescale i.MX25/31/35/51/53 chips.
 
 config IMX_DMA
 	tristate "i.MX DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1f0c5c..009a222e8283 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 0698695e8bf9..8a281584458b 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	int ret;
 
 	/* Check if we already have a channel */
-	if (plchan->phychan)
-		return 0;
+	if (plchan->phychan) {
+		ch = plchan->phychan;
+		goto got_channel;
+	}
 
 	ch = pl08x_get_phy_channel(pl08x, plchan);
 	if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			return -EBUSY;
 		}
 		ch->signal = ret;
-
-		/* Assign the flow control signal to this channel */
-		if (txd->direction == DMA_TO_DEVICE)
-			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
-		else if (txd->direction == DMA_FROM_DEVICE)
-			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
+	plchan->phychan = ch;
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
 		 ch->id,
 		 ch->signal,
 		 plchan->name);
 
+got_channel:
+	/* Assign the flow control signal to this channel */
+	if (txd->direction == DMA_MEM_TO_DEV)
+		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+	else if (txd->direction == DMA_DEV_TO_MEM)
+		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
 	plchan->phychan_hold++;
-	plchan->phychan = ch;
 
 	return 0;
 }
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
-	if (config->direction == DMA_TO_DEVICE) {
+	if (config->direction == DMA_MEM_TO_DEV) {
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
-	} else if (config->direction == DMA_FROM_DEVICE) {
+	} else if (config->direction == DMA_DEV_TO_MEM) {
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
-	if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
 		plchan->src_addr = config->src_addr;
 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
 			pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 		"configured channel %s (%s) for %s, data width %d, "
 		"maxburst %d words, LE, CCTL=0x%08x\n",
 		dma_chan_name(chan), plchan->name,
-		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
 		addr_width,
 		maxburst,
 		cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 */
 	txd->direction = direction;
 
-	if (direction == DMA_TO_DEVICE) {
+	if (direction == DMA_MEM_TO_DEV) {
 		txd->cctl = plchan->dst_cctl;
 		slave_addr = plchan->dst_addr;
-	} else if (direction == DMA_FROM_DEVICE) {
+	} else if (direction == DMA_DEV_TO_MEM) {
 		txd->cctl = plchan->src_cctl;
 		slave_addr = plchan->src_addr;
 	} else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	}
 
 	if (plchan->cd->device_fc)
-		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
 	else
-		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
 			PL080_FLOW_PER2MEM;
 
 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		list_add_tail(&dsg->node, &txd->dsg_list);
 
 		dsg->len = sg_dma_len(sg);
-		if (direction == DMA_TO_DEVICE) {
+		if (direction == DMA_MEM_TO_DEV) {
 			dsg->src_addr = sg_phys(sg);
 			dsg->dst_addr = slave_addr;
 		} else {
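The prep_phy_channel() rework above closes a subtle hole: when a transaction reused an already-held physical channel, the early "return 0" skipped programming the flow-control signal into that transaction's txd->ccfg. Moving the assignment behind a got_channel label makes both paths share it. A compressed standalone sketch of the control flow, with hypothetical stand-in types and constants (the real driver's allocation, signal negotiation, and locking are elided):

#define DST_SEL_SHIFT 6		/* stand-ins for PL080_CONFIG_*_SEL_SHIFT */
#define SRC_SEL_SHIFT 1

enum dir { MEM_TO_DEV, DEV_TO_MEM };

struct phychan { int signal; };
struct txd { enum dir direction; unsigned int ccfg; };
struct plchan { struct phychan *phychan; int phychan_hold; };

static struct phychan pool_chan = { .signal = 3 };	/* toy allocator */
static struct phychan *get_phy_channel(void) { return &pool_chan; }

static int prep_phy_channel_sketch(struct plchan *pl, struct txd *txd)
{
	struct phychan *ch;

	if (pl->phychan) {
		ch = pl->phychan;	/* reuse the held channel ... */
		goto got_channel;	/* ... but still program the signal */
	}

	ch = get_phy_channel();
	if (!ch)
		return -1;
	pl->phychan = ch;

got_channel:
	/* Runs on both paths now; the old code skipped it when reusing. */
	if (txd->direction == MEM_TO_DEV)
		txd->ccfg |= ch->signal << DST_SEL_SHIFT;
	else if (txd->direction == DEV_TO_MEM)
		txd->ccfg |= ch->signal << SRC_SEL_SHIFT;

	pl->phychan_hold++;
	return 0;
}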
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fcfa0a8b5c59..97f87b29b9f3 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "at_hdmac_regs.h"
 
@@ -660,7 +662,7 @@ err_desc_get:
  */
 static struct dma_async_tx_descriptor *
 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
 			sg_len,
-			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
 			flags);
 
 	if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	ctrlb = ATC_IEN;
 
 	switch (direction) {
-	case DMA_TO_DEVICE:
+	case DMA_MEM_TO_DEV:
 		ctrla |= ATC_DST_WIDTH(reg_width);
 		ctrlb |= ATC_DST_ADDR_MODE_FIXED
 			| ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			total_len += len;
 		}
 		break;
-	case DMA_FROM_DEVICE:
+	case DMA_DEV_TO_MEM:
 		ctrla |= ATC_SRC_WIDTH(reg_width);
 		ctrlb |= ATC_DST_ADDR_MODE_INCR
 			| ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
  */
 static int
 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	if (period_len > (ATC_BTSIZE_MAX << reg_width))
 		goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
 		goto err_out;
 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
 		goto err_out;
-	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
 		goto err_out;
 
 	return 0;
@@ -810,7 +812,7 @@ err_out:
 static int
 atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 		unsigned int period_index, dma_addr_t buf_addr,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	u32 ctrla;
 	unsigned int reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 		| period_len >> reg_width;
 
 	switch (direction) {
-	case DMA_TO_DEVICE:
+	case DMA_MEM_TO_DEV:
 		desc->lli.saddr = buf_addr + (period_len * period_index);
 		desc->lli.daddr = atslave->tx_reg;
 		desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 			| ATC_DIF(AT_DMA_PER_IF);
 		break;
 
-	case DMA_FROM_DEVICE:
+	case DMA_DEV_TO_MEM:
 		desc->lli.saddr = atslave->rx_reg;
 		desc->lli.daddr = buf_addr + (period_len * period_index);
 		desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
  */
 static struct dma_async_tx_descriptor *
 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma_slave *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	unsigned int i;
 
 	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
-			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
 			buf_addr,
 			periods, buf_len, period_len);
 
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 
 /*--  Module Management  -----------------------------------------------*/
 
+/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
+static struct at_dma_platform_data at91sam9rl_config = {
+	.nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+	.nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+	{
+		.compatible = "atmel,at91sam9rl-dma",
+		.data = &at91sam9rl_config,
+	}, {
+		.compatible = "atmel,at91sam9g45-dma",
+		.data = &at91sam9g45_config,
+	}, {
+		/* sentinel */
+	}
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+	{
+		.name = "at91sam9rl_dma",
+		.driver_data = (unsigned long) &at91sam9rl_config,
+	}, {
+		.name = "at91sam9g45_dma",
+		.driver_data = (unsigned long) &at91sam9g45_config,
+	}, {
+		/* sentinel */
+	}
+};
+
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+						struct platform_device *pdev)
+{
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+		if (match == NULL)
+			return NULL;
+		return match->data;
+	}
+	return (struct at_dma_platform_data *)
+			platform_get_device_id(pdev)->driver_data;
+}
+
 /**
  * at_dma_off - disable DMA controller
  * @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
 
 static int __init at_dma_probe(struct platform_device *pdev)
 {
-	struct at_dma_platform_data *pdata;
 	struct resource *io;
 	struct at_dma *atdma;
 	size_t size;
 	int irq;
 	int err;
 	int i;
+	struct at_dma_platform_data *plat_dat;
 
-	/* get DMA Controller parameters from platform */
-	pdata = pdev->dev.platform_data;
-	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
-		return -EINVAL;
+	/* setup platform data for each SoC */
+	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+	/* get DMA parameters from controller type */
+	plat_dat = at_dma_get_driver_data(pdev);
+	if (!plat_dat)
+		return -ENODEV;
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		return irq;
 
 	size = sizeof(struct at_dma);
-	size += pdata->nr_channels * sizeof(struct at_dma_chan);
+	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
 	atdma = kzalloc(size, GFP_KERNEL);
 	if (!atdma)
 		return -ENOMEM;
 
-	/* discover transaction capabilites from the platform data */
-	atdma->dma_common.cap_mask = pdata->cap_mask;
-	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+	/* discover transaction capabilities */
+	atdma->dma_common.cap_mask = plat_dat->cap_mask;
+	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
 
 	size = resource_size(io);
 	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 	/* initialize channels related values */
 	INIT_LIST_HEAD(&atdma->dma_common.channels);
-	for (i = 0; i < pdata->nr_channels; i++) {
+	for (i = 0; i < plat_dat->nr_channels; i++) {
 		struct at_dma_chan *atchan = &atdma->chan[i];
 
 		atchan->chan_common.device = &atdma->dma_common;
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-	  pdata->nr_channels);
+	  plat_dat->nr_channels);
 
 	dma_async_device_register(&atdma->dma_common);
 
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
 static struct platform_driver at_dma_driver = {
 	.remove		= __exit_p(at_dma_remove),
 	.shutdown	= at_dma_shutdown,
+	.id_table	= atdma_devtypes,
 	.driver = {
 		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
+		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
 	},
 };
 
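The at_hdmac rework above lets one driver bind through either the device tree (of_match_node()) or legacy platform IDs (platform_get_device_id()), resolving the per-SoC configuration from whichever table matched. A standalone analog of that dual-lookup idiom, stripped of kernel types; every name here is illustrative, not the driver's:

#include <stddef.h>
#include <string.h>

struct config { int nr_channels; };

struct of_id   { const char *compatible; const struct config *data; };
struct plat_id { const char *name;       const struct config *data; };

static const struct config rl_cfg  = { .nr_channels = 2 };
static const struct config g45_cfg = { .nr_channels = 8 };

static const struct of_id of_ids[] = {
	{ "atmel,at91sam9rl-dma",  &rl_cfg  },
	{ "atmel,at91sam9g45-dma", &g45_cfg },
	{ NULL, NULL },			/* sentinel */
};

static const struct plat_id plat_ids[] = {
	{ "at91sam9rl_dma",  &rl_cfg  },
	{ "at91sam9g45_dma", &g45_cfg },
	{ NULL, NULL },			/* sentinel */
};

/* dt_compat is non-NULL when the device came from the device tree. */
static const struct config *get_driver_data(const char *dt_compat,
					    const char *plat_name)
{
	int i;

	if (dt_compat) {		/* the of_match_node() path */
		for (i = 0; of_ids[i].compatible; i++)
			if (!strcmp(of_ids[i].compatible, dt_compat))
				return of_ids[i].data;
		return NULL;		/* probe returns -ENODEV */
	}
	for (i = 0; plat_ids[i].name; i++)	/* platform-ID fallback */
		if (!strcmp(plat_ids[i].name, plat_name))
			return plat_ids[i].data;
	return NULL;
}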
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index aa4c9aebab7c..dcaedfc181cf 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
 /**
  * struct at_dma - internal representation of an Atmel HDMA Controller
  * @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
  * @ch_regs: memory mapped register base
  * @clk: dma controller clock
  * @save_imr: interrupt mask register that is saved on suspend/resume cycle
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4234f416ef11..d65a718c0f9b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
 	struct scatterlist *sg;
 	unsigned int sg_len;
 	struct coh901318_lli *lli;
-	enum dma_data_direction dir;
+	enum dma_transfer_direction dir;
 	unsigned long flags;
 	u32 head_config;
 	u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 static struct dma_async_tx_descriptor *
 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-			unsigned int sg_len, enum dma_data_direction direction,
+			unsigned int sg_len, enum dma_transfer_direction direction,
 			unsigned long flags)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	ctrl_last |= cohc->runtime_ctrl;
 	ctrl |= cohc->runtime_ctrl;
 
-	if (direction == DMA_TO_DEVICE) {
+	if (direction == DMA_MEM_TO_DEV) {
 		u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
 			COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
 
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctrl_chained |= tx_flags;
 		ctrl_last |= tx_flags;
 		ctrl |= tx_flags;
-	} else if (direction == DMA_FROM_DEVICE) {
+	} else if (direction == DMA_DEV_TO_MEM) {
 		u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
 			COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
 
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 	int i = 0;
 
 	/* We only support mem to per or per to mem transfers */
-	if (config->direction == DMA_FROM_DEVICE) {
+	if (config->direction == DMA_DEV_TO_MEM) {
 		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
-	} else if (config->direction == DMA_TO_DEVICE) {
+	} else if (config->direction == DMA_MEM_TO_DEV) {
 		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
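Like pl08x's dma_set_runtime_config() above, coh901318 reads the peripheral-side parameters out of the half of dma_slave_config selected by config->direction. From the client side that looks roughly like the sketch below, assuming the standard dmaengine slave API of this kernel generation; the FIFO address and burst size are placeholders:

#include <linux/dmaengine.h>

/* Sketch: configure a channel for peripheral -> memory (RX). For
 * DMA_MEM_TO_DEV the dst_* fields would be filled in instead. */
static int setup_rx_channel(struct dma_chan *chan)
{
	struct dma_slave_config cfg = { };

	cfg.direction      = DMA_DEV_TO_MEM;
	cfg.src_addr       = 0x80010000;	/* hypothetical FIFO address */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst   = 8;

	return dmaengine_slave_config(chan, &cfg);
}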
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 9f7e0e6a7eea..6c0e2d4c6682 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -7,11 +7,10 @@
  * Author: Per Friden <per.friden@stericsson.com>
 */
 
-#include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
-#include <linux/dmapool.h>
 #include <linux/memory.h>
 #include <linux/gfp.h>
+#include <linux/dmapool.h>
 #include <mach/coh901318.h>
 
 #include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 			  struct coh901318_lli *lli,
 			  dma_addr_t buf, unsigned int size,
 			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
-			  enum dma_data_direction dir)
+			  enum dma_transfer_direction dir)
 {
 	int s = size;
 	dma_addr_t src;
 	dma_addr_t dst;
 
 
-	if (dir == DMA_TO_DEVICE) {
+	if (dir == DMA_MEM_TO_DEV) {
 		src = buf;
 		dst = dev_addr;
 
-	} else if (dir == DMA_FROM_DEVICE) {
+	} else if (dir == DMA_DEV_TO_MEM) {
 
 		src = dev_addr;
 		dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 
 		lli = coh901318_lli_next(lli);
 
-		if (dir == DMA_TO_DEVICE)
+		if (dir == DMA_MEM_TO_DEV)
 			src += block_size;
-		else if (dir == DMA_FROM_DEVICE)
+		else if (dir == DMA_DEV_TO_MEM)
 			dst += block_size;
 	}
 
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 		      struct scatterlist *sgl, unsigned int nents,
 		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
 		      u32 ctrl_last,
-		      enum dma_data_direction dir, u32 ctrl_irq_mask)
+		      enum dma_transfer_direction dir, u32 ctrl_irq_mask)
 {
 	int i;
 	struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 
 	spin_lock(&pool->lock);
 
-	if (dir == DMA_TO_DEVICE)
+	if (dir == DMA_MEM_TO_DEV)
 		dst = dev_addr;
-	else if (dir == DMA_FROM_DEVICE)
+	else if (dir == DMA_DEV_TO_MEM)
 		src = dev_addr;
 	else
 		goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 			ctrl_sg = ctrl ? ctrl : ctrl_last;
 
 
-		if (dir == DMA_TO_DEVICE)
+		if (dir == DMA_MEM_TO_DEV)
 			/* increment source address */
 			src = sg_phys(sg);
 		else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 		lli->src_addr = src;
 		lli->dst_addr = dst;
 
-		if (dir == DMA_FROM_DEVICE)
+		if (dir == DMA_DEV_TO_MEM)
 			dst += elem_size;
 		else
 			src += elem_size;
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
index 7a5c80990e9e..abff3714fdda 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 			  struct coh901318_lli *lli,
 			  dma_addr_t buf, unsigned int size,
 			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
-			  enum dma_data_direction dir);
+			  enum dma_transfer_direction dir);
 
 /**
  * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 		      struct scatterlist *sg, unsigned int nents,
 		      dma_addr_t dev_addr, u32 ctrl_chained,
 		      u32 ctrl, u32 ctrl_last,
-		      enum dma_data_direction dir, u32 ctrl_irq_mask);
+		      enum dma_transfer_direction dir, u32 ctrl_irq_mask);
 
 #endif /* COH901318_LLI_H */
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499da..a6c6051ec858 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 		!device->device_prep_dma_sg);
-	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 		!device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_control);
+	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+		!device->device_prep_interleaved_dma);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
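The reshuffled BUG_ON()s change the registration contract: a DMA_SLAVE driver must still provide device_control, but device_prep_slave_sg is no longer mandatory (a cyclic-only slave driver is now legal), and the new DMA_INTERLEAVE capability must be backed by device_prep_interleaved_dma. A provider-side sketch of that invariant; my_control and my_prep_interleaved are hypothetical driver callbacks:

/* Provider-side sketch: each capability bit advertised in cap_mask must
 * be backed by its callback before registering, or the corresponding
 * BUG_ON() above fires at dma_async_device_register() time. */
static int register_my_dma(struct dma_device *dd)
{
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dd->device_control = my_control;	/* still required for DMA_SLAVE */
	/* device_prep_slave_sg is no longer checked, so a cyclic-only
	 * slave driver may leave it NULL */

	dma_cap_set(DMA_INTERLEAVE, dd->cap_mask);
	dd->device_prep_interleaved_dma = my_prep_interleaved;

	return dma_async_device_register(dd);
}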
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9bfd6d360718..9b592b02b5f4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	return cookie;
 }
 
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_dma_slave *dws = dwc->chan.private;
+	u32 cfghi = DWC_CFGH_FIFO_MODE;
+	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+	if (dwc->initialized == true)
+		return;
+
+	if (dws) {
+		/*
+		 * We need controller-specific data to set up slave
+		 * transfers.
+		 */
+		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+		cfghi = dws->cfg_hi;
+		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+	}
+
+	channel_writel(dwc, CFG_LO, cfglo);
+	channel_writel(dwc, CFG_HI, cfghi);
+
+	/* Enable interrupts */
+	channel_set_bit(dw, MASK.XFER, dwc->mask);
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+	channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+	dwc->initialized = true;
+}
+
 /*----------------------------------------------------------------------*/
 
 /* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 		return;
 	}
 
+	dwc_initialize(dwc);
+
 	channel_writel(dwc, LLP, first->txd.phys);
 	channel_writel(dwc, CTL_LO,
 			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
 
 static struct dma_async_tx_descriptor *
 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	prev = first = NULL;
 
 	switch (direction) {
-	case DMA_TO_DEVICE:
+	case DMA_MEM_TO_DEV:
 		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
 				| DWC_CTLL_DST_WIDTH(reg_width)
 				| DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
 				goto slave_sg_todev_fill_desc;
 		}
 		break;
-	case DMA_FROM_DEVICE:
+	case DMA_DEV_TO_MEM:
 		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
 				| DWC_CTLL_SRC_WIDTH(reg_width)
 				| DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc;
-	struct dw_dma_slave *dws;
 	int i;
-	u32 cfghi;
-	u32 cfglo;
 	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
 	dwc->completed = chan->cookie = 1;
 
-	cfghi = DWC_CFGH_FIFO_MODE;
-	cfglo = 0;
-
-	dws = chan->private;
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi = dws->cfg_hi;
-		cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
-	}
-
-	cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
-	channel_writel(dwc, CFG_LO, cfglo);
-	channel_writel(dwc, CFG_HI, cfghi);
-
 	/*
 	 * NOTE: some controllers may have additional features that we
 	 * need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 		i = ++dwc->descs_allocated;
 	}
 
-	/* Enable interrupts */
-	channel_set_bit(dw, MASK.XFER, dwc->mask);
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-	channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
+	dwc->initialized = false;
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
 */
 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_data_direction direction)
+		enum dma_transfer_direction direction)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_cyclic_desc *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		goto out_err;
 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
 		goto out_err;
-	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
 		goto out_err;
 
 	retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 			goto out_err_desc_get;
 
 		switch (direction) {
-		case DMA_TO_DEVICE:
+		case DMA_MEM_TO_DEV:
 			desc->lli.dar = dws->tx_reg;
 			desc->lli.sar = buf_addr + (period_len * i);
 			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 					| DWC_CTLL_FC(dws->fc)
 					| DWC_CTLL_INT_EN);
 			break;
-		case DMA_FROM_DEVICE:
+		case DMA_DEV_TO_MEM:
 			desc->lli.dar = buf_addr + (period_len * i);
 			desc->lli.sar = dws->rx_reg;
 			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
 
 static void dw_dma_off(struct dw_dma *dw)
 {
+	int i;
+
 	dma_writel(dw, CFG, 0);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
 
 	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
 		cpu_relax();
+
+	for (i = 0; i < dw->dma.chancnt; i++)
+		dw->chan[i].initialized = false;
 }
 
 static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
 
 	dw_dma_off(platform_get_drvdata(pdev));
 	clk_disable(dw->clk);
+
 	return 0;
 }
 
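The dw_dmac change replaces eager channel setup in alloc_chan_resources() with a dwc_initialize() call on first start, guarded by a per-channel initialized flag that dw_dma_off() and free_chan_resources() clear, so the CFG registers are reprogrammed after a controller shutdown or suspend. The guard idiom in isolation, as a standalone sketch with invented names:

struct chan_state {
	int initialized;	/* cleared whenever the controller is reset */
	unsigned int cfg_lo, cfg_hi;
	unsigned int regs[2];	/* stand-in for the channel's CFG registers */
};

static void chan_initialize(struct chan_state *c)
{
	if (c->initialized)
		return;		/* already programmed since the last reset */

	c->regs[0] = c->cfg_lo;	/* one-time setup per power cycle */
	c->regs[1] = c->cfg_hi;
	c->initialized = 1;
}

static void controller_off(struct chan_state *chans, int n)
{
	int i;

	/* Force re-initialization on the next transfer after a reset. */
	for (i = 0; i < n; i++)
		chans[i].initialized = 0;
}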
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index c3419518d701..5eef6946a367 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -140,6 +140,7 @@ struct dw_dma_chan {
 	u8 mask;
 	u8 priority;
 	bool paused;
+	bool initialized;
 
 	spinlock_t lock;
 
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index b47e2b803faf..59e7a965772b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 static struct ep93xx_dma_desc *
 ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 {
+	if (list_empty(&edmac->active))
+		return NULL;
+
 	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
 }
 
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 */
 static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 {
+	struct ep93xx_dma_desc *desc;
+
 	list_rotate_left(&edmac->active);
 
 	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 		return true;
 
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc)
+		return false;
+
 	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
-	return !ep93xx_dma_get_active(edmac)->txd.cookie;
+	return !desc->txd.cookie;
 }
 
 /*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+	struct ep93xx_dma_desc *desc;
 	u32 bus_addr;
 
-	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc) {
+		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+		return;
+	}
+
+	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
 		bus_addr = desc->src_addr;
 	else
 		bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
 		control = (5 << M2M_CONTROL_PWSC_SHIFT);
 		control |= M2M_CONTROL_NO_HDSK;
 
-		if (data->direction == DMA_TO_DEVICE) {
+		if (data->direction == DMA_MEM_TO_DEV) {
 			control |= M2M_CONTROL_DAH;
 			control |= M2M_CONTROL_TM_TX;
 			control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx Users's Guide and might not be correct.
		 */
-		control |= M2M_CONTROL_NO_HDSK;
-		control |= M2M_CONTROL_RSS_IDE;
-		control |= M2M_CONTROL_PW_16;
-
-		if (data->direction == DMA_TO_DEVICE) {
+		if (data->direction == DMA_MEM_TO_DEV) {
 			/* Worst case from the UG */
 			control = (3 << M2M_CONTROL_PWSC_SHIFT);
 			control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
 			control |= M2M_CONTROL_SAH;
 			control |= M2M_CONTROL_TM_RX;
 		}
+
+		control |= M2M_CONTROL_NO_HDSK;
+		control |= M2M_CONTROL_RSS_IDE;
+		control |= M2M_CONTROL_PW_16;
 		break;
 
 	default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+	struct ep93xx_dma_desc *desc;
+
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc) {
+		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+		return;
+	}
 
 	if (edmac->buffer == 0) {
 		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
 {
 	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
 	struct ep93xx_dma_desc *desc, *d;
-	dma_async_tx_callback callback;
-	void *callback_param;
+	dma_async_tx_callback callback = NULL;
+	void *callback_param = NULL;
 	LIST_HEAD(list);
 
 	spin_lock_irq(&edmac->lock);
+	/*
+	 * If dma_terminate_all() was called before we get to run, the active
+	 * list has become empty. If that happens we aren't supposed to do
+	 * anything more than call ep93xx_dma_advance_work().
+	 */
 	desc = ep93xx_dma_get_active(edmac);
-	if (desc->complete) {
-		edmac->last_completed = desc->txd.cookie;
-		list_splice_init(&edmac->active, &list);
+	if (desc) {
+		if (desc->complete) {
+			edmac->last_completed = desc->txd.cookie;
+			list_splice_init(&edmac->active, &list);
+		}
+		callback = desc->txd.callback;
+		callback_param = desc->txd.callback_param;
 	}
 	spin_unlock_irq(&edmac->lock);
 
 	/* Pick up the next descriptor from the queue */
 	ep93xx_dma_advance_work(edmac);
 
-	callback = desc->txd.callback;
-	callback_param = desc->txd.callback_param;
-
 	/* Now we can release all the chained descriptors */
 	list_for_each_entry_safe(desc, d, &list, node) {
 		/*
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 {
 	struct ep93xx_dma_chan *edmac = dev_id;
+	struct ep93xx_dma_desc *desc;
 	irqreturn_t ret = IRQ_HANDLED;
 
 	spin_lock(&edmac->lock);
 
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc) {
+		dev_warn(chan2dev(edmac),
+			 "got interrupt while active list is empty\n");
+		spin_unlock(&edmac->lock);
+		return IRQ_NONE;
+	}
+
 	switch (edmac->edma->hw_interrupt(edmac)) {
 	case INTERRUPT_DONE:
-		ep93xx_dma_get_active(edmac)->complete = true;
+		desc->complete = true;
 		tasklet_schedule(&edmac->tasklet);
 		break;
 
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 	switch (data->port) {
 	case EP93XX_DMA_SSP:
 	case EP93XX_DMA_IDE:
-		if (data->direction != DMA_TO_DEVICE &&
-		    data->direction != DMA_FROM_DEVICE)
+		if (data->direction != DMA_MEM_TO_DEV &&
+		    data->direction != DMA_DEV_TO_MEM)
 			return -EINVAL;
 		break;
 	default:
@@ -952,7 +988,7 @@ fail:
 */
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-			 unsigned int sg_len, enum dma_data_direction dir,
+			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			goto fail;
 		}
 
-		if (dir == DMA_TO_DEVICE) {
+		if (dir == DMA_MEM_TO_DEV) {
 			desc->src_addr = sg_dma_address(sg);
 			desc->dst_addr = edmac->runtime_addr;
 		} else {
@@ -1032,7 +1068,7 @@ fail:
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 			   size_t buf_len, size_t period_len,
-			   enum dma_data_direction dir)
+			   enum dma_transfer_direction dir)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
1065 | goto fail; | 1101 | goto fail; |
1066 | } | 1102 | } |
1067 | 1103 | ||
1068 | if (dir == DMA_TO_DEVICE) { | 1104 | if (dir == DMA_MEM_TO_DEV) { |
1069 | desc->src_addr = dma_addr + offset; | 1105 | desc->src_addr = dma_addr + offset; |
1070 | desc->dst_addr = edmac->runtime_addr; | 1106 | desc->dst_addr = edmac->runtime_addr; |
1071 | } else { | 1107 | } else { |
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, | |||
1133 | return -EINVAL; | 1169 | return -EINVAL; |
1134 | 1170 | ||
1135 | switch (config->direction) { | 1171 | switch (config->direction) { |
1136 | case DMA_FROM_DEVICE: | 1172 | case DMA_DEV_TO_MEM: |
1137 | width = config->src_addr_width; | 1173 | width = config->src_addr_width; |
1138 | addr = config->src_addr; | 1174 | addr = config->src_addr; |
1139 | break; | 1175 | break; |
1140 | 1176 | ||
1141 | case DMA_TO_DEVICE: | 1177 | case DMA_MEM_TO_DEV: |
1142 | width = config->dst_addr_width; | 1178 | width = config->dst_addr_width; |
1143 | addr = config->dst_addr; | 1179 | addr = config->dst_addr; |
1144 | break; | 1180 | break; |
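The ep93xx hunks above close a race with dmaengine_terminate_all(): both the tasklet and the interrupt handler can now find an empty active list, and the completion callback is copied while the channel lock is held instead of being read from a descriptor that may already have been recycled. A minimal sketch of the pattern, with hypothetical my_chan/my_desc types standing in for the driver's own:

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_desc {
        struct dma_async_tx_descriptor txd;
        struct list_head node;
        bool complete;
};

struct my_chan {
        spinlock_t lock;
        struct list_head active;        /* head is the running descriptor */
        dma_cookie_t last_completed;
};

static void my_tasklet(unsigned long data)
{
        struct my_chan *c = (struct my_chan *)data;
        dma_async_tx_callback cb = NULL;        /* stays NULL when terminated */
        void *cb_param = NULL;
        struct my_desc *d;
        LIST_HEAD(done);

        spin_lock_irq(&c->lock);
        if (!list_empty(&c->active)) {
                d = list_first_entry(&c->active, struct my_desc, node);
                if (d->complete) {
                        c->last_completed = d->txd.cookie;
                        list_splice_init(&c->active, &done);
                }
                /* copy under the lock: d may be freed once we drop it */
                cb = d->txd.callback;
                cb_param = d->txd.callback_param;
        }
        spin_unlock_irq(&c->lock);

        if (cb)
                cb(cb_param);
        /* descriptors on "done" go back to the free pool here */
}

The matching check in the interrupt handler returns IRQ_NONE for an empty list, so an interrupt that fires after termination is reported as spurious rather than dereferencing a stale descriptor.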
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 8a781540590c..b98070c33ca9 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -772,7 +772,7 @@ fail: | |||
772 | */ | 772 | */ |
773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | 773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( |
774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | 774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
775 | enum dma_data_direction direction, unsigned long flags) | 775 | enum dma_transfer_direction direction, unsigned long flags) |
776 | { | 776 | { |
777 | /* | 777 | /* |
778 | * This operation is not supported on the Freescale DMA controller | 778 | * This operation is not supported on the Freescale DMA controller |
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
819 | return -ENXIO; | 819 | return -ENXIO; |
820 | 820 | ||
821 | /* we set the controller burst size depending on direction */ | 821 | /* we set the controller burst size depending on direction */ |
822 | if (config->direction == DMA_TO_DEVICE) | 822 | if (config->direction == DMA_MEM_TO_DEV) |
823 | size = config->dst_addr_width * config->dst_maxburst; | 823 | size = config->dst_addr_width * config->dst_maxburst; |
824 | else | 824 | else |
825 | size = config->src_addr_width * config->src_maxburst; | 825 | size = config->src_addr_width * config->src_maxburst; |
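fsl_dma_device_control() sizes the controller burst from whichever end of the transfer is the device FIFO. Expressed as a standalone sketch (the helper name is invented for illustration):

#include <linux/dmaengine.h>

/* Bytes moved per burst for a slave channel, per the logic above. */
static u32 slave_burst_bytes(const struct dma_slave_config *cfg)
{
        if (cfg->direction == DMA_MEM_TO_DEV)   /* device on the dst side */
                return cfg->dst_addr_width * cfg->dst_maxburst;
        return cfg->src_addr_width * cfg->src_maxburst; /* DMA_DEV_TO_MEM */
}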
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 4be55f9bb6c1..e4383ee2c9ac 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
107 | imx_dma_disable(imxdmac->imxdma_channel); | 107 | imx_dma_disable(imxdmac->imxdma_channel); |
108 | return 0; | 108 | return 0; |
109 | case DMA_SLAVE_CONFIG: | 109 | case DMA_SLAVE_CONFIG: |
110 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | 110 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
111 | imxdmac->per_address = dmaengine_cfg->src_addr; | 111 | imxdmac->per_address = dmaengine_cfg->src_addr; |
112 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; | 112 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; |
113 | imxdmac->word_size = dmaengine_cfg->src_addr_width; | 113 | imxdmac->word_size = dmaengine_cfg->src_addr_width; |
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) | |||
224 | 224 | ||
225 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | 225 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( |
226 | struct dma_chan *chan, struct scatterlist *sgl, | 226 | struct dma_chan *chan, struct scatterlist *sgl, |
227 | unsigned int sg_len, enum dma_data_direction direction, | 227 | unsigned int sg_len, enum dma_transfer_direction direction, |
228 | unsigned long flags) | 228 | unsigned long flags) |
229 | { | 229 | { |
230 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 230 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
241 | dma_length += sg->length; | 241 | dma_length += sg->length; |
242 | } | 242 | } |
243 | 243 | ||
244 | if (direction == DMA_FROM_DEVICE) | 244 | if (direction == DMA_DEV_TO_MEM) |
245 | dmamode = DMA_MODE_READ; | 245 | dmamode = DMA_MODE_READ; |
246 | else | 246 | else |
247 | dmamode = DMA_MODE_WRITE; | 247 | dmamode = DMA_MODE_WRITE; |
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
271 | 271 | ||
272 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | 272 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( |
273 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 273 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
274 | size_t period_len, enum dma_data_direction direction) | 274 | size_t period_len, enum dma_transfer_direction direction) |
275 | { | 275 | { |
276 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 276 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
277 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 277 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
317 | imxdmac->sg_list[periods].page_link = | 317 | imxdmac->sg_list[periods].page_link = |
318 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; | 318 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; |
319 | 319 | ||
320 | if (direction == DMA_FROM_DEVICE) | 320 | if (direction == DMA_DEV_TO_MEM) |
321 | dmamode = DMA_MODE_READ; | 321 | dmamode = DMA_MODE_READ; |
322 | else | 322 | else |
323 | dmamode = DMA_MODE_WRITE; | 323 | dmamode = DMA_MODE_WRITE; |
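imx-dma still drives the legacy mach-imx API underneath, so the renamed dmaengine direction only has to collapse onto the old mode flags. A sketch of that mapping, assuming the legacy DMA_MODE_READ/DMA_MODE_WRITE definitions from the era's mach-imx header:

#include <linux/dmaengine.h>
#include <mach/dma-v1.h>        /* legacy imx API; header name as used then */

/* DMA_DEV_TO_MEM is a peripheral-to-memory transfer, i.e. a legacy "read". */
static unsigned int imx_dmamode(enum dma_transfer_direction dir)
{
        return dir == DMA_DEV_TO_MEM ? DMA_MODE_READ : DMA_MODE_WRITE;
}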
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f993955a640c..a8af379680c1 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -247,7 +247,7 @@ struct sdma_engine; | |||
247 | struct sdma_channel { | 247 | struct sdma_channel { |
248 | struct sdma_engine *sdma; | 248 | struct sdma_engine *sdma; |
249 | unsigned int channel; | 249 | unsigned int channel; |
250 | enum dma_data_direction direction; | 250 | enum dma_transfer_direction direction; |
251 | enum sdma_peripheral_type peripheral_type; | 251 | enum sdma_peripheral_type peripheral_type; |
252 | unsigned int event_id0; | 252 | unsigned int event_id0; |
253 | unsigned int event_id1; | 253 | unsigned int event_id1; |
@@ -268,6 +268,8 @@ struct sdma_channel { | |||
268 | struct dma_async_tx_descriptor desc; | 268 | struct dma_async_tx_descriptor desc; |
269 | dma_cookie_t last_completed; | 269 | dma_cookie_t last_completed; |
270 | enum dma_status status; | 270 | enum dma_status status; |
271 | unsigned int chn_count; | ||
272 | unsigned int chn_real_count; | ||
271 | }; | 273 | }; |
272 | 274 | ||
273 | #define IMX_DMA_SG_LOOP (1 << 0) | 275 | #define IMX_DMA_SG_LOOP (1 << 0) |
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
503 | struct sdma_buffer_descriptor *bd; | 505 | struct sdma_buffer_descriptor *bd; |
504 | int i, error = 0; | 506 | int i, error = 0; |
505 | 507 | ||
508 | sdmac->chn_real_count = 0; | ||
506 | /* | 509 | /* |
507 | * non loop mode. Iterate over all descriptors, collect | 510 | * non loop mode. Iterate over all descriptors, collect |
508 | * errors and call callback function | 511 | * errors and call callback function |
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
512 | 515 | ||
513 | if (bd->mode.status & (BD_DONE | BD_RROR)) | 516 | if (bd->mode.status & (BD_DONE | BD_RROR)) |
514 | error = -EIO; | 517 | error = -EIO; |
518 | sdmac->chn_real_count += bd->mode.count; | ||
515 | } | 519 | } |
516 | 520 | ||
517 | if (error) | 521 | if (error) |
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
519 | else | 523 | else |
520 | sdmac->status = DMA_SUCCESS; | 524 | sdmac->status = DMA_SUCCESS; |
521 | 525 | ||
526 | sdmac->last_completed = sdmac->desc.cookie; | ||
522 | if (sdmac->desc.callback) | 527 | if (sdmac->desc.callback) |
523 | sdmac->desc.callback(sdmac->desc.callback_param); | 528 | sdmac->desc.callback(sdmac->desc.callback_param); |
524 | sdmac->last_completed = sdmac->desc.cookie; | ||
525 | } | 529 | } |
526 | 530 | ||
527 | static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) | 531 | static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) |
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
650 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; | 654 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; |
651 | int ret; | 655 | int ret; |
652 | 656 | ||
653 | if (sdmac->direction == DMA_FROM_DEVICE) { | 657 | if (sdmac->direction == DMA_DEV_TO_MEM) { |
654 | load_address = sdmac->pc_from_device; | 658 | load_address = sdmac->pc_from_device; |
655 | } else { | 659 | } else { |
656 | load_address = sdmac->pc_to_device; | 660 | load_address = sdmac->pc_to_device; |
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | |||
832 | 836 | ||
833 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | 837 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) |
834 | { | 838 | { |
839 | unsigned long flags; | ||
835 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); | 840 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); |
836 | struct sdma_engine *sdma = sdmac->sdma; | 841 | struct sdma_engine *sdma = sdmac->sdma; |
837 | dma_cookie_t cookie; | 842 | dma_cookie_t cookie; |
838 | 843 | ||
839 | spin_lock_irq(&sdmac->lock); | 844 | spin_lock_irqsave(&sdmac->lock, flags); |
840 | 845 | ||
841 | cookie = sdma_assign_cookie(sdmac); | 846 | cookie = sdma_assign_cookie(sdmac); |
842 | 847 | ||
843 | sdma_enable_channel(sdma, sdmac->channel); | 848 | sdma_enable_channel(sdma, sdmac->channel); |
844 | 849 | ||
845 | spin_unlock_irq(&sdmac->lock); | 850 | spin_unlock_irqrestore(&sdmac->lock, flags); |
846 | 851 | ||
847 | return cookie; | 852 | return cookie; |
848 | } | 853 | } |
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
911 | 916 | ||
912 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | 917 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( |
913 | struct dma_chan *chan, struct scatterlist *sgl, | 918 | struct dma_chan *chan, struct scatterlist *sgl, |
914 | unsigned int sg_len, enum dma_data_direction direction, | 919 | unsigned int sg_len, enum dma_transfer_direction direction, |
915 | unsigned long flags) | 920 | unsigned long flags) |
916 | { | 921 | { |
917 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 922 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
941 | goto err_out; | 946 | goto err_out; |
942 | } | 947 | } |
943 | 948 | ||
949 | sdmac->chn_count = 0; | ||
944 | for_each_sg(sgl, sg, sg_len, i) { | 950 | for_each_sg(sgl, sg, sg_len, i) { |
945 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | 951 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; |
946 | int param; | 952 | int param; |
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
957 | } | 963 | } |
958 | 964 | ||
959 | bd->mode.count = count; | 965 | bd->mode.count = count; |
966 | sdmac->chn_count += count; | ||
960 | 967 | ||
961 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { | 968 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { |
962 | ret = -EINVAL; | 969 | ret = -EINVAL; |
@@ -1008,7 +1015,7 @@ err_out: | |||
1008 | 1015 | ||
1009 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | 1016 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( |
1010 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 1017 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
1011 | size_t period_len, enum dma_data_direction direction) | 1018 | size_t period_len, enum dma_transfer_direction direction) |
1012 | { | 1019 | { |
1013 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1020 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1014 | struct sdma_engine *sdma = sdmac->sdma; | 1021 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1093 | sdma_disable_channel(sdmac); | 1100 | sdma_disable_channel(sdmac); |
1094 | return 0; | 1101 | return 0; |
1095 | case DMA_SLAVE_CONFIG: | 1102 | case DMA_SLAVE_CONFIG: |
1096 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | 1103 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
1097 | sdmac->per_address = dmaengine_cfg->src_addr; | 1104 | sdmac->per_address = dmaengine_cfg->src_addr; |
1098 | sdmac->watermark_level = dmaengine_cfg->src_maxburst; | 1105 | sdmac->watermark_level = dmaengine_cfg->src_maxburst; |
1099 | sdmac->word_size = dmaengine_cfg->src_addr_width; | 1106 | sdmac->word_size = dmaengine_cfg->src_addr_width; |
@@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1102 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst; | 1109 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst; |
1103 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | 1110 | sdmac->word_size = dmaengine_cfg->dst_addr_width; |
1104 | } | 1111 | } |
1112 | sdmac->direction = dmaengine_cfg->direction; | ||
1105 | return sdma_config_channel(sdmac); | 1113 | return sdma_config_channel(sdmac); |
1106 | default: | 1114 | default: |
1107 | return -ENOSYS; | 1115 | return -ENOSYS; |
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1119 | 1127 | ||
1120 | last_used = chan->cookie; | 1128 | last_used = chan->cookie; |
1121 | 1129 | ||
1122 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); | 1130 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, |
1131 | sdmac->chn_count - sdmac->chn_real_count); | ||
1123 | 1132 | ||
1124 | return sdmac->status; | 1133 | return sdmac->status; |
1125 | } | 1134 | } |
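Three separate fixes land in imx-sdma here: sdma_tx_submit() switches to spin_lock_irqsave() because submit can be called with interrupts already disabled (the unconditional spin_unlock_irq() would have re-enabled them behind the caller's back); last_completed is updated before the callback runs, so a callback that queries tx_status sees the right cookie; and chn_count/chn_real_count give dma_set_tx_state() a real residue instead of a hard-coded 0. A sketch of the locking half, with names invented for illustration:

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

static dma_cookie_t my_tx_submit(spinlock_t *lock, dma_cookie_t *next)
{
        unsigned long flags;
        dma_cookie_t cookie;

        spin_lock_irqsave(lock, flags);         /* save caller's IRQ state */
        cookie = ++(*next);
        if (cookie < DMA_MIN_COOKIE)            /* s32 wrapped around */
                cookie = *next = DMA_MIN_COOKIE;
        /* the channel would be kicked here, still under the lock */
        spin_unlock_irqrestore(lock, flags);    /* restore, don't re-enable */
        return cookie;
}

The residue then reads simply as chn_count - chn_real_count, accumulated per buffer descriptor in mxc_sdma_handle_channel_normal().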
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 19a0c64d45d3..74f70aadf9e4 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
280 | * callbacks but must be called with the lock held. | 280 | * callbacks but must be called with the lock held. |
281 | */ | 281 | */ |
282 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | 282 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, |
283 | struct intel_mid_dma_desc *desc) | 283 | struct intel_mid_dma_desc *desc) |
284 | __releases(&midc->lock) __acquires(&midc->lock) | ||
284 | { | 285 | { |
285 | struct dma_async_tx_descriptor *txd = &desc->txd; | 286 | struct dma_async_tx_descriptor *txd = &desc->txd; |
286 | dma_async_tx_callback callback_txd = NULL; | 287 | dma_async_tx_callback callback_txd = NULL; |
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
311 | pci_pool_free(desc->lli_pool, desc->lli, | 312 | pci_pool_free(desc->lli_pool, desc->lli, |
312 | desc->lli_phys); | 313 | desc->lli_phys); |
313 | pci_pool_destroy(desc->lli_pool); | 314 | pci_pool_destroy(desc->lli_pool); |
315 | desc->lli = NULL; | ||
314 | } | 316 | } |
315 | list_move(&desc->desc_node, &midc->free_list); | 317 | list_move(&desc->desc_node, &midc->free_list); |
316 | midc->busy = false; | 318 | midc->busy = false; |
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, | |||
395 | midc->dma->block_size); | 397 | midc->dma->block_size); |
396 | /*Populate SAR and DAR values*/ | 398 | /*Populate SAR and DAR values*/ |
397 | sg_phy_addr = sg_phys(sg); | 399 | sg_phy_addr = sg_phys(sg); |
398 | if (desc->dirn == DMA_TO_DEVICE) { | 400 | if (desc->dirn == DMA_MEM_TO_DEV) { |
399 | lli_bloc_desc->sar = sg_phy_addr; | 401 | lli_bloc_desc->sar = sg_phy_addr; |
400 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; | 402 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; |
401 | } else if (desc->dirn == DMA_FROM_DEVICE) { | 403 | } else if (desc->dirn == DMA_DEV_TO_MEM) { |
402 | lli_bloc_desc->sar = mids->dma_slave.src_addr; | 404 | lli_bloc_desc->sar = mids->dma_slave.src_addr; |
403 | lli_bloc_desc->dar = sg_phy_addr; | 405 | lli_bloc_desc->dar = sg_phy_addr; |
404 | } | 406 | } |
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
490 | 492 | ||
491 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 493 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
492 | if (ret != DMA_SUCCESS) { | 494 | if (ret != DMA_SUCCESS) { |
495 | spin_lock_bh(&midc->lock); | ||
493 | midc_scan_descriptors(to_middma_device(chan->device), midc); | 496 | midc_scan_descriptors(to_middma_device(chan->device), midc); |
497 | spin_unlock_bh(&midc->lock); | ||
494 | 498 | ||
495 | last_complete = midc->completed; | 499 | last_complete = midc->completed; |
496 | last_used = chan->cookie; | 500 | last_used = chan->cookie; |
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan, | |||
566 | pci_pool_free(desc->lli_pool, desc->lli, | 570 | pci_pool_free(desc->lli_pool, desc->lli, |
567 | desc->lli_phys); | 571 | desc->lli_phys); |
568 | pci_pool_destroy(desc->lli_pool); | 572 | pci_pool_destroy(desc->lli_pool); |
573 | desc->lli = NULL; | ||
569 | } | 574 | } |
570 | list_move(&desc->desc_node, &midc->free_list); | 575 | list_move(&desc->desc_node, &midc->free_list); |
571 | } | 576 | } |
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
632 | if (midc->dma->pimr_mask) { | 637 | if (midc->dma->pimr_mask) { |
633 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ | 638 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ |
634 | cfg_hi.cfgx.fifo_mode = 1; | 639 | cfg_hi.cfgx.fifo_mode = 1; |
635 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { | 640 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { |
636 | cfg_hi.cfgx.src_per = 0; | 641 | cfg_hi.cfgx.src_per = 0; |
637 | if (mids->device_instance == 0) | 642 | if (mids->device_instance == 0) |
638 | cfg_hi.cfgx.dst_per = 3; | 643 | cfg_hi.cfgx.dst_per = 3; |
639 | if (mids->device_instance == 1) | 644 | if (mids->device_instance == 1) |
640 | cfg_hi.cfgx.dst_per = 1; | 645 | cfg_hi.cfgx.dst_per = 1; |
641 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { | 646 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { |
642 | if (mids->device_instance == 0) | 647 | if (mids->device_instance == 0) |
643 | cfg_hi.cfgx.src_per = 2; | 648 | cfg_hi.cfgx.src_per = 2; |
644 | if (mids->device_instance == 1) | 649 | if (mids->device_instance == 1) |
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
682 | ctl_lo.ctlx.sinc = 0; | 687 | ctl_lo.ctlx.sinc = 0; |
683 | ctl_lo.ctlx.dinc = 0; | 688 | ctl_lo.ctlx.dinc = 0; |
684 | } else { | 689 | } else { |
685 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { | 690 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { |
686 | ctl_lo.ctlx.sinc = 0; | 691 | ctl_lo.ctlx.sinc = 0; |
687 | ctl_lo.ctlx.dinc = 2; | 692 | ctl_lo.ctlx.dinc = 2; |
688 | ctl_lo.ctlx.tt_fc = 1; | 693 | ctl_lo.ctlx.tt_fc = 1; |
689 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { | 694 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { |
690 | ctl_lo.ctlx.sinc = 2; | 695 | ctl_lo.ctlx.sinc = 2; |
691 | ctl_lo.ctlx.dinc = 0; | 696 | ctl_lo.ctlx.dinc = 0; |
692 | ctl_lo.ctlx.tt_fc = 2; | 697 | ctl_lo.ctlx.tt_fc = 2; |
@@ -732,7 +737,7 @@ err_desc_get: | |||
732 | */ | 737 | */ |
733 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | 738 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( |
734 | struct dma_chan *chan, struct scatterlist *sgl, | 739 | struct dma_chan *chan, struct scatterlist *sgl, |
735 | unsigned int sg_len, enum dma_data_direction direction, | 740 | unsigned int sg_len, enum dma_transfer_direction direction, |
736 | unsigned long flags) | 741 | unsigned long flags) |
737 | { | 742 | { |
738 | struct intel_mid_dma_chan *midc = NULL; | 743 | struct intel_mid_dma_chan *midc = NULL; |
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
868 | pm_runtime_get_sync(&mid->pdev->dev); | 873 | pm_runtime_get_sync(&mid->pdev->dev); |
869 | 874 | ||
870 | if (mid->state == SUSPENDED) { | 875 | if (mid->state == SUSPENDED) { |
871 | if (dma_resume(mid->pdev)) { | 876 | if (dma_resume(&mid->pdev->dev)) { |
872 | pr_err("ERR_MDMA: resume failed"); | 877 | pr_err("ERR_MDMA: resume failed"); |
873 | return -EFAULT; | 878 | return -EFAULT; |
874 | } | 879 | } |
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1099 | LNW_PERIPHRAL_MASK_SIZE); | 1104 | LNW_PERIPHRAL_MASK_SIZE); |
1100 | if (dma->mask_reg == NULL) { | 1105 | if (dma->mask_reg == NULL) { |
1101 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); | 1106 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); |
1102 | return -ENOMEM; | 1107 | err = -ENOMEM; |
1108 | goto err_ioremap; | ||
1103 | } | 1109 | } |
1104 | } else | 1110 | } else |
1105 | dma->mask_reg = NULL; | 1111 | dma->mask_reg = NULL; |
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1196 | err_engine: | 1202 | err_engine: |
1197 | free_irq(pdev->irq, dma); | 1203 | free_irq(pdev->irq, dma); |
1198 | err_irq: | 1204 | err_irq: |
1205 | if (dma->mask_reg) | ||
1206 | iounmap(dma->mask_reg); | ||
1207 | err_ioremap: | ||
1199 | pci_pool_destroy(dma->dma_pool); | 1208 | pci_pool_destroy(dma->dma_pool); |
1200 | err_dma_pool: | 1209 | err_dma_pool: |
1201 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | 1210 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); |
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) | |||
1337 | * | 1346 | * |
1338 | * This function is called by OS when a power event occurs | 1347 | * This function is called by OS when a power event occurs |
1339 | */ | 1348 | */ |
1340 | int dma_suspend(struct pci_dev *pci, pm_message_t state) | 1349 | static int dma_suspend(struct device *dev) |
1341 | { | 1350 | { |
1351 | struct pci_dev *pci = to_pci_dev(dev); | ||
1342 | int i; | 1352 | int i; |
1343 | struct middma_device *device = pci_get_drvdata(pci); | 1353 | struct middma_device *device = pci_get_drvdata(pci); |
1344 | pr_debug("MDMA: dma_suspend called\n"); | 1354 | pr_debug("MDMA: dma_suspend called\n"); |
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state) | |||
1362 | * | 1372 | * |
1363 | * This function is called by OS when a power event occurs | 1373 | * This function is called by OS when a power event occurs |
1364 | */ | 1374 | */ |
1365 | int dma_resume(struct pci_dev *pci) | 1375 | int dma_resume(struct device *dev) |
1366 | { | 1376 | { |
1377 | struct pci_dev *pci = to_pci_dev(dev); | ||
1367 | int ret; | 1378 | int ret; |
1368 | struct middma_device *device = pci_get_drvdata(pci); | 1379 | struct middma_device *device = pci_get_drvdata(pci); |
1369 | 1380 | ||
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = { | |||
1429 | .runtime_suspend = dma_runtime_suspend, | 1440 | .runtime_suspend = dma_runtime_suspend, |
1430 | .runtime_resume = dma_runtime_resume, | 1441 | .runtime_resume = dma_runtime_resume, |
1431 | .runtime_idle = dma_runtime_idle, | 1442 | .runtime_idle = dma_runtime_idle, |
1443 | .suspend = dma_suspend, | ||
1444 | .resume = dma_resume, | ||
1432 | }; | 1445 | }; |
1433 | 1446 | ||
1434 | static struct pci_driver intel_mid_dma_pci_driver = { | 1447 | static struct pci_driver intel_mid_dma_pci_driver = { |
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = { | |||
1437 | .probe = intel_mid_dma_probe, | 1450 | .probe = intel_mid_dma_probe, |
1438 | .remove = __devexit_p(intel_mid_dma_remove), | 1451 | .remove = __devexit_p(intel_mid_dma_remove), |
1439 | #ifdef CONFIG_PM | 1452 | #ifdef CONFIG_PM |
1440 | .suspend = dma_suspend, | ||
1441 | .resume = dma_resume, | ||
1442 | .driver = { | 1453 | .driver = { |
1443 | .pm = &intel_mid_dma_pm, | 1454 | .pm = &intel_mid_dma_pm, |
1444 | }, | 1455 | }, |
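The intel_mid_dma hunks tighten several things at once: midc_descriptor_complete() gains sparse __releases/__acquires annotations for the lock it drops around callbacks, desc->lli is NULLed after its pool is freed, tx_status takes the channel lock around midc_scan_descriptors(), the probe error path unmaps the peripheral-interrupt region, and the legacy PCI suspend/resume hooks move into dev_pm_ops. A sketch of the PM migration, with hardware quiescing and re-init elided:

#include <linux/pci.h>
#include <linux/pm.h>

static int dma_suspend(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);  /* dev_pm_ops passes a device */

        /* quiescing of in-flight channels elided */
        pci_save_state(pci);
        pci_disable_device(pci);
        return 0;
}

static int dma_resume(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);

        pci_set_power_state(pci, PCI_D0);
        pci_restore_state(pci);
        return pci_enable_device(pci);
}

static const struct dev_pm_ops my_pm_ops = {
        .suspend = dma_suspend,         /* system sleep */
        .resume  = dma_resume,
        /* the runtime_suspend/resume/idle callbacks share this table */
};

The pci_driver then carries only .driver.pm pointing at this table; the old .suspend/.resume members are deleted, and the dma_resume() prototype in intel_mid_dma_regs.h changes to take a struct device accordingly.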
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index aea5ee88ce03..c83d35b97bd8 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h | |||
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc { | |||
262 | unsigned int lli_length; | 262 | unsigned int lli_length; |
263 | unsigned int current_lli; | 263 | unsigned int current_lli; |
264 | dma_addr_t next; | 264 | dma_addr_t next; |
265 | enum dma_data_direction dirn; | 265 | enum dma_transfer_direction dirn; |
266 | enum dma_status status; | 266 | enum dma_status status; |
267 | enum dma_slave_buswidth width; /*width of DMA txn*/ | 267 | enum dma_slave_buswidth width; /*width of DMA txn*/ |
268 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | 268 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ |
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave | |||
296 | } | 296 | } |
297 | 297 | ||
298 | 298 | ||
299 | int dma_resume(struct pci_dev *pci); | 299 | int dma_resume(struct device *dev); |
300 | 300 | ||
301 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | 301 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index e03f811a83dd..04be90b645b8 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1735 | spin_unlock_bh(&iop_chan->lock); | 1735 | spin_unlock_bh(&iop_chan->lock); |
1736 | } | 1736 | } |
1737 | 1737 | ||
1738 | MODULE_ALIAS("platform:iop-adma"); | ||
1739 | |||
1740 | static struct platform_driver iop_adma_driver = { | 1738 | static struct platform_driver iop_adma_driver = { |
1741 | .probe = iop_adma_probe, | 1739 | .probe = iop_adma_probe, |
1742 | .remove = __devexit_p(iop_adma_remove), | 1740 | .remove = __devexit_p(iop_adma_remove), |
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = { | |||
1746 | }, | 1744 | }, |
1747 | }; | 1745 | }; |
1748 | 1746 | ||
1749 | static int __init iop_adma_init (void) | 1747 | module_platform_driver(iop_adma_driver); |
1750 | { | ||
1751 | return platform_driver_register(&iop_adma_driver); | ||
1752 | } | ||
1753 | |||
1754 | static void __exit iop_adma_exit (void) | ||
1755 | { | ||
1756 | platform_driver_unregister(&iop_adma_driver); | ||
1757 | return; | ||
1758 | } | ||
1759 | module_exit(iop_adma_exit); | ||
1760 | module_init(iop_adma_init); | ||
1761 | 1748 | ||
1762 | MODULE_AUTHOR("Intel Corporation"); | 1749 | MODULE_AUTHOR("Intel Corporation"); |
1763 | MODULE_DESCRIPTION("IOP ADMA Engine Driver"); | 1750 | MODULE_DESCRIPTION("IOP ADMA Engine Driver"); |
1764 | MODULE_LICENSE("GPL"); | 1751 | MODULE_LICENSE("GPL"); |
1752 | MODULE_ALIAS("platform:iop-adma"); | ||
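The iop-adma cleanup is pure boilerplate removal: module_platform_driver() generates the register/unregister pair that was previously spelled out, and MODULE_ALIAS() moves next to the other MODULE_* tags. Approximately what the macro expands to (simplified from include/linux/platform_device.h); mpc512x_dma receives the identical treatment a few hunks below:

static int __init iop_adma_driver_init(void)
{
        return platform_driver_register(&iop_adma_driver);
}
module_init(iop_adma_driver_init);

static void __exit iop_adma_driver_exit(void)
{
        platform_driver_unregister(&iop_adma_driver);
}
module_exit(iop_adma_driver_exit);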
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 0e5ef33f90a1..6212b16e8cf2 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
312 | case IPU_PIX_FMT_RGB565: | 312 | case IPU_PIX_FMT_RGB565: |
313 | params->ip.bpp = 2; | 313 | params->ip.bpp = 2; |
314 | params->ip.pfs = 4; | 314 | params->ip.pfs = 4; |
315 | params->ip.npb = 7; | 315 | params->ip.npb = 15; |
316 | params->ip.sat = 2; /* SAT = 32-bit access */ | 316 | params->ip.sat = 2; /* SAT = 32-bit access */ |
317 | params->ip.ofs0 = 0; /* Red bit offset */ | 317 | params->ip.ofs0 = 0; /* Red bit offset */ |
318 | params->ip.ofs1 = 5; /* Green bit offset */ | 318 | params->ip.ofs1 = 5; /* Green bit offset */ |
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
422 | params->pp.nsb = 1; | 422 | params->pp.nsb = 1; |
423 | } | 423 | } |
424 | 424 | ||
425 | static void ipu_ch_param_set_burst_size(union chan_param_mem *params, | ||
426 | uint16_t burst_pixels) | ||
427 | { | ||
428 | params->pp.npb = burst_pixels - 1; | ||
429 | } | ||
430 | |||
431 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, | 425 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, |
432 | dma_addr_t buf0, dma_addr_t buf1) | 426 | dma_addr_t buf0, dma_addr_t buf1) |
433 | { | 427 | { |
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan, | |||
690 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); | 684 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); |
691 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); | 685 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); |
692 | ipu_ch_param_set_rotation(¶ms, rot_mode); | 686 | ipu_ch_param_set_rotation(¶ms, rot_mode); |
693 | /* Some channels (rotation) have restriction on burst length */ | ||
694 | switch (channel) { | ||
695 | case IDMAC_IC_7: /* Hangs with burst 8, 16, other values | ||
696 | invalid - Table 44-30 */ | ||
697 | /* | ||
698 | ipu_ch_param_set_burst_size(¶ms, 8); | ||
699 | */ | ||
700 | break; | ||
701 | case IDMAC_SDC_0: | ||
702 | case IDMAC_SDC_1: | ||
703 | /* In original code only IPU_PIX_FMT_RGB565 was setting burst */ | ||
704 | ipu_ch_param_set_burst_size(¶ms, 16); | ||
705 | break; | ||
706 | case IDMAC_IC_0: | ||
707 | default: | ||
708 | break; | ||
709 | } | ||
710 | 687 | ||
711 | spin_lock_irqsave(&ipu->lock, flags); | 688 | spin_lock_irqsave(&ipu->lock, flags); |
712 | 689 | ||
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg) | |||
1364 | /* Allocate and initialise a transfer descriptor. */ | 1341 | /* Allocate and initialise a transfer descriptor. */ |
1365 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, | 1342 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, |
1366 | struct scatterlist *sgl, unsigned int sg_len, | 1343 | struct scatterlist *sgl, unsigned int sg_len, |
1367 | enum dma_data_direction direction, unsigned long tx_flags) | 1344 | enum dma_transfer_direction direction, unsigned long tx_flags) |
1368 | { | 1345 | { |
1369 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1346 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1370 | struct idmac_tx_desc *desc = NULL; | 1347 | struct idmac_tx_desc *desc = NULL; |
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan | |||
1376 | chan->chan_id != IDMAC_IC_7) | 1353 | chan->chan_id != IDMAC_IC_7) |
1377 | return NULL; | 1354 | return NULL; |
1378 | 1355 | ||
1379 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { | 1356 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { |
1380 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); | 1357 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); |
1381 | return NULL; | 1358 | return NULL; |
1382 | } | 1359 | } |
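The ipu_idmac change folds the burst fix directly into ipu_ch_param_set_size(): the npb field encodes the burst length minus one, so raising RGB565's npb from 7 to 15 selects the 16-pixel burst that the deleted IDMAC_SDC_0/1 special case in ipu_init_channel_buffer() used to set. As a sketch of the encoding, following the deleted helper's body:

#include <linux/types.h>

/* npb holds (burst_pixels - 1). */
static inline u16 npb_from_burst(u16 burst_pixels)
{
        return burst_pixels - 1;        /* 16-pixel burst -> npb = 15 */
}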
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 8ba4edc6185e..4d6d4cf66949 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = { | |||
835 | }, | 835 | }, |
836 | }; | 836 | }; |
837 | 837 | ||
838 | static int __init mpc_dma_init(void) | 838 | module_platform_driver(mpc_dma_driver); |
839 | { | ||
840 | return platform_driver_register(&mpc_dma_driver); | ||
841 | } | ||
842 | module_init(mpc_dma_init); | ||
843 | |||
844 | static void __exit mpc_dma_exit(void) | ||
845 | { | ||
846 | platform_driver_unregister(&mpc_dma_driver); | ||
847 | } | ||
848 | module_exit(mpc_dma_exit); | ||
849 | 839 | ||
850 | MODULE_LICENSE("GPL"); | 840 | MODULE_LICENSE("GPL"); |
851 | MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); | 841 | MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index fc903c0ed234..b06cd4ca626f 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #define HW_APBHX_CTRL0 0x000 | 44 | #define HW_APBHX_CTRL0 0x000 |
45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) | 45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) |
46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) | 46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) |
47 | #define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 | ||
48 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 | 47 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 |
49 | #define HW_APBHX_CTRL1 0x010 | 48 | #define HW_APBHX_CTRL1 0x010 |
50 | #define HW_APBHX_CTRL2 0x020 | 49 | #define HW_APBHX_CTRL2 0x020 |
@@ -111,6 +110,7 @@ struct mxs_dma_chan { | |||
111 | int chan_irq; | 110 | int chan_irq; |
112 | struct mxs_dma_ccw *ccw; | 111 | struct mxs_dma_ccw *ccw; |
113 | dma_addr_t ccw_phys; | 112 | dma_addr_t ccw_phys; |
113 | int desc_count; | ||
114 | dma_cookie_t last_completed; | 114 | dma_cookie_t last_completed; |
115 | enum dma_status status; | 115 | enum dma_status status; |
116 | unsigned int flags; | 116 | unsigned int flags; |
@@ -130,23 +130,6 @@ struct mxs_dma_engine { | |||
130 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; | 130 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; |
131 | }; | 131 | }; |
132 | 132 | ||
133 | static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable) | ||
134 | { | ||
135 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
136 | int chan_id = mxs_chan->chan.chan_id; | ||
137 | int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR; | ||
138 | |||
139 | /* enable apbh channel clock */ | ||
140 | if (dma_is_apbh()) { | ||
141 | if (apbh_is_old()) | ||
142 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
143 | mxs_dma->base + HW_APBHX_CTRL0 + set_clr); | ||
144 | else | ||
145 | writel(1 << chan_id, | ||
146 | mxs_dma->base + HW_APBHX_CTRL0 + set_clr); | ||
147 | } | ||
148 | } | ||
149 | |||
150 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | 133 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) |
151 | { | 134 | { |
152 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 135 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
165 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 148 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
166 | int chan_id = mxs_chan->chan.chan_id; | 149 | int chan_id = mxs_chan->chan.chan_id; |
167 | 150 | ||
168 | /* clkgate needs to be enabled before writing other registers */ | ||
169 | mxs_dma_clkgate(mxs_chan, 1); | ||
170 | |||
171 | /* set cmd_addr up */ | 151 | /* set cmd_addr up */ |
172 | writel(mxs_chan->ccw_phys, | 152 | writel(mxs_chan->ccw_phys, |
173 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); | 153 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); |
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
178 | 158 | ||
179 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | 159 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) |
180 | { | 160 | { |
181 | /* disable apbh channel clock */ | ||
182 | mxs_dma_clkgate(mxs_chan, 0); | ||
183 | |||
184 | mxs_chan->status = DMA_SUCCESS; | 161 | mxs_chan->status = DMA_SUCCESS; |
185 | } | 162 | } |
186 | 163 | ||
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | |||
268 | /* | 245 | /* |
269 | * When both completion and error of termination bits set at the | 246 | * When both completion and error of termination bits set at the |
270 | * same time, we do not take it as an error. IOW, it only becomes | 247 | * same time, we do not take it as an error. IOW, it only becomes |
271 | * an error we need to handler here in case of ether it's (1) an bus | 248 | * an error we need to handle here in case of either it's (1) a bus |
272 | * error or (2) a termination error with no completion. | 249 | * error or (2) a termination error with no completion. |
273 | */ | 250 | */ |
274 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ | 251 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ |
@@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
338 | if (ret) | 315 | if (ret) |
339 | goto err_clk; | 316 | goto err_clk; |
340 | 317 | ||
341 | /* clkgate needs to be enabled for reset to finish */ | ||
342 | mxs_dma_clkgate(mxs_chan, 1); | ||
343 | mxs_dma_reset_chan(mxs_chan); | 318 | mxs_dma_reset_chan(mxs_chan); |
344 | mxs_dma_clkgate(mxs_chan, 0); | ||
345 | 319 | ||
346 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); | 320 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); |
347 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; | 321 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; |
@@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
377 | 351 | ||
378 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | 352 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( |
379 | struct dma_chan *chan, struct scatterlist *sgl, | 353 | struct dma_chan *chan, struct scatterlist *sgl, |
380 | unsigned int sg_len, enum dma_data_direction direction, | 354 | unsigned int sg_len, enum dma_transfer_direction direction, |
381 | unsigned long append) | 355 | unsigned long append) |
382 | { | 356 | { |
383 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 357 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
386 | struct scatterlist *sg; | 360 | struct scatterlist *sg; |
387 | int i, j; | 361 | int i, j; |
388 | u32 *pio; | 362 | u32 *pio; |
389 | static int idx; | 363 | int idx = append ? mxs_chan->desc_count : 0; |
390 | 364 | ||
391 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) | 365 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) |
392 | return NULL; | 366 | return NULL; |
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
417 | idx = 0; | 391 | idx = 0; |
418 | } | 392 | } |
419 | 393 | ||
420 | if (direction == DMA_NONE) { | 394 | if (direction == DMA_TRANS_NONE) { |
421 | ccw = &mxs_chan->ccw[idx++]; | 395 | ccw = &mxs_chan->ccw[idx++]; |
422 | pio = (u32 *) sgl; | 396 | pio = (u32 *) sgl; |
423 | 397 | ||
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
450 | ccw->bits |= CCW_CHAIN; | 424 | ccw->bits |= CCW_CHAIN; |
451 | ccw->bits |= CCW_HALT_ON_TERM; | 425 | ccw->bits |= CCW_HALT_ON_TERM; |
452 | ccw->bits |= CCW_TERM_FLUSH; | 426 | ccw->bits |= CCW_TERM_FLUSH; |
453 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | 427 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
454 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, | 428 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, |
455 | COMMAND); | 429 | COMMAND); |
456 | 430 | ||
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
462 | } | 436 | } |
463 | } | 437 | } |
464 | } | 438 | } |
439 | mxs_chan->desc_count = idx; | ||
465 | 440 | ||
466 | return &mxs_chan->desc; | 441 | return &mxs_chan->desc; |
467 | 442 | ||
@@ -472,7 +447,7 @@ err_out: | |||
472 | 447 | ||
473 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | 448 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( |
474 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 449 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
475 | size_t period_len, enum dma_data_direction direction) | 450 | size_t period_len, enum dma_transfer_direction direction) |
476 | { | 451 | { |
477 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 452 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
478 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 453 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
515 | ccw->bits |= CCW_IRQ; | 490 | ccw->bits |= CCW_IRQ; |
516 | ccw->bits |= CCW_HALT_ON_TERM; | 491 | ccw->bits |= CCW_HALT_ON_TERM; |
517 | ccw->bits |= CCW_TERM_FLUSH; | 492 | ccw->bits |= CCW_TERM_FLUSH; |
518 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | 493 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
519 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); | 494 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); |
520 | 495 | ||
521 | dma_addr += period_len; | 496 | dma_addr += period_len; |
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
523 | 498 | ||
524 | i++; | 499 | i++; |
525 | } | 500 | } |
501 | mxs_chan->desc_count = i; | ||
526 | 502 | ||
527 | return &mxs_chan->desc; | 503 | return &mxs_chan->desc; |
528 | 504 | ||
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
539 | 515 | ||
540 | switch (cmd) { | 516 | switch (cmd) { |
541 | case DMA_TERMINATE_ALL: | 517 | case DMA_TERMINATE_ALL: |
542 | mxs_dma_disable_chan(mxs_chan); | ||
543 | mxs_dma_reset_chan(mxs_chan); | 518 | mxs_dma_reset_chan(mxs_chan); |
519 | mxs_dma_disable_chan(mxs_chan); | ||
544 | break; | 520 | break; |
545 | case DMA_PAUSE: | 521 | case DMA_PAUSE: |
546 | mxs_dma_pause_chan(mxs_chan); | 522 | mxs_dma_pause_chan(mxs_chan); |
@@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | |||
580 | 556 | ||
581 | ret = clk_prepare_enable(mxs_dma->clk); | 557 | ret = clk_prepare_enable(mxs_dma->clk); |
582 | if (ret) | 558 | if (ret) |
583 | goto err_out; | 559 | return ret; |
584 | 560 | ||
585 | ret = mxs_reset_block(mxs_dma->base); | 561 | ret = mxs_reset_block(mxs_dma->base); |
586 | if (ret) | 562 | if (ret) |
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | |||
604 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, | 580 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, |
605 | mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); | 581 | mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); |
606 | 582 | ||
607 | clk_disable_unprepare(mxs_dma->clk); | ||
608 | |||
609 | return 0; | ||
610 | |||
611 | err_out: | 583 | err_out: |
584 | clk_disable_unprepare(mxs_dma->clk); | ||
612 | return ret; | 585 | return ret; |
613 | } | 586 | } |
614 | 587 | ||
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index a6d0e3dbed07..823f58179f9d 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Topcliff PCH DMA controller driver | 2 | * Topcliff PCH DMA controller driver |
3 | * Copyright (c) 2010 Intel Corporation | 3 | * Copyright (c) 2010 Intel Corporation |
4 | * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. | 4 | * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -99,7 +99,7 @@ struct pch_dma_desc { | |||
99 | struct pch_dma_chan { | 99 | struct pch_dma_chan { |
100 | struct dma_chan chan; | 100 | struct dma_chan chan; |
101 | void __iomem *membase; | 101 | void __iomem *membase; |
102 | enum dma_data_direction dir; | 102 | enum dma_transfer_direction dir; |
103 | struct tasklet_struct tasklet; | 103 | struct tasklet_struct tasklet; |
104 | unsigned long err_status; | 104 | unsigned long err_status; |
105 | 105 | ||
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | 224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << |
225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | 225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); |
226 | val &= mask_mode; | 226 | val &= mask_mode; |
227 | if (pd_chan->dir == DMA_TO_DEVICE) | 227 | if (pd_chan->dir == DMA_MEM_TO_DEV) |
228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
229 | DMA_CTL0_DIR_SHIFT_BITS); | 229 | DMA_CTL0_DIR_SHIFT_BITS); |
230 | else | 230 | else |
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | 242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << |
243 | (DMA_CTL0_BITS_PER_CH * ch)); | 243 | (DMA_CTL0_BITS_PER_CH * ch)); |
244 | val &= mask_mode; | 244 | val &= mask_mode; |
245 | if (pd_chan->dir == DMA_TO_DEVICE) | 245 | if (pd_chan->dir == DMA_MEM_TO_DEV) |
246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
247 | DMA_CTL0_DIR_SHIFT_BITS); | 247 | DMA_CTL0_DIR_SHIFT_BITS); |
248 | else | 248 | else |
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan) | |||
607 | 607 | ||
608 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | 608 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, |
609 | struct scatterlist *sgl, unsigned int sg_len, | 609 | struct scatterlist *sgl, unsigned int sg_len, |
610 | enum dma_data_direction direction, unsigned long flags) | 610 | enum dma_transfer_direction direction, unsigned long flags) |
611 | { | 611 | { |
612 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 612 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
613 | struct pch_dma_slave *pd_slave = chan->private; | 613 | struct pch_dma_slave *pd_slave = chan->private; |
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
623 | return NULL; | 623 | return NULL; |
624 | } | 624 | } |
625 | 625 | ||
626 | if (direction == DMA_FROM_DEVICE) | 626 | if (direction == DMA_DEV_TO_MEM) |
627 | reg = pd_slave->rx_reg; | 627 | reg = pd_slave->rx_reg; |
628 | else if (direction == DMA_TO_DEVICE) | 628 | else if (direction == DMA_MEM_TO_DEV) |
629 | reg = pd_slave->tx_reg; | 629 | reg = pd_slave->tx_reg; |
630 | else | 630 | else |
631 | return NULL; | 631 | return NULL; |
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) | |||
1018 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E | 1018 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E |
1019 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 | 1019 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 |
1020 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B | 1020 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B |
1021 | #define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 | ||
1022 | #define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 | ||
1021 | 1023 | ||
1022 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | 1024 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { |
1023 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, | 1025 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | |||
1030 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ | 1032 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ |
1031 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ | 1033 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ |
1032 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ | 1034 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ |
1035 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */ | ||
1036 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */ | ||
1033 | { 0, }, | 1037 | { 0, }, |
1034 | }; | 1038 | }; |
1035 | 1039 | ||
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void) | |||
1057 | module_init(pch_dma_init); | 1061 | module_init(pch_dma_init); |
1058 | module_exit(pch_dma_exit); | 1062 | module_exit(pch_dma_exit); |
1059 | 1063 | ||
1060 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " | 1064 | MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " |
1061 | "DMA controller driver"); | 1065 | "DMA controller driver"); |
1062 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 1066 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
1063 | MODULE_LICENSE("GPL v2"); | 1067 | MODULE_LICENSE("GPL v2"); |
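The pch_dma additions register the ML7831 functions of the LAPIS (formerly OKI) IOH family; as with the existing entries, the driver_data column carries the channel count that probe reads back. The new rows in context (IDs copied from the hunk):

#include <linux/pci.h>

#define PCI_DEVICE_ID_ML7831_DMA1_8CH   0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH   0x8815

DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        /* ...existing EG20T/ML7213/ML7223 entries... */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 },       /* UART */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 },       /* SPI */
        { 0, },
};

/* probe side: unsigned int nr_channels = id->driver_data; */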
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 09adcfcd953e..b8ec03ee8e22 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
350 | case DMA_SLAVE_CONFIG: | 350 | case DMA_SLAVE_CONFIG: |
351 | slave_config = (struct dma_slave_config *)arg; | 351 | slave_config = (struct dma_slave_config *)arg; |
352 | 352 | ||
353 | if (slave_config->direction == DMA_TO_DEVICE) { | 353 | if (slave_config->direction == DMA_MEM_TO_DEV) { |
354 | if (slave_config->dst_addr) | 354 | if (slave_config->dst_addr) |
355 | pch->fifo_addr = slave_config->dst_addr; | 355 | pch->fifo_addr = slave_config->dst_addr; |
356 | if (slave_config->dst_addr_width) | 356 | if (slave_config->dst_addr_width) |
357 | pch->burst_sz = __ffs(slave_config->dst_addr_width); | 357 | pch->burst_sz = __ffs(slave_config->dst_addr_width); |
358 | if (slave_config->dst_maxburst) | 358 | if (slave_config->dst_maxburst) |
359 | pch->burst_len = slave_config->dst_maxburst; | 359 | pch->burst_len = slave_config->dst_maxburst; |
360 | } else if (slave_config->direction == DMA_FROM_DEVICE) { | 360 | } else if (slave_config->direction == DMA_DEV_TO_MEM) { |
361 | if (slave_config->src_addr) | 361 | if (slave_config->src_addr) |
362 | pch->fifo_addr = slave_config->src_addr; | 362 | pch->fifo_addr = slave_config->src_addr; |
363 | if (slave_config->src_addr_width) | 363 | if (slave_config->src_addr_width) |
@@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | |||
621 | 621 | ||
622 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | 622 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( |
623 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, | 623 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, |
624 | size_t period_len, enum dma_data_direction direction) | 624 | size_t period_len, enum dma_transfer_direction direction) |
625 | { | 625 | { |
626 | struct dma_pl330_desc *desc; | 626 | struct dma_pl330_desc *desc; |
627 | struct dma_pl330_chan *pch = to_pchan(chan); | 627 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
636 | } | 636 | } |
637 | 637 | ||
638 | switch (direction) { | 638 | switch (direction) { |
639 | case DMA_TO_DEVICE: | 639 | case DMA_MEM_TO_DEV: |
640 | desc->rqcfg.src_inc = 1; | 640 | desc->rqcfg.src_inc = 1; |
641 | desc->rqcfg.dst_inc = 0; | 641 | desc->rqcfg.dst_inc = 0; |
642 | desc->req.rqtype = MEMTODEV; | 642 | desc->req.rqtype = MEMTODEV; |
643 | src = dma_addr; | 643 | src = dma_addr; |
644 | dst = pch->fifo_addr; | 644 | dst = pch->fifo_addr; |
645 | break; | 645 | break; |
646 | case DMA_FROM_DEVICE: | 646 | case DMA_DEV_TO_MEM: |
647 | desc->rqcfg.src_inc = 0; | 647 | desc->rqcfg.src_inc = 0; |
648 | desc->rqcfg.dst_inc = 1; | 648 | desc->rqcfg.dst_inc = 1; |
649 | desc->req.rqtype = DEVTOMEM; | 649 | desc->req.rqtype = DEVTOMEM; |
@@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
710 | 710 | ||
711 | static struct dma_async_tx_descriptor * | 711 | static struct dma_async_tx_descriptor * |
712 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 712 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
713 | unsigned int sg_len, enum dma_data_direction direction, | 713 | unsigned int sg_len, enum dma_transfer_direction direction, |
714 | unsigned long flg) | 714 | unsigned long flg) |
715 | { | 715 | { |
716 | struct dma_pl330_desc *first, *desc = NULL; | 716 | struct dma_pl330_desc *first, *desc = NULL; |
@@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
759 | else | 759 | else |
760 | list_add_tail(&desc->node, &first->node); | 760 | list_add_tail(&desc->node, &first->node); |
761 | 761 | ||
762 | if (direction == DMA_TO_DEVICE) { | 762 | if (direction == DMA_MEM_TO_DEV) { |
763 | desc->rqcfg.src_inc = 1; | 763 | desc->rqcfg.src_inc = 1; |
764 | desc->rqcfg.dst_inc = 0; | 764 | desc->rqcfg.dst_inc = 0; |
765 | desc->req.rqtype = MEMTODEV; | 765 | desc->req.rqtype = MEMTODEV; |
@@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
834 | 834 | ||
835 | amba_set_drvdata(adev, pdmac); | 835 | amba_set_drvdata(adev, pdmac); |
836 | 836 | ||
837 | #ifdef CONFIG_PM_RUNTIME | 837 | #ifndef CONFIG_PM_RUNTIME |
838 | /* to use the runtime PM helper functions */ | ||
839 | pm_runtime_enable(&adev->dev); | ||
840 | |||
841 | /* enable the power domain */ | ||
842 | if (pm_runtime_get_sync(&adev->dev)) { | ||
843 | dev_err(&adev->dev, "failed to get runtime pm\n"); | ||
844 | ret = -ENODEV; | ||
845 | goto probe_err1; | ||
846 | } | ||
847 | #else | ||
848 | /* enable dma clk */ | 838 | /* enable dma clk */ |
849 | clk_enable(pdmac->clk); | 839 | clk_enable(pdmac->clk); |
850 | #endif | 840 | #endif |
@@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev) | |||
977 | res = &adev->res; | 967 | res = &adev->res; |
978 | release_mem_region(res->start, resource_size(res)); | 968 | release_mem_region(res->start, resource_size(res)); |
979 | 969 | ||
980 | #ifdef CONFIG_PM_RUNTIME | 970 | #ifndef CONFIG_PM_RUNTIME |
981 | pm_runtime_put(&adev->dev); | ||
982 | pm_runtime_disable(&adev->dev); | ||
983 | #else | ||
984 | clk_disable(pdmac->clk); | 971 | clk_disable(pdmac->clk); |
985 | #endif | 972 | #endif |
986 | 973 | ||
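These pl330 hunks are part of the tree-wide switch from enum dma_data_direction to the new enum dma_transfer_direction. A sketch of the old-to-new mapping, assuming the dmaengine.h definitions from this series (the helper name is hypothetical, not part of the patch):

	static enum dma_transfer_direction to_transfer_dir(enum dma_data_direction dir)
	{
		switch (dir) {
		case DMA_TO_DEVICE:		/* memory -> device FIFO */
			return DMA_MEM_TO_DEV;
		case DMA_FROM_DEVICE:		/* device FIFO -> memory */
			return DMA_DEV_TO_MEM;
		case DMA_BIDIRECTIONAL:		/* memcpy: both sides increment */
			return DMA_MEM_TO_MEM;
		default:
			return DMA_TRANS_NONE;	/* added by this series */
		}
	}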
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 81809c2b46ab..54043cd831c8 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
28 | #include <linux/pm_runtime.h> | 27 | #include <linux/pm_runtime.h> |
29 | #include <linux/sh_dma.h> | 28 | #include <linux/sh_dma.h> |
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices); | |||
57 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; | 56 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; |
58 | 57 | ||
59 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | 58 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
59 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); | ||
60 | |||
61 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
62 | { | ||
63 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
64 | |||
65 | __raw_writel(data, shdev->chan_reg + | ||
66 | shdev->pdata->channel[sh_dc->id].chclr_offset); | ||
67 | } | ||
60 | 68 | ||
61 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 69 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
62 | { | 70 | { |
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) | |||
129 | 137 | ||
130 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); | 138 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); |
131 | 139 | ||
140 | if (shdev->pdata->chclr_present) { | ||
141 | int i; | ||
142 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
143 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
144 | if (sh_chan) | ||
145 | chclr_write(sh_chan, 0); | ||
146 | } | ||
147 | } | ||
148 | |||
132 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); | 149 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); |
133 | 150 | ||
134 | dmaor = dmaor_read(shdev); | 151 | dmaor = dmaor_read(shdev); |
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) | |||
139 | dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); | 156 | dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); |
140 | return -EIO; | 157 | return -EIO; |
141 | } | 158 | } |
159 | if (shdev->pdata->dmaor_init & ~dmaor) | ||
160 | dev_warn(shdev->common.dev, | ||
161 | "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", | ||
162 | dmaor, shdev->pdata->dmaor_init); | ||
142 | return 0; | 163 | return 0; |
143 | } | 164 | } |
144 | 165 | ||
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | |||
259 | return 0; | 280 | return 0; |
260 | } | 281 | } |
261 | 282 | ||
262 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); | ||
263 | |||
264 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | 283 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) |
265 | { | 284 | { |
266 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; | 285 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; |
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | |||
340 | sh_chan_xfer_ld_queue(sh_chan); | 359 | sh_chan_xfer_ld_queue(sh_chan); |
341 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; | 360 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; |
342 | } | 361 | } |
362 | } else { | ||
363 | sh_chan->pm_state = DMAE_PM_PENDING; | ||
343 | } | 364 | } |
344 | 365 | ||
345 | spin_unlock_irq(&sh_chan->desc_lock); | 366 | spin_unlock_irq(&sh_chan->desc_lock); |
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
479 | * @sh_chan: DMA channel | 500 | * @sh_chan: DMA channel |
480 | * @flags: DMA transfer flags | 501 | * @flags: DMA transfer flags |
481 | * @dest: destination DMA address, incremented when direction equals | 502 | * @dest: destination DMA address, incremented when direction equals |
482 | * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL | 503 | * DMA_DEV_TO_MEM |
483 | * @src: source DMA address, incremented when direction equals | 504 | * @src: source DMA address, incremented when direction equals |
484 | * DMA_TO_DEVICE or DMA_BIDIRECTIONAL | 505 | * DMA_MEM_TO_DEV |
485 | * @len: DMA transfer length | 506 | * @len: DMA transfer length |
486 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | 507 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY |
487 | * @direction: needed for slave DMA to decide which address to keep constant, | 508 | * @direction: needed for slave DMA to decide which address to keep constant, |
488 | * equals DMA_BIDIRECTIONAL for MEMCPY | 509 | * equals DMA_MEM_TO_MEM for MEMCPY |
489 | * Returns 0 or an error | 510 | * Returns 0 or an error |
490 | * Locks: called with desc_lock held | 511 | * Locks: called with desc_lock held |
491 | */ | 512 | */ |
492 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | 513 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, |
493 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | 514 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, |
494 | struct sh_desc **first, enum dma_data_direction direction) | 515 | struct sh_desc **first, enum dma_transfer_direction direction) |
495 | { | 516 | { |
496 | struct sh_desc *new; | 517 | struct sh_desc *new; |
497 | size_t copy_size; | 518 | size_t copy_size; |
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | |||
531 | new->direction = direction; | 552 | new->direction = direction; |
532 | 553 | ||
533 | *len -= copy_size; | 554 | *len -= copy_size; |
534 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) | 555 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) |
535 | *src += copy_size; | 556 | *src += copy_size; |
536 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) | 557 | if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) |
537 | *dest += copy_size; | 558 | *dest += copy_size; |
538 | 559 | ||
539 | return new; | 560 | return new; |
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | |||
546 | * converted to scatter-gather to guarantee consistent locking and a correct | 567 | * converted to scatter-gather to guarantee consistent locking and a correct |
547 | * list manipulation. For slave DMA direction carries the usual meaning, and, | 568 | * list manipulation. For slave DMA direction carries the usual meaning, and, |
548 | * logically, the SG list is RAM and the addr variable contains slave address, | 569 | * logically, the SG list is RAM and the addr variable contains slave address, |
549 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL | 570 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM |
550 | * and the SG list contains only one element and points at the source buffer. | 571 | * and the SG list contains only one element and points at the source buffer. |
551 | */ | 572 | */ |
552 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | 573 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, |
553 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | 574 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, |
554 | enum dma_data_direction direction, unsigned long flags) | 575 | enum dma_transfer_direction direction, unsigned long flags) |
555 | { | 576 | { |
556 | struct scatterlist *sg; | 577 | struct scatterlist *sg; |
557 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | 578 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; |
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c | |||
592 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", | 613 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", |
593 | i, sg, len, (unsigned long long)sg_addr); | 614 | i, sg, len, (unsigned long long)sg_addr); |
594 | 615 | ||
595 | if (direction == DMA_FROM_DEVICE) | 616 | if (direction == DMA_DEV_TO_MEM) |
596 | new = sh_dmae_add_desc(sh_chan, flags, | 617 | new = sh_dmae_add_desc(sh_chan, flags, |
597 | &sg_addr, addr, &len, &first, | 618 | &sg_addr, addr, &len, &first, |
598 | direction); | 619 | direction); |
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
646 | sg_dma_address(&sg) = dma_src; | 667 | sg_dma_address(&sg) = dma_src; |
647 | sg_dma_len(&sg) = len; | 668 | sg_dma_len(&sg) = len; |
648 | 669 | ||
649 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, | 670 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, |
650 | flags); | 671 | flags); |
651 | } | 672 | } |
652 | 673 | ||
653 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | 674 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( |
654 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | 675 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
655 | enum dma_data_direction direction, unsigned long flags) | 676 | enum dma_transfer_direction direction, unsigned long flags) |
656 | { | 677 | { |
657 | struct sh_dmae_slave *param; | 678 | struct sh_dmae_slave *param; |
658 | struct sh_dmae_chan *sh_chan; | 679 | struct sh_dmae_chan *sh_chan; |
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data) | |||
996 | spin_lock_irq(&sh_chan->desc_lock); | 1017 | spin_lock_irq(&sh_chan->desc_lock); |
997 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 1018 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
998 | if (desc->mark == DESC_SUBMITTED && | 1019 | if (desc->mark == DESC_SUBMITTED && |
999 | ((desc->direction == DMA_FROM_DEVICE && | 1020 | ((desc->direction == DMA_DEV_TO_MEM && |
1000 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | 1021 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || |
1001 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | 1022 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { |
1002 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | 1023 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", |
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1225 | 1246 | ||
1226 | platform_set_drvdata(pdev, shdev); | 1247 | platform_set_drvdata(pdev, shdev); |
1227 | 1248 | ||
1249 | shdev->common.dev = &pdev->dev; | ||
1250 | |||
1228 | pm_runtime_enable(&pdev->dev); | 1251 | pm_runtime_enable(&pdev->dev); |
1229 | pm_runtime_get_sync(&pdev->dev); | 1252 | pm_runtime_get_sync(&pdev->dev); |
1230 | 1253 | ||
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1254 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | 1277 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; |
1255 | shdev->common.device_control = sh_dmae_control; | 1278 | shdev->common.device_control = sh_dmae_control; |
1256 | 1279 | ||
1257 | shdev->common.dev = &pdev->dev; | ||
1258 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | 1280 | /* Default transfer size of 32 bytes requires 32-byte alignment */ |
1259 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; | 1281 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; |
1260 | 1282 | ||
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev) | |||
1435 | #ifdef CONFIG_PM | 1457 | #ifdef CONFIG_PM |
1436 | static int sh_dmae_suspend(struct device *dev) | 1458 | static int sh_dmae_suspend(struct device *dev) |
1437 | { | 1459 | { |
1438 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
1439 | int i; | ||
1440 | |||
1441 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
1442 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
1443 | if (sh_chan->descs_allocated) | ||
1444 | sh_chan->pm_error = pm_runtime_put_sync(dev); | ||
1445 | } | ||
1446 | |||
1447 | return 0; | 1460 | return 0; |
1448 | } | 1461 | } |
1449 | 1462 | ||
1450 | static int sh_dmae_resume(struct device *dev) | 1463 | static int sh_dmae_resume(struct device *dev) |
1451 | { | 1464 | { |
1452 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | 1465 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); |
1453 | int i; | 1466 | int i, ret; |
1467 | |||
1468 | ret = sh_dmae_rst(shdev); | ||
1469 | if (ret < 0) | ||
1470 | dev_err(dev, "Failed to reset!\n"); | ||
1454 | 1471 | ||
1455 | for (i = 0; i < shdev->pdata->channel_num; i++) { | 1472 | for (i = 0; i < shdev->pdata->channel_num; i++) { |
1456 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 1473 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev) | |||
1459 | if (!sh_chan->descs_allocated) | 1476 | if (!sh_chan->descs_allocated) |
1460 | continue; | 1477 | continue; |
1461 | 1478 | ||
1462 | if (!sh_chan->pm_error) | ||
1463 | pm_runtime_get_sync(dev); | ||
1464 | |||
1465 | if (param) { | 1479 | if (param) { |
1466 | const struct sh_dmae_slave_config *cfg = param->config; | 1480 | const struct sh_dmae_slave_config *cfg = param->config; |
1467 | dmae_set_dmars(sh_chan, cfg->mid_rid); | 1481 | dmae_set_dmars(sh_chan, cfg->mid_rid); |
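The CHCLR loop added to sh_dmae_rst() only runs when the platform data describes the channel-clear registers. A hedged sketch of such platform data (field names follow the sh_dma.h additions in this series; the offsets are illustrative, not SH7372's real values):

	static const struct sh_dmae_channel chan_cfg[] = {
		{ .offset = 0x00, .chclr_offset = 0x220 + 0 * 4 },	/* illustrative */
		{ .offset = 0x10, .chclr_offset = 0x220 + 1 * 4 },
	};

	static struct sh_dmae_pdata dma_pdata = {
		.channel	= chan_cfg,
		.channel_num	= ARRAY_SIZE(chan_cfg),
		.chclr_present	= 1,	/* enables the clearing loop on reset */
	};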
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c new file mode 100644 index 000000000000..2333810d1688 --- /dev/null +++ b/drivers/dma/sirf-dma.c | |||
@@ -0,0 +1,707 @@ | |||
1 | /* | ||
2 | * DMA controller driver for CSR SiRFprimaII | ||
3 | * | ||
4 | * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. | ||
5 | * | ||
6 | * Licensed under GPLv2 or later. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/dmaengine.h> | ||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/of_irq.h> | ||
16 | #include <linux/of_address.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/of_platform.h> | ||
19 | #include <linux/sirfsoc_dma.h> | ||
20 | |||
21 | #define SIRFSOC_DMA_DESCRIPTORS 16 | ||
22 | #define SIRFSOC_DMA_CHANNELS 16 | ||
23 | |||
24 | #define SIRFSOC_DMA_CH_ADDR 0x00 | ||
25 | #define SIRFSOC_DMA_CH_XLEN 0x04 | ||
26 | #define SIRFSOC_DMA_CH_YLEN 0x08 | ||
27 | #define SIRFSOC_DMA_CH_CTRL 0x0C | ||
28 | |||
29 | #define SIRFSOC_DMA_WIDTH_0 0x100 | ||
30 | #define SIRFSOC_DMA_CH_VALID 0x140 | ||
31 | #define SIRFSOC_DMA_CH_INT 0x144 | ||
32 | #define SIRFSOC_DMA_INT_EN 0x148 | ||
33 | #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 | ||
34 | |||
35 | #define SIRFSOC_DMA_MODE_CTRL_BIT 4 | ||
36 | #define SIRFSOC_DMA_DIR_CTRL_BIT 5 | ||
37 | |||
38 | /* the xlen and dma_width registers are on a 4-byte boundary */ | ||
39 | #define SIRFSOC_DMA_WORD_LEN 4 | ||
40 | |||
41 | struct sirfsoc_dma_desc { | ||
42 | struct dma_async_tx_descriptor desc; | ||
43 | struct list_head node; | ||
44 | |||
45 | /* SiRFprimaII 2D-DMA parameters */ | ||
46 | |||
47 | int xlen; /* DMA xlen */ | ||
48 | int ylen; /* DMA ylen */ | ||
49 | int width; /* DMA width */ | ||
50 | int dir; | ||
51 | bool cyclic; /* is loop DMA? */ | ||
52 | u32 addr; /* DMA buffer address */ | ||
53 | }; | ||
54 | |||
55 | struct sirfsoc_dma_chan { | ||
56 | struct dma_chan chan; | ||
57 | struct list_head free; | ||
58 | struct list_head prepared; | ||
59 | struct list_head queued; | ||
60 | struct list_head active; | ||
61 | struct list_head completed; | ||
62 | dma_cookie_t completed_cookie; | ||
63 | unsigned long happened_cyclic; | ||
64 | unsigned long completed_cyclic; | ||
65 | |||
66 | /* Lock for this structure */ | ||
67 | spinlock_t lock; | ||
68 | |||
69 | int mode; | ||
70 | }; | ||
71 | |||
72 | struct sirfsoc_dma { | ||
73 | struct dma_device dma; | ||
74 | struct tasklet_struct tasklet; | ||
75 | struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; | ||
76 | void __iomem *base; | ||
77 | int irq; | ||
78 | }; | ||
79 | |||
80 | #define DRV_NAME "sirfsoc_dma" | ||
81 | |||
82 | /* Convert struct dma_chan to struct sirfsoc_dma_chan */ | ||
83 | static inline | ||
84 | struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) | ||
85 | { | ||
86 | return container_of(c, struct sirfsoc_dma_chan, chan); | ||
87 | } | ||
88 | |||
89 | /* Convert struct dma_chan to struct sirfsoc_dma */ | ||
90 | static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c) | ||
91 | { | ||
92 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c); | ||
93 | return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); | ||
94 | } | ||
95 | |||
96 | /* Execute all queued DMA descriptors */ | ||
97 | static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) | ||
98 | { | ||
99 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
100 | int cid = schan->chan.chan_id; | ||
101 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
102 | |||
103 | /* | ||
104 | * the lock is already held by the callers of this function, so we | ||
105 | * don't take it again | ||
106 | */ | ||
107 | |||
108 | sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, | ||
109 | node); | ||
110 | /* Move the first queued descriptor to active list */ | ||
111 | list_move_tail(&sdesc->node, &schan->active); | ||
112 | |||
113 | /* Start the DMA transfer */ | ||
114 | writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + | ||
115 | cid * 4); | ||
116 | writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) | | ||
117 | (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), | ||
118 | sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); | ||
119 | writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 + | ||
120 | SIRFSOC_DMA_CH_XLEN); | ||
121 | writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 + | ||
122 | SIRFSOC_DMA_CH_YLEN); | ||
123 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) | | ||
124 | (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
125 | |||
126 | /* | ||
127 | * writel has an implicit memory write barrier to make sure data is | ||
128 | * flushed into memory before starting DMA | ||
129 | */ | ||
130 | writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); | ||
131 | |||
132 | if (sdesc->cyclic) { | ||
133 | writel((1 << cid) | 1 << (cid + 16) | | ||
134 | readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL), | ||
135 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
136 | schan->happened_cyclic = schan->completed_cyclic = 0; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | /* Interrupt handler */ | ||
141 | static irqreturn_t sirfsoc_dma_irq(int irq, void *data) | ||
142 | { | ||
143 | struct sirfsoc_dma *sdma = data; | ||
144 | struct sirfsoc_dma_chan *schan; | ||
145 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
146 | u32 is; | ||
147 | int ch; | ||
148 | |||
149 | is = readl(sdma->base + SIRFSOC_DMA_CH_INT); | ||
150 | while ((ch = fls(is) - 1) >= 0) { | ||
151 | is &= ~(1 << ch); | ||
152 | writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT); | ||
153 | schan = &sdma->channels[ch]; | ||
154 | |||
155 | spin_lock(&schan->lock); | ||
156 | |||
157 | sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, | ||
158 | node); | ||
159 | if (!sdesc->cyclic) { | ||
160 | /* Execute queued descriptors */ | ||
161 | list_splice_tail_init(&schan->active, &schan->completed); | ||
162 | if (!list_empty(&schan->queued)) | ||
163 | sirfsoc_dma_execute(schan); | ||
164 | } else | ||
165 | schan->happened_cyclic++; | ||
166 | |||
167 | spin_unlock(&schan->lock); | ||
168 | } | ||
169 | |||
170 | /* Schedule tasklet */ | ||
171 | tasklet_schedule(&sdma->tasklet); | ||
172 | |||
173 | return IRQ_HANDLED; | ||
174 | } | ||
175 | |||
176 | /* process completed descriptors */ | ||
177 | static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) | ||
178 | { | ||
179 | dma_cookie_t last_cookie = 0; | ||
180 | struct sirfsoc_dma_chan *schan; | ||
181 | struct sirfsoc_dma_desc *sdesc; | ||
182 | struct dma_async_tx_descriptor *desc; | ||
183 | unsigned long flags; | ||
184 | unsigned long happened_cyclic; | ||
185 | LIST_HEAD(list); | ||
186 | int i; | ||
187 | |||
188 | for (i = 0; i < sdma->dma.chancnt; i++) { | ||
189 | schan = &sdma->channels[i]; | ||
190 | |||
191 | /* Get all completed descriptors */ | ||
192 | spin_lock_irqsave(&schan->lock, flags); | ||
193 | if (!list_empty(&schan->completed)) { | ||
194 | list_splice_tail_init(&schan->completed, &list); | ||
195 | spin_unlock_irqrestore(&schan->lock, flags); | ||
196 | |||
197 | /* Execute callbacks and run dependencies */ | ||
198 | list_for_each_entry(sdesc, &list, node) { | ||
199 | desc = &sdesc->desc; | ||
200 | |||
201 | if (desc->callback) | ||
202 | desc->callback(desc->callback_param); | ||
203 | |||
204 | last_cookie = desc->cookie; | ||
205 | dma_run_dependencies(desc); | ||
206 | } | ||
207 | |||
208 | /* Free descriptors */ | ||
209 | spin_lock_irqsave(&schan->lock, flags); | ||
210 | list_splice_tail_init(&list, &schan->free); | ||
211 | schan->completed_cookie = last_cookie; | ||
212 | spin_unlock_irqrestore(&schan->lock, flags); | ||
213 | } else { | ||
214 | /* for cyclic channel, desc is always in active list */ | ||
215 | sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, | ||
216 | node); | ||
217 | |||
218 | if (!sdesc || !sdesc->cyclic) { | ||
219 | /* without active cyclic DMA */ | ||
220 | spin_unlock_irqrestore(&schan->lock, flags); | ||
221 | continue; | ||
222 | } | ||
223 | |||
224 | /* cyclic DMA */ | ||
225 | happened_cyclic = schan->happened_cyclic; | ||
226 | spin_unlock_irqrestore(&schan->lock, flags); | ||
227 | |||
228 | desc = &sdesc->desc; | ||
229 | while (happened_cyclic != schan->completed_cyclic) { | ||
230 | if (desc->callback) | ||
231 | desc->callback(desc->callback_param); | ||
232 | schan->completed_cyclic++; | ||
233 | } | ||
234 | } | ||
235 | } | ||
236 | } | ||
237 | |||
238 | /* DMA Tasklet */ | ||
239 | static void sirfsoc_dma_tasklet(unsigned long data) | ||
240 | { | ||
241 | struct sirfsoc_dma *sdma = (void *)data; | ||
242 | |||
243 | sirfsoc_dma_process_completed(sdma); | ||
244 | } | ||
245 | |||
246 | /* Submit descriptor to hardware */ | ||
247 | static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
248 | { | ||
249 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan); | ||
250 | struct sirfsoc_dma_desc *sdesc; | ||
251 | unsigned long flags; | ||
252 | dma_cookie_t cookie; | ||
253 | |||
254 | sdesc = container_of(txd, struct sirfsoc_dma_desc, desc); | ||
255 | |||
256 | spin_lock_irqsave(&schan->lock, flags); | ||
257 | |||
258 | /* Move descriptor to queue */ | ||
259 | list_move_tail(&sdesc->node, &schan->queued); | ||
260 | |||
261 | /* Update cookie */ | ||
262 | cookie = schan->chan.cookie + 1; | ||
263 | if (cookie <= 0) | ||
264 | cookie = 1; | ||
265 | |||
266 | schan->chan.cookie = cookie; | ||
267 | sdesc->desc.cookie = cookie; | ||
268 | |||
269 | spin_unlock_irqrestore(&schan->lock, flags); | ||
270 | |||
271 | return cookie; | ||
272 | } | ||
273 | |||
274 | static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, | ||
275 | struct dma_slave_config *config) | ||
276 | { | ||
277 | unsigned long flags; | ||
278 | |||
279 | if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || | ||
280 | (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
281 | return -EINVAL; | ||
282 | |||
283 | spin_lock_irqsave(&schan->lock, flags); | ||
284 | schan->mode = (config->src_maxburst == 4 ? 1 : 0); | ||
285 | spin_unlock_irqrestore(&schan->lock, flags); | ||
286 | |||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) | ||
291 | { | ||
292 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
293 | int cid = schan->chan.chan_id; | ||
294 | unsigned long flags; | ||
295 | |||
296 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & | ||
297 | ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
298 | writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); | ||
299 | |||
300 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) | ||
301 | & ~((1 << cid) | 1 << (cid + 16)), | ||
302 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
303 | |||
304 | spin_lock_irqsave(&schan->lock, flags); | ||
305 | list_splice_tail_init(&schan->active, &schan->free); | ||
306 | list_splice_tail_init(&schan->queued, &schan->free); | ||
307 | spin_unlock_irqrestore(&schan->lock, flags); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
313 | unsigned long arg) | ||
314 | { | ||
315 | struct dma_slave_config *config; | ||
316 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
317 | |||
318 | switch (cmd) { | ||
319 | case DMA_TERMINATE_ALL: | ||
320 | return sirfsoc_dma_terminate_all(schan); | ||
321 | case DMA_SLAVE_CONFIG: | ||
322 | config = (struct dma_slave_config *)arg; | ||
323 | return sirfsoc_dma_slave_config(schan, config); | ||
324 | |||
325 | default: | ||
326 | break; | ||
327 | } | ||
328 | |||
329 | return -ENOSYS; | ||
330 | } | ||
331 | |||
332 | /* Alloc channel resources */ | ||
333 | static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) | ||
334 | { | ||
335 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
336 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
337 | struct sirfsoc_dma_desc *sdesc; | ||
338 | unsigned long flags; | ||
339 | LIST_HEAD(descs); | ||
340 | int i; | ||
341 | |||
342 | /* Alloc descriptors for this channel */ | ||
343 | for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { | ||
344 | sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); | ||
345 | if (!sdesc) { | ||
346 | dev_notice(sdma->dma.dev, "Memory allocation error. " | ||
347 | "Allocated only %u descriptors\n", i); | ||
348 | break; | ||
349 | } | ||
350 | |||
351 | dma_async_tx_descriptor_init(&sdesc->desc, chan); | ||
352 | sdesc->desc.flags = DMA_CTRL_ACK; | ||
353 | sdesc->desc.tx_submit = sirfsoc_dma_tx_submit; | ||
354 | |||
355 | list_add_tail(&sdesc->node, &descs); | ||
356 | } | ||
357 | |||
358 | /* Return error only if no descriptors were allocated */ | ||
359 | if (i == 0) | ||
360 | return -ENOMEM; | ||
361 | |||
362 | spin_lock_irqsave(&schan->lock, flags); | ||
363 | |||
364 | list_splice_tail_init(&descs, &schan->free); | ||
365 | spin_unlock_irqrestore(&schan->lock, flags); | ||
366 | |||
367 | return i; | ||
368 | } | ||
369 | |||
370 | /* Free channel resources */ | ||
371 | static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) | ||
372 | { | ||
373 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
374 | struct sirfsoc_dma_desc *sdesc, *tmp; | ||
375 | unsigned long flags; | ||
376 | LIST_HEAD(descs); | ||
377 | |||
378 | spin_lock_irqsave(&schan->lock, flags); | ||
379 | |||
380 | /* Channel must be idle */ | ||
381 | BUG_ON(!list_empty(&schan->prepared)); | ||
382 | BUG_ON(!list_empty(&schan->queued)); | ||
383 | BUG_ON(!list_empty(&schan->active)); | ||
384 | BUG_ON(!list_empty(&schan->completed)); | ||
385 | |||
386 | /* Move data */ | ||
387 | list_splice_tail_init(&schan->free, &descs); | ||
388 | |||
389 | spin_unlock_irqrestore(&schan->lock, flags); | ||
390 | |||
391 | /* Free descriptors */ | ||
392 | list_for_each_entry_safe(sdesc, tmp, &descs, node) | ||
393 | kfree(sdesc); | ||
394 | } | ||
395 | |||
396 | /* Send pending descriptor to hardware */ | ||
397 | static void sirfsoc_dma_issue_pending(struct dma_chan *chan) | ||
398 | { | ||
399 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
400 | unsigned long flags; | ||
401 | |||
402 | spin_lock_irqsave(&schan->lock, flags); | ||
403 | |||
404 | if (list_empty(&schan->active) && !list_empty(&schan->queued)) | ||
405 | sirfsoc_dma_execute(schan); | ||
406 | |||
407 | spin_unlock_irqrestore(&schan->lock, flags); | ||
408 | } | ||
409 | |||
410 | /* Check request completion status */ | ||
411 | static enum dma_status | ||
412 | sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | ||
413 | struct dma_tx_state *txstate) | ||
414 | { | ||
415 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
416 | unsigned long flags; | ||
417 | dma_cookie_t last_used; | ||
418 | dma_cookie_t last_complete; | ||
419 | |||
420 | spin_lock_irqsave(&schan->lock, flags); | ||
421 | last_used = schan->chan.cookie; | ||
422 | last_complete = schan->completed_cookie; | ||
423 | spin_unlock_irqrestore(&schan->lock, flags); | ||
424 | |||
425 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
426 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
427 | } | ||
428 | |||
429 | static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( | ||
430 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
431 | unsigned long flags) | ||
432 | { | ||
433 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
434 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
435 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
436 | unsigned long iflags; | ||
437 | int ret; | ||
438 | |||
439 | if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) { | ||
440 | ret = -EINVAL; | ||
441 | goto err_dir; | ||
442 | } | ||
443 | |||
444 | /* Get free descriptor */ | ||
445 | spin_lock_irqsave(&schan->lock, iflags); | ||
446 | if (!list_empty(&schan->free)) { | ||
447 | sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, | ||
448 | node); | ||
449 | list_del(&sdesc->node); | ||
450 | } | ||
451 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
452 | |||
453 | if (!sdesc) { | ||
454 | /* try to free completed descriptors */ | ||
455 | sirfsoc_dma_process_completed(sdma); | ||
456 | ret = 0; | ||
457 | goto no_desc; | ||
458 | } | ||
459 | |||
460 | /* Place descriptor in prepared list */ | ||
461 | spin_lock_irqsave(&schan->lock, iflags); | ||
462 | |||
463 | /* | ||
464 | * Number of chunks in a frame can only be 1 for prima2 | ||
465 | * and ylen (number of frames - 1) must be at least 0 | ||
466 | */ | ||
467 | if ((xt->frame_size == 1) && (xt->numf > 0)) { | ||
468 | sdesc->cyclic = 0; | ||
469 | sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN; | ||
470 | sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) / | ||
471 | SIRFSOC_DMA_WORD_LEN; | ||
472 | sdesc->ylen = xt->numf - 1; | ||
473 | if (xt->dir == DMA_MEM_TO_DEV) { | ||
474 | sdesc->addr = xt->src_start; | ||
475 | sdesc->dir = 1; | ||
476 | } else { | ||
477 | sdesc->addr = xt->dst_start; | ||
478 | sdesc->dir = 0; | ||
479 | } | ||
480 | |||
481 | list_add_tail(&sdesc->node, &schan->prepared); | ||
482 | } else { | ||
483 | pr_err("sirfsoc DMA Invalid xfer\n"); | ||
484 | ret = -EINVAL; | ||
485 | goto err_xfer; | ||
486 | } | ||
487 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
488 | |||
489 | return &sdesc->desc; | ||
490 | err_xfer: | ||
491 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
492 | no_desc: | ||
493 | err_dir: | ||
494 | return ERR_PTR(ret); | ||
495 | } | ||
496 | |||
497 | static struct dma_async_tx_descriptor * | ||
498 | sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, | ||
499 | size_t buf_len, size_t period_len, | ||
500 | enum dma_transfer_direction direction) | ||
501 | { | ||
502 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
503 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
504 | unsigned long iflags; | ||
505 | |||
506 | /* | ||
507 | * we only support cyclic transfers with two periods. | ||
508 | * If the X-length is set to 0, it would be the loop mode. | ||
509 | * The DMA address keeps increasing until reaching the end of a loop | ||
510 | * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then | ||
511 | * the DMA address goes back to the beginning of this area. | ||
512 | * In loop mode, the DMA data region is divided into two parts, BUFA | ||
513 | * and BUFB. The DMA controller generates interrupts twice in each | ||
514 | * loop: when the DMA address reaches the end of BUFA and again at | ||
515 | * the end of BUFB. | ||
516 | */ | ||
517 | if (buf_len != 2 * period_len) | ||
518 | return ERR_PTR(-EINVAL); | ||
519 | |||
520 | /* Get free descriptor */ | ||
521 | spin_lock_irqsave(&schan->lock, iflags); | ||
522 | if (!list_empty(&schan->free)) { | ||
523 | sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, | ||
524 | node); | ||
525 | list_del(&sdesc->node); | ||
526 | } | ||
527 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
528 | |||
529 | if (!sdesc) | ||
530 | return NULL; | ||
531 | |||
532 | /* Place descriptor in prepared list */ | ||
533 | spin_lock_irqsave(&schan->lock, iflags); | ||
534 | sdesc->addr = addr; | ||
535 | sdesc->cyclic = 1; | ||
536 | sdesc->xlen = 0; | ||
537 | sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1; | ||
538 | sdesc->width = 1; | ||
539 | list_add_tail(&sdesc->node, &schan->prepared); | ||
540 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
541 | |||
542 | return &sdesc->desc; | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * The DMA controller consists of 16 independent DMA channels. | ||
547 | * Each channel is allocated to a different function | ||
548 | */ | ||
549 | bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) | ||
550 | { | ||
551 | unsigned int ch_nr = (unsigned int) chan_id; | ||
552 | |||
553 | if (ch_nr == chan->chan_id + | ||
554 | chan->device->dev_id * SIRFSOC_DMA_CHANNELS) | ||
555 | return true; | ||
556 | |||
557 | return false; | ||
558 | } | ||
559 | EXPORT_SYMBOL(sirfsoc_dma_filter_id); | ||
560 | |||
561 | static int __devinit sirfsoc_dma_probe(struct platform_device *op) | ||
562 | { | ||
563 | struct device_node *dn = op->dev.of_node; | ||
564 | struct device *dev = &op->dev; | ||
565 | struct dma_device *dma; | ||
566 | struct sirfsoc_dma *sdma; | ||
567 | struct sirfsoc_dma_chan *schan; | ||
568 | struct resource res; | ||
569 | ulong regs_start, regs_size; | ||
570 | u32 id; | ||
571 | int ret, i; | ||
572 | |||
573 | sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); | ||
574 | if (!sdma) { | ||
575 | dev_err(dev, "Memory exhausted!\n"); | ||
576 | return -ENOMEM; | ||
577 | } | ||
578 | |||
579 | if (of_property_read_u32(dn, "cell-index", &id)) { | ||
580 | dev_err(dev, "Fail to get DMAC index\n"); | ||
581 | ret = -ENODEV; | ||
582 | goto free_mem; | ||
583 | } | ||
584 | |||
585 | sdma->irq = irq_of_parse_and_map(dn, 0); | ||
586 | if (sdma->irq == NO_IRQ) { | ||
587 | dev_err(dev, "Error mapping IRQ!\n"); | ||
588 | ret = -EINVAL; | ||
589 | goto free_mem; | ||
590 | } | ||
591 | |||
592 | ret = of_address_to_resource(dn, 0, &res); | ||
593 | if (ret) { | ||
594 | dev_err(dev, "Error parsing memory region!\n"); | ||
595 | goto free_mem; | ||
596 | } | ||
597 | |||
598 | regs_start = res.start; | ||
599 | regs_size = resource_size(&res); | ||
600 | |||
601 | sdma->base = devm_ioremap(dev, regs_start, regs_size); | ||
602 | if (!sdma->base) { | ||
603 | dev_err(dev, "Error mapping memory region!\n"); | ||
604 | ret = -ENOMEM; | ||
605 | goto irq_dispose; | ||
606 | } | ||
607 | |||
608 | ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, | ||
609 | sdma); | ||
610 | if (ret) { | ||
611 | dev_err(dev, "Error requesting IRQ!\n"); | ||
612 | ret = -EINVAL; | ||
613 | goto unmap_mem; | ||
614 | } | ||
615 | |||
616 | dma = &sdma->dma; | ||
617 | dma->dev = dev; | ||
618 | dma->chancnt = SIRFSOC_DMA_CHANNELS; | ||
619 | |||
620 | dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; | ||
621 | dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; | ||
622 | dma->device_issue_pending = sirfsoc_dma_issue_pending; | ||
623 | dma->device_control = sirfsoc_dma_control; | ||
624 | dma->device_tx_status = sirfsoc_dma_tx_status; | ||
625 | dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; | ||
626 | dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; | ||
627 | |||
628 | INIT_LIST_HEAD(&dma->channels); | ||
629 | dma_cap_set(DMA_SLAVE, dma->cap_mask); | ||
630 | dma_cap_set(DMA_CYCLIC, dma->cap_mask); | ||
631 | dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); | ||
632 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | ||
633 | |||
634 | for (i = 0; i < dma->chancnt; i++) { | ||
635 | schan = &sdma->channels[i]; | ||
636 | |||
637 | schan->chan.device = dma; | ||
638 | schan->chan.cookie = 1; | ||
639 | schan->completed_cookie = schan->chan.cookie; | ||
640 | |||
641 | INIT_LIST_HEAD(&schan->free); | ||
642 | INIT_LIST_HEAD(&schan->prepared); | ||
643 | INIT_LIST_HEAD(&schan->queued); | ||
644 | INIT_LIST_HEAD(&schan->active); | ||
645 | INIT_LIST_HEAD(&schan->completed); | ||
646 | |||
647 | spin_lock_init(&schan->lock); | ||
648 | list_add_tail(&schan->chan.device_node, &dma->channels); | ||
649 | } | ||
650 | |||
651 | tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); | ||
652 | |||
653 | /* Register DMA engine */ | ||
654 | dev_set_drvdata(dev, sdma); | ||
655 | ret = dma_async_device_register(dma); | ||
656 | if (ret) | ||
657 | goto free_irq; | ||
658 | |||
659 | dev_info(dev, "initialized SIRFSOC DMAC driver\n"); | ||
660 | |||
661 | return 0; | ||
662 | |||
663 | free_irq: | ||
664 | devm_free_irq(dev, sdma->irq, sdma); | ||
665 | irq_dispose: | ||
666 | irq_dispose_mapping(sdma->irq); | ||
667 | unmap_mem: | ||
668 | iounmap(sdma->base); | ||
669 | free_mem: | ||
670 | devm_kfree(dev, sdma); | ||
671 | return ret; | ||
672 | } | ||
673 | |||
674 | static int __devexit sirfsoc_dma_remove(struct platform_device *op) | ||
675 | { | ||
676 | struct device *dev = &op->dev; | ||
677 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | ||
678 | |||
679 | dma_async_device_unregister(&sdma->dma); | ||
680 | devm_free_irq(dev, sdma->irq, sdma); | ||
681 | irq_dispose_mapping(sdma->irq); | ||
682 | iounmap(sdma->base); | ||
683 | devm_kfree(dev, sdma); | ||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | static struct of_device_id sirfsoc_dma_match[] = { | ||
688 | { .compatible = "sirf,prima2-dmac", }, | ||
689 | {}, | ||
690 | }; | ||
691 | |||
692 | static struct platform_driver sirfsoc_dma_driver = { | ||
693 | .probe = sirfsoc_dma_probe, | ||
694 | .remove = __devexit_p(sirfsoc_dma_remove), | ||
695 | .driver = { | ||
696 | .name = DRV_NAME, | ||
697 | .owner = THIS_MODULE, | ||
698 | .of_match_table = sirfsoc_dma_match, | ||
699 | }, | ||
700 | }; | ||
701 | |||
702 | module_platform_driver(sirfsoc_dma_driver); | ||
703 | |||
704 | MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " | ||
705 | "Barry Song <baohua.song@csr.com>"); | ||
706 | MODULE_DESCRIPTION("SIRFSOC DMA control driver"); | ||
707 | MODULE_LICENSE("GPL v2"); | ||
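Since each of the 16 channels is hard-wired to one function, a client is expected to grab a specific channel through the exported filter. A hedged usage sketch (channel number 12 is made up; dma_request_channel() is the stock dmaengine API):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_INTERLEAVE, mask);

	/* id = chan_id + dev_id * SIRFSOC_DMA_CHANNELS, matching the filter */
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
	if (!chan)
		pr_err("no SiRFprimaII DMA channel available\n");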
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 13259cad0ceb..cc5ecbc067a3 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/pm.h> | ||
18 | #include <linux/pm_runtime.h> | ||
17 | #include <linux/err.h> | 19 | #include <linux/err.h> |
18 | #include <linux/amba/bus.h> | 20 | #include <linux/amba/bus.h> |
19 | 21 | ||
@@ -32,6 +34,9 @@ | |||
32 | /* Maximum iterations taken before giving up suspending a channel */ | 34 | /* Maximum iterations taken before giving up suspending a channel */ |
33 | #define D40_SUSPEND_MAX_IT 500 | 35 | #define D40_SUSPEND_MAX_IT 500 |
34 | 36 | ||
37 | /* Milliseconds */ | ||
38 | #define DMA40_AUTOSUSPEND_DELAY 100 | ||
39 | |||
35 | /* Hardware requirement on LCLA alignment */ | 40 | /* Hardware requirement on LCLA alignment */ |
36 | #define LCLA_ALIGNMENT 0x40000 | 41 | #define LCLA_ALIGNMENT 0x40000 |
37 | 42 | ||
@@ -62,6 +67,55 @@ enum d40_command { | |||
62 | D40_DMA_SUSPENDED = 3 | 67 | D40_DMA_SUSPENDED = 3 |
63 | }; | 68 | }; |
64 | 69 | ||
70 | /* | ||
71 | * These are the registers that have to be saved and later restored | ||
72 | * when the DMA hw is powered off. | ||
73 | * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. | ||
74 | */ | ||
75 | static u32 d40_backup_regs[] = { | ||
76 | D40_DREG_LCPA, | ||
77 | D40_DREG_LCLA, | ||
78 | D40_DREG_PRMSE, | ||
79 | D40_DREG_PRMSO, | ||
80 | D40_DREG_PRMOE, | ||
81 | D40_DREG_PRMOO, | ||
82 | }; | ||
83 | |||
84 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) | ||
85 | |||
86 | /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ | ||
87 | static u32 d40_backup_regs_v3[] = { | ||
88 | D40_DREG_PSEG1, | ||
89 | D40_DREG_PSEG2, | ||
90 | D40_DREG_PSEG3, | ||
91 | D40_DREG_PSEG4, | ||
92 | D40_DREG_PCEG1, | ||
93 | D40_DREG_PCEG2, | ||
94 | D40_DREG_PCEG3, | ||
95 | D40_DREG_PCEG4, | ||
96 | D40_DREG_RSEG1, | ||
97 | D40_DREG_RSEG2, | ||
98 | D40_DREG_RSEG3, | ||
99 | D40_DREG_RSEG4, | ||
100 | D40_DREG_RCEG1, | ||
101 | D40_DREG_RCEG2, | ||
102 | D40_DREG_RCEG3, | ||
103 | D40_DREG_RCEG4, | ||
104 | }; | ||
105 | |||
106 | #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) | ||
107 | |||
108 | static u32 d40_backup_regs_chan[] = { | ||
109 | D40_CHAN_REG_SSCFG, | ||
110 | D40_CHAN_REG_SSELT, | ||
111 | D40_CHAN_REG_SSPTR, | ||
112 | D40_CHAN_REG_SSLNK, | ||
113 | D40_CHAN_REG_SDCFG, | ||
114 | D40_CHAN_REG_SDELT, | ||
115 | D40_CHAN_REG_SDPTR, | ||
116 | D40_CHAN_REG_SDLNK, | ||
117 | }; | ||
118 | |||
65 | /** | 119 | /** |
66 | * struct d40_lli_pool - Structure for keeping LLIs in memory | 120 | * struct d40_lli_pool - Structure for keeping LLIs in memory |
67 | * | 121 | * |
@@ -96,7 +150,7 @@ struct d40_lli_pool { | |||
96 | * during a transfer. | 150 | * during a transfer. |
97 | * @node: List entry. | 151 | * @node: List entry. |
98 | * @is_in_client_list: true if the client owns this descriptor. | 152 | * @is_in_client_list: true if the client owns this descriptor. |
99 | * the previous one. | 153 | * @cyclic: true if this is a cyclic job |
100 | * | 154 | * |
101 | * This descriptor is used for both logical and physical transfers. | 155 | * This descriptor is used for both logical and physical transfers. |
102 | */ | 156 | */ |
@@ -143,6 +197,7 @@ struct d40_lcla_pool { | |||
143 | * channels. | 197 | * channels. |
144 | * | 198 | * |
145 | * @lock: A lock protecting this entity. | 199 | * @lock: A lock protecting this entity. |
200 | * @reserved: True if the channel is reserved, e.g. used by the secure world. | ||
146 | * @num: The physical channel number of this entity. | 201 | * @num: The physical channel number of this entity. |
147 | * @allocated_src: Bit mapped to show which src event line's are mapped to | 202 | * @allocated_src: Bit mapped to show which src event line's are mapped to |
148 | * this physical channel. Can also be free or physically allocated. | 203 | * this physical channel. Can also be free or physically allocated. |
@@ -152,6 +207,7 @@ struct d40_lcla_pool { | |||
152 | */ | 207 | */ |
153 | struct d40_phy_res { | 208 | struct d40_phy_res { |
154 | spinlock_t lock; | 209 | spinlock_t lock; |
210 | bool reserved; | ||
155 | int num; | 211 | int num; |
156 | u32 allocated_src; | 212 | u32 allocated_src; |
157 | u32 allocated_dst; | 213 | u32 allocated_dst; |
@@ -185,7 +241,6 @@ struct d40_base; | |||
185 | * @src_def_cfg: Default cfg register setting for src. | 241 | * @src_def_cfg: Default cfg register setting for src. |
186 | * @dst_def_cfg: Default cfg register setting for dst. | 242 | * @dst_def_cfg: Default cfg register setting for dst. |
187 | * @log_def: Default logical channel settings. | 243 | * @log_def: Default logical channel settings. |
188 | * @lcla: Space for one dst src pair for logical channel transfers. | ||
189 | * @lcpa: Pointer to dst and src lcpa settings. | 244 | * @lcpa: Pointer to dst and src lcpa settings. |
190 | * @runtime_addr: runtime configured address. | 245 | * @runtime_addr: runtime configured address. |
191 | * @runtime_direction: runtime configured direction. | 246 | * @runtime_direction: runtime configured direction. |
@@ -217,7 +272,7 @@ struct d40_chan { | |||
217 | struct d40_log_lli_full *lcpa; | 272 | struct d40_log_lli_full *lcpa; |
218 | /* Runtime reconfiguration */ | 273 | /* Runtime reconfiguration */ |
219 | dma_addr_t runtime_addr; | 274 | dma_addr_t runtime_addr; |
220 | enum dma_data_direction runtime_direction; | 275 | enum dma_transfer_direction runtime_direction; |
221 | }; | 276 | }; |
222 | 277 | ||
223 | /** | 278 | /** |
@@ -241,6 +296,7 @@ struct d40_chan { | |||
241 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. | 296 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. |
242 | * @dma_slave: dma_device channels that can only do slave transfers. | 297 | * @dma_slave: dma_device channels that can only do slave transfers. |
243 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. | 298 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. |
299 | * @phy_chans: Room for all possible physical channels in system. | ||
244 | * @log_chans: Room for all possible logical channels in system. | 300 | * @log_chans: Room for all possible logical channels in system. |
245 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points | 301 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points |
246 | * to log_chans entries. | 302 | * to log_chans entries. |
@@ -248,12 +304,20 @@ struct d40_chan { | |||
248 | * to phy_chans entries. | 304 | * to phy_chans entries. |
249 | * @plat_data: Pointer to provided platform_data which is the driver | 305 | * @plat_data: Pointer to provided platform_data which is the driver |
250 | * configuration. | 306 | * configuration. |
307 | * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla. | ||
251 | * @phy_res: Vector containing all physical channels. | 308 | * @phy_res: Vector containing all physical channels. |
252 | * @lcla_pool: lcla pool settings and data. | 309 | * @lcla_pool: lcla pool settings and data. |
253 | * @lcpa_base: The virtual mapped address of LCPA. | 310 | * @lcpa_base: The virtual mapped address of LCPA. |
254 | * @phy_lcpa: The physical address of the LCPA. | 311 | * @phy_lcpa: The physical address of the LCPA. |
255 | * @lcpa_size: The size of the LCPA area. | 312 | * @lcpa_size: The size of the LCPA area. |
256 | * @desc_slab: cache for descriptors. | 313 | * @desc_slab: cache for descriptors. |
314 | * @reg_val_backup: Here the values of some hardware registers are stored | ||
315 | * before the DMA is powered off. They are restored when the power is back on. | ||
316 | * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and | ||
317 | * later. | ||
318 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. | ||
319 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. | ||
320 | * @initialized: true if the dma has been initialized | ||
257 | */ | 321 | */ |
258 | struct d40_base { | 322 | struct d40_base { |
259 | spinlock_t interrupt_lock; | 323 | spinlock_t interrupt_lock; |
@@ -275,6 +339,7 @@ struct d40_base { | |||
275 | struct d40_chan **lookup_log_chans; | 339 | struct d40_chan **lookup_log_chans; |
276 | struct d40_chan **lookup_phy_chans; | 340 | struct d40_chan **lookup_phy_chans; |
277 | struct stedma40_platform_data *plat_data; | 341 | struct stedma40_platform_data *plat_data; |
342 | struct regulator *lcpa_regulator; | ||
278 | /* Physical half channels */ | 343 | /* Physical half channels */ |
279 | struct d40_phy_res *phy_res; | 344 | struct d40_phy_res *phy_res; |
280 | struct d40_lcla_pool lcla_pool; | 345 | struct d40_lcla_pool lcla_pool; |
@@ -282,6 +347,11 @@ struct d40_base { | |||
282 | dma_addr_t phy_lcpa; | 347 | dma_addr_t phy_lcpa; |
283 | resource_size_t lcpa_size; | 348 | resource_size_t lcpa_size; |
284 | struct kmem_cache *desc_slab; | 349 | struct kmem_cache *desc_slab; |
350 | u32 reg_val_backup[BACKUP_REGS_SZ]; | ||
351 | u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; | ||
352 | u32 *reg_val_backup_chan; | ||
353 | u16 gcc_pwr_off_mask; | ||
354 | bool initialized; | ||
285 | }; | 355 | }; |
286 | 356 | ||
287 | /** | 357 | /** |
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
479 | struct d40_desc *d; | 549 | struct d40_desc *d; |
480 | struct d40_desc *_d; | 550 | struct d40_desc *_d; |
481 | 551 | ||
482 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 552 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
483 | if (async_tx_test_ack(&d->txd)) { | 553 | if (async_tx_test_ack(&d->txd)) { |
484 | d40_desc_remove(d); | 554 | d40_desc_remove(d); |
485 | desc = d; | 555 | desc = d; |
486 | memset(desc, 0, sizeof(*desc)); | 556 | memset(desc, 0, sizeof(*desc)); |
487 | break; | 557 | break; |
488 | } | 558 | } |
559 | } | ||
489 | } | 560 | } |
490 | 561 | ||
491 | if (!desc) | 562 | if (!desc) |
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
536 | bool cyclic = desc->cyclic; | 607 | bool cyclic = desc->cyclic; |
537 | int curr_lcla = -EINVAL; | 608 | int curr_lcla = -EINVAL; |
538 | int first_lcla = 0; | 609 | int first_lcla = 0; |
610 | bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; | ||
539 | bool linkback; | 611 | bool linkback; |
540 | 612 | ||
541 | /* | 613 | /* |
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
608 | &lli->src[lli_current], | 680 | &lli->src[lli_current], |
609 | next_lcla, flags); | 681 | next_lcla, flags); |
610 | 682 | ||
611 | dma_sync_single_range_for_device(chan->base->dev, | 683 | /* |
612 | pool->dma_addr, lcla_offset, | 684 | * Cache maintenance is not needed if lcla is |
613 | 2 * sizeof(struct d40_log_lli), | 685 | * mapped in esram |
614 | DMA_TO_DEVICE); | 686 | */ |
615 | 687 | if (!use_esram_lcla) { | |
688 | dma_sync_single_range_for_device(chan->base->dev, | ||
689 | pool->dma_addr, lcla_offset, | ||
690 | 2 * sizeof(struct d40_log_lli), | ||
691 | DMA_TO_DEVICE); | ||
692 | } | ||
616 | curr_lcla = next_lcla; | 693 | curr_lcla = next_lcla; |
617 | 694 | ||
618 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { | 695 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { |
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, | |||
740 | return len; | 817 | return len; |
741 | } | 818 | } |
742 | 819 | ||
743 | /* Support functions for logical channels */ | 820 | |
821 | #ifdef CONFIG_PM | ||
822 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, | ||
823 | u32 *regaddr, int num, bool save) | ||
824 | { | ||
825 | int i; | ||
826 | |||
827 | for (i = 0; i < num; i++) { | ||
828 | void __iomem *addr = baseaddr + regaddr[i]; | ||
829 | |||
830 | if (save) | ||
831 | backup[i] = readl_relaxed(addr); | ||
832 | else | ||
833 | writel_relaxed(backup[i], addr); | ||
834 | } | ||
835 | } | ||
836 | |||
837 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
838 | { | ||
839 | int i; | ||
840 | |||
841 | /* Save/Restore channel specific registers */ | ||
842 | for (i = 0; i < base->num_phy_chans; i++) { | ||
843 | void __iomem *addr; | ||
844 | int idx; | ||
845 | |||
846 | if (base->phy_res[i].reserved) | ||
847 | continue; | ||
848 | |||
849 | addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; | ||
850 | idx = i * ARRAY_SIZE(d40_backup_regs_chan); | ||
851 | |||
852 | dma40_backup(addr, &base->reg_val_backup_chan[idx], | ||
853 | d40_backup_regs_chan, | ||
854 | ARRAY_SIZE(d40_backup_regs_chan), | ||
855 | save); | ||
856 | } | ||
857 | |||
858 | /* Save/Restore global registers */ | ||
859 | dma40_backup(base->virtbase, base->reg_val_backup, | ||
860 | d40_backup_regs, ARRAY_SIZE(d40_backup_regs), | ||
861 | save); | ||
862 | |||
863 | /* Save/Restore registers only existing on dma40 v3 and later */ | ||
864 | if (base->rev >= 3) | ||
865 | dma40_backup(base->virtbase, base->reg_val_backup_v3, | ||
866 | d40_backup_regs_v3, | ||
867 | ARRAY_SIZE(d40_backup_regs_v3), | ||
868 | save); | ||
869 | } | ||
870 | #else | ||
871 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
872 | { | ||
873 | } | ||
874 | #endif | ||
744 | 875 | ||
745 | static int d40_channel_execute_command(struct d40_chan *d40c, | 876 | static int d40_channel_execute_command(struct d40_chan *d40c, |
746 | enum d40_command command) | 877 | enum d40_command command) |
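The backup helper is symmetric on purpose: the same table walk runs for both save and restore. A hedged sketch of the runtime-PM pairing it is written for (the callback names here are hypothetical; the hunks wiring them up fall outside this excerpt):

	static int dma40_runtime_suspend(struct device *dev)
	{
		struct d40_base *base = dev_get_drvdata(dev);

		d40_save_restore_registers(base, true);		/* save */
		return 0;
	}

	static int dma40_runtime_resume(struct device *dev)
	{
		struct d40_base *base = dev_get_drvdata(dev);

		if (base->initialized)
			d40_save_restore_registers(base, false);	/* restore */
		return 0;
	}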
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c) | |||
973 | /* Set LIDX for lcla */ | 1104 | /* Set LIDX for lcla */ |
974 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); | 1105 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); |
975 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); | 1106 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); |
1107 | |||
1108 | /* Clear LNK which will be used by d40_chan_has_events() */ | ||
1109 | writel(0, chanbase + D40_CHAN_REG_SSLNK); | ||
1110 | writel(0, chanbase + D40_CHAN_REG_SDLNK); | ||
976 | } | 1111 | } |
977 | } | 1112 | } |
978 | 1113 | ||
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c) | |||
1013 | if (!d40c->busy) | 1148 | if (!d40c->busy) |
1014 | return 0; | 1149 | return 0; |
1015 | 1150 | ||
1151 | pm_runtime_get_sync(d40c->base->dev); | ||
1016 | spin_lock_irqsave(&d40c->lock, flags); | 1152 | spin_lock_irqsave(&d40c->lock, flags); |
1017 | 1153 | ||
1018 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1154 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c) | |||
1025 | D40_DMA_RUN); | 1161 | D40_DMA_RUN); |
1026 | } | 1162 | } |
1027 | } | 1163 | } |
1028 | 1164 | pm_runtime_mark_last_busy(d40c->base->dev); | |
1165 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1029 | spin_unlock_irqrestore(&d40c->lock, flags); | 1166 | spin_unlock_irqrestore(&d40c->lock, flags); |
1030 | return res; | 1167 | return res; |
1031 | } | 1168 | } |
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c) | |||
1039 | return 0; | 1176 | return 0; |
1040 | 1177 | ||
1041 | spin_lock_irqsave(&d40c->lock, flags); | 1178 | spin_lock_irqsave(&d40c->lock, flags); |
1042 | 1179 | pm_runtime_get_sync(d40c->base->dev); | |
1043 | if (d40c->base->rev == 0) | 1180 | if (d40c->base->rev == 0) |
1044 | if (chan_is_logical(d40c)) { | 1181 | if (chan_is_logical(d40c)) { |
1045 | res = d40_channel_execute_command(d40c, | 1182 | res = d40_channel_execute_command(d40c, |
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c) | |||
1057 | } | 1194 | } |
1058 | 1195 | ||
1059 | no_suspend: | 1196 | no_suspend: |
1197 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1198 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1060 | spin_unlock_irqrestore(&d40c->lock, flags); | 1199 | spin_unlock_irqrestore(&d40c->lock, flags); |
1061 | return res; | 1200 | return res; |
1062 | } | 1201 | } |
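d40_pause() and d40_resume() now bracket their register work with a synchronous runtime-PM reference and release it with the autosuspend variant, so the controller only clock-gates after an idle delay; the same reference is also held for as long as a channel is busy (taken in d40_queue_start(), dropped in dma_tc_handle() below when the queue drains). A sketch of the bracket, assuming a generic device and callback:

#include <linux/pm_runtime.h>

static int with_dma40_awake(struct device *dev, int (*op)(void *), void *arg)
{
	int ret;

	pm_runtime_get_sync(dev);	/* resume hardware if gated */
	ret = op(arg);			/* register-level work */
	pm_runtime_mark_last_busy(dev);	/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);
	return ret;
}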
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
1129 | d40d = d40_first_queued(d40c); | 1268 | d40d = d40_first_queued(d40c); |
1130 | 1269 | ||
1131 | if (d40d != NULL) { | 1270 | if (d40d != NULL) { |
1132 | d40c->busy = true; | 1271 | if (!d40c->busy) |
1272 | d40c->busy = true; | ||
1273 | |||
1274 | pm_runtime_get_sync(d40c->base->dev); | ||
1133 | 1275 | ||
1134 | /* Remove from queue */ | 1276 | /* Remove from queue */ |
1135 | d40_desc_remove(d40d); | 1277 | d40_desc_remove(d40d); |
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
1190 | 1332 | ||
1191 | if (d40_queue_start(d40c) == NULL) | 1333 | if (d40_queue_start(d40c) == NULL) |
1192 | d40c->busy = false; | 1334 | d40c->busy = false; |
1335 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1336 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1193 | } | 1337 | } |
1194 | 1338 | ||
1195 | d40c->pending_tx++; | 1339 | d40c->pending_tx++; |
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1405 | return res; | 1549 | return res; |
1406 | } | 1550 | } |
1407 | 1551 | ||
1408 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, | 1552 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, |
1409 | int log_event_line, bool is_log) | 1553 | bool is_src, int log_event_line, bool is_log, |
1554 | bool *first_user) | ||
1410 | { | 1555 | { |
1411 | unsigned long flags; | 1556 | unsigned long flags; |
1412 | spin_lock_irqsave(&phy->lock, flags); | 1557 | spin_lock_irqsave(&phy->lock, flags); |
1558 | |||
1559 | *first_user = ((phy->allocated_src | phy->allocated_dst) | ||
1560 | == D40_ALLOC_FREE); | ||
1561 | |||
1413 | if (!is_log) { | 1562 | if (!is_log) { |
1414 | /* Physical interrupts are masked per physical full channel */ | 1563 | /* Physical interrupts are masked per physical full channel */ |
1415 | if (phy->allocated_src == D40_ALLOC_FREE && | 1564 | if (phy->allocated_src == D40_ALLOC_FREE && |
@@ -1490,7 +1639,7 @@ out: | |||
1490 | return is_free; | 1639 | return is_free; |
1491 | } | 1640 | } |
1492 | 1641 | ||
1493 | static int d40_allocate_channel(struct d40_chan *d40c) | 1642 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) |
1494 | { | 1643 | { |
1495 | int dev_type; | 1644 | int dev_type; |
1496 | int event_group; | 1645 | int event_group; |
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1526 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | 1675 | for (i = 0; i < d40c->base->num_phy_chans; i++) { |
1527 | 1676 | ||
1528 | if (d40_alloc_mask_set(&phys[i], is_src, | 1677 | if (d40_alloc_mask_set(&phys[i], is_src, |
1529 | 0, is_log)) | 1678 | 0, is_log, |
1679 | first_phy_user)) | ||
1530 | goto found_phy; | 1680 | goto found_phy; |
1531 | } | 1681 | } |
1532 | } else | 1682 | } else |
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1536 | if (d40_alloc_mask_set(&phys[i], | 1686 | if (d40_alloc_mask_set(&phys[i], |
1537 | is_src, | 1687 | is_src, |
1538 | 0, | 1688 | 0, |
1539 | is_log)) | 1689 | is_log, |
1690 | first_phy_user)) | ||
1540 | goto found_phy; | 1691 | goto found_phy; |
1541 | } | 1692 | } |
1542 | } | 1693 | } |
@@ -1552,6 +1703,25 @@ found_phy: | |||
1552 | /* Find logical channel */ | 1703 | /* Find logical channel */ |
1553 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1704 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1554 | int phy_num = j + event_group * 2; | 1705 | int phy_num = j + event_group * 2; |
1706 | |||
1707 | if (d40c->dma_cfg.use_fixed_channel) { | ||
1708 | i = d40c->dma_cfg.phy_channel; | ||
1709 | |||
1710 | if ((i != phy_num) && (i != phy_num + 1)) { | ||
1711 | dev_err(chan2dev(d40c), | ||
1712 | "invalid fixed phy channel %d\n", i); | ||
1713 | return -EINVAL; | ||
1714 | } | ||
1715 | |||
1716 | if (d40_alloc_mask_set(&phys[i], is_src, event_line, | ||
1717 | is_log, first_phy_user)) | ||
1718 | goto found_log; | ||
1719 | |||
1720 | dev_err(chan2dev(d40c), | ||
1721 | "could not allocate fixed phy channel %d\n", i); | ||
1722 | return -EINVAL; | ||
1723 | } | ||
1724 | |||
1555 | /* | 1725 | /* |
1556 | * Spread logical channels across all available physical rather | 1726 | * Spread logical channels across all available physical rather |
1557 | * than pack every logical channel at the first available phy | 1727 | * than pack every logical channel at the first available phy |
@@ -1560,13 +1730,15 @@ found_phy: | |||
1560 | if (is_src) { | 1730 | if (is_src) { |
1561 | for (i = phy_num; i < phy_num + 2; i++) { | 1731 | for (i = phy_num; i < phy_num + 2; i++) { |
1562 | if (d40_alloc_mask_set(&phys[i], is_src, | 1732 | if (d40_alloc_mask_set(&phys[i], is_src, |
1563 | event_line, is_log)) | 1733 | event_line, is_log, |
1734 | first_phy_user)) | ||
1564 | goto found_log; | 1735 | goto found_log; |
1565 | } | 1736 | } |
1566 | } else { | 1737 | } else { |
1567 | for (i = phy_num + 1; i >= phy_num; i--) { | 1738 | for (i = phy_num + 1; i >= phy_num; i--) { |
1568 | if (d40_alloc_mask_set(&phys[i], is_src, | 1739 | if (d40_alloc_mask_set(&phys[i], is_src, |
1569 | event_line, is_log)) | 1740 | event_line, is_log, |
1741 | first_phy_user)) | ||
1570 | goto found_log; | 1742 | goto found_log; |
1571 | } | 1743 | } |
1572 | } | 1744 | } |
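d40_alloc_mask_set() now also reports, under phy->lock, whether both allocation masks were still D40_ALLOC_FREE, i.e. whether this caller is the first user of the physical channel; d40_alloc_chan_resources() uses that to decide whether the shared channel configuration must be written. The fixed physical-channel support folds into the same loop: an event group owns one even/odd pair of physical channels, so a platform-requested channel is only legal if it lands on that pair. A sketch of the pair check, following the phy_num arithmetic shown above:

static bool fixed_phy_chan_valid(int requested, int event_group, int j)
{
	int phy_num = j + event_group * 2;	/* base of the pair */

	return requested == phy_num || requested == phy_num + 1;
}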
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1643 | return -EINVAL; | 1815 | return -EINVAL; |
1644 | } | 1816 | } |
1645 | 1817 | ||
1818 | pm_runtime_get_sync(d40c->base->dev); | ||
1646 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1819 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1647 | if (res) { | 1820 | if (res) { |
1648 | chan_err(d40c, "suspend failed\n"); | 1821 | chan_err(d40c, "suspend failed\n"); |
1649 | return res; | 1822 | goto out; |
1650 | } | 1823 | } |
1651 | 1824 | ||
1652 | if (chan_is_logical(d40c)) { | 1825 | if (chan_is_logical(d40c)) { |
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1664 | if (d40_chan_has_events(d40c)) { | 1837 | if (d40_chan_has_events(d40c)) { |
1665 | res = d40_channel_execute_command(d40c, | 1838 | res = d40_channel_execute_command(d40c, |
1666 | D40_DMA_RUN); | 1839 | D40_DMA_RUN); |
1667 | if (res) { | 1840 | if (res) |
1668 | chan_err(d40c, | 1841 | chan_err(d40c, |
1669 | "Executing RUN command\n"); | 1842 | "Executing RUN command\n"); |
1670 | return res; | ||
1671 | } | ||
1672 | } | 1843 | } |
1673 | return 0; | 1844 | goto out; |
1674 | } | 1845 | } |
1675 | } else { | 1846 | } else { |
1676 | (void) d40_alloc_mask_free(phy, is_src, 0); | 1847 | (void) d40_alloc_mask_free(phy, is_src, 0); |
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1680 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 1851 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
1681 | if (res) { | 1852 | if (res) { |
1682 | chan_err(d40c, "Failed to stop channel\n"); | 1853 | chan_err(d40c, "Failed to stop channel\n"); |
1683 | return res; | 1854 | goto out; |
1684 | } | 1855 | } |
1856 | |||
1857 | if (d40c->busy) { | ||
1858 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1859 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1860 | } | ||
1861 | |||
1862 | d40c->busy = false; | ||
1685 | d40c->phy_chan = NULL; | 1863 | d40c->phy_chan = NULL; |
1686 | d40c->configured = false; | 1864 | d40c->configured = false; |
1687 | d40c->base->lookup_phy_chans[phy->num] = NULL; | 1865 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
1866 | out: | ||
1688 | 1867 | ||
1689 | return 0; | 1868 | pm_runtime_mark_last_busy(d40c->base->dev); |
1869 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1870 | return res; | ||
1690 | } | 1871 | } |
1691 | 1872 | ||
1692 | static bool d40_is_paused(struct d40_chan *d40c) | 1873 | static bool d40_is_paused(struct d40_chan *d40c) |
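d40_free_dma() previously returned straight out of its error paths; with a runtime-PM reference taken at entry, every early return becomes a goto to a single exit that performs the balancing put, and a channel that was still busy drops the extra reference held since d40_queue_start(). The shape of that single-exit pattern, with hypothetical helpers standing in for the suspend/stop commands:

#include <linux/pm_runtime.h>

int suspend_channel(struct device *dev);	/* hypothetical helper */
int stop_channel(struct device *dev);		/* hypothetical helper */

static int teardown(struct device *dev)
{
	int res;

	pm_runtime_get_sync(dev);
	res = suspend_channel(dev);
	if (res)
		goto out;
	res = stop_channel(dev);
out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return res;
}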
@@ -1855,7 +2036,7 @@ err: | |||
1855 | } | 2036 | } |
1856 | 2037 | ||
1857 | static dma_addr_t | 2038 | static dma_addr_t |
1858 | d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) | 2039 | d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) |
1859 | { | 2040 | { |
1860 | struct stedma40_platform_data *plat = chan->base->plat_data; | 2041 | struct stedma40_platform_data *plat = chan->base->plat_data; |
1861 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 2042 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) | |||
1864 | if (chan->runtime_addr) | 2045 | if (chan->runtime_addr) |
1865 | return chan->runtime_addr; | 2046 | return chan->runtime_addr; |
1866 | 2047 | ||
1867 | if (direction == DMA_FROM_DEVICE) | 2048 | if (direction == DMA_DEV_TO_MEM) |
1868 | addr = plat->dev_rx[cfg->src_dev_type]; | 2049 | addr = plat->dev_rx[cfg->src_dev_type]; |
1869 | else if (direction == DMA_TO_DEVICE) | 2050 | else if (direction == DMA_MEM_TO_DEV) |
1870 | addr = plat->dev_tx[cfg->dst_dev_type]; | 2051 | addr = plat->dev_tx[cfg->dst_dev_type]; |
1871 | 2052 | ||
1872 | return addr; | 2053 | return addr; |
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) | |||
1875 | static struct dma_async_tx_descriptor * | 2056 | static struct dma_async_tx_descriptor * |
1876 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | 2057 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
1877 | struct scatterlist *sg_dst, unsigned int sg_len, | 2058 | struct scatterlist *sg_dst, unsigned int sg_len, |
1878 | enum dma_data_direction direction, unsigned long dma_flags) | 2059 | enum dma_transfer_direction direction, unsigned long dma_flags) |
1879 | { | 2060 | { |
1880 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | 2061 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); |
1881 | dma_addr_t src_dev_addr = 0; | 2062 | dma_addr_t src_dev_addr = 0; |
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
1902 | if (direction != DMA_NONE) { | 2083 | if (direction != DMA_NONE) { |
1903 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | 2084 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); |
1904 | 2085 | ||
1905 | if (direction == DMA_FROM_DEVICE) | 2086 | if (direction == DMA_DEV_TO_MEM) |
1906 | src_dev_addr = dev_addr; | 2087 | src_dev_addr = dev_addr; |
1907 | else if (direction == DMA_TO_DEVICE) | 2088 | else if (direction == DMA_MEM_TO_DEV) |
1908 | dst_dev_addr = dev_addr; | 2089 | dst_dev_addr = dev_addr; |
1909 | } | 2090 | } |
1910 | 2091 | ||
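These hunks are part of the tree-wide switch from enum dma_data_direction, which describes mapping/cache ownership for the DMA API, to the new enum dma_transfer_direction, which names the dmaengine transfer endpoints: DMA_FROM_DEVICE becomes DMA_DEV_TO_MEM and DMA_TO_DEVICE becomes DMA_MEM_TO_DEV. The drivers in this series substitute the constants in place; a conversion helper like the following is purely illustrative of the correspondence:

#include <linux/dmaengine.h>
#include <linux/dma-direction.h>

static enum dma_data_direction
xfer_to_map_dir(enum dma_transfer_direction dir)
{
	return dir == DMA_DEV_TO_MEM ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}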
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2011 | goto fail; | 2192 | goto fail; |
2012 | } | 2193 | } |
2013 | } | 2194 | } |
2014 | is_free_phy = (d40c->phy_chan == NULL); | ||
2015 | 2195 | ||
2016 | err = d40_allocate_channel(d40c); | 2196 | err = d40_allocate_channel(d40c, &is_free_phy); |
2017 | if (err) { | 2197 | if (err) { |
2018 | chan_err(d40c, "Failed to allocate channel\n"); | 2198 | chan_err(d40c, "Failed to allocate channel\n"); |
2199 | d40c->configured = false; | ||
2019 | goto fail; | 2200 | goto fail; |
2020 | } | 2201 | } |
2021 | 2202 | ||
2203 | pm_runtime_get_sync(d40c->base->dev); | ||
2022 | /* Fill in basic CFG register values */ | 2204 | /* Fill in basic CFG register values */ |
2023 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 2205 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
2024 | &d40c->dst_def_cfg, chan_is_logical(d40c)); | 2206 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2038 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | 2220 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
2039 | } | 2221 | } |
2040 | 2222 | ||
2223 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", | ||
2224 | chan_is_logical(d40c) ? "logical" : "physical", | ||
2225 | d40c->phy_chan->num, | ||
2226 | d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); | ||
2227 | |||
2228 | |||
2041 | /* | 2229 | /* |
2042 | * Only write channel configuration to the DMA if the physical | 2230 | * Only write channel configuration to the DMA if the physical |
2043 | * resource is free. In case of multiple logical channels | 2231 | * resource is free. In case of multiple logical channels |
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2046 | if (is_free_phy) | 2234 | if (is_free_phy) |
2047 | d40_config_write(d40c); | 2235 | d40_config_write(d40c); |
2048 | fail: | 2236 | fail: |
2237 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
2238 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
2049 | spin_unlock_irqrestore(&d40c->lock, flags); | 2239 | spin_unlock_irqrestore(&d40c->lock, flags); |
2050 | return err; | 2240 | return err; |
2051 | } | 2241 | } |
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan, | |||
2108 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2298 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
2109 | struct scatterlist *sgl, | 2299 | struct scatterlist *sgl, |
2110 | unsigned int sg_len, | 2300 | unsigned int sg_len, |
2111 | enum dma_data_direction direction, | 2301 | enum dma_transfer_direction direction, |
2112 | unsigned long dma_flags) | 2302 | unsigned long dma_flags) |
2113 | { | 2303 | { |
2114 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) | 2304 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) |
2115 | return NULL; | 2305 | return NULL; |
2116 | 2306 | ||
2117 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); | 2307 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2120 | static struct dma_async_tx_descriptor * | 2310 | static struct dma_async_tx_descriptor * |
2121 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 2311 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2122 | size_t buf_len, size_t period_len, | 2312 | size_t buf_len, size_t period_len, |
2123 | enum dma_data_direction direction) | 2313 | enum dma_transfer_direction direction) |
2124 | { | 2314 | { |
2125 | unsigned int periods = buf_len / period_len; | 2315 | unsigned int periods = buf_len / period_len; |
2126 | struct dma_async_tx_descriptor *txd; | 2316 | struct dma_async_tx_descriptor *txd; |
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2269 | dst_addr_width = config->dst_addr_width; | 2459 | dst_addr_width = config->dst_addr_width; |
2270 | dst_maxburst = config->dst_maxburst; | 2460 | dst_maxburst = config->dst_maxburst; |
2271 | 2461 | ||
2272 | if (config->direction == DMA_FROM_DEVICE) { | 2462 | if (config->direction == DMA_DEV_TO_MEM) { |
2273 | dma_addr_t dev_addr_rx = | 2463 | dma_addr_t dev_addr_rx = |
2274 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; | 2464 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; |
2275 | 2465 | ||
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2292 | if (dst_maxburst == 0) | 2482 | if (dst_maxburst == 0) |
2293 | dst_maxburst = src_maxburst; | 2483 | dst_maxburst = src_maxburst; |
2294 | 2484 | ||
2295 | } else if (config->direction == DMA_TO_DEVICE) { | 2485 | } else if (config->direction == DMA_MEM_TO_DEV) { |
2296 | dma_addr_t dev_addr_tx = | 2486 | dma_addr_t dev_addr_tx = |
2297 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; | 2487 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; |
2298 | 2488 | ||
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2357 | "configured channel %s for %s, data width %d/%d, " | 2547 | "configured channel %s for %s, data width %d/%d, " |
2358 | "maxburst %d/%d elements, LE, no flow control\n", | 2548 | "maxburst %d/%d elements, LE, no flow control\n", |
2359 | dma_chan_name(chan), | 2549 | dma_chan_name(chan), |
2360 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | 2550 | (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", |
2361 | src_addr_width, dst_addr_width, | 2551 | src_addr_width, dst_addr_width, |
2362 | src_maxburst, dst_maxburst); | 2552 | src_maxburst, dst_maxburst); |
2363 | 2553 | ||
@@ -2519,6 +2709,72 @@ failure1: | |||
2519 | return err; | 2709 | return err; |
2520 | } | 2710 | } |
2521 | 2711 | ||
2712 | /* Suspend resume functionality */ | ||
2713 | #ifdef CONFIG_PM | ||
2714 | static int dma40_pm_suspend(struct device *dev) | ||
2715 | { | ||
2716 | struct platform_device *pdev = to_platform_device(dev); | ||
2717 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2718 | int ret = 0; | ||
2719 | if (!pm_runtime_suspended(dev)) | ||
2720 | return -EBUSY; | ||
2721 | |||
2722 | if (base->lcpa_regulator) | ||
2723 | ret = regulator_disable(base->lcpa_regulator); | ||
2724 | return ret; | ||
2725 | } | ||
2726 | |||
2727 | static int dma40_runtime_suspend(struct device *dev) | ||
2728 | { | ||
2729 | struct platform_device *pdev = to_platform_device(dev); | ||
2730 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2731 | |||
2732 | d40_save_restore_registers(base, true); | ||
2733 | |||
2734 | /* Don't disable/enable clocks for v1 due to HW bugs */ | ||
2735 | if (base->rev != 1) | ||
2736 | writel_relaxed(base->gcc_pwr_off_mask, | ||
2737 | base->virtbase + D40_DREG_GCC); | ||
2738 | |||
2739 | return 0; | ||
2740 | } | ||
2741 | |||
2742 | static int dma40_runtime_resume(struct device *dev) | ||
2743 | { | ||
2744 | struct platform_device *pdev = to_platform_device(dev); | ||
2745 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2746 | |||
2747 | if (base->initialized) | ||
2748 | d40_save_restore_registers(base, false); | ||
2749 | |||
2750 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, | ||
2751 | base->virtbase + D40_DREG_GCC); | ||
2752 | return 0; | ||
2753 | } | ||
2754 | |||
2755 | static int dma40_resume(struct device *dev) | ||
2756 | { | ||
2757 | struct platform_device *pdev = to_platform_device(dev); | ||
2758 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2759 | int ret = 0; | ||
2760 | |||
2761 | if (base->lcpa_regulator) | ||
2762 | ret = regulator_enable(base->lcpa_regulator); | ||
2763 | |||
2764 | return ret; | ||
2765 | } | ||
2766 | |||
2767 | static const struct dev_pm_ops dma40_pm_ops = { | ||
2768 | .suspend = dma40_pm_suspend, | ||
2769 | .runtime_suspend = dma40_runtime_suspend, | ||
2770 | .runtime_resume = dma40_runtime_resume, | ||
2771 | .resume = dma40_resume, | ||
2772 | }; | ||
2773 | #define DMA40_PM_OPS (&dma40_pm_ops) | ||
2774 | #else | ||
2775 | #define DMA40_PM_OPS NULL | ||
2776 | #endif | ||
2777 | |||
2522 | /* Initialization functions. */ | 2778 | /* Initialization functions. */ |
2523 | 2779 | ||
2524 | static int __init d40_phy_res_init(struct d40_base *base) | 2780 | static int __init d40_phy_res_init(struct d40_base *base) |
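The new PM block splits duties between runtime PM (save registers and gate clocks in dma40_runtime_suspend(), restore them in dma40_runtime_resume()) and system sleep, which only toggles the optional LCPA regulator and refuses to run at all unless runtime PM has already parked the controller. A sketch of that guard, assuming a generic device:

#include <linux/pm_runtime.h>

static int sketch_pm_suspend(struct device *dev)
{
	/* State is saved by the runtime path; entering system sleep
	 * with a still-active controller would lose it, so make the
	 * PM core abort the suspend instead. */
	if (!pm_runtime_suspended(dev))
		return -EBUSY;
	return 0;
}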
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2527 | int num_phy_chans_avail = 0; | 2783 | int num_phy_chans_avail = 0; |
2528 | u32 val[2]; | 2784 | u32 val[2]; |
2529 | int odd_even_bit = -2; | 2785 | int odd_even_bit = -2; |
2786 | int gcc = D40_DREG_GCC_ENA; | ||
2530 | 2787 | ||
2531 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | 2788 | val[0] = readl(base->virtbase + D40_DREG_PRSME); |
2532 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | 2789 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); |
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2538 | /* Mark security only channels as occupied */ | 2795 | /* Mark security only channels as occupied */ |
2539 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 2796 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; |
2540 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 2797 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; |
2798 | base->phy_res[i].reserved = true; | ||
2799 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | ||
2800 | D40_DREG_GCC_SRC); | ||
2801 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | ||
2802 | D40_DREG_GCC_DST); | ||
2803 | |||
2804 | |||
2541 | } else { | 2805 | } else { |
2542 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | 2806 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; |
2543 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | 2807 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; |
2808 | base->phy_res[i].reserved = false; | ||
2544 | num_phy_chans_avail++; | 2809 | num_phy_chans_avail++; |
2545 | } | 2810 | } |
2546 | spin_lock_init(&base->phy_res[i].lock); | 2811 | spin_lock_init(&base->phy_res[i].lock); |
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2552 | 2817 | ||
2553 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; | 2818 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; |
2554 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | 2819 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; |
2820 | base->phy_res[chan].reserved = true; | ||
2821 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | ||
2822 | D40_DREG_GCC_SRC); | ||
2823 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | ||
2824 | D40_DREG_GCC_DST); | ||
2555 | num_phy_chans_avail--; | 2825 | num_phy_chans_avail--; |
2556 | } | 2826 | } |
2557 | 2827 | ||
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2572 | val[0] = val[0] >> 2; | 2842 | val[0] = val[0] >> 2; |
2573 | } | 2843 | } |
2574 | 2844 | ||
2845 | /* | ||
2846 | * To keep things simple, enable all clocks initially. | ||
2847 | * The clocks will be managed later, once channels have been allocated. | ||
2848 | * The clocks for the event lines on which reserved channels exist | ||
2849 | * are not managed here. | ||
2850 | */ | ||
2851 | writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); | ||
2852 | base->gcc_pwr_off_mask = gcc; | ||
2853 | |||
2575 | return num_phy_chans_avail; | 2854 | return num_phy_chans_avail; |
2576 | } | 2855 | } |
2577 | 2856 | ||
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2699 | goto failure; | 2978 | goto failure; |
2700 | } | 2979 | } |
2701 | 2980 | ||
2702 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * | 2981 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * |
2703 | sizeof(struct d40_desc *) * | 2982 | sizeof(d40_backup_regs_chan), |
2704 | D40_LCLA_LINK_PER_EVENT_GRP, | ||
2705 | GFP_KERNEL); | 2983 | GFP_KERNEL); |
2984 | if (!base->reg_val_backup_chan) | ||
2985 | goto failure; | ||
2986 | |||
2987 | base->lcla_pool.alloc_map = | ||
2988 | kzalloc(num_phy_chans * sizeof(struct d40_desc *) | ||
2989 | * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); | ||
2706 | if (!base->lcla_pool.alloc_map) | 2990 | if (!base->lcla_pool.alloc_map) |
2707 | goto failure; | 2991 | goto failure; |
2708 | 2992 | ||
@@ -2741,9 +3025,9 @@ failure: | |||
2741 | static void __init d40_hw_init(struct d40_base *base) | 3025 | static void __init d40_hw_init(struct d40_base *base) |
2742 | { | 3026 | { |
2743 | 3027 | ||
2744 | static const struct d40_reg_val dma_init_reg[] = { | 3028 | static struct d40_reg_val dma_init_reg[] = { |
2745 | /* Clock every part of the DMA block from start */ | 3029 | /* Clock every part of the DMA block from start */ |
2746 | { .reg = D40_DREG_GCC, .val = 0x0000ff01}, | 3030 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, |
2747 | 3031 | ||
2748 | /* Interrupts on all logical channels */ | 3032 | /* Interrupts on all logical channels */ |
2749 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | 3033 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, |
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2943 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); | 3227 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
2944 | goto failure; | 3228 | goto failure; |
2945 | } | 3229 | } |
3230 | /* If lcla has to be located in ESRAM we don't need to allocate */ | ||
3231 | if (base->plat_data->use_esram_lcla) { | ||
3232 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
3233 | "lcla_esram"); | ||
3234 | if (!res) { | ||
3235 | ret = -ENOENT; | ||
3236 | d40_err(&pdev->dev, | ||
3237 | "No \"lcla_esram\" memory resource\n"); | ||
3238 | goto failure; | ||
3239 | } | ||
3240 | base->lcla_pool.base = ioremap(res->start, | ||
3241 | resource_size(res)); | ||
3242 | if (!base->lcla_pool.base) { | ||
3243 | ret = -ENOMEM; | ||
3244 | d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); | ||
3245 | goto failure; | ||
3246 | } | ||
3247 | writel(res->start, base->virtbase + D40_DREG_LCLA); | ||
2946 | 3248 | ||
2947 | ret = d40_lcla_allocate(base); | 3249 | } else { |
2948 | if (ret) { | 3250 | ret = d40_lcla_allocate(base); |
2949 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); | 3251 | if (ret) { |
2950 | goto failure; | 3252 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); |
3253 | goto failure; | ||
3254 | } | ||
2951 | } | 3255 | } |
2952 | 3256 | ||
2953 | spin_lock_init(&base->lcla_pool.lock); | 3257 | spin_lock_init(&base->lcla_pool.lock); |
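When platform data requests LCLA in ESRAM, the probe path above maps a named memory resource instead of allocating from system RAM, and programs its physical base into D40_DREG_LCLA so the hardware fetches link descriptors from ESRAM directly. A sketch of the named-resource lookup, assuming the "lcla_esram" resource the board code must provide:

#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *map_named_mem(struct platform_device *pdev,
				   const char *name, resource_size_t *phys)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		return NULL;

	*phys = res->start;	/* also written to D40_DREG_LCLA above */
	return ioremap(res->start, resource_size(res));
}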
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2960 | goto failure; | 3264 | goto failure; |
2961 | } | 3265 | } |
2962 | 3266 | ||
3267 | pm_runtime_irq_safe(base->dev); | ||
3268 | pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | ||
3269 | pm_runtime_use_autosuspend(base->dev); | ||
3270 | pm_runtime_enable(base->dev); | ||
3271 | pm_runtime_resume(base->dev); | ||
3272 | |||
3273 | if (base->plat_data->use_esram_lcla) { | ||
3274 | |||
3275 | base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); | ||
3276 | if (IS_ERR(base->lcpa_regulator)) { | ||
3277 | d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); | ||
3278 | base->lcpa_regulator = NULL; | ||
3279 | goto failure; | ||
3280 | } | ||
3281 | |||
3282 | ret = regulator_enable(base->lcpa_regulator); | ||
3283 | if (ret) { | ||
3284 | d40_err(&pdev->dev, | ||
3285 | "Failed to enable lcpa_regulator\n"); | ||
3286 | regulator_put(base->lcpa_regulator); | ||
3287 | base->lcpa_regulator = NULL; | ||
3288 | goto failure; | ||
3289 | } | ||
3290 | } | ||
3291 | |||
3292 | base->initialized = true; | ||
2963 | err = d40_dmaengine_init(base, num_reserved_chans); | 3293 | err = d40_dmaengine_init(base, num_reserved_chans); |
2964 | if (err) | 3294 | if (err) |
2965 | goto failure; | 3295 | goto failure; |
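Runtime PM is armed late in probe, once the hardware is mapped but before the dmaengine registration: the device is marked IRQ-safe because the get/put calls above run under channel spinlocks, and autosuspend is configured so the controller gates its clocks only after DMA40_AUTOSUSPEND_DELAY of idleness (the constant's value is not shown in this hunk). The same sequence as a sketch:

#include <linux/pm_runtime.h>

static void sketch_setup_runtime_pm(struct device *dev, int delay_ms)
{
	pm_runtime_irq_safe(dev);	/* callbacks legal in atomic context */
	pm_runtime_set_autosuspend_delay(dev, delay_ms);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	pm_runtime_resume(dev);		/* start out powered and running */
}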
@@ -2976,6 +3306,11 @@ failure: | |||
2976 | if (base->virtbase) | 3306 | if (base->virtbase) |
2977 | iounmap(base->virtbase); | 3307 | iounmap(base->virtbase); |
2978 | 3308 | ||
3309 | if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { | ||
3310 | iounmap(base->lcla_pool.base); | ||
3311 | base->lcla_pool.base = NULL; | ||
3312 | } | ||
3313 | |||
2979 | if (base->lcla_pool.dma_addr) | 3314 | if (base->lcla_pool.dma_addr) |
2980 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | 3315 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, |
2981 | SZ_1K * base->num_phy_chans, | 3316 | SZ_1K * base->num_phy_chans, |
@@ -2998,6 +3333,11 @@ failure: | |||
2998 | clk_put(base->clk); | 3333 | clk_put(base->clk); |
2999 | } | 3334 | } |
3000 | 3335 | ||
3336 | if (base->lcpa_regulator) { | ||
3337 | regulator_disable(base->lcpa_regulator); | ||
3338 | regulator_put(base->lcpa_regulator); | ||
3339 | } | ||
3340 | |||
3001 | kfree(base->lcla_pool.alloc_map); | 3341 | kfree(base->lcla_pool.alloc_map); |
3002 | kfree(base->lookup_log_chans); | 3342 | kfree(base->lookup_log_chans); |
3003 | kfree(base->lookup_phy_chans); | 3343 | kfree(base->lookup_phy_chans); |
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = { | |||
3013 | .driver = { | 3353 | .driver = { |
3014 | .owner = THIS_MODULE, | 3354 | .owner = THIS_MODULE, |
3015 | .name = D40_NAME, | 3355 | .name = D40_NAME, |
3356 | .pm = DMA40_PM_OPS, | ||
3016 | }, | 3357 | }, |
3017 | }; | 3358 | }; |
3018 | 3359 | ||
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index b44c455158de..8d3d490968a3 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -16,6 +16,8 @@ | |||
16 | 16 | ||
17 | #define D40_TYPE_TO_GROUP(type) (type / 16) | 17 | #define D40_TYPE_TO_GROUP(type) (type / 16) |
18 | #define D40_TYPE_TO_EVENT(type) (type % 16) | 18 | #define D40_TYPE_TO_EVENT(type) (type % 16) |
19 | #define D40_GROUP_SIZE 8 | ||
20 | #define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2) | ||
19 | 21 | ||
20 | /* Most bits of the CFG register are the same in log as in phy mode */ | 22 | /* Most bits of the CFG register are the same in log as in phy mode */ |
21 | #define D40_SREG_CFG_MST_POS 15 | 23 | #define D40_SREG_CFG_MST_POS 15 |
@@ -123,6 +125,15 @@ | |||
123 | 125 | ||
124 | /* DMA Register Offsets */ | 126 | /* DMA Register Offsets */ |
125 | #define D40_DREG_GCC 0x000 | 127 | #define D40_DREG_GCC 0x000 |
128 | #define D40_DREG_GCC_ENA 0x1 | ||
129 | /* This assumes that there are only 4 event groups */ | ||
130 | #define D40_DREG_GCC_ENABLE_ALL 0xff01 | ||
131 | #define D40_DREG_GCC_EVTGRP_POS 8 | ||
132 | #define D40_DREG_GCC_SRC 0 | ||
133 | #define D40_DREG_GCC_DST 1 | ||
134 | #define D40_DREG_GCC_EVTGRP_ENA(x, y) \ | ||
135 | (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y)) | ||
136 | |||
126 | #define D40_DREG_PRTYP 0x004 | 137 | #define D40_DREG_PRTYP 0x004 |
127 | #define D40_DREG_PRSME 0x008 | 138 | #define D40_DREG_PRSME 0x008 |
128 | #define D40_DREG_PRSMO 0x00C | 139 | #define D40_DREG_PRSMO 0x00C |
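D40_DREG_GCC packs a global enable bit at bit 0 and, starting at bit 8, one clock-enable bit per (event group, direction) pair: bit 8 + 2 * group + side, with side 0 for source and 1 for destination. With the four event groups the comment above assumes, that is bits 8..15, and OR-ing them all with the global enable yields 0xff01, i.e. D40_DREG_GCC_ENABLE_ALL. A worked example (this sketch parenthesizes the macro arguments, which the header above does not):

#define GCC_EVTGRP_POS		8
#define GCC_EVTGRP_ENA(x, y)	(1 << (GCC_EVTGRP_POS + 2 * (x) + (y)))

/* group 2, destination side: bit 8 + 2*2 + 1 = bit 13 -> 0x2000 */
/* groups 0..3, both sides, plus bit 0: 0xff00 | 0x1 == 0xff01   */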
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index a4a398f2ef61..a6f9c1684a0f 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -90,7 +90,7 @@ struct timb_dma_chan { | |||
90 | struct list_head queue; | 90 | struct list_head queue; |
91 | struct list_head free_list; | 91 | struct list_head free_list; |
92 | unsigned int bytes_per_line; | 92 | unsigned int bytes_per_line; |
93 | enum dma_data_direction direction; | 93 | enum dma_transfer_direction direction; |
94 | unsigned int descs; /* Descriptors to allocate */ | 94 | unsigned int descs; /* Descriptors to allocate */ |
95 | unsigned int desc_elems; /* number of elems per descriptor */ | 95 | unsigned int desc_elems; /* number of elems per descriptor */ |
96 | }; | 96 | }; |
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, | |||
166 | 166 | ||
167 | if (single) | 167 | if (single) |
168 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, | 168 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, |
169 | td_chan->direction); | 169 | DMA_TO_DEVICE); |
170 | else | 170 | else |
171 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, | 171 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, |
172 | td_chan->direction); | 172 | DMA_TO_DEVICE); |
173 | } | 173 | } |
174 | 174 | ||
175 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) | 175 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) |
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan) | |||
235 | "td_chan: %p, chan: %d, membase: %p\n", | 235 | "td_chan: %p, chan: %d, membase: %p\n", |
236 | td_chan, td_chan->chan.chan_id, td_chan->membase); | 236 | td_chan, td_chan->chan.chan_id, td_chan->membase); |
237 | 237 | ||
238 | if (td_chan->direction == DMA_FROM_DEVICE) { | 238 | if (td_chan->direction == DMA_DEV_TO_MEM) { |
239 | 239 | ||
240 | /* descriptor address */ | 240 | /* descriptor address */ |
241 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); | 241 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); |
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) | |||
278 | txd->cookie); | 278 | txd->cookie); |
279 | 279 | ||
280 | /* make sure to stop the transfer */ | 280 | /* make sure to stop the transfer */ |
281 | if (td_chan->direction == DMA_FROM_DEVICE) | 281 | if (td_chan->direction == DMA_DEV_TO_MEM) |
282 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); | 282 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); |
283 | /* Currently no support for stopping DMA transfers | 283 | /* Currently no support for stopping DMA transfers |
284 | else | 284 | else |
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan) | |||
558 | 558 | ||
559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | 559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, |
560 | struct scatterlist *sgl, unsigned int sg_len, | 560 | struct scatterlist *sgl, unsigned int sg_len, |
561 | enum dma_data_direction direction, unsigned long flags) | 561 | enum dma_transfer_direction direction, unsigned long flags) |
562 | { | 562 | { |
563 | struct timb_dma_chan *td_chan = | 563 | struct timb_dma_chan *td_chan = |
564 | container_of(chan, struct timb_dma_chan, chan); | 564 | container_of(chan, struct timb_dma_chan, chan); |
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | |||
606 | } | 606 | } |
607 | 607 | ||
608 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, | 608 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, |
609 | td_desc->desc_list_len, DMA_TO_DEVICE); | 609 | td_desc->desc_list_len, DMA_MEM_TO_DEV); |
610 | 610 | ||
611 | return &td_desc->txd; | 611 | return &td_desc->txd; |
612 | } | 612 | } |
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev) | |||
775 | td_chan->descs = pchan->descriptors; | 775 | td_chan->descs = pchan->descriptors; |
776 | td_chan->desc_elems = pchan->descriptor_elements; | 776 | td_chan->desc_elems = pchan->descriptor_elements; |
777 | td_chan->bytes_per_line = pchan->bytes_per_line; | 777 | td_chan->bytes_per_line = pchan->bytes_per_line; |
778 | td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : | 778 | td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM : |
779 | DMA_TO_DEVICE; | 779 | DMA_MEM_TO_DEV; |
780 | 780 | ||
781 | td_chan->membase = td->membase + | 781 | td_chan->membase = td->membase + |
782 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + | 782 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + |
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = { | |||
841 | .remove = __exit_p(td_remove), | 841 | .remove = __exit_p(td_remove), |
842 | }; | 842 | }; |
843 | 843 | ||
844 | static int __init td_init(void) | 844 | module_platform_driver(td_driver); |
845 | { | ||
846 | return platform_driver_register(&td_driver); | ||
847 | } | ||
848 | module_init(td_init); | ||
849 | |||
850 | static void __exit td_exit(void) | ||
851 | { | ||
852 | platform_driver_unregister(&td_driver); | ||
853 | } | ||
854 | module_exit(td_exit); | ||
855 | 845 | ||
856 | MODULE_LICENSE("GPL v2"); | 846 | MODULE_LICENSE("GPL v2"); |
857 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); | 847 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); |
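The hand-rolled init/exit pair is replaced by module_platform_driver(), a convenience macro that generates exactly the boilerplate deleted above, equivalent to:

static int __init td_driver_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_driver_init);

static void __exit td_driver_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_driver_exit);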
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index cbd83e362b5e..6122c364cf11 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
845 | 845 | ||
846 | static struct dma_async_tx_descriptor * | 846 | static struct dma_async_tx_descriptor * |
847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
848 | unsigned int sg_len, enum dma_data_direction direction, | 848 | unsigned int sg_len, enum dma_transfer_direction direction, |
849 | unsigned long flags) | 849 | unsigned long flags) |
850 | { | 850 | { |
851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
860 | 860 | ||
861 | BUG_ON(!ds || !ds->reg_width); | 861 | BUG_ON(!ds || !ds->reg_width); |
862 | if (ds->tx_reg) | 862 | if (ds->tx_reg) |
863 | BUG_ON(direction != DMA_TO_DEVICE); | 863 | BUG_ON(direction != DMA_MEM_TO_DEV); |
864 | else | 864 | else |
865 | BUG_ON(direction != DMA_FROM_DEVICE); | 865 | BUG_ON(direction != DMA_DEV_TO_MEM); |
866 | if (unlikely(!sg_len)) | 866 | if (unlikely(!sg_len)) |
867 | return NULL; | 867 | return NULL; |
868 | 868 | ||
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
882 | mem = sg_dma_address(sg); | 882 | mem = sg_dma_address(sg); |
883 | 883 | ||
884 | if (__is_dmac64(ddev)) { | 884 | if (__is_dmac64(ddev)) { |
885 | if (direction == DMA_TO_DEVICE) { | 885 | if (direction == DMA_MEM_TO_DEV) { |
886 | desc->hwdesc.SAR = mem; | 886 | desc->hwdesc.SAR = mem; |
887 | desc->hwdesc.DAR = ds->tx_reg; | 887 | desc->hwdesc.DAR = ds->tx_reg; |
888 | } else { | 888 | } else { |
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
891 | } | 891 | } |
892 | desc->hwdesc.CNTR = sg_dma_len(sg); | 892 | desc->hwdesc.CNTR = sg_dma_len(sg); |
893 | } else { | 893 | } else { |
894 | if (direction == DMA_TO_DEVICE) { | 894 | if (direction == DMA_MEM_TO_DEV) { |
895 | desc->hwdesc32.SAR = mem; | 895 | desc->hwdesc32.SAR = mem; |
896 | desc->hwdesc32.DAR = ds->tx_reg; | 896 | desc->hwdesc32.DAR = ds->tx_reg; |
897 | } else { | 897 | } else { |
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
900 | } | 900 | } |
901 | desc->hwdesc32.CNTR = sg_dma_len(sg); | 901 | desc->hwdesc32.CNTR = sg_dma_len(sg); |
902 | } | 902 | } |
903 | if (direction == DMA_TO_DEVICE) { | 903 | if (direction == DMA_MEM_TO_DEV) { |
904 | sai = ds->reg_width; | 904 | sai = ds->reg_width; |
905 | dai = 0; | 905 | dai = 0; |
906 | } else { | 906 | } else { |