Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig               |   15
-rw-r--r--  drivers/dma/Makefile              |    2
-rw-r--r--  drivers/dma/amba-pl08x.c          |   14
-rw-r--r--  drivers/dma/at_hdmac.c            |  184
-rw-r--r--  drivers/dma/at_hdmac_regs.h       |    7
-rw-r--r--  drivers/dma/at_xdmac.c            |    7
-rw-r--r--  drivers/dma/bcm2835-dma.c         |    1
-rw-r--r--  drivers/dma/cppi41.c              |    9
-rw-r--r--  drivers/dma/dma-jz4740.c          |    7
-rw-r--r--  drivers/dma/dmaengine.c           |    3
-rw-r--r--  drivers/dma/dw/core.c             |    2
-rw-r--r--  drivers/dma/dw/platform.c         |    5
-rw-r--r--  drivers/dma/edma.c                |    7
-rw-r--r--  drivers/dma/hsu/Kconfig           |   14
-rw-r--r--  drivers/dma/hsu/Makefile          |    5
-rw-r--r--  drivers/dma/hsu/hsu.c             |  495
-rw-r--r--  drivers/dma/hsu/hsu.h             |  118
-rw-r--r--  drivers/dma/hsu/pci.c             |  124
-rw-r--r--  drivers/dma/imx-sdma.c            |    7
-rw-r--r--  drivers/dma/intel_mid_dma.c       | 1443
-rw-r--r--  drivers/dma/intel_mid_dma_regs.h  |  295
-rw-r--r--  drivers/dma/ioat/dma_v3.c         |    4
-rw-r--r--  drivers/dma/mmp_pdma.c            |   10
-rw-r--r--  drivers/dma/mmp_tdma.c            |   31
-rw-r--r--  drivers/dma/moxart-dma.c          |    4
-rw-r--r--  drivers/dma/of-dma.c              |    1
-rw-r--r--  drivers/dma/omap-dma.c            |    1
-rw-r--r--  drivers/dma/qcom_bam_dma.c        |   10
-rw-r--r--  drivers/dma/sh/shdmac.c           |   15
-rw-r--r--  drivers/dma/ste_dma40.c           |    2
30 files changed, 983 insertions(+), 1859 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b674683de24b..fd7ac13f2574 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -51,19 +51,6 @@ config INTEL_MIC_X100_DMA
 	  OS and tools for MIC to use with this driver are available from
 	  <http://software.intel.com/en-us/mic-developer>.
 
-config INTEL_MID_DMAC
-	tristate "Intel MID DMA support for Peripheral DMA controllers"
-	depends on PCI && X86
-	select DMA_ENGINE
-	default n
-	help
-	  Enable support for the Intel(R) MID DMA engine present
-	  in Intel MID chipsets.
-
-	  Say Y here if you have such a chipset.
-
-	  If unsure, say N.
-
 config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
@@ -136,6 +123,8 @@ config FSL_RAID
 	  the capability to offload memcpy, xor and pq computation
 	  for raid5/6.
 
+source "drivers/dma/hsu/Kconfig"
+
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
 	depends on PPC_MPC512x || PPC_MPC831x
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 345ec4758b9d..69f77d5ba53b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,11 +6,11 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
-obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_MV_XOR) += mv_xor.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e7c50d4c3d45..49d396ec06e5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -93,6 +93,12 @@
 
 #define DRIVER_NAME	"pl08xdmac"
 
+#define PL80X_DMA_BUSWIDTHS \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
 static struct amba_driver pl08x_amba_driver;
 struct pl08x_driver_data;
 
@@ -2060,6 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_pause = pl08x_pause;
 	pl08x->memcpy.device_resume = pl08x_resume;
 	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
+	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2075,6 +2085,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_pause = pl08x_pause;
 	pl08x->slave.device_resume = pl08x_resume;
 	pl08x->slave.device_terminate_all = pl08x_terminate_all;
+	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 4f5b262f9a40..57b2141ddddc 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -253,93 +253,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 }
 
 /*
- * atc_get_current_descriptors -
- * locate the descriptor which equal to physical address in DSCR
- * @atchan: the channel we want to start
- * @dscr_addr: physical descriptor address in DSCR
+ * atc_get_desc_by_cookie - get the descriptor of a cookie
+ * @atchan: the DMA channel
+ * @cookie: the cookie to get the descriptor for
  */
-static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
-		u32 dscr_addr)
+static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
+						dma_cookie_t cookie)
 {
-	struct at_desc *desc, *_desc, *child, *desc_cur = NULL;
+	struct at_desc *desc, *_desc;
 
-	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
-		if (desc->lli.dscr == dscr_addr) {
-			desc_cur = desc;
-			break;
-		}
+	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
+		if (desc->txd.cookie == cookie)
+			return desc;
+	}
 
-		list_for_each_entry(child, &desc->tx_list, desc_node) {
-			if (child->lli.dscr == dscr_addr) {
-				desc_cur = child;
-				break;
-			}
-		}
+	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
+		if (desc->txd.cookie == cookie)
+			return desc;
 	}
 
-	return desc_cur;
+	return NULL;
 }
 
-/*
- * atc_get_bytes_left -
- * Get the number of bytes residue in dma buffer,
- * @chan: the channel we want to start
+/**
+ * atc_calc_bytes_left - calculates the number of bytes left according to the
+ * value read from CTRLA.
+ *
+ * @current_len: the number of bytes left before reading CTRLA
+ * @ctrla: the value of CTRLA
+ * @desc: the descriptor containing the transfer width
+ */
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
+					struct at_desc *desc)
+{
+	return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
+}
+
+/**
+ * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
+ * to the current value of CTRLA.
+ *
+ * @current_len: the number of bytes left before reading CTRLA
+ * @atchan: the channel to read CTRLA for
+ * @desc: the descriptor containing the transfer width
  */
-static int atc_get_bytes_left(struct dma_chan *chan)
+static inline int atc_calc_bytes_left_from_reg(int current_len,
+		struct at_dma_chan *atchan, struct at_desc *desc)
+{
+	u32 ctrla = channel_readl(atchan, CTRLA);
+
+	return atc_calc_bytes_left(current_len, ctrla, desc);
+}
+
+/**
+ * atc_get_bytes_left - get the number of bytes residue for a cookie
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ */
+static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	struct at_dma *atdma = to_at_dma(chan->device);
-	int chan_id = atchan->chan_common.chan_id;
 	struct at_desc *desc_first = atc_first_active(atchan);
-	struct at_desc *desc_cur;
-	int ret = 0, count = 0;
+	struct at_desc *desc;
+	int ret;
+	u32 ctrla, dscr;
 
 	/*
-	 * Initialize necessary values in the first time.
-	 * remain_desc record remain desc length.
+	 * If the cookie doesn't match to the currently running transfer then
+	 * we can return the total length of the associated DMA transfer,
+	 * because it is still queued.
 	 */
-	if (atchan->remain_desc == 0)
-		/* First descriptor embedds the transaction length */
-		atchan->remain_desc = desc_first->len;
+	desc = atc_get_desc_by_cookie(atchan, cookie);
+	if (desc == NULL)
+		return -EINVAL;
+	else if (desc != desc_first)
+		return desc->total_len;
 
-	/*
-	 * This happens when current descriptor transfer complete.
-	 * The residual buffer size should reduce current descriptor length.
-	 */
-	if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
-		clear_bit(ATC_IS_BTC, &atchan->status);
-		desc_cur = atc_get_current_descriptors(atchan,
-						channel_readl(atchan, DSCR));
-		if (!desc_cur) {
-			ret = -EINVAL;
-			goto out;
-		}
+	/* cookie matches to the currently running transfer */
+	ret = desc_first->total_len;
+
+	if (desc_first->lli.dscr) {
+		/* hardware linked list transfer */
+
+		/*
+		 * Calculate the residue by removing the length of the child
+		 * descriptors already transferred from the total length.
+		 * To get the current child descriptor we can use the value of
+		 * the channel's DSCR register and compare it against the value
+		 * of the hardware linked list structure of each child
+		 * descriptor.
+		 */
 
-		count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
-			<< desc_first->tx_width;
-		if (atchan->remain_desc < count) {
-			ret = -EINVAL;
-			goto out;
+		ctrla = channel_readl(atchan, CTRLA);
+		rmb(); /* ensure CTRLA is read before DSCR */
+		dscr = channel_readl(atchan, DSCR);
+
+		/* for the first descriptor we can be more accurate */
+		if (desc_first->lli.dscr == dscr)
+			return atc_calc_bytes_left(ret, ctrla, desc_first);
+
+		ret -= desc_first->len;
+		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
+			if (desc->lli.dscr == dscr)
+				break;
+
+			ret -= desc->len;
 		}
 
-		atchan->remain_desc -= count;
-		ret = atchan->remain_desc;
-	} else {
 		/*
-		 * Get residual bytes when current
-		 * descriptor transfer in progress.
+		 * For the last descriptor in the chain we can calculate
+		 * the remaining bytes using the channel's register.
+		 * Note that the transfer width of the first and last
+		 * descriptor may differ.
 		 */
-		count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
-			<< (desc_first->tx_width);
-		ret = atchan->remain_desc - count;
+		if (!desc->lli.dscr)
+			ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+	} else {
+		/* single transfer */
+		ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
 	}
-	/*
-	 * Check fifo empty.
-	 */
-	if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
-		atc_issue_pending(chan);
 
-out:
 	return ret;
 }
 
@@ -554,8 +587,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 			/* Give information to tasklet */
 			set_bit(ATC_IS_ERROR, &atchan->status);
 		}
-		if (pending & AT_DMA_BTC(i))
-			set_bit(ATC_IS_BTC, &atchan->status);
 		tasklet_schedule(&atchan->tasklet);
 		ret = IRQ_HANDLED;
 	}
@@ -662,14 +693,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		desc->lli.ctrlb = ctrlb;
 
 		desc->txd.cookie = 0;
+		desc->len = xfer_count << src_width;
 
 		atc_desc_chain(&first, &prev, desc);
 	}
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = len;
+	first->total_len = len;
+
+	/* set transfer width for the calculation of the residue */
 	first->tx_width = src_width;
+	prev->tx_width = src_width;
 
 	/* set end-of-link to the last link descriptor of list*/
 	set_desc_eol(desc);
@@ -761,6 +796,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				| ATC_SRC_WIDTH(mem_width)
 				| len >> mem_width;
 			desc->lli.ctrlb = ctrlb;
+			desc->len = len;
 
 			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
@@ -801,6 +837,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				| ATC_DST_WIDTH(mem_width)
 				| len >> reg_width;
 			desc->lli.ctrlb = ctrlb;
+			desc->len = len;
 
 			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
@@ -815,8 +852,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = total_len;
+	first->total_len = total_len;
+
+	/* set transfer width for the calculation of the residue */
 	first->tx_width = reg_width;
+	prev->tx_width = reg_width;
 
 	/* first link descriptor of list is responsible of flags */
 	first->txd.flags = flags; /* client is in control of this ack */
@@ -1019,6 +1059,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
 			| ATC_FC_MEM2PER
 			| ATC_SIF(atchan->mem_if)
 			| ATC_DIF(atchan->per_if);
+		desc->len = period_len;
 		break;
 
 	case DMA_DEV_TO_MEM:
@@ -1030,6 +1071,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
 			| ATC_FC_PER2MEM
 			| ATC_SIF(atchan->per_if)
 			| ATC_DIF(atchan->mem_if);
+		desc->len = period_len;
 		break;
 
 	default:
@@ -1111,7 +1153,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 
 	/* First descriptor of the chain embedds additional information */
 	first->txd.cookie = -EBUSY;
-	first->len = buf_len;
+	first->total_len = buf_len;
 	first->tx_width = reg_width;
 
 	return &first->txd;
@@ -1265,7 +1307,7 @@ atc_tx_status(struct dma_chan *chan,
 	spin_lock_irqsave(&atchan->lock, flags);
 
 	/* Get number of bytes left in the active transactions */
-	bytes = atc_get_bytes_left(chan);
+	bytes = atc_get_bytes_left(chan, cookie);
 
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
@@ -1361,7 +1403,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 
 	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
-	atchan->remain_desc = 0;
 	list_splice(&tmp_list, &atchan->free_list);
 	dma_cookie_init(chan);
 	spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1404,7 +1445,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 	list_splice_init(&atchan->free_list, &list);
 	atchan->descs_allocated = 0;
 	atchan->status = 0;
-	atchan->remain_desc = 0;
 
 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d6bba6c636c2..2727ca560572 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -181,8 +181,9 @@ struct at_lli {
  * @at_lli: hardware lli structure
  * @txd: support for the async_tx api
  * @desc_node: node on the channed descriptors list
- * @len: total transaction bytecount
+ * @len: descriptor byte count
  * @tx_width: transfer width
+ * @total_len: total transaction byte count
  */
 struct at_desc {
 	/* FIRST values the hardware uses */
@@ -194,6 +195,7 @@ struct at_desc {
 	struct list_head	desc_node;
 	size_t			len;
 	u32			tx_width;
+	size_t			total_len;
 };
 
 static inline struct at_desc *
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
 enum atc_status {
 	ATC_IS_ERROR = 0,
 	ATC_IS_PAUSED = 1,
-	ATC_IS_BTC = 2,
 	ATC_IS_CYCLIC = 24,
 };
 
@@ -231,7 +232,6 @@ enum atc_status {
  * @save_cfg: configuration register that is saved on suspend/resume cycle
  * @save_dscr: for cyclic operations, preserve next descriptor address in
  *  the cyclic list on suspend/resume cycle
- * @remain_desc: to save remain desc length
  * @dma_sconfig: configuration for slave transfers, passed via
  * .device_config
  * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -251,7 +251,6 @@ struct at_dma_chan {
 	struct tasklet_struct	tasklet;
 	u32			save_cfg;
 	u32			save_dscr;
-	u32			remain_desc;
 	struct dma_slave_config	dma_sconfig;
 
 	spinlock_t		lock;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 6d18abf88cb5..933e4b338459 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -664,7 +664,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	unsigned int		periods = buf_len / period_len;
 	int			i;
-	u32			cfg;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 		__func__, &buf_addr, buf_len, period_len,
@@ -700,17 +699,17 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 		if (direction == DMA_DEV_TO_MEM) {
 			desc->lld.mbr_sa = atchan->per_src_addr;
 			desc->lld.mbr_da = buf_addr + i * period_len;
-			cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = buf_addr + i * period_len;
 			desc->lld.mbr_da = atchan->per_dst_addr;
-			cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
 		}
 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
 			| AT_XDMAC_MBR_UBC_NDEN
 			| AT_XDMAC_MBR_UBC_NSEN
 			| AT_XDMAC_MBR_UBC_NDE
-			| period_len >> at_xdmac_get_dwidth(cfg);
+			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 
 		dev_dbg(chan2dev(chan),
 			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 0723096fb50a..c92d6a70ccf3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
+		bcm2835_dma_desc_free(&c->desc->vd);
 		c->desc = NULL;
 		bcm2835_dma_abort(c->chan_base);
 
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 512cb8e2805e..ceedafbd23e0 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -903,6 +903,11 @@ static const struct cppi_glue_infos *get_glue_info(struct device *dev)
 	return of_id->data;
 }
 
+#define CPPI41_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
 static int cppi41_dma_probe(struct platform_device *pdev)
 {
 	struct cppi41_dd *cdd;
@@ -926,6 +931,10 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
 	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
 	cdd->ddev.device_terminate_all = cppi41_stop_chan;
+	cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
+	cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
+	cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	cdd->ddev.dev = dev;
 	INIT_LIST_HEAD(&cdd->ddev.channels);
 	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 7497578c9cfc..7638b24ce8d0 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -502,6 +502,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
 	kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
 }
 
+#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
 static int jz4740_dma_probe(struct platform_device *pdev)
 {
 	struct jz4740_dmaengine_chan *chan;
@@ -538,6 +541,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
 	dd->device_config = jz4740_dma_slave_config;
 	dd->device_terminate_all = jz4740_dma_terminate_all;
+	dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24967c89f5d4..0e035a8cf401 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -841,9 +841,6 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
-	WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
-	     "this driver doesn't support generic slave capabilities reporting\n");
-
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 	 */
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index a1c078d8cc85..1022c2e1a2b0 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -627,7 +627,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
 
 	/* Check if we have any interrupt from the DMAC */
-	if (!status)
+	if (!status || !dw->in_use)
 		return IRQ_NONE;
 
 	/*
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6565a361e7e5..b2c3ae071429 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -26,6 +26,8 @@
 
 #include "internal.h"
 
+#define DRV_NAME	"dw_dmac"
+
 static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
 					struct of_dma *ofdma)
 {
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
 	.remove		= dw_remove,
 	.shutdown	= dw_shutdown,
 	.driver = {
-		.name	= "dw_dmac",
+		.name	= DRV_NAME,
 		.pm	= &dw_dev_pm_ops,
 		.of_match_table = of_match_ptr(dw_dma_of_id_table),
 		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -305,3 +307,4 @@ module_exit(dw_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 0ea813d98998..bf09db7ca9ee 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
 	 */
 	if (echan->edesc) {
 		int cyclic = echan->edesc->cyclic;
+
+		/*
+		 * free the running request descriptor
+		 * since it is not in any of the vdesc lists
+		 */
+		edma_desc_free(&echan->edesc->vdesc);
+
 		echan->edesc = NULL;
 		edma_stop(echan->ch_num);
 		/* Move the cyclic channel back to default queue */
diff --git a/drivers/dma/hsu/Kconfig b/drivers/dma/hsu/Kconfig
new file mode 100644
index 000000000000..2810dca70612
--- /dev/null
+++ b/drivers/dma/hsu/Kconfig
@@ -0,0 +1,14 @@
+# DMA engine configuration for hsu
+config HSU_DMA
+	tristate
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config HSU_DMA_PCI
+	tristate "High Speed UART DMA PCI driver"
+	depends on PCI
+	select HSU_DMA
+	help
+	  Support the High Speed UART DMA on the platforms that
+	  enumerate it as a PCI device. For example, Intel Medfield
+	  has integrated this HSU DMA controller.
diff --git a/drivers/dma/hsu/Makefile b/drivers/dma/hsu/Makefile
new file mode 100644
index 000000000000..b8f9af032ef1
--- /dev/null
+++ b/drivers/dma/hsu/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_HSU_DMA)		+= hsu_dma.o
+hsu_dma-objs			:= hsu.o
+
+obj-$(CONFIG_HSU_DMA_PCI)	+= hsu_dma_pci.o
+hsu_dma_pci-objs		:= pci.o
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
new file mode 100644
index 000000000000..9b84def7a353
--- /dev/null
+++ b/drivers/dma/hsu/hsu.c
@@ -0,0 +1,495 @@
1/*
2 * Core driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
6 *
7 * Partially based on the bits found in drivers/tty/serial/mfd.c.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14/*
15 * DMA channel allocation:
16 * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA
17 * Write (UART RX).
18 * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to
19 * port 3, and so on.
20 */
21
22#include <linux/delay.h>
23#include <linux/dmaengine.h>
24#include <linux/dma-mapping.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28
29#include "hsu.h"
30
31#define HSU_DMA_BUSWIDTHS \
32 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
33 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
34 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
35 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
36 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
37 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
38 BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)
39
40static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
41{
42 hsu_chan_writel(hsuc, HSU_CH_CR, 0);
43}
44
45static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
46{
47 u32 cr = HSU_CH_CR_CHA;
48
49 if (hsuc->direction == DMA_MEM_TO_DEV)
50 cr &= ~HSU_CH_CR_CHD;
51 else if (hsuc->direction == DMA_DEV_TO_MEM)
52 cr |= HSU_CH_CR_CHD;
53
54 hsu_chan_writel(hsuc, HSU_CH_CR, cr);
55}
56
57static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
58{
59 struct dma_slave_config *config = &hsuc->config;
60 struct hsu_dma_desc *desc = hsuc->desc;
61 u32 bsr = 0, mtsr = 0; /* to shut the compiler up */
62 u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
63 unsigned int i, count;
64
65 if (hsuc->direction == DMA_MEM_TO_DEV) {
66 bsr = config->dst_maxburst;
67 mtsr = config->dst_addr_width;
68 } else if (hsuc->direction == DMA_DEV_TO_MEM) {
69 bsr = config->src_maxburst;
70 mtsr = config->src_addr_width;
71 }
72
73 hsu_chan_disable(hsuc);
74
75 hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
76 hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
77 hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
78
79 /* Set descriptors */
80 count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC;
81 for (i = 0; i < count; i++) {
82 hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
83 hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
84
85 /* Prepare value for DCR */
86 dcr |= HSU_CH_DCR_DESCA(i);
87 dcr |= HSU_CH_DCR_CHTOI(i); /* timeout bit, see HSU Errata 1 */
88
89 desc->active++;
90 }
91 /* Only for the last descriptor in the chain */
92 dcr |= HSU_CH_DCR_CHSOD(count - 1);
93 dcr |= HSU_CH_DCR_CHDI(count - 1);
94
95 hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);
96
97 hsu_chan_enable(hsuc);
98}
99
100static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
101{
102 unsigned long flags;
103
104 spin_lock_irqsave(&hsuc->lock, flags);
105 hsu_chan_disable(hsuc);
106 hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
107 spin_unlock_irqrestore(&hsuc->lock, flags);
108}
109
110static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
111{
112 unsigned long flags;
113
114 spin_lock_irqsave(&hsuc->lock, flags);
115 hsu_dma_chan_start(hsuc);
116 spin_unlock_irqrestore(&hsuc->lock, flags);
117}
118
119static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
120{
121 struct virt_dma_desc *vdesc;
122
123 /* Get the next descriptor */
124 vdesc = vchan_next_desc(&hsuc->vchan);
125 if (!vdesc) {
126 hsuc->desc = NULL;
127 return;
128 }
129
130 list_del(&vdesc->node);
131 hsuc->desc = to_hsu_dma_desc(vdesc);
132
133 /* Start the channel with a new descriptor */
134 hsu_dma_start_channel(hsuc);
135}
136
137static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
138{
139 unsigned long flags;
140 u32 sr;
141
142 spin_lock_irqsave(&hsuc->lock, flags);
143 sr = hsu_chan_readl(hsuc, HSU_CH_SR);
144 spin_unlock_irqrestore(&hsuc->lock, flags);
145
146 return sr;
147}
148
149irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
150{
151 struct hsu_dma_chan *hsuc;
152 struct hsu_dma_desc *desc;
153 unsigned long flags;
154 u32 sr;
155
156 /* Sanity check */
157 if (nr >= chip->pdata->nr_channels)
158 return IRQ_NONE;
159
160 hsuc = &chip->hsu->chan[nr];
161
162 /*
163 * No matter what situation, need read clear the IRQ status
164 * There is a bug, see Errata 5, HSD 2900918
165 */
166 sr = hsu_dma_chan_get_sr(hsuc);
167 if (!sr)
168 return IRQ_NONE;
169
170 /* Timeout IRQ, need wait some time, see Errata 2 */
171 if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
172 udelay(2);
173
174 sr &= ~HSU_CH_SR_DESCTO_ANY;
175 if (!sr)
176 return IRQ_HANDLED;
177
178 spin_lock_irqsave(&hsuc->vchan.lock, flags);
179 desc = hsuc->desc;
180 if (desc) {
181 if (sr & HSU_CH_SR_CHE) {
182 desc->status = DMA_ERROR;
183 } else if (desc->active < desc->nents) {
184 hsu_dma_start_channel(hsuc);
185 } else {
186 vchan_cookie_complete(&desc->vdesc);
187 desc->status = DMA_COMPLETE;
188 hsu_dma_start_transfer(hsuc);
189 }
190 }
191 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
192
193 return IRQ_HANDLED;
194}
195EXPORT_SYMBOL_GPL(hsu_dma_irq);
196
197static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
198{
199 struct hsu_dma_desc *desc;
200
201 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
202 if (!desc)
203 return NULL;
204
205 desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
206 if (!desc->sg) {
207 kfree(desc);
208 return NULL;
209 }
210
211 return desc;
212}
213
214static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
215{
216 struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);
217
218 kfree(desc->sg);
219 kfree(desc);
220}
221
222static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
223 struct dma_chan *chan, struct scatterlist *sgl,
224 unsigned int sg_len, enum dma_transfer_direction direction,
225 unsigned long flags, void *context)
226{
227 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
228 struct hsu_dma_desc *desc;
229 struct scatterlist *sg;
230 unsigned int i;
231
232 desc = hsu_dma_alloc_desc(sg_len);
233 if (!desc)
234 return NULL;
235
236 for_each_sg(sgl, sg, sg_len, i) {
237 desc->sg[i].addr = sg_dma_address(sg);
238 desc->sg[i].len = sg_dma_len(sg);
239 }
240
241 desc->nents = sg_len;
242 desc->direction = direction;
243 /* desc->active = 0 by kzalloc */
244 desc->status = DMA_IN_PROGRESS;
245
246 return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
247}
248
249static void hsu_dma_issue_pending(struct dma_chan *chan)
250{
251 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
252 unsigned long flags;
253
254 spin_lock_irqsave(&hsuc->vchan.lock, flags);
255 if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
256 hsu_dma_start_transfer(hsuc);
257 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
258}
259
260static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
261{
262 size_t bytes = 0;
263 unsigned int i;
264
265 for (i = desc->active; i < desc->nents; i++)
266 bytes += desc->sg[i].len;
267
268 return bytes;
269}
270
271static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
272{
273 struct hsu_dma_desc *desc = hsuc->desc;
274 size_t bytes = hsu_dma_desc_size(desc);
275 int i;
276 unsigned long flags;
277
278 spin_lock_irqsave(&hsuc->lock, flags);
279 i = desc->active % HSU_DMA_CHAN_NR_DESC;
280 do {
281 bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
282 } while (--i >= 0);
283 spin_unlock_irqrestore(&hsuc->lock, flags);
284
285 return bytes;
286}
287
288static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
289 dma_cookie_t cookie, struct dma_tx_state *state)
290{
291 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
292 struct virt_dma_desc *vdesc;
293 enum dma_status status;
294 size_t bytes;
295 unsigned long flags;
296
297 status = dma_cookie_status(chan, cookie, state);
298 if (status == DMA_COMPLETE)
299 return status;
300
301 spin_lock_irqsave(&hsuc->vchan.lock, flags);
302 vdesc = vchan_find_desc(&hsuc->vchan, cookie);
303 if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
304 bytes = hsu_dma_active_desc_size(hsuc);
305 dma_set_residue(state, bytes);
306 status = hsuc->desc->status;
307 } else if (vdesc) {
308 bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
309 dma_set_residue(state, bytes);
310 }
311 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
312
313 return status;
314}
315
316static int hsu_dma_slave_config(struct dma_chan *chan,
317 struct dma_slave_config *config)
318{
319 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
320
321 /* Check if chan will be configured for slave transfers */
322 if (!is_slave_direction(config->direction))
323 return -EINVAL;
324
325 memcpy(&hsuc->config, config, sizeof(hsuc->config));
326
327 return 0;
328}
329
330static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
331{
332 unsigned long flags;
333
334 spin_lock_irqsave(&hsuc->lock, flags);
335 hsu_chan_disable(hsuc);
336 spin_unlock_irqrestore(&hsuc->lock, flags);
337}
338
339static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
340{
341 unsigned long flags;
342
343 spin_lock_irqsave(&hsuc->lock, flags);
344 hsu_chan_enable(hsuc);
345 spin_unlock_irqrestore(&hsuc->lock, flags);
346}
347
348static int hsu_dma_pause(struct dma_chan *chan)
349{
350 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
351 unsigned long flags;
352
353 spin_lock_irqsave(&hsuc->vchan.lock, flags);
354 if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
355 hsu_dma_chan_deactivate(hsuc);
356 hsuc->desc->status = DMA_PAUSED;
357 }
358 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
359
360 return 0;
361}
362
363static int hsu_dma_resume(struct dma_chan *chan)
364{
365 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
366 unsigned long flags;
367
368 spin_lock_irqsave(&hsuc->vchan.lock, flags);
369 if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
370 hsuc->desc->status = DMA_IN_PROGRESS;
371 hsu_dma_chan_activate(hsuc);
372 }
373 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
374
375 return 0;
376}
377
378static int hsu_dma_terminate_all(struct dma_chan *chan)
379{
380 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
381 unsigned long flags;
382 LIST_HEAD(head);
383
384 spin_lock_irqsave(&hsuc->vchan.lock, flags);
385
386 hsu_dma_stop_channel(hsuc);
387 hsuc->desc = NULL;
388
389 vchan_get_all_descriptors(&hsuc->vchan, &head);
390 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
391 vchan_dma_desc_free_list(&hsuc->vchan, &head);
392
393 return 0;
394}
395
396static void hsu_dma_free_chan_resources(struct dma_chan *chan)
397{
398 vchan_free_chan_resources(to_virt_chan(chan));
399}
400
401int hsu_dma_probe(struct hsu_dma_chip *chip)
402{
403 struct hsu_dma *hsu;
404 struct hsu_dma_platform_data *pdata = chip->pdata;
405 void __iomem *addr = chip->regs + chip->offset;
406 unsigned short i;
407 int ret;
408
409 hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
410 if (!hsu)
411 return -ENOMEM;
412
413 chip->hsu = hsu;
414
415 if (!pdata) {
416 pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
417 if (!pdata)
418 return -ENOMEM;
419
420 chip->pdata = pdata;
421
422 /* Guess nr_channels from the IO space length */
423 pdata->nr_channels = (chip->length - chip->offset) /
424 HSU_DMA_CHAN_LENGTH;
425 }
426
427 hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels,
428 sizeof(*hsu->chan), GFP_KERNEL);
429 if (!hsu->chan)
430 return -ENOMEM;
431
432 INIT_LIST_HEAD(&hsu->dma.channels);
433 for (i = 0; i < pdata->nr_channels; i++) {
434 struct hsu_dma_chan *hsuc = &hsu->chan[i];
435
436 hsuc->vchan.desc_free = hsu_dma_desc_free;
437 vchan_init(&hsuc->vchan, &hsu->dma);
438
439 hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
440 hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
441
442 spin_lock_init(&hsuc->lock);
443 }
444
445 dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
446 dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);
447
448 hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;
449
450 hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;
451
452 hsu->dma.device_issue_pending = hsu_dma_issue_pending;
453 hsu->dma.device_tx_status = hsu_dma_tx_status;
454
455 hsu->dma.device_config = hsu_dma_slave_config;
456 hsu->dma.device_pause = hsu_dma_pause;
457 hsu->dma.device_resume = hsu_dma_resume;
458 hsu->dma.device_terminate_all = hsu_dma_terminate_all;
459
460 hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
461 hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
462 hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
463 hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
464
465 hsu->dma.dev = chip->dev;
466
467 ret = dma_async_device_register(&hsu->dma);
468 if (ret)
469 return ret;
470
471 dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels);
472 return 0;
473}
474EXPORT_SYMBOL_GPL(hsu_dma_probe);
475
476int hsu_dma_remove(struct hsu_dma_chip *chip)
477{
478 struct hsu_dma *hsu = chip->hsu;
479 unsigned short i;
480
481 dma_async_device_unregister(&hsu->dma);
482
483 for (i = 0; i < chip->pdata->nr_channels; i++) {
484 struct hsu_dma_chan *hsuc = &hsu->chan[i];
485
486 tasklet_kill(&hsuc->vchan.task);
487 }
488
489 return 0;
490}
491EXPORT_SYMBOL_GPL(hsu_dma_remove);
492
493MODULE_LICENSE("GPL v2");
494MODULE_DESCRIPTION("High Speed UART DMA core driver");
495MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
new file mode 100644
index 000000000000..0275233cf550
--- /dev/null
+++ b/drivers/dma/hsu/hsu.h
@@ -0,0 +1,118 @@
1/*
2 * Driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 *
6 * Partially based on the bits found in drivers/tty/serial/mfd.c.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __DMA_HSU_H__
14#define __DMA_HSU_H__
15
16#include <linux/spinlock.h>
17#include <linux/dma/hsu.h>
18
19#include "../virt-dma.h"
20
21#define HSU_CH_SR 0x00 /* channel status */
22#define HSU_CH_CR 0x04 /* channel control */
23#define HSU_CH_DCR 0x08 /* descriptor control */
24#define HSU_CH_BSR 0x10 /* FIFO buffer size */
25#define HSU_CH_MTSR 0x14 /* minimum transfer size */
26#define HSU_CH_DxSAR(x) (0x20 + 8 * (x)) /* desc start addr */
27#define HSU_CH_DxTSR(x) (0x24 + 8 * (x)) /* desc transfer size */
28#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */
29#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */
30#define HSU_CH_D1SAR 0x28
31#define HSU_CH_D1TSR 0x2c
32#define HSU_CH_D2SAR 0x30
33#define HSU_CH_D2TSR 0x34
34#define HSU_CH_D3SAR 0x38
35#define HSU_CH_D3TSR 0x3c
36
37#define HSU_DMA_CHAN_NR_DESC 4
38#define HSU_DMA_CHAN_LENGTH 0x40
39
40/* Bits in HSU_CH_SR */
41#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
42#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
43#define HSU_CH_SR_CHE BIT(15)
44
45/* Bits in HSU_CH_CR */
46#define HSU_CH_CR_CHA BIT(0)
47#define HSU_CH_CR_CHD BIT(1)
48
49/* Bits in HSU_CH_DCR */
50#define HSU_CH_DCR_DESCA(x) BIT(0 + (x))
51#define HSU_CH_DCR_CHSOD(x) BIT(8 + (x))
52#define HSU_CH_DCR_CHSOTO BIT(14)
53#define HSU_CH_DCR_CHSOE BIT(15)
54#define HSU_CH_DCR_CHDI(x) BIT(16 + (x))
55#define HSU_CH_DCR_CHEI BIT(23)
56#define HSU_CH_DCR_CHTOI(x) BIT(24 + (x))
57
58struct hsu_dma_sg {
59 dma_addr_t addr;
60 unsigned int len;
61};
62
63struct hsu_dma_desc {
64 struct virt_dma_desc vdesc;
65 enum dma_transfer_direction direction;
66 struct hsu_dma_sg *sg;
67 unsigned int nents;
68 unsigned int active;
69 enum dma_status status;
70};
71
72static inline struct hsu_dma_desc *to_hsu_dma_desc(struct virt_dma_desc *vdesc)
73{
74 return container_of(vdesc, struct hsu_dma_desc, vdesc);
75}
76
77struct hsu_dma_chan {
78 struct virt_dma_chan vchan;
79
80 void __iomem *reg;
81 spinlock_t lock;
82
83 /* hardware configuration */
84 enum dma_transfer_direction direction;
85 struct dma_slave_config config;
86
87 struct hsu_dma_desc *desc;
88};
89
90static inline struct hsu_dma_chan *to_hsu_dma_chan(struct dma_chan *chan)
91{
92 return container_of(chan, struct hsu_dma_chan, vchan.chan);
93}
94
95static inline u32 hsu_chan_readl(struct hsu_dma_chan *hsuc, int offset)
96{
97 return readl(hsuc->reg + offset);
98}
99
100static inline void hsu_chan_writel(struct hsu_dma_chan *hsuc, int offset,
101 u32 value)
102{
103 writel(value, hsuc->reg + offset);
104}
105
106struct hsu_dma {
107 struct dma_device dma;
108
109 /* channels */
110 struct hsu_dma_chan *chan;
111};
112
113static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev)
114{
115 return container_of(ddev, struct hsu_dma, dma);
116}
117
118#endif /* __DMA_HSU_H__ */
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
new file mode 100644
index 000000000000..77879e6ddc4c
--- /dev/null
+++ b/drivers/dma/hsu/pci.c
@@ -0,0 +1,124 @@
1/*
2 * PCI driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
6 *
7 * Partially based on the bits found in drivers/tty/serial/mfd.c.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/bitops.h>
15#include <linux/device.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18
19#include "hsu.h"
20
21#define HSU_PCI_DMASR 0x00
22#define HSU_PCI_DMAISR 0x04
23
24#define HSU_PCI_CHAN_OFFSET 0x100
25
26static irqreturn_t hsu_pci_irq(int irq, void *dev)
27{
28 struct hsu_dma_chip *chip = dev;
29 u32 dmaisr;
30 unsigned short i;
31 irqreturn_t ret = IRQ_NONE;
32
33 dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
34 for (i = 0; i < chip->pdata->nr_channels; i++) {
35 if (dmaisr & 0x1)
36 ret |= hsu_dma_irq(chip, i);
37 dmaisr >>= 1;
38 }
39
40 return ret;
41}
42
43static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
44{
45 struct hsu_dma_chip *chip;
46 int ret;
47
48 ret = pcim_enable_device(pdev);
49 if (ret)
50 return ret;
51
52 ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
53 if (ret) {
54 dev_err(&pdev->dev, "I/O memory remapping failed\n");
55 return ret;
56 }
57
58 pci_set_master(pdev);
59 pci_try_set_mwi(pdev);
60
61 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
62 if (ret)
63 return ret;
64
65 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
66 if (ret)
67 return ret;
68
69 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
70 if (!chip)
71 return -ENOMEM;
72
73 chip->dev = &pdev->dev;
74 chip->regs = pcim_iomap_table(pdev)[0];
75 chip->length = pci_resource_len(pdev, 0);
76 chip->offset = HSU_PCI_CHAN_OFFSET;
77 chip->irq = pdev->irq;
78
79 pci_enable_msi(pdev);
80
81 ret = hsu_dma_probe(chip);
82 if (ret)
83 return ret;
84
85 ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
86 if (ret)
87 goto err_register_irq;
88
89 pci_set_drvdata(pdev, chip);
90
91 return 0;
92
93err_register_irq:
94 hsu_dma_remove(chip);
95 return ret;
96}
97
98static void hsu_pci_remove(struct pci_dev *pdev)
99{
100 struct hsu_dma_chip *chip = pci_get_drvdata(pdev);
101
102 free_irq(chip->irq, chip);
103 hsu_dma_remove(chip);
104}
105
106static const struct pci_device_id hsu_pci_id_table[] = {
107 { PCI_VDEVICE(INTEL, 0x081e), 0 },
108 { PCI_VDEVICE(INTEL, 0x1192), 0 },
109 { }
110};
111MODULE_DEVICE_TABLE(pci, hsu_pci_id_table);
112
113static struct pci_driver hsu_pci_driver = {
114 .name = "hsu_dma_pci",
115 .id_table = hsu_pci_id_table,
116 .probe = hsu_pci_probe,
117 .remove = hsu_pci_remove,
118};
119
120module_pci_driver(hsu_pci_driver);
121
122MODULE_LICENSE("GPL v2");
123MODULE_DESCRIPTION("High Speed UART DMA PCI driver");
124MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index eb10109c55ad..62bbd79338e0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
 		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 	}
 
+	/* Set bits of CONFIG register with dynamic context switching */
+	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
+		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
 	return ret ? 0 : -ETIMEDOUT;
 }
 
@@ -1398,9 +1402,6 @@ static int sdma_init(struct sdma_engine *sdma)
 
 	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
 
-	/* Set bits of CONFIG register with given context switching mode */
-	writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
-
 	/* Initializes channel's priorities */
 	sdma_set_channel_priority(&sdma->channel[0], 7);
 
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
deleted file mode 100644
index c17e18b909b6..000000000000
--- a/drivers/dma/intel_mid_dma.c
+++ /dev/null
@@ -1,1443 +0,0 @@
1/*
2 * intel_mid_dma.c - Intel Langwell DMA Drivers
3 *
4 * Copyright (C) 2008-10 Intel Corp
5 * Author: Vinod Koul <vinod.koul@intel.com>
6 * The driver design is based on dw_dmac driver
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19 *
20 *
21 */
22#include <linux/pci.h>
23#include <linux/interrupt.h>
24#include <linux/pm_runtime.h>
25#include <linux/intel_mid_dma.h>
26#include <linux/module.h>
27
28#include "dmaengine.h"
29
30#define MAX_CHAN 4 /*max ch across controllers*/
31#include "intel_mid_dma_regs.h"
32
33#define INTEL_MID_DMAC1_ID 0x0814
34#define INTEL_MID_DMAC2_ID 0x0813
35#define INTEL_MID_GP_DMAC2_ID 0x0827
36#define INTEL_MFLD_DMAC1_ID 0x0830
37#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
38#define LNW_PERIPHRAL_MASK_SIZE 0x10
39#define LNW_PERIPHRAL_STATUS 0x0
40#define LNW_PERIPHRAL_MASK 0x8
41
42struct intel_mid_dma_probe_info {
43 u8 max_chan;
44 u8 ch_base;
45 u16 block_size;
46 u32 pimr_mask;
47};
48
49#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
50 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
51 .max_chan = (_max_chan), \
52 .ch_base = (_ch_base), \
53 .block_size = (_block_size), \
54 .pimr_mask = (_pimr_mask), \
55 })
56
57/*****************************************************************************
58Utility Functions*/
59/**
60 * get_ch_index - convert status to channel
61 * @status: status mask
62 * @base: dma ch base value
63 *
64 * Modify the status mask and return the channel index needing
65 * attention (or -1 if neither)
66 */
67static int get_ch_index(int *status, unsigned int base)
68{
69 int i;
70 for (i = 0; i < MAX_CHAN; i++) {
71 if (*status & (1 << (i + base))) {
72 *status = *status & ~(1 << (i + base));
73 pr_debug("MDMA: index %d New status %x\n", i, *status);
74 return i;
75 }
76 }
77 return -1;
78}
79
80/**
81 * get_block_ts - calculates dma transaction length
82 * @len: dma transfer length
83 * @tx_width: dma transfer src width
84 * @block_size: dma controller max block size
85 *
86 * Based on src width calculate the DMA trsaction length in data items
87 * return data items or FFFF if exceeds max length for block
88 */
89static int get_block_ts(int len, int tx_width, int block_size)
90{
91 int byte_width = 0, block_ts = 0;
92
93 switch (tx_width) {
94 case DMA_SLAVE_BUSWIDTH_1_BYTE:
95 byte_width = 1;
96 break;
97 case DMA_SLAVE_BUSWIDTH_2_BYTES:
98 byte_width = 2;
99 break;
100 case DMA_SLAVE_BUSWIDTH_4_BYTES:
101 default:
102 byte_width = 4;
103 break;
104 }
105
106 block_ts = len/byte_width;
107 if (block_ts > block_size)
108 block_ts = 0xFFFF;
109 return block_ts;
110}
111
112/*****************************************************************************
113DMAC1 interrupt Functions*/
114
115/**
116 * dmac1_mask_periphral_intr - mask the periphral interrupt
117 * @mid: dma device for which masking is required
118 *
119 * Masks the DMA periphral interrupt
120 * this is valid for DMAC1 family controllers only
121 * This controller should have periphral mask registers already mapped
122 */
123static void dmac1_mask_periphral_intr(struct middma_device *mid)
124{
125 u32 pimr;
126
127 if (mid->pimr_mask) {
128 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
129 pimr |= mid->pimr_mask;
130 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
131 }
132 return;
133}
134
135/**
136 * dmac1_unmask_periphral_intr - unmask the periphral interrupt
137 * @midc: dma channel for which masking is required
138 *
139 * UnMasks the DMA periphral interrupt,
140 * this is valid for DMAC1 family controllers only
141 * This controller should have periphral mask registers already mapped
142 */
143static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
144{
145 u32 pimr;
146 struct middma_device *mid = to_middma_device(midc->chan.device);
147
148 if (mid->pimr_mask) {
149 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
150 pimr &= ~mid->pimr_mask;
151 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
152 }
153 return;
154}
155
156/**
157 * enable_dma_interrupt - enable the peripheral interrupt
158 * @midc: dma channel for which interrupt enabling is required
159 *
160 * Enables the DMA peripheral interrupt;
161 * this is valid for DMAC1 family controllers only.
162 * The controller's peripheral mask registers must already be mapped.
163 */
164static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
165{
166 dmac1_unmask_periphral_intr(midc);
167
168 /*en ch interrupts*/
169 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
170 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
171 return;
172}
173
174/**
175 * disable_dma_interrupt - disable the peripheral interrupt
176 * @midc: dma channel for which interrupt disabling is required
177 *
178 * Disables the DMA peripheral interrupt;
179 * this is valid for DMAC1 family controllers only.
180 * The controller's peripheral mask registers must already be mapped.
181 */
182static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
183{
184 /*Check LPE PISR, make sure fwd is disabled*/
185 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
186 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
187 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
188 return;
189}
190
191/*****************************************************************************
192DMA channel helper Functions*/
193/**
194 * midc_desc_get - get a descriptor
195 * @midc: dma channel for which descriptor is required
196 *
197 * Obtain a descriptor for the channel. Returns NULL if none are free.
198 * Once the descriptor is returned it is private until put on another
199 * list or freed
200 */
201static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
202{
203 struct intel_mid_dma_desc *desc, *_desc;
204 struct intel_mid_dma_desc *ret = NULL;
205
206 spin_lock_bh(&midc->lock);
207 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
208 if (async_tx_test_ack(&desc->txd)) {
209 list_del(&desc->desc_node);
210 ret = desc;
211 break;
212 }
213 }
214 spin_unlock_bh(&midc->lock);
215 return ret;
216}
217
218/**
219 * midc_desc_put - put a descriptor
220 * @midc: dma channel to which the descriptor is returned
221 * @desc: descriptor to put
222 *
223 * Return a descriptor obtained from midc_desc_get back to the free pool
224 */
225static void midc_desc_put(struct intel_mid_dma_chan *midc,
226 struct intel_mid_dma_desc *desc)
227{
228 if (desc) {
229 spin_lock_bh(&midc->lock);
230 list_add_tail(&desc->desc_node, &midc->free_list);
231 spin_unlock_bh(&midc->lock);
232 }
233}
234/**
235 * midc_dostart - begin a DMA transaction
236 * @midc: channel for which txn is to be started
237 * @first: first descriptor of series
238 *
239 * Load a transaction into the engine. This must be called with midc->lock
240 * held and bh disabled.
241 */
242static void midc_dostart(struct intel_mid_dma_chan *midc,
243 struct intel_mid_dma_desc *first)
244{
245 struct middma_device *mid = to_middma_device(midc->chan.device);
246
247 /* the channel must be idle before a new transfer is started */
248 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
249 /*error*/
250 pr_err("ERR_MDMA: channel is busy in start\n");
251 /* The tasklet will hopefully advance the queue... */
252 return;
253 }
254 midc->busy = true;
255 /*write registers and en*/
256 iowrite32(first->sar, midc->ch_regs + SAR);
257 iowrite32(first->dar, midc->ch_regs + DAR);
258 iowrite32(first->lli_phys, midc->ch_regs + LLP);
259 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
260 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
261 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
262 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
263 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
264 (int)first->sar, (int)first->dar, first->cfg_hi,
265 first->cfg_lo, first->ctl_hi, first->ctl_lo);
266 first->status = DMA_IN_PROGRESS;
267
268 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
269}
270
271/**
272 * midc_descriptor_complete - process completed descriptor
273 * @midc: channel owning the descriptor
274 * @desc: the descriptor itself
275 *
276 * Process a completed descriptor and perform any callbacks upon
277 * the completion. The completion handling drops the lock during the
278 * callbacks but must be called with the lock held.
279 */
280static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
281 struct intel_mid_dma_desc *desc)
282 __releases(&midc->lock) __acquires(&midc->lock)
283{
284 struct dma_async_tx_descriptor *txd = &desc->txd;
285 dma_async_tx_callback callback_txd = NULL;
286 struct intel_mid_dma_lli *llitem;
287 void *param_txd = NULL;
288
289 dma_cookie_complete(txd);
290 callback_txd = txd->callback;
291 param_txd = txd->callback_param;
292
293 if (desc->lli != NULL) {
294 /*clear the DONE bit of completed LLI in memory*/
295 llitem = desc->lli + desc->current_lli;
296 llitem->ctl_hi &= CLEAR_DONE;
297 if (desc->current_lli < desc->lli_length-1)
298 (desc->current_lli)++;
299 else
300 desc->current_lli = 0;
301 }
302 spin_unlock_bh(&midc->lock);
303 if (callback_txd) {
304 pr_debug("MDMA: TXD callback set ... calling\n");
305 callback_txd(param_txd);
306 }
307 if (midc->raw_tfr) {
308 desc->status = DMA_COMPLETE;
309 if (desc->lli != NULL) {
310 pci_pool_free(desc->lli_pool, desc->lli,
311 desc->lli_phys);
312 pci_pool_destroy(desc->lli_pool);
313 desc->lli = NULL;
314 }
315 list_move(&desc->desc_node, &midc->free_list);
316 midc->busy = false;
317 }
318 spin_lock_bh(&midc->lock);
319
320}
321/**
322 * midc_scan_descriptors - check the descriptors in channel
323 * and mark them completed when the tx is complete
324 * @mid: device
325 * @midc: channel to scan
326 *
327 * Walk the descriptor chain for the device and process any entries
328 * that are complete.
329 */
330static void midc_scan_descriptors(struct middma_device *mid,
331 struct intel_mid_dma_chan *midc)
332{
333 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
334
335 /*tx is complete*/
336 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
337 if (desc->status == DMA_IN_PROGRESS)
338 midc_descriptor_complete(midc, desc);
339 }
340 return;
341}
342/**
343 * midc_lli_fill_sg - Helper function to convert
344 * SG list to Linked List Items.
345 * @midc: Channel
346 * @desc: DMA descriptor
347 * @sglist: Pointer to SG list
348 * @sglen: SG list length
349 * @flags: DMA transaction flags
350 *
351 * Walk through the SG list and convert the SG list into Linked
352 * List Items (LLI).
353 */
354static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
355 struct intel_mid_dma_desc *desc,
356 struct scatterlist *sglist,
357 unsigned int sglen,
358 unsigned int flags)
359{
360 struct intel_mid_dma_slave *mids;
361 struct scatterlist *sg;
362 dma_addr_t lli_next, sg_phy_addr;
363 struct intel_mid_dma_lli *lli_bloc_desc;
364 union intel_mid_dma_ctl_lo ctl_lo;
365 union intel_mid_dma_ctl_hi ctl_hi;
366 int i;
367
368 pr_debug("MDMA: Entered midc_lli_fill_sg\n");
369 mids = midc->mid_slave;
370
371 lli_bloc_desc = desc->lli;
372 lli_next = desc->lli_phys;
373
374 ctl_lo.ctl_lo = desc->ctl_lo;
375 ctl_hi.ctl_hi = desc->ctl_hi;
376 for_each_sg(sglist, sg, sglen, i) {
377 /*Populate CTL_LOW and LLI values*/
378 if (i != sglen - 1) {
379 lli_next = lli_next +
380 sizeof(struct intel_mid_dma_lli);
381 } else {
382 /*Check for circular list, otherwise terminate LLI to ZERO*/
383 if (flags & DMA_PREP_CIRCULAR_LIST) {
384 pr_debug("MDMA: LLI is configured in circular mode\n");
385 lli_next = desc->lli_phys;
386 } else {
387 lli_next = 0;
388 ctl_lo.ctlx.llp_dst_en = 0;
389 ctl_lo.ctlx.llp_src_en = 0;
390 }
391 }
392 /*Populate CTL_HI values*/
393 ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
394 desc->width,
395 midc->dma->block_size);
396 /*Populate SAR and DAR values*/
397 sg_phy_addr = sg_dma_address(sg);
398 if (desc->dirn == DMA_MEM_TO_DEV) {
399 lli_bloc_desc->sar = sg_phy_addr;
400 lli_bloc_desc->dar = mids->dma_slave.dst_addr;
401 } else if (desc->dirn == DMA_DEV_TO_MEM) {
402 lli_bloc_desc->sar = mids->dma_slave.src_addr;
403 lli_bloc_desc->dar = sg_phy_addr;
404 }
405 /*Copy values into block descriptor in system memory*/
406 lli_bloc_desc->llp = lli_next;
407 lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
408 lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
409
410 lli_bloc_desc++;
411 }
412 /*Copy very first LLI values to descriptor*/
413 desc->ctl_lo = desc->lli->ctl_lo;
414 desc->ctl_hi = desc->lli->ctl_hi;
415 desc->sar = desc->lli->sar;
416 desc->dar = desc->lli->dar;
417
418 return 0;
419}
420/*****************************************************************************
421DMA engine callback Functions*/
422/**
423 * intel_mid_dma_tx_submit - callback to submit DMA transaction
424 * @tx: dma engine descriptor
425 *
426 * Submit the DMA transaction for this descriptor, start if ch idle
427 */
428static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
429{
430 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
431 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
432 dma_cookie_t cookie;
433
434 spin_lock_bh(&midc->lock);
435 cookie = dma_cookie_assign(tx);
436
437 if (list_empty(&midc->active_list))
438 list_add_tail(&desc->desc_node, &midc->active_list);
439 else
440 list_add_tail(&desc->desc_node, &midc->queue);
441
442 midc_dostart(midc, desc);
443 spin_unlock_bh(&midc->lock);
444
445 return cookie;
446}
447
448/**
449 * intel_mid_dma_issue_pending - callback to issue pending txn
450 * @chan: chan where pending transactions need to be checked and submitted
451 *
452 * Call for scan to issue pending descriptors
453 */
454static void intel_mid_dma_issue_pending(struct dma_chan *chan)
455{
456 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
457
458 spin_lock_bh(&midc->lock);
459 if (!list_empty(&midc->queue))
460 midc_scan_descriptors(to_middma_device(chan->device), midc);
461 spin_unlock_bh(&midc->lock);
462}
463
464/**
465 * intel_mid_dma_tx_status - Return status of txn
466 * @chan: chan for where status needs to be checked
467 * @cookie: cookie for txn
468 * @txstate: DMA txn state
469 *
470 * Return status of DMA txn
471 */
472static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
473 dma_cookie_t cookie,
474 struct dma_tx_state *txstate)
475{
476 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
477 enum dma_status ret;
478
479 ret = dma_cookie_status(chan, cookie, txstate);
480 if (ret != DMA_COMPLETE) {
481 spin_lock_bh(&midc->lock);
482 midc_scan_descriptors(to_middma_device(chan->device), midc);
483 spin_unlock_bh(&midc->lock);
484
485 ret = dma_cookie_status(chan, cookie, txstate);
486 }
487
488 return ret;
489}
490
491static int intel_mid_dma_config(struct dma_chan *chan,
492 struct dma_slave_config *slave)
493{
494 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
495 struct intel_mid_dma_slave *mid_slave;
496
497 BUG_ON(!midc);
498 BUG_ON(!slave);
499 pr_debug("MDMA: slave control called\n");
500
501 mid_slave = to_intel_mid_dma_slave(slave);
502
503 BUG_ON(!mid_slave);
504
505 midc->mid_slave = mid_slave;
506 return 0;
507}
508
509static int intel_mid_dma_terminate_all(struct dma_chan *chan)
510{
511 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
512 struct middma_device *mid = to_middma_device(chan->device);
513 struct intel_mid_dma_desc *desc, *_desc;
514 union intel_mid_dma_cfg_lo cfg_lo;
515
516 spin_lock_bh(&midc->lock);
517 if (midc->busy == false) {
518 spin_unlock_bh(&midc->lock);
519 return 0;
520 }
521 /*Suspend and disable the channel*/
522 cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
523 cfg_lo.cfgx.ch_susp = 1;
524 iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
525 iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
526 midc->busy = false;
527 /* Disable interrupts */
528 disable_dma_interrupt(midc);
529 midc->descs_allocated = 0;
530
531 spin_unlock_bh(&midc->lock);
532 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
533 if (desc->lli != NULL) {
534 pci_pool_free(desc->lli_pool, desc->lli,
535 desc->lli_phys);
536 pci_pool_destroy(desc->lli_pool);
537 desc->lli = NULL;
538 }
539 list_move(&desc->desc_node, &midc->free_list);
540 }
541 return 0;
542}
543
544
545/**
546 * intel_mid_dma_prep_memcpy - Prep memcpy txn
547 * @chan: chan for DMA transfer
548 * @dest: destn address
549 * @src: src address
550 * @len: DMA transfer len
551 * @flags: DMA flags
552 *
553 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only;
554 * the peripheral txn details should be filled in the slave structure properly.
555 * Returns the descriptor for this txn
556 */
557static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
558 struct dma_chan *chan, dma_addr_t dest,
559 dma_addr_t src, size_t len, unsigned long flags)
560{
561 struct intel_mid_dma_chan *midc;
562 struct intel_mid_dma_desc *desc = NULL;
563 struct intel_mid_dma_slave *mids;
564 union intel_mid_dma_ctl_lo ctl_lo;
565 union intel_mid_dma_ctl_hi ctl_hi;
566 union intel_mid_dma_cfg_lo cfg_lo;
567 union intel_mid_dma_cfg_hi cfg_hi;
568 enum dma_slave_buswidth width;
569
570 pr_debug("MDMA: Prep for memcpy\n");
571 BUG_ON(!chan);
572 if (!len)
573 return NULL;
574
575 midc = to_intel_mid_dma_chan(chan);
576 BUG_ON(!midc);
577
578 mids = midc->mid_slave;
579 BUG_ON(!mids);
580
581 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
582 midc->dma->pci_id, midc->ch_id, len);
583 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
584 mids->cfg_mode, mids->dma_slave.direction,
585 mids->hs_mode, mids->dma_slave.src_addr_width);
586
587 /*calculate CFG_LO*/
588 if (mids->hs_mode == LNW_DMA_SW_HS) {
589 cfg_lo.cfg_lo = 0;
590 cfg_lo.cfgx.hs_sel_dst = 1;
591 cfg_lo.cfgx.hs_sel_src = 1;
592 } else if (mids->hs_mode == LNW_DMA_HW_HS)
593 cfg_lo.cfg_lo = 0x00000;
594
595 /*calculate CFG_HI*/
596 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
597 /*SW HS only*/
598 cfg_hi.cfg_hi = 0;
599 } else {
600 cfg_hi.cfg_hi = 0;
601 if (midc->dma->pimr_mask) {
602 cfg_hi.cfgx.protctl = 0x0; /*default value*/
603 cfg_hi.cfgx.fifo_mode = 1;
604 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
605 cfg_hi.cfgx.src_per = 0;
606 if (mids->device_instance == 0)
607 cfg_hi.cfgx.dst_per = 3;
608 if (mids->device_instance == 1)
609 cfg_hi.cfgx.dst_per = 1;
610 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
611 if (mids->device_instance == 0)
612 cfg_hi.cfgx.src_per = 2;
613 if (mids->device_instance == 1)
614 cfg_hi.cfgx.src_per = 0;
615 cfg_hi.cfgx.dst_per = 0;
616 }
617 } else {
618 cfg_hi.cfgx.protctl = 0x1; /*default value*/
619 cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
620 midc->ch_id - midc->dma->chan_base;
621 }
622 }
623
624 /*calculate CTL_HI*/
625 ctl_hi.ctlx.reser = 0;
626 ctl_hi.ctlx.done = 0;
627 width = mids->dma_slave.src_addr_width;
628
629 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
630 pr_debug("MDMA:calc len %d for block size %d\n",
631 ctl_hi.ctlx.block_ts, midc->dma->block_size);
632 /*calculate CTL_LO*/
633 ctl_lo.ctl_lo = 0;
634 ctl_lo.ctlx.int_en = 1;
635 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
636 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
637
638 /*
639 * Here we need some translation from "enum dma_slave_buswidth"
640 * to the format for our dma controller
641 * standard intel_mid_dmac's format
642 * 1 Byte 0b000
643 * 2 Bytes 0b001
644 * 4 Bytes 0b010
645 */
646 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
647 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
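	/* illustrative check: DMA_SLAVE_BUSWIDTH_4_BYTES (4) / 2 == 2 == 0b010, matching the mapping above */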
648
649 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
650 ctl_lo.ctlx.tt_fc = 0;
651 ctl_lo.ctlx.sinc = 0;
652 ctl_lo.ctlx.dinc = 0;
653 } else {
654 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
655 ctl_lo.ctlx.sinc = 0;
656 ctl_lo.ctlx.dinc = 2;
657 ctl_lo.ctlx.tt_fc = 1;
658 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
659 ctl_lo.ctlx.sinc = 2;
660 ctl_lo.ctlx.dinc = 0;
661 ctl_lo.ctlx.tt_fc = 2;
662 }
663 }
664
665 pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
666 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
667
668 enable_dma_interrupt(midc);
669
670 desc = midc_desc_get(midc);
671 if (desc == NULL)
672 goto err_desc_get;
673 desc->sar = src;
674 desc->dar = dest;
675 desc->len = len;
676 desc->cfg_hi = cfg_hi.cfg_hi;
677 desc->cfg_lo = cfg_lo.cfg_lo;
678 desc->ctl_lo = ctl_lo.ctl_lo;
679 desc->ctl_hi = ctl_hi.ctl_hi;
680 desc->width = width;
681 desc->dirn = mids->dma_slave.direction;
682 desc->lli_phys = 0;
683 desc->lli = NULL;
684 desc->lli_pool = NULL;
685 return &desc->txd;
686
687err_desc_get:
688 pr_err("ERR_MDMA: Failed to get desc\n");
689 midc_desc_put(midc, desc);
690 return NULL;
691}
692/**
693 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
694 * @chan: chan for DMA transfer
695 * @sgl: scatter gather list
696 * @sg_len: length of sg txn
697 * @direction: DMA transfer direction
698 * @flags: DMA flags
699 * @context: transfer context (ignored)
700 *
701 * Prepares an LLI-based peripheral transfer
702 */
703static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
704 struct dma_chan *chan, struct scatterlist *sgl,
705 unsigned int sg_len, enum dma_transfer_direction direction,
706 unsigned long flags, void *context)
707{
708 struct intel_mid_dma_chan *midc = NULL;
709 struct intel_mid_dma_slave *mids = NULL;
710 struct intel_mid_dma_desc *desc = NULL;
711 struct dma_async_tx_descriptor *txd = NULL;
712 union intel_mid_dma_ctl_lo ctl_lo;
713
714 pr_debug("MDMA: Prep for slave SG\n");
715
716 if (!sg_len) {
717 pr_err("MDMA: Invalid SG length\n");
718 return NULL;
719 }
720 midc = to_intel_mid_dma_chan(chan);
721 BUG_ON(!midc);
722
723 mids = midc->mid_slave;
724 BUG_ON(!mids);
725
726 if (!midc->dma->pimr_mask) {
727 /* We can still handle sg list with only one item */
728 if (sg_len == 1) {
729 txd = intel_mid_dma_prep_memcpy(chan,
730 mids->dma_slave.dst_addr,
731 mids->dma_slave.src_addr,
732 sg_dma_len(sgl),
733 flags);
734 return txd;
735 } else {
736 pr_warn("MDMA: SG list is not supported by this controller\n");
737 return NULL;
738 }
739 }
740
741 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
742 sg_len, direction, flags);
743
744 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
745 if (NULL == txd) {
746 pr_err("MDMA: Prep memcpy failed\n");
747 return NULL;
748 }
749
750 desc = to_intel_mid_dma_desc(txd);
751 desc->dirn = direction;
752 ctl_lo.ctl_lo = desc->ctl_lo;
753 ctl_lo.ctlx.llp_dst_en = 1;
754 ctl_lo.ctlx.llp_src_en = 1;
755 desc->ctl_lo = ctl_lo.ctl_lo;
756 desc->lli_length = sg_len;
757 desc->current_lli = 0;
758 /* DMA coherent memory pool for LLI descriptors*/
759 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
760 midc->dma->pdev,
761 (sizeof(struct intel_mid_dma_lli)*sg_len),
762 32, 0);
763 if (NULL == desc->lli_pool) {
764 pr_err("MID_DMA:LLI pool create failed\n");
765 return NULL;
766 }
767
768 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
769 if (!desc->lli) {
770 pr_err("MID_DMA: LLI alloc failed\n");
771 pci_pool_destroy(desc->lli_pool);
772 return NULL;
773 }
774
775 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
776 if (flags & DMA_PREP_INTERRUPT) {
777 iowrite32(UNMASK_INTR_REG(midc->ch_id),
778 midc->dma_base + MASK_BLOCK);
779 pr_debug("MDMA:Enabled Block interrupt\n");
780 }
781 return &desc->txd;
782}
783
784/**
785 * intel_mid_dma_free_chan_resources - Frees dma resources
786 * @chan: chan requiring attention
787 *
788 * Frees the allocated resources on this DMA chan
789 */
790static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
791{
792 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
793 struct middma_device *mid = to_middma_device(chan->device);
794 struct intel_mid_dma_desc *desc, *_desc;
795
796 if (true == midc->busy) {
797 /*trying to free ch in use!!!!!*/
798 pr_err("ERR_MDMA: trying to free ch in use\n");
799 }
800 spin_lock_bh(&midc->lock);
801 midc->descs_allocated = 0;
802 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
803 list_del(&desc->desc_node);
804 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
805 }
806 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
807 list_del(&desc->desc_node);
808 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
809 }
810 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
811 list_del(&desc->desc_node);
812 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
813 }
814 spin_unlock_bh(&midc->lock);
815 midc->in_use = false;
816 midc->busy = false;
817 /* Disable CH interrupts */
818 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
819 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
820 pm_runtime_put(&mid->pdev->dev);
821}
822
823/**
824 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
825 * @chan: chan requiring attention
826 *
827 * Allocates DMA resources on this chan
828 * Returns the number of descriptors allocated
829 */
830static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
831{
832 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
833 struct middma_device *mid = to_middma_device(chan->device);
834 struct intel_mid_dma_desc *desc;
835 dma_addr_t phys;
836 int i = 0;
837
838 pm_runtime_get_sync(&mid->pdev->dev);
839
840 if (mid->state == SUSPENDED) {
841 if (dma_resume(&mid->pdev->dev)) {
842 pr_err("ERR_MDMA: resume failed");
843 return -EFAULT;
844 }
845 }
846
847 /* ASSERT: channel is idle */
848 if (test_ch_en(mid->dma_base, midc->ch_id)) {
849 /*ch is not idle*/
850 pr_err("ERR_MDMA: ch not idle\n");
851 pm_runtime_put(&mid->pdev->dev);
852 return -EIO;
853 }
854 dma_cookie_init(chan);
855
856 spin_lock_bh(&midc->lock);
857 while (midc->descs_allocated < DESCS_PER_CHANNEL) {
858 spin_unlock_bh(&midc->lock);
859 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
860 if (!desc) {
861 pr_err("ERR_MDMA: desc failed\n");
862 pm_runtime_put(&mid->pdev->dev);
863 return -ENOMEM;
864 /*check*/
865 }
866 dma_async_tx_descriptor_init(&desc->txd, chan);
867 desc->txd.tx_submit = intel_mid_dma_tx_submit;
868 desc->txd.flags = DMA_CTRL_ACK;
869 desc->txd.phys = phys;
870 spin_lock_bh(&midc->lock);
871 i = ++midc->descs_allocated;
872 list_add_tail(&desc->desc_node, &midc->free_list);
873 }
874 spin_unlock_bh(&midc->lock);
875 midc->in_use = true;
876 midc->busy = false;
877 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
878 return i;
879}
880
881/**
882 * midc_handle_error - Handle DMA txn error
883 * @mid: controller where error occurred
884 * @midc: chan where error occurred
885 *
886 * Scan the descriptor for error
887 */
888static void midc_handle_error(struct middma_device *mid,
889 struct intel_mid_dma_chan *midc)
890{
891 midc_scan_descriptors(mid, midc);
892}
893
894/**
895 * dma_tasklet - DMA interrupt tasklet
896 * @data: tasklet arg (the controller structure)
897 *
898 * Scan the controller for interrupts for completion/error
899 * Clear the interrupt and call for handling completion/error
900 */
901static void dma_tasklet(unsigned long data)
902{
903 struct middma_device *mid = NULL;
904 struct intel_mid_dma_chan *midc = NULL;
905 u32 status, raw_tfr, raw_block;
906 int i;
907
908 mid = (struct middma_device *)data;
909 if (mid == NULL) {
910 pr_err("ERR_MDMA: tasklet Null param\n");
911 return;
912 }
913 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
914 raw_tfr = ioread32(mid->dma_base + RAW_TFR);
915 raw_block = ioread32(mid->dma_base + RAW_BLOCK);
916 status = raw_tfr | raw_block;
917 status &= mid->intr_mask;
918 while (status) {
919 /*txn interrupt*/
920 i = get_ch_index(&status, mid->chan_base);
921 if (i < 0) {
922 pr_err("ERR_MDMA:Invalid ch index %x\n", i);
923 return;
924 }
925 midc = &mid->ch[i];
926 if (midc == NULL) {
927 pr_err("ERR_MDMA:Null param midc\n");
928 return;
929 }
930 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
931 status, midc->ch_id, i);
932 midc->raw_tfr = raw_tfr;
933 midc->raw_block = raw_block;
934 spin_lock_bh(&midc->lock);
935 /*clear these interrupts first*/
936 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
937 if (raw_block) {
938 iowrite32((1 << midc->ch_id),
939 mid->dma_base + CLEAR_BLOCK);
940 }
941 midc_scan_descriptors(mid, midc);
942 pr_debug("MDMA:Scan of desc... complete, unmasking\n");
943 iowrite32(UNMASK_INTR_REG(midc->ch_id),
944 mid->dma_base + MASK_TFR);
945 if (raw_block) {
946 iowrite32(UNMASK_INTR_REG(midc->ch_id),
947 mid->dma_base + MASK_BLOCK);
948 }
949 spin_unlock_bh(&midc->lock);
950 }
951
952 status = ioread32(mid->dma_base + RAW_ERR);
953 status &= mid->intr_mask;
954 while (status) {
955 /*err interrupt*/
956 i = get_ch_index(&status, mid->chan_base);
957 if (i < 0) {
958 pr_err("ERR_MDMA:Invalid ch index %x\n", i);
959 return;
960 }
961 midc = &mid->ch[i];
962 if (midc == NULL) {
963 pr_err("ERR_MDMA:Null param midc\n");
964 return;
965 }
966 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
967 status, midc->ch_id, i);
968
969 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
970 spin_lock_bh(&midc->lock);
971 midc_handle_error(mid, midc);
972 iowrite32(UNMASK_INTR_REG(midc->ch_id),
973 mid->dma_base + MASK_ERR);
974 spin_unlock_bh(&midc->lock);
975 }
976 pr_debug("MDMA:Exiting tasklet...\n");
977 return;
978}
979
980static void dma_tasklet1(unsigned long data)
981{
982 pr_debug("MDMA:in tasklet1...\n");
983 return dma_tasklet(data);
984}
985
986static void dma_tasklet2(unsigned long data)
987{
988 pr_debug("MDMA:in tasklet2...\n");
989 return dma_tasklet(data);
990}
991
992/**
993 * intel_mid_dma_interrupt - DMA ISR
994 * @irq: IRQ where interrupt occurred
995 * @data: ISR callback data (the controller structure)
996 *
997 * See if this is our interrupt; if so, schedule the tasklet,
998 * otherwise ignore it
999 */
1000static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
1001{
1002 struct middma_device *mid = data;
1003 u32 tfr_status, err_status;
1004 int call_tasklet = 0;
1005
1006 tfr_status = ioread32(mid->dma_base + RAW_TFR);
1007 err_status = ioread32(mid->dma_base + RAW_ERR);
1008 if (!tfr_status && !err_status)
1009 return IRQ_NONE;
1010
1011 /*DMA Interrupt*/
1012 pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
1013 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
1014 tfr_status &= mid->intr_mask;
1015 if (tfr_status) {
1016 /*need to disable intr*/
1017 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
1018 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
1019 pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
1020 call_tasklet = 1;
1021 }
1022 err_status &= mid->intr_mask;
1023 if (err_status) {
1024 iowrite32((err_status << INT_MASK_WE),
1025 mid->dma_base + MASK_ERR);
1026 call_tasklet = 1;
1027 }
1028 if (call_tasklet)
1029 tasklet_schedule(&mid->tasklet);
1030
1031 return IRQ_HANDLED;
1032}
1033
1034static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
1035{
1036 return intel_mid_dma_interrupt(irq, data);
1037}
1038
1039static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
1040{
1041 return intel_mid_dma_interrupt(irq, data);
1042}
1043
1044/**
1045 * mid_setup_dma - Setup the DMA controller
1046 * @pdev: Controller PCI device structure
1047 *
1048 * Initialize the DMA controller and its channels, register with the
1049 * DMA engine and set up the ISR.
1050 */
1051static int mid_setup_dma(struct pci_dev *pdev)
1052{
1053 struct middma_device *dma = pci_get_drvdata(pdev);
1054 int err, i;
1055
1056 /* DMA coherent memory pool for DMA descriptor allocations */
1057 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
1058 sizeof(struct intel_mid_dma_desc),
1059 32, 0);
1060 if (NULL == dma->dma_pool) {
1061 pr_err("ERR_MDMA:pci_pool_create failed\n");
1062 err = -ENOMEM;
1063 goto err_dma_pool;
1064 }
1065
1066 INIT_LIST_HEAD(&dma->common.channels);
1067 dma->pci_id = pdev->device;
1068 if (dma->pimr_mask) {
1069 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
1070 LNW_PERIPHRAL_MASK_SIZE);
1071 if (dma->mask_reg == NULL) {
1072 pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
1073 err = -ENOMEM;
1074 goto err_ioremap;
1075 }
1076 } else
1077 dma->mask_reg = NULL;
1078
1079 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
1080 /*init CH structures*/
1081 dma->intr_mask = 0;
1082 dma->state = RUNNING;
1083 for (i = 0; i < dma->max_chan; i++) {
1084 struct intel_mid_dma_chan *midch = &dma->ch[i];
1085
1086 midch->chan.device = &dma->common;
1087 dma_cookie_init(&midch->chan);
1088 midch->ch_id = dma->chan_base + i;
1089 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
1090
1091 midch->dma_base = dma->dma_base;
1092 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
1093 midch->dma = dma;
1094 dma->intr_mask |= 1 << (dma->chan_base + i);
1095 spin_lock_init(&midch->lock);
1096
1097 INIT_LIST_HEAD(&midch->active_list);
1098 INIT_LIST_HEAD(&midch->queue);
1099 INIT_LIST_HEAD(&midch->free_list);
1100 /*mask interrupts*/
1101 iowrite32(MASK_INTR_REG(midch->ch_id),
1102 dma->dma_base + MASK_BLOCK);
1103 iowrite32(MASK_INTR_REG(midch->ch_id),
1104 dma->dma_base + MASK_SRC_TRAN);
1105 iowrite32(MASK_INTR_REG(midch->ch_id),
1106 dma->dma_base + MASK_DST_TRAN);
1107 iowrite32(MASK_INTR_REG(midch->ch_id),
1108 dma->dma_base + MASK_ERR);
1109 iowrite32(MASK_INTR_REG(midch->ch_id),
1110 dma->dma_base + MASK_TFR);
1111
1112 disable_dma_interrupt(midch);
1113 list_add_tail(&midch->chan.device_node, &dma->common.channels);
1114 }
1115 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);
1116
1117 /*init dma structure*/
1118 dma_cap_zero(dma->common.cap_mask);
1119 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
1120 dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
1121 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
1122 dma->common.dev = &pdev->dev;
1123
1124 dma->common.device_alloc_chan_resources =
1125 intel_mid_dma_alloc_chan_resources;
1126 dma->common.device_free_chan_resources =
1127 intel_mid_dma_free_chan_resources;
1128
1129 dma->common.device_tx_status = intel_mid_dma_tx_status;
1130 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
1131 dma->common.device_issue_pending = intel_mid_dma_issue_pending;
1132 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
1133 dma->common.device_config = intel_mid_dma_config;
1134 dma->common.device_terminate_all = intel_mid_dma_terminate_all;
1135
1136 /*enable dma cntrl*/
1137 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
1138
1139 /*register irq */
1140 if (dma->pimr_mask) {
1141 pr_debug("MDMA:Requesting irq shared for DMAC1\n");
1142 err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
1143 IRQF_SHARED, "INTEL_MID_DMAC1", dma);
1144 if (0 != err)
1145 goto err_irq;
1146 } else {
1147 dma->intr_mask = 0x03;
1148 pr_debug("MDMA:Requesting irq for DMAC2\n");
1149 err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
1150 IRQF_SHARED, "INTEL_MID_DMAC2", dma);
1151 if (0 != err)
1152 goto err_irq;
1153 }
1154 /*register device w/ engine*/
1155 err = dma_async_device_register(&dma->common);
1156 if (0 != err) {
1157 pr_err("ERR_MDMA:device_register failed: %d\n", err);
1158 goto err_engine;
1159 }
1160 if (dma->pimr_mask) {
1161 pr_debug("setting up tasklet1 for DMAC1\n");
1162 tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
1163 } else {
1164 pr_debug("setting up tasklet2 for DMAC2\n");
1165 tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
1166 }
1167 return 0;
1168
1169err_engine:
1170 free_irq(pdev->irq, dma);
1171err_irq:
1172 if (dma->mask_reg)
1173 iounmap(dma->mask_reg);
1174err_ioremap:
1175 pci_pool_destroy(dma->dma_pool);
1176err_dma_pool:
1177 pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
1178 return err;
1179
1180}
1181
1182/**
1183 * middma_shutdown - Shutdown the DMA controller
1184 * @pdev: Controller PCI device structure
1185 *
1186 * Called by remove
1187 * Unregister the DMA controller, clear all structures and free the interrupt
1188 */
1189static void middma_shutdown(struct pci_dev *pdev)
1190{
1191 struct middma_device *device = pci_get_drvdata(pdev);
1192
1193 dma_async_device_unregister(&device->common);
1194 pci_pool_destroy(device->dma_pool);
1195 if (device->mask_reg)
1196 iounmap(device->mask_reg);
1197 if (device->dma_base)
1198 iounmap(device->dma_base);
1199 free_irq(pdev->irq, device);
1200 return;
1201}
1202
1203/**
1204 * intel_mid_dma_probe - PCI Probe
1205 * @pdev: Controller PCI device structure
1206 * @id: pci device id structure
1207 *
1208 * Initialize the PCI device, map BARs, query driver data.
1209 * Call mid_setup_dma to complete controller and chan initialization
1210 */
1211static int intel_mid_dma_probe(struct pci_dev *pdev,
1212 const struct pci_device_id *id)
1213{
1214 struct middma_device *device;
1215 u32 base_addr, bar_size;
1216 struct intel_mid_dma_probe_info *info;
1217 int err;
1218
1219 pr_debug("MDMA: probe for %x\n", pdev->device);
1220 info = (void *)id->driver_data;
1221 pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n",
1222 info->max_chan, info->ch_base,
1223 info->block_size, info->pimr_mask);
1224
1225 err = pci_enable_device(pdev);
1226 if (err)
1227 goto err_enable_device;
1228
1229 err = pci_request_regions(pdev, "intel_mid_dmac");
1230 if (err)
1231 goto err_request_regions;
1232
1233 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1234 if (err)
1235 goto err_set_dma_mask;
1236
1237 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1238 if (err)
1239 goto err_set_dma_mask;
1240
1241 device = kzalloc(sizeof(*device), GFP_KERNEL);
1242 if (!device) {
1243 pr_err("ERR_MDMA:kzalloc failed probe\n");
1244 err = -ENOMEM;
1245 goto err_kzalloc;
1246 }
1247 device->pdev = pci_dev_get(pdev);
1248
1249 base_addr = pci_resource_start(pdev, 0);
1250 bar_size = pci_resource_len(pdev, 0);
1251 device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
1252 if (!device->dma_base) {
1253 pr_err("ERR_MDMA:ioremap failed\n");
1254 err = -ENOMEM;
1255 goto err_ioremap;
1256 }
1257 pci_set_drvdata(pdev, device);
1258 pci_set_master(pdev);
1259 device->max_chan = info->max_chan;
1260 device->chan_base = info->ch_base;
1261 device->block_size = info->block_size;
1262 device->pimr_mask = info->pimr_mask;
1263
1264 err = mid_setup_dma(pdev);
1265 if (err)
1266 goto err_dma;
1267
1268 pm_runtime_put_noidle(&pdev->dev);
1269 pm_runtime_allow(&pdev->dev);
1270 return 0;
1271
1272err_dma:
1273 iounmap(device->dma_base);
1274err_ioremap:
1275 pci_dev_put(pdev);
1276 kfree(device);
1277err_kzalloc:
1278err_set_dma_mask:
1279 pci_release_regions(pdev);
1280 pci_disable_device(pdev);
1281err_request_regions:
1282err_enable_device:
1283 pr_err("ERR_MDMA:Probe failed %d\n", err);
1284 return err;
1285}
1286
1287/**
1288 * intel_mid_dma_remove - PCI remove
1289 * @pdev: Controller PCI device structure
1290 *
1291 * Free up all resources and data
1292 * Call middma_shutdown to complete controller and chan cleanup
1293 */
1294static void intel_mid_dma_remove(struct pci_dev *pdev)
1295{
1296 struct middma_device *device = pci_get_drvdata(pdev);
1297
1298 pm_runtime_get_noresume(&pdev->dev);
1299 pm_runtime_forbid(&pdev->dev);
1300 middma_shutdown(pdev);
1301 pci_dev_put(pdev);
1302 kfree(device);
1303 pci_release_regions(pdev);
1304 pci_disable_device(pdev);
1305}
1306
1307/* Power Management */
1308/*
1309* dma_suspend - PCI suspend function
1310*
1311* @dev: device structure of the DMA controller
1312*
1313*
1314* This function is called by OS when a power event occurs
1315*/
1316static int dma_suspend(struct device *dev)
1317{
1318 struct pci_dev *pci = to_pci_dev(dev);
1319 int i;
1320 struct middma_device *device = pci_get_drvdata(pci);
1321 pr_debug("MDMA: dma_suspend called\n");
1322
1323 for (i = 0; i < device->max_chan; i++) {
1324 if (device->ch[i].in_use)
1325 return -EAGAIN;
1326 }
1327 dmac1_mask_periphral_intr(device);
1328 device->state = SUSPENDED;
1329 pci_save_state(pci);
1330 pci_disable_device(pci);
1331 pci_set_power_state(pci, PCI_D3hot);
1332 return 0;
1333}
1334
1335/**
1336* dma_resume - PCI resume function
1337*
1338* @dev: device structure of the DMA controller
1339*
1340* This function is called by OS when a power event occurs
1341*/
1342int dma_resume(struct device *dev)
1343{
1344 struct pci_dev *pci = to_pci_dev(dev);
1345 int ret;
1346 struct middma_device *device = pci_get_drvdata(pci);
1347
1348 pr_debug("MDMA: dma_resume called\n");
1349 pci_set_power_state(pci, PCI_D0);
1350 pci_restore_state(pci);
1351 ret = pci_enable_device(pci);
1352 if (ret) {
1353 pr_err("MDMA: device can't be enabled for %x\n", pci->device);
1354 return ret;
1355 }
1356 device->state = RUNNING;
1357 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1358 return 0;
1359}
1360
1361static int dma_runtime_suspend(struct device *dev)
1362{
1363 struct pci_dev *pci_dev = to_pci_dev(dev);
1364 struct middma_device *device = pci_get_drvdata(pci_dev);
1365
1366 device->state = SUSPENDED;
1367 return 0;
1368}
1369
1370static int dma_runtime_resume(struct device *dev)
1371{
1372 struct pci_dev *pci_dev = to_pci_dev(dev);
1373 struct middma_device *device = pci_get_drvdata(pci_dev);
1374
1375 device->state = RUNNING;
1376 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1377 return 0;
1378}
1379
1380static int dma_runtime_idle(struct device *dev)
1381{
1382 struct pci_dev *pdev = to_pci_dev(dev);
1383 struct middma_device *device = pci_get_drvdata(pdev);
1384 int i;
1385
1386 for (i = 0; i < device->max_chan; i++) {
1387 if (device->ch[i].in_use)
1388 return -EAGAIN;
1389 }
1390
1391 return 0;
1392}
1393
1394/******************************************************************************
1395* PCI stuff
1396*/
1397static struct pci_device_id intel_mid_dma_ids[] = {
1398 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)},
1399 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)},
1400 { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)},
1401 { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)},
1402 { 0, }
1403};
1404MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
1405
1406static const struct dev_pm_ops intel_mid_dma_pm = {
1407 .runtime_suspend = dma_runtime_suspend,
1408 .runtime_resume = dma_runtime_resume,
1409 .runtime_idle = dma_runtime_idle,
1410 .suspend = dma_suspend,
1411 .resume = dma_resume,
1412};
1413
1414static struct pci_driver intel_mid_dma_pci_driver = {
1415 .name = "Intel MID DMA",
1416 .id_table = intel_mid_dma_ids,
1417 .probe = intel_mid_dma_probe,
1418 .remove = intel_mid_dma_remove,
1419#ifdef CONFIG_PM
1420 .driver = {
1421 .pm = &intel_mid_dma_pm,
1422 },
1423#endif
1424};
1425
1426static int __init intel_mid_dma_init(void)
1427{
1428 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
1429 INTEL_MID_DMA_DRIVER_VERSION);
1430 return pci_register_driver(&intel_mid_dma_pci_driver);
1431}
1432fs_initcall(intel_mid_dma_init);
1433
1434static void __exit intel_mid_dma_exit(void)
1435{
1436 pci_unregister_driver(&intel_mid_dma_pci_driver);
1437}
1438module_exit(intel_mid_dma_exit);
1439
1440MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
1441MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
1442MODULE_LICENSE("GPL v2");
1443MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
deleted file mode 100644
index ebdd567dac1e..000000000000
--- a/drivers/dma/intel_mid_dma_regs.h
+++ /dev/null
@@ -1,295 +0,0 @@
1/*
2 * intel_mid_dma_regs.h - Intel MID DMA Drivers
3 *
4 * Copyright (C) 2008-10 Intel Corp
5 * Author: Vinod Koul <vinod.koul@intel.com>
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 *
19 *
20 */
21#ifndef __INTEL_MID_DMAC_REGS_H__
22#define __INTEL_MID_DMAC_REGS_H__
23
24#include <linux/dmaengine.h>
25#include <linux/dmapool.h>
26#include <linux/pci_ids.h>
27
28#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
29
30#define REG_BIT0 0x00000001
31#define REG_BIT8 0x00000100
32#define INT_MASK_WE 0x8
33#define CLEAR_DONE 0xFFFFEFFF
34#define UNMASK_INTR_REG(chan_num) \
35 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
36#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
37
38#define ENABLE_CHANNEL(chan_num) \
39 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
40
41#define DISABLE_CHANNEL(chan_num) \
42 (REG_BIT8 << chan_num)
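/*
 * Illustrative note (from the macro definitions above): the mask and
 * channel-enable registers take the data bit in the low byte and a matching
 * write-enable bit in the high byte, so for channel 2:
 *   UNMASK_INTR_REG(2) == 0x00000404   (write-enable + unmask)
 *   MASK_INTR_REG(2)   == 0x00000400   (write-enable only => masked)
 */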
43
44#define DESCS_PER_CHANNEL 16
45/*DMA Registers*/
46/*registers associated with channel programming*/
47#define DMA_REG_SIZE 0x400
48#define DMA_CH_SIZE 0x58
49
50/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
51#define SAR 0x00 /* Source Address Register*/
52#define DAR 0x08 /* Destination Address Register*/
53#define LLP 0x10 /* Linked List Pointer Register*/
54#define CTL_LOW 0x18 /* Control Register*/
55#define CTL_HIGH 0x1C /* Control Register*/
56#define CFG_LOW 0x40 /* Configuration Register Low*/
57#define CFG_HIGH 0x44 /* Configuration Register high*/
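/*
 * Illustrative example of the formula above: for channel 1, CFG_LOW sits at
 * DMA_CH_SIZE * 1 + CFG_LOW = 0x58 + 0x40 = 0x98 from the DMA register base.
 */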
58
59#define STATUS_TFR 0x2E8
60#define STATUS_BLOCK 0x2F0
61#define STATUS_ERR 0x308
62
63#define RAW_TFR 0x2C0
64#define RAW_BLOCK 0x2C8
65#define RAW_ERR 0x2E0
66
67#define MASK_TFR 0x310
68#define MASK_BLOCK 0x318
69#define MASK_SRC_TRAN 0x320
70#define MASK_DST_TRAN 0x328
71#define MASK_ERR 0x330
72
73#define CLEAR_TFR 0x338
74#define CLEAR_BLOCK 0x340
75#define CLEAR_SRC_TRAN 0x348
76#define CLEAR_DST_TRAN 0x350
77#define CLEAR_ERR 0x358
78
79#define INTR_STATUS 0x360
80#define DMA_CFG 0x398
81#define DMA_CHAN_EN 0x3A0
82
83/*DMA channel control registers*/
84union intel_mid_dma_ctl_lo {
85 struct {
86 u32 int_en:1; /*enable or disable interrupts*/
87 /*should be 0*/
88 u32 dst_tr_width:3; /*destination transfer width*/
89 /*usually 32 bits = 010*/
90 u32 src_tr_width:3; /*source transfer width*/
91 /*usually 32 bits = 010*/
92 u32 dinc:2; /*destination address inc/dec*/
93 /*For mem:INC=00, Peripheral NoINC=11*/
94 u32 sinc:2; /*source address inc or dec, as above*/
95 u32 dst_msize:3; /*destination burst transaction length*/
96 /*always = 16 ie 011*/
97 u32 src_msize:3; /*source burst transaction length*/
98 /*always = 16 ie 011*/
99 u32 reser1:3;
100 u32 tt_fc:3; /*transfer type and flow controller*/
101 /*M-M = 000
102 P-M = 010
103 M-P = 001*/
104 u32 dms:2; /*destination master select = 0*/
105 u32 sms:2; /*source master select = 0*/
106 u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/
107 u32 llp_src_en:1; /*enable/disable source LLP = 0*/
108 u32 reser2:3;
109 } ctlx;
110 u32 ctl_lo;
111};
112
113union intel_mid_dma_ctl_hi {
114 struct {
115 u32 block_ts:12; /*block transfer size*/
116 u32 done:1; /*Done - updated by DMAC*/
117 u32 reser:19; /*configured by DMAC*/
118 } ctlx;
119 u32 ctl_hi;
120
121};
122
123/*DMA channel configuration registers*/
124union intel_mid_dma_cfg_lo {
125 struct {
126 u32 reser1:5;
127 u32 ch_prior:3; /*channel priority = 0*/
128 u32 ch_susp:1; /*channel suspend = 0*/
129 u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/
130 u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/
131 /*HW = 0, SW = 1*/
132 u32 hs_sel_src:1; /*select HW/SW src handshaking*/
133 u32 reser2:6;
134 u32 dst_hs_pol:1; /*dest HS interface polarity*/
135 u32 src_hs_pol:1; /*src HS interface polarity*/
136 u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit)*/
137 u32 reload_src:1; /*auto reload src addr =1 if src is P*/
138 u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/
139 } cfgx;
140 u32 cfg_lo;
141};
142
143union intel_mid_dma_cfg_hi {
144 struct {
145 u32 fcmode:1; /*flow control mode = 1*/
146 u32 fifo_mode:1; /*FIFO mode select = 1*/
147 u32 protctl:3; /*protection control = 0*/
148 u32 rsvd:2;
149 u32 src_per:4; /*src hw HS interface*/
150 u32 dst_per:4; /*dstn hw HS interface*/
151 u32 reser2:17;
152 } cfgx;
153 u32 cfg_hi;
154};
155
156
157/**
158 * struct intel_mid_dma_chan - internal mid representation of a DMA channel
159 * @chan: dma_chan structure representation for mid chan
160 * @ch_regs: MMIO register space pointer to channel register
161 * @dma_base: MMIO register space DMA engine base pointer
162 * @ch_id: DMA channel id
163 * @lock: channel spinlock
164 * @active_list: current active descriptors
165 * @queue: current queued up descriptors
166 * @free_list: current free descriptors
167 * @mid_slave: dma slave structure
168 * @descs_allocated: total number of descriptors allocated
169 * @dma: dma device structure pointer
170 * @busy: bool representing if ch is busy (active txn) or not
171 * @in_use: bool representing if ch is in use or not
172 * @raw_tfr: raw tfr interrupt received
173 * @raw_block: raw block interrupt received
174 */
175struct intel_mid_dma_chan {
176 struct dma_chan chan;
177 void __iomem *ch_regs;
178 void __iomem *dma_base;
179 int ch_id;
180 spinlock_t lock;
181 struct list_head active_list;
182 struct list_head queue;
183 struct list_head free_list;
184 unsigned int descs_allocated;
185 struct middma_device *dma;
186 bool busy;
187 bool in_use;
188 u32 raw_tfr;
189 u32 raw_block;
190 struct intel_mid_dma_slave *mid_slave;
191};
192
193static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
194 struct dma_chan *chan)
195{
196 return container_of(chan, struct intel_mid_dma_chan, chan);
197}
198
199enum intel_mid_dma_state {
200 RUNNING = 0,
201 SUSPENDED,
202};
203/**
204 * struct middma_device - internal representation of a DMA device
205 * @pdev: PCI device
206 * @dma_base: MMIO register space pointer of DMA
207 * @dma_pool: for allocating DMA descriptors
208 * @common: embedded struct dma_device
209 * @tasklet: dma tasklet for processing interrupts
210 * @ch: per channel data
211 * @pci_id: DMA device PCI ID
212 * @intr_mask: Interrupt mask to be used
213 * @mask_reg: MMIO register for peripheral mask
214 * @chan_base: Base ch index (read from driver data)
215 * @max_chan: max number of chs supported (from drv_data)
216 * @block_size: Block size of DMA transfer supported (from drv_data)
217 * @pimr_mask: peripheral interrupt mask bits (from drv_data)
218 * @state: dma PM device state
219 */
220struct middma_device {
221 struct pci_dev *pdev;
222 void __iomem *dma_base;
223 struct pci_pool *dma_pool;
224 struct dma_device common;
225 struct tasklet_struct tasklet;
226 struct intel_mid_dma_chan ch[MAX_CHAN];
227 unsigned int pci_id;
228 unsigned int intr_mask;
229 void __iomem *mask_reg;
230 int chan_base;
231 int max_chan;
232 int block_size;
233 unsigned int pimr_mask;
234 enum intel_mid_dma_state state;
235};
236
237static inline struct middma_device *to_middma_device(struct dma_device *common)
238{
239 return container_of(common, struct middma_device, common);
240}
241
242struct intel_mid_dma_desc {
243 void __iomem *block; /*ch ptr*/
244 struct list_head desc_node;
245 struct dma_async_tx_descriptor txd;
246 size_t len;
247 dma_addr_t sar;
248 dma_addr_t dar;
249 u32 cfg_hi;
250 u32 cfg_lo;
251 u32 ctl_lo;
252 u32 ctl_hi;
253 struct pci_pool *lli_pool;
254 struct intel_mid_dma_lli *lli;
255 dma_addr_t lli_phys;
256 unsigned int lli_length;
257 unsigned int current_lli;
258 dma_addr_t next;
259 enum dma_transfer_direction dirn;
260 enum dma_status status;
261 enum dma_slave_buswidth width; /*width of DMA txn*/
262 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
263
264};
265
266struct intel_mid_dma_lli {
267 dma_addr_t sar;
268 dma_addr_t dar;
269 dma_addr_t llp;
270 u32 ctl_lo;
271 u32 ctl_hi;
272} __attribute__ ((packed));
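/*
 * Illustrative note: midc_lli_fill_sg() in intel_mid_dma.c writes one such
 * item per scatterlist entry; each item carries its own SAR/DAR/CTL values
 * and the llp field chains to the next item (or back to the first one for
 * DMA_PREP_CIRCULAR_LIST transfers).
 */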
273
274static inline int test_ch_en(void __iomem *dma, u32 ch_no)
275{
276 u32 en_reg = ioread32(dma + DMA_CHAN_EN);
277 return (en_reg >> ch_no) & 0x1;
278}
279
280static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
281 (struct dma_async_tx_descriptor *txd)
282{
283 return container_of(txd, struct intel_mid_dma_desc, txd);
284}
285
286static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
287 (struct dma_slave_config *slave)
288{
289 return container_of(slave, struct intel_mid_dma_slave, dma_slave);
290}
291
292
293int dma_resume(struct device *dev);
294
295#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 3d19a3187a77..64790a45ef5d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -226,6 +226,10 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
226 switch (pdev->device) { 226 switch (pdev->device) {
227 case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 227 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
228 case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 228 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
229 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
230 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
231 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
232 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
229 return true; 233 return true;
230 default: 234 default:
231 return false; 235 return false;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 4c4cda98072b..462a0229a743 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -219,6 +219,9 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
219 219
220 while (dint) { 220 while (dint) {
221 i = __ffs(dint); 221 i = __ffs(dint);
222 /* only handle interrupts belonging to pdma driver*/
223 if (i >= pdev->dma_channels)
224 break;
222 dint &= (dint - 1); 225 dint &= (dint - 1);
223 phy = &pdev->phy[i]; 226 phy = &pdev->phy[i];
224 ret = mmp_pdma_chan_handler(irq, phy); 227 ret = mmp_pdma_chan_handler(irq, phy);
@@ -999,6 +1002,9 @@ static int mmp_pdma_probe(struct platform_device *op)
999 struct resource *iores; 1002 struct resource *iores;
1000 int i, ret, irq = 0; 1003 int i, ret, irq = 0;
1001 int dma_channels = 0, irq_num = 0; 1004 int dma_channels = 0, irq_num = 0;
1005 const enum dma_slave_buswidth widths =
1006 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
1007 DMA_SLAVE_BUSWIDTH_4_BYTES;
1002 1008
1003 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); 1009 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
1004 if (!pdev) 1010 if (!pdev)
@@ -1066,6 +1072,10 @@ static int mmp_pdma_probe(struct platform_device *op)
1066 pdev->device.device_config = mmp_pdma_config; 1072 pdev->device.device_config = mmp_pdma_config;
1067 pdev->device.device_terminate_all = mmp_pdma_terminate_all; 1073 pdev->device.device_terminate_all = mmp_pdma_terminate_all;
1068 pdev->device.copy_align = PDMA_ALIGNMENT; 1074 pdev->device.copy_align = PDMA_ALIGNMENT;
1075 pdev->device.src_addr_widths = widths;
1076 pdev->device.dst_addr_widths = widths;
1077 pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1078 pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1069 1079
1070 if (pdev->dev->coherent_dma_mask) 1080 if (pdev->dev->coherent_dma_mask)
1071 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); 1081 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index bc7bc5329ca5..449e785def17 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -110,7 +110,7 @@ struct mmp_tdma_chan {
110 struct tasklet_struct tasklet; 110 struct tasklet_struct tasklet;
111 111
112 struct mmp_tdma_desc *desc_arr; 112 struct mmp_tdma_desc *desc_arr;
113 phys_addr_t desc_arr_phys; 113 dma_addr_t desc_arr_phys;
114 int desc_num; 114 int desc_num;
115 enum dma_transfer_direction dir; 115 enum dma_transfer_direction dir;
116 dma_addr_t dev_addr; 116 dma_addr_t dev_addr;
@@ -166,9 +166,12 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
166static int mmp_tdma_disable_chan(struct dma_chan *chan) 166static int mmp_tdma_disable_chan(struct dma_chan *chan)
167{ 167{
168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
169 u32 tdcr;
169 170
170 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 171 tdcr = readl(tdmac->reg_base + TDCR);
171 tdmac->reg_base + TDCR); 172 tdcr |= TDCR_ABR;
173 tdcr &= ~TDCR_CHANEN;
174 writel(tdcr, tdmac->reg_base + TDCR);
172 175
173 tdmac->status = DMA_COMPLETE; 176 tdmac->status = DMA_COMPLETE;
174 177
@@ -296,12 +299,27 @@ static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
296 return -EAGAIN; 299 return -EAGAIN;
297} 300}
298 301
302static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
303{
304 size_t reg;
305
306 if (tdmac->idx == 0) {
307 reg = __raw_readl(tdmac->reg_base + TDSAR);
308 reg -= tdmac->desc_arr[0].src_addr;
309 } else if (tdmac->idx == 1) {
310 reg = __raw_readl(tdmac->reg_base + TDDAR);
311 reg -= tdmac->desc_arr[0].dst_addr;
312 } else
313 return -EINVAL;
314
315 return reg;
316}
317
299static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) 318static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
300{ 319{
301 struct mmp_tdma_chan *tdmac = dev_id; 320 struct mmp_tdma_chan *tdmac = dev_id;
302 321
303 if (mmp_tdma_clear_chan_irq(tdmac) == 0) { 322 if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
304 tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
305 tasklet_schedule(&tdmac->tasklet); 323 tasklet_schedule(&tdmac->tasklet);
306 return IRQ_HANDLED; 324 return IRQ_HANDLED;
307 } else 325 } else
@@ -343,7 +361,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
343 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 361 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
344 362
345 gpool = tdmac->pool; 363 gpool = tdmac->pool;
346 if (tdmac->desc_arr) 364 if (gpool && tdmac->desc_arr)
347 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 365 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
348 size); 366 size);
349 tdmac->desc_arr = NULL; 367 tdmac->desc_arr = NULL;
@@ -499,6 +517,7 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
499{ 517{
500 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 518 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
501 519
520 tdmac->pos = mmp_tdma_get_pos(tdmac);
502 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 521 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
503 tdmac->buf_len - tdmac->pos); 522 tdmac->buf_len - tdmac->pos);
504 523
@@ -610,7 +629,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
610 int i, ret; 629 int i, ret;
611 int irq = 0, irq_num = 0; 630 int irq = 0, irq_num = 0;
612 int chan_num = TDMA_CHANNEL_NUM; 631 int chan_num = TDMA_CHANNEL_NUM;
613 struct gen_pool *pool; 632 struct gen_pool *pool = NULL;
614 633
615 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); 634 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
616 if (of_id) 635 if (of_id)
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 15cab7d79525..b4634109e010 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
193 193
194 spin_lock_irqsave(&ch->vc.lock, flags); 194 spin_lock_irqsave(&ch->vc.lock, flags);
195 195
196 if (ch->desc) 196 if (ch->desc) {
197 moxart_dma_desc_free(&ch->desc->vd);
197 ch->desc = NULL; 198 ch->desc = NULL;
199 }
198 200
199 ctrl = readl(ch->base + REG_OFF_CTRL); 201 ctrl = readl(ch->base + REG_OFF_CTRL);
200 ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); 202 ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index ca31f1b45366..cbd4a8aff120 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -194,6 +194,7 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
194 194
195 return ERR_PTR(ret_no_channel); 195 return ERR_PTR(ret_no_channel);
196} 196}
197EXPORT_SYMBOL_GPL(of_dma_request_slave_channel);
197 198
198/** 199/**
199 * of_dma_simple_xlate - Simple DMA engine translation function 200 * of_dma_simple_xlate - Simple DMA engine translation function
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7dd6dd121681..167dbaf65742 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
981 * c->desc is NULL and exit.) 981 * c->desc is NULL and exit.)
982 */ 982 */
983 if (c->desc) { 983 if (c->desc) {
984 omap_dma_desc_free(&c->desc->vd);
984 c->desc = NULL; 985 c->desc = NULL;
985 /* Avoid stopping the dma twice */ 986 /* Avoid stopping the dma twice */
986 if (!c->paused) 987 if (!c->paused)
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 1576cd97d4d8..5a250cdc8376 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -162,9 +162,9 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = {
162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, 162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, 163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, 164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
165 [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 }, 165 [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 },
166 [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 }, 166 [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 },
167 [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 }, 167 [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 },
168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, 168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, 169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, 170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
@@ -1173,6 +1173,10 @@ static int bam_dma_probe(struct platform_device *pdev)
1173 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); 1173 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
1174 1174
1175 /* initialize dmaengine apis */ 1175 /* initialize dmaengine apis */
1176 bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1177 bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1178 bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1179 bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1176 bdev->common.device_alloc_chan_resources = bam_alloc_chan; 1180 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1177 bdev->common.device_free_chan_resources = bam_free_chan; 1181 bdev->common.device_free_chan_resources = bam_free_chan;
1178 bdev->common.device_prep_slave_sg = bam_prep_slave_sg; 1182 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1858259f3be4..11707df1a689 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -582,15 +582,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
582 } 582 }
583} 583}
584 584
585static void sh_dmae_shutdown(struct platform_device *pdev)
586{
587 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
588 sh_dmae_ctl_stop(shdev);
589}
590
591#ifdef CONFIG_PM 585#ifdef CONFIG_PM
592static int sh_dmae_runtime_suspend(struct device *dev) 586static int sh_dmae_runtime_suspend(struct device *dev)
593{ 587{
588 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
589
590 sh_dmae_ctl_stop(shdev);
594 return 0; 591 return 0;
595} 592}
596 593
@@ -605,6 +602,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
605#ifdef CONFIG_PM_SLEEP 602#ifdef CONFIG_PM_SLEEP
606static int sh_dmae_suspend(struct device *dev) 603static int sh_dmae_suspend(struct device *dev)
607{ 604{
605 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
606
607 sh_dmae_ctl_stop(shdev);
608 return 0; 608 return 0;
609} 609}
610 610
@@ -929,13 +929,12 @@ static int sh_dmae_remove(struct platform_device *pdev)
929} 929}
930 930
931static struct platform_driver sh_dmae_driver = { 931static struct platform_driver sh_dmae_driver = {
932 .driver = { 932 .driver = {
933 .pm = &sh_dmae_pm, 933 .pm = &sh_dmae_pm,
934 .name = SH_DMAE_DRV_NAME, 934 .name = SH_DMAE_DRV_NAME,
935 .of_match_table = sh_dmae_of_match, 935 .of_match_table = sh_dmae_of_match,
936 }, 936 },
937 .remove = sh_dmae_remove, 937 .remove = sh_dmae_remove,
938 .shutdown = sh_dmae_shutdown,
939}; 938};
940 939
941static int __init sh_dmae_init(void) 940static int __init sh_dmae_init(void)
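In the shdmac hunks the dedicated .shutdown platform callback is dropped and the controller stop moves into the runtime and system suspend paths, so the hardware is quiesced through the PM framework rather than a separate shutdown hook. A sketch of that shape with hypothetical names (example_device and example_ctl_stop stand in for the driver's own type and stop helper):

#include <linux/device.h>
#include <linux/pm.h>

static int example_runtime_suspend(struct device *dev)
{
	struct example_device *edev = dev_get_drvdata(dev);	/* hypothetical */

	example_ctl_stop(edev);		/* quiesce the DMA controller */
	return 0;
}

static int example_suspend(struct device *dev)
{
	struct example_device *edev = dev_get_drvdata(dev);

	example_ctl_stop(edev);
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend, NULL, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, NULL)
};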
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 75faaeac197d..3c10f034d4b9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3550,7 +3550,7 @@ static int __init d40_probe(struct platform_device *pdev)
3550 3550
3551 if (!plat_data) { 3551 if (!plat_data) {
3552 if (np) { 3552 if (np) {
3553 if(d40_of_probe(pdev, np)) { 3553 if (d40_of_probe(pdev, np)) {
3554 ret = -ENOMEM; 3554 ret = -ENOMEM;
3555 goto failure; 3555 goto failure;
3556 } 3556 }