-rw-r--r--  drivers/dma/dw/Kconfig  |   7 +-
-rw-r--r--  drivers/dma/dw/core.c   | 332 +-
-rw-r--r--  drivers/dma/dw/regs.h   |  50 +-
-rw-r--r--  include/linux/dma/dw.h  |  21 -
4 files changed, 14 insertions(+), 396 deletions(-)
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 5a37b9fcf40d..04b9728c1d26 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -6,17 +6,12 @@ config DW_DMAC_CORE
 	tristate
 	select DMA_ENGINE
 
-config DW_DMAC_BIG_ENDIAN_IO
-	bool
-
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA platform driver"
 	select DW_DMAC_CORE
-	select DW_DMAC_BIG_ENDIAN_IO if AVR32
-	default y if CPU_AT32AP7000
 	help
 	  Support the Synopsys DesignWare AHB DMA controller. This
-	  can be integrated in chips such as the Atmel AT32ap7000.
+	  can be integrated in chips such as the Intel Cherrytrail.
 
 config DW_DMAC_PCI
 	tristate "Synopsys DesignWare AHB DMA PCI driver"
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e500950dad82..f43e6dafe446 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -561,92 +561,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	dwc_descriptor_complete(dwc, bad_desc, true);
 }
 
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	return channel_readl(dwc, SAR);
-}
-EXPORT_SYMBOL(dw_dma_get_src_addr);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	return channel_readl(dwc, DAR);
-}
-EXPORT_SYMBOL(dw_dma_get_dst_addr);
-
-/* Called with dwc->lock held and all DMAC interrupts disabled */
-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_block, u32 status_err, u32 status_xfer)
-{
-	unsigned long flags;
-
-	if (status_block & dwc->mask) {
-		void (*callback)(void *param);
-		void *callback_param;
-
-		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
-				channel_readl(dwc, LLP));
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-
-		callback = dwc->cdesc->period_callback;
-		callback_param = dwc->cdesc->period_callback_param;
-
-		if (callback)
-			callback(callback_param);
-	}
-
-	/*
-	 * Error and transfer complete are highly unlikely, and will most
-	 * likely be due to a configuration error by the user.
-	 */
-	if (unlikely(status_err & dwc->mask) ||
-			unlikely(status_xfer & dwc->mask)) {
-		unsigned int i;
-
-		dev_err(chan2dev(&dwc->chan),
-			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
-			status_xfer ? "xfer" : "error");
-
-		spin_lock_irqsave(&dwc->lock, flags);
-
-		dwc_dump_chan_regs(dwc);
-
-		dwc_chan_disable(dw, dwc);
-
-		/* Make sure DMA does not restart by loading a new list */
-		channel_writel(dwc, LLP, 0);
-		channel_writel(dwc, CTL_LO, 0);
-		channel_writel(dwc, CTL_HI, 0);
-
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-		dma_writel(dw, CLEAR.ERROR, dwc->mask);
-		dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-		for (i = 0; i < dwc->cdesc->periods; i++)
-			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
-
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	}
-
-	/* Re-enable interrupts */
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-}
-
-/* ------------------------------------------------------------------------- */
-
 static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
 	struct dw_dma_chan *dwc;
-	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
 	unsigned int i;
 
-	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);
 
@@ -655,8 +577,7 @@ static void dw_dma_tasklet(unsigned long data)
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_block, status_err,
-					status_xfer);
+			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
 		else if (status_xfer & (1 << i))
@@ -1264,255 +1185,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-/**
- * dw_dma_cyclic_start - start the cyclic DMA transfer
- * @chan: the DMA channel to start
- *
- * Must be called with soft interrupts disabled. Returns zero on success or
- * -errno on failure.
- */
-int dw_dma_cyclic_start(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma *dw = to_dw_dma(chan->device);
-	unsigned long flags;
-
-	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
-		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
-		return -ENODEV;
-	}
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	/* Enable interrupts to perform cyclic transfer */
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-
-	dwc_dostart(dwc, dwc->cdesc->desc[0]);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_start);
-
-/**
- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
- * @chan: the DMA channel to stop
- *
- * Must be called with soft interrupts disabled.
- */
-void dw_dma_cyclic_stop(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	unsigned long flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	dwc_chan_disable(dw, dwc);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_stop);
-
-/**
- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- *
- * Must be called before trying to start the transfer. Returns a valid struct
- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
- */
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
-		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_transfer_direction direction)
-{
-	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
-	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
-	struct dw_cyclic_desc		*cdesc;
-	struct dw_cyclic_desc		*retval = NULL;
-	struct dw_desc			*desc;
-	struct dw_desc			*last = NULL;
-	u8				lms = DWC_LLP_LMS(dwc->dws.m_master);
-	unsigned long			was_cyclic;
-	unsigned int			reg_width;
-	unsigned int			periods;
-	unsigned int			i;
-	unsigned long			flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	if (dwc->nollp) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		dev_dbg(chan2dev(&dwc->chan),
-				"channel doesn't support LLP transfers\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		dev_dbg(chan2dev(&dwc->chan),
-				"queue and/or active list are not empty\n");
-		return ERR_PTR(-EBUSY);
-	}
-
-	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	spin_unlock_irqrestore(&dwc->lock, flags);
-	if (was_cyclic) {
-		dev_dbg(chan2dev(&dwc->chan),
-				"channel already prepared for cyclic DMA\n");
-		return ERR_PTR(-EBUSY);
-	}
-
-	retval = ERR_PTR(-EINVAL);
-
-	if (unlikely(!is_slave_direction(direction)))
-		goto out_err;
-
-	dwc->direction = direction;
-
-	if (direction == DMA_MEM_TO_DEV)
-		reg_width = __ffs(sconfig->dst_addr_width);
-	else
-		reg_width = __ffs(sconfig->src_addr_width);
-
-	periods = buf_len / period_len;
-
-	/* Check for too big/unaligned periods and unaligned DMA buffer. */
-	if (period_len > (dwc->block_size << reg_width))
-		goto out_err;
-	if (unlikely(period_len & ((1 << reg_width) - 1)))
-		goto out_err;
-	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
-		goto out_err;
-
-	retval = ERR_PTR(-ENOMEM);
-
-	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
-	if (!cdesc)
-		goto out_err;
-
-	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
-	if (!cdesc->desc)
-		goto out_err_alloc;
-
-	for (i = 0; i < periods; i++) {
-		desc = dwc_desc_get(dwc);
-		if (!desc)
-			goto out_err_desc_get;
-
-		switch (direction) {
-		case DMA_MEM_TO_DEV:
-			lli_write(desc, dar, sconfig->dst_addr);
-			lli_write(desc, sar, buf_addr + period_len * i);
-			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
-				| DWC_CTLL_DST_WIDTH(reg_width)
-				| DWC_CTLL_SRC_WIDTH(reg_width)
-				| DWC_CTLL_DST_FIX
-				| DWC_CTLL_SRC_INC
-				| DWC_CTLL_INT_EN));
-
-			lli_set(desc, ctllo, sconfig->device_fc ?
-					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
-					DWC_CTLL_FC(DW_DMA_FC_D_M2P));
-
-			break;
-		case DMA_DEV_TO_MEM:
-			lli_write(desc, dar, buf_addr + period_len * i);
-			lli_write(desc, sar, sconfig->src_addr);
-			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
-				| DWC_CTLL_SRC_WIDTH(reg_width)
-				| DWC_CTLL_DST_WIDTH(reg_width)
-				| DWC_CTLL_DST_INC
-				| DWC_CTLL_SRC_FIX
-				| DWC_CTLL_INT_EN));
-
-			lli_set(desc, ctllo, sconfig->device_fc ?
-					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
-					DWC_CTLL_FC(DW_DMA_FC_D_P2M));
-
-			break;
-		default:
-			break;
-		}
-
-		lli_write(desc, ctlhi, period_len >> reg_width);
-		cdesc->desc[i] = desc;
-
-		if (last)
-			lli_write(last, llp, desc->txd.phys | lms);
-
-		last = desc;
-	}
-
-	/* Let's make a cyclic list */
-	lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
-
-	dev_dbg(chan2dev(&dwc->chan),
-			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
-			&buf_addr, buf_len, period_len, periods);
-
-	cdesc->periods = periods;
-	dwc->cdesc = cdesc;
-
-	return cdesc;
-
-out_err_desc_get:
-	while (i--)
-		dwc_desc_put(dwc, cdesc->desc[i]);
-out_err_alloc:
-	kfree(cdesc);
-out_err:
-	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	return (struct dw_cyclic_desc *)retval;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_prep);
-
-/**
- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
- * @chan: the DMA channel to free
- */
-void dw_dma_cyclic_free(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_cyclic_desc *cdesc = dwc->cdesc;
-	unsigned int i;
-	unsigned long flags;
-
-	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
-
-	if (!cdesc)
-		return;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	dwc_chan_disable(dw, dwc);
-
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-	dma_writel(dw, CLEAR.ERROR, dwc->mask);
-	dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	for (i = 0; i < cdesc->periods; i++)
-		dwc_desc_put(dwc, cdesc->desc[i]);
-
-	kfree(cdesc->desc);
-	kfree(cdesc);
-
-	dwc->cdesc = NULL;
-
-	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_free);
-
-/*----------------------------------------------------------------------*/
-
 int dw_dma_probe(struct dw_dma_chip *chip)
 {
 	struct dw_dma_platform_data *pdata;
@@ -1642,7 +1314,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
 	if (autocfg) {
 		unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
 		void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
-		unsigned int dwc_params = dma_readl_native(addr);
+		unsigned int dwc_params = readl(addr);
 
 		dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
 			dwc_params);
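For context, the removed extensions formed a self-contained cyclic API: dw_dma_cyclic_prep() built the ring of linked-list descriptors, dw_dma_cyclic_start() unmasked the BLOCK interrupt and kicked the first descriptor, and dw_dma_cyclic_stop()/dw_dma_cyclic_free() tore the ring down. A caller would have driven it roughly as in the sketch below, written against the removed prototypes; the audio_period_done() callback and start_ring() helper are hypothetical.

#include <linux/dma/dw.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical per-period callback: one period of the ring has completed. */
static void audio_period_done(void *param)
{
	/* consume or refill the finished period here */
}

/* Hypothetical helper showing the removed call sequence. */
static int start_ring(struct dma_chan *chan, dma_addr_t buf,
		      size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = audio_period_done;
	cdesc->period_callback_param = NULL;

	/* Per the removed kernel-doc, soft interrupts must be disabled. */
	return dw_dma_cyclic_start(chan);
}
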
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 32a328721c88..09e7dfdbb790 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -116,20 +116,6 @@ struct dw_dma_regs {
 	DW_REG(GLOBAL_CFG);
 };
 
-/*
- * Big endian I/O access when reading and writing to the DMA controller
- * registers. This is needed on some platforms, like the Atmel AVR32
- * architecture.
- */
-
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define dma_readl_native ioread32be
-#define dma_writel_native iowrite32be
-#else
-#define dma_readl_native readl
-#define dma_writel_native writel
-#endif
-
 /* Bitfields in DW_PARAMS */
 #define DW_PARAMS_NR_CHAN	8	/* number of channels */
 #define DW_PARAMS_NR_MASTER	11	/* number of AHB masters */
@@ -280,7 +266,6 @@ struct dw_dma_chan {
 	unsigned long			flags;
 	struct list_head		active_list;
 	struct list_head		queue;
-	struct dw_cyclic_desc		*cdesc;
 
 	unsigned int			descs_allocated;
 
@@ -302,9 +287,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
 }
 
 #define channel_readl(dwc, name) \
-	dma_readl_native(&(__dwc_regs(dwc)->name))
+	readl(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-	dma_writel_native((val), &(__dwc_regs(dwc)->name))
+	writel((val), &(__dwc_regs(dwc)->name))
 
 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
@@ -333,9 +318,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 }
 
 #define dma_readl(dw, name) \
-	dma_readl_native(&(__dw_regs(dw)->name))
+	readl(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-	dma_writel_native((val), &(__dw_regs(dw)->name))
+	writel((val), &(__dw_regs(dw)->name))
 
 #define idma32_readq(dw, name)				\
 	hi_lo_readq(&(__dw_regs(dw)->name))
@@ -352,43 +337,30 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
 	return container_of(ddev, struct dw_dma, dma);
 }
 
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-typedef __be32 __dw32;
-#else
-typedef __le32 __dw32;
-#endif
-
 /* LLI == Linked List Item; a.k.a. DMA block descriptor */
 struct dw_lli {
 	/* values that are not changed by hardware */
-	__dw32		sar;
-	__dw32		dar;
-	__dw32		llp;		/* chain to next lli */
-	__dw32		ctllo;
+	__le32		sar;
+	__le32		dar;
+	__le32		llp;		/* chain to next lli */
+	__le32		ctllo;
 	/* values that may get written back: */
-	__dw32		ctlhi;
+	__le32		ctlhi;
 	/* sstat and dstat can snapshot peripheral register state.
 	 * silicon config may discard either or both...
 	 */
-	__dw32		sstat;
-	__dw32		dstat;
+	__le32		sstat;
+	__le32		dstat;
 };
 
 struct dw_desc {
 	/* FIRST values the hardware uses */
 	struct dw_lli			lli;
 
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define lli_set(d, reg, v)		((d)->lli.reg |= cpu_to_be32(v))
-#define lli_clear(d, reg, v)		((d)->lli.reg &= ~cpu_to_be32(v))
-#define lli_read(d, reg)		be32_to_cpu((d)->lli.reg)
-#define lli_write(d, reg, v)		((d)->lli.reg = cpu_to_be32(v))
-#else
 #define lli_set(d, reg, v)		((d)->lli.reg |= cpu_to_le32(v))
 #define lli_clear(d, reg, v)		((d)->lli.reg &= ~cpu_to_le32(v))
 #define lli_read(d, reg)		le32_to_cpu((d)->lli.reg)
 #define lli_write(d, reg, v)		((d)->lli.reg = cpu_to_le32(v))
-#endif
 
 	/* THEN values for driver housekeeping */
 	struct list_head		desc_node;
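With the big-endian I/O option gone, the in-memory link-list words are unconditionally little-endian, so the fields can be typed __le32 and the accessors reduce to the cpu_to_le32()/le32_to_cpu() forms; sparse can then flag any raw access. A standalone illustration of the same pattern, using a hypothetical two-field descriptor outside the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical descriptor mirroring the dw_lli layout rules. */
struct demo_lli {
	__le32 sar;
	__le32 dar;
};

/* Store a CPU-endian value into the fixed little-endian descriptor word. */
#define demo_write(d, reg, v)	((d)->reg = cpu_to_le32(v))
/* Read it back in CPU endianness; a byte swap happens only on BE CPUs. */
#define demo_read(d, reg)	le32_to_cpu((d)->reg)
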
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index b63b25814d77..e166cac8e870 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -50,25 +50,4 @@ static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
 static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
 #endif /* CONFIG_DW_DMAC_CORE */
 
-/* DMA API extensions */
-struct dw_desc;
-
-struct dw_cyclic_desc {
-	struct dw_desc	**desc;
-	unsigned long	periods;
-	void		(*period_callback)(void *param);
-	void		*period_callback_param;
-};
-
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
-		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_transfer_direction direction);
-void dw_dma_cyclic_free(struct dma_chan *chan);
-int dw_dma_cyclic_start(struct dma_chan *chan);
-void dw_dma_cyclic_stop(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-
 #endif /* _DMA_DW_H */
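
After this removal, a client that needs a cyclic transfer has to go through the generic dmaengine API instead, which a controller driver exposes by implementing device_prep_dma_cyclic (which dw_dmac does not provide at this point, hence the "Cyclic xfer is not implemented" message above). A sketch of that generic call sequence; audio_period_done() is the same hypothetical callback as in the earlier sketch.

#include <linux/dmaengine.h>

static void audio_period_done(void *param);	/* hypothetical, as above */

static int start_ring_generic(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = audio_period_done;	/* fires once per period */
	desc->callback_param = NULL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* actually starts the ring */

	return 0;
}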