Diffstat (limited to 'drivers/dma/dw/core.c')
-rw-r--r--  drivers/dma/dw/core.c  332
1 file changed, 2 insertions(+), 330 deletions(-)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e500950dad82..f43e6dafe446 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -561,92 +561,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
         dwc_descriptor_complete(dwc, bad_desc, true);
 }

-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
-{
-        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-        return channel_readl(dwc, SAR);
-}
-EXPORT_SYMBOL(dw_dma_get_src_addr);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
-{
-        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-        return channel_readl(dwc, DAR);
-}
-EXPORT_SYMBOL(dw_dma_get_dst_addr);
-
-/* Called with dwc->lock held and all DMAC interrupts disabled */
-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-                u32 status_block, u32 status_err, u32 status_xfer)
-{
-        unsigned long flags;
-
-        if (status_block & dwc->mask) {
-                void (*callback)(void *param);
-                void *callback_param;
-
-                dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
-                                channel_readl(dwc, LLP));
-                dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-
-                callback = dwc->cdesc->period_callback;
-                callback_param = dwc->cdesc->period_callback_param;
-
-                if (callback)
-                        callback(callback_param);
-        }
-
-        /*
-         * Error and transfer complete are highly unlikely, and will most
-         * likely be due to a configuration error by the user.
-         */
-        if (unlikely(status_err & dwc->mask) ||
-                        unlikely(status_xfer & dwc->mask)) {
-                unsigned int i;
-
-                dev_err(chan2dev(&dwc->chan),
-                        "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
-                        status_xfer ? "xfer" : "error");
-
-                spin_lock_irqsave(&dwc->lock, flags);
-
-                dwc_dump_chan_regs(dwc);
-
-                dwc_chan_disable(dw, dwc);
-
-                /* Make sure DMA does not restart by loading a new list */
-                channel_writel(dwc, LLP, 0);
-                channel_writel(dwc, CTL_LO, 0);
-                channel_writel(dwc, CTL_HI, 0);
-
-                dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-                dma_writel(dw, CLEAR.ERROR, dwc->mask);
-                dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-                for (i = 0; i < dwc->cdesc->periods; i++)
-                        dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
-
-                spin_unlock_irqrestore(&dwc->lock, flags);
-        }
-
-        /* Re-enable interrupts */
-        channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-}
-
-/* ------------------------------------------------------------------------- */
-
 static void dw_dma_tasklet(unsigned long data)
 {
         struct dw_dma *dw = (struct dw_dma *)data;
         struct dw_dma_chan *dwc;
-        u32 status_block;
         u32 status_xfer;
         u32 status_err;
         unsigned int i;

-        status_block = dma_readl(dw, RAW.BLOCK);
         status_xfer = dma_readl(dw, RAW.XFER);
         status_err = dma_readl(dw, RAW.ERROR);

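
For context: dwc_handle_cyclic() above dispatched a client-supplied per-period
callback on every BLOCK interrupt. A minimal client-side sketch of how this
private API was wired up, assuming a hypothetical audio driver (all my_* names
are illustrative; only the dw_dma_cyclic_* calls and the cdesc fields come from
the code removed above):

    /* Hypothetical client of the removed private API; needs <linux/dmaengine.h>
     * and <sound/pcm.h>. */
    struct my_audio_dev {
            struct snd_pcm_substream *substream;   /* hypothetical client state */
    };

    static void my_period_done(void *param)
    {
            struct my_audio_dev *p = param;

            /* One period finished; let ALSA advance its ring-buffer pointer. */
            snd_pcm_period_elapsed(p->substream);
    }

    static int my_start_cyclic(struct my_audio_dev *p, struct dma_chan *chan,
                               dma_addr_t buf, size_t buf_len, size_t period_len)
    {
            struct dw_cyclic_desc *cdesc;

            cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
                                       DMA_MEM_TO_DEV);
            if (IS_ERR(cdesc))
                    return PTR_ERR(cdesc);

            /* dwc_handle_cyclic() invoked this on each BLOCK interrupt */
            cdesc->period_callback = my_period_done;
            cdesc->period_callback_param = p;

            return dw_dma_cyclic_start(chan);   /* softirqs must be disabled */
    }
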
@@ -655,8 +577,7 @@ static void dw_dma_tasklet(unsigned long data)
         for (i = 0; i < dw->dma.chancnt; i++) {
                 dwc = &dw->chan[i];
                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-                        dwc_handle_cyclic(dw, dwc, status_block, status_err,
-                                        status_xfer);
+                        dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
                 else if (status_err & (1 << i))
                         dwc_handle_error(dw, dwc);
                 else if (status_xfer & (1 << i))
@@ -1264,255 +1185,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
         dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }

-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-/**
- * dw_dma_cyclic_start - start the cyclic DMA transfer
- * @chan: the DMA channel to start
- *
- * Must be called with soft interrupts disabled. Returns zero on success or
- * -errno on failure.
- */
-int dw_dma_cyclic_start(struct dma_chan *chan)
-{
-        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-        struct dw_dma *dw = to_dw_dma(chan->device);
-        unsigned long flags;
-
-        if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
-                dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
-                return -ENODEV;
-        }
-
-        spin_lock_irqsave(&dwc->lock, flags);
-
-        /* Enable interrupts to perform cyclic transfer */
-        channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-
-        dwc_dostart(dwc, dwc->cdesc->desc[0]);
-
-        spin_unlock_irqrestore(&dwc->lock, flags);
-
-        return 0;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_start);
-
-/**
- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
- * @chan: the DMA channel to stop
- *
- * Must be called with soft interrupts disabled.
- */
-void dw_dma_cyclic_stop(struct dma_chan *chan)
-{
-        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-        unsigned long flags;
-
-        spin_lock_irqsave(&dwc->lock, flags);
-
-        dwc_chan_disable(dw, dwc);
-
-        spin_unlock_irqrestore(&dwc->lock, flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_stop);
-
-/**
- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- *
- * Must be called before trying to start the transfer. Returns a valid struct
- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
- */
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
-                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-                enum dma_transfer_direction direction)
-{
-        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-        struct dma_slave_config *sconfig = &dwc->dma_sconfig;
-        struct dw_cyclic_desc *cdesc;
-        struct dw_cyclic_desc *retval = NULL;
-        struct dw_desc *desc;
-        struct dw_desc *last = NULL;
-        u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
-        unsigned long was_cyclic;
-        unsigned int reg_width;
-        unsigned int periods;
-        unsigned int i;
-        unsigned long flags;
-
-        spin_lock_irqsave(&dwc->lock, flags);
-        if (dwc->nollp) {
-                spin_unlock_irqrestore(&dwc->lock, flags);
-                dev_dbg(chan2dev(&dwc->chan),
-                                "channel doesn't support LLP transfers\n");
-                return ERR_PTR(-EINVAL);
-        }
-
-        if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
-                spin_unlock_irqrestore(&dwc->lock, flags);
-                dev_dbg(chan2dev(&dwc->chan),
-                                "queue and/or active list are not empty\n");
-                return ERR_PTR(-EBUSY);
-        }
-
-        was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-        spin_unlock_irqrestore(&dwc->lock, flags);
-        if (was_cyclic) {
-                dev_dbg(chan2dev(&dwc->chan),
-                                "channel already prepared for cyclic DMA\n");
-                return ERR_PTR(-EBUSY);
-        }
-
-        retval = ERR_PTR(-EINVAL);
-
-        if (unlikely(!is_slave_direction(direction)))
-                goto out_err;
-
-        dwc->direction = direction;
-
-        if (direction == DMA_MEM_TO_DEV)
-                reg_width = __ffs(sconfig->dst_addr_width);
-        else
-                reg_width = __ffs(sconfig->src_addr_width);
-
-        periods = buf_len / period_len;
-
-        /* Check for too big/unaligned periods and unaligned DMA buffer. */
-        if (period_len > (dwc->block_size << reg_width))
-                goto out_err;
-        if (unlikely(period_len & ((1 << reg_width) - 1)))
-                goto out_err;
-        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
-                goto out_err;
-
-        retval = ERR_PTR(-ENOMEM);
-
-        cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
-        if (!cdesc)
-                goto out_err;
-
-        cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
-        if (!cdesc->desc)
-                goto out_err_alloc;
-
-        for (i = 0; i < periods; i++) {
-                desc = dwc_desc_get(dwc);
-                if (!desc)
-                        goto out_err_desc_get;
-
-                switch (direction) {
-                case DMA_MEM_TO_DEV:
-                        lli_write(desc, dar, sconfig->dst_addr);
-                        lli_write(desc, sar, buf_addr + period_len * i);
-                        lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
-                                | DWC_CTLL_DST_WIDTH(reg_width)
-                                | DWC_CTLL_SRC_WIDTH(reg_width)
-                                | DWC_CTLL_DST_FIX
-                                | DWC_CTLL_SRC_INC
-                                | DWC_CTLL_INT_EN));
-
-                        lli_set(desc, ctllo, sconfig->device_fc ?
-                                DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
-                                DWC_CTLL_FC(DW_DMA_FC_D_M2P));
-
-                        break;
-                case DMA_DEV_TO_MEM:
-                        lli_write(desc, dar, buf_addr + period_len * i);
-                        lli_write(desc, sar, sconfig->src_addr);
-                        lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
-                                | DWC_CTLL_SRC_WIDTH(reg_width)
-                                | DWC_CTLL_DST_WIDTH(reg_width)
-                                | DWC_CTLL_DST_INC
-                                | DWC_CTLL_SRC_FIX
-                                | DWC_CTLL_INT_EN));
-
-                        lli_set(desc, ctllo, sconfig->device_fc ?
-                                DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
-                                DWC_CTLL_FC(DW_DMA_FC_D_P2M));
-
-                        break;
-                default:
-                        break;
-                }
-
-                lli_write(desc, ctlhi, period_len >> reg_width);
-                cdesc->desc[i] = desc;
-
-                if (last)
-                        lli_write(last, llp, desc->txd.phys | lms);
-
-                last = desc;
-        }
-
-        /* Let's make a cyclic list */
-        lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
-
-        dev_dbg(chan2dev(&dwc->chan),
-                        "cyclic prepared buf %pad len %zu period %zu periods %d\n",
-                        &buf_addr, buf_len, period_len, periods);
-
-        cdesc->periods = periods;
-        dwc->cdesc = cdesc;
-
-        return cdesc;
-
-out_err_desc_get:
-        while (i--)
-                dwc_desc_put(dwc, cdesc->desc[i]);
-out_err_alloc:
-        kfree(cdesc);
-out_err:
-        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-        return (struct dw_cyclic_desc *)retval;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_prep);
-
-/**
- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
- * @chan: the DMA channel to free
- */
-void dw_dma_cyclic_free(struct dma_chan *chan)
-{
-        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-        struct dw_cyclic_desc *cdesc = dwc->cdesc;
-        unsigned int i;
-        unsigned long flags;
-
-        dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
-
-        if (!cdesc)
-                return;
-
-        spin_lock_irqsave(&dwc->lock, flags);
-
-        dwc_chan_disable(dw, dwc);
-
-        dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-        dma_writel(dw, CLEAR.ERROR, dwc->mask);
-        dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-        spin_unlock_irqrestore(&dwc->lock, flags);
-
-        for (i = 0; i < cdesc->periods; i++)
-                dwc_desc_put(dwc, cdesc->desc[i]);
-
-        kfree(cdesc->desc);
-        kfree(cdesc);
-
-        dwc->cdesc = NULL;
-
-        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_free);
-
-/*----------------------------------------------------------------------*/
-
 int dw_dma_probe(struct dw_dma_chip *chip)
 {
         struct dw_dma_platform_data *pdata;
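
With the private prep/start/stop/free entry points removed, this driver no
longer supports cyclic transfers at all; the tasklet above now only logs
"Cyclic xfer is not implemented". On controllers whose drivers do implement
device_prep_dma_cyclic, the same pattern is expressed through the generic
dmaengine slave API. A minimal sketch under that assumption (my_* names are
again hypothetical; it reuses my_period_done() from the previous sketch, whose
signature matches the generic callback):

    static int my_start_cyclic_generic(struct my_audio_dev *p,
                                       struct dma_chan *chan, dma_addr_t buf,
                                       size_t buf_len, size_t period_len)
    {
            struct dma_async_tx_descriptor *txd;

            txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                            DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!txd)
                    return -ENOMEM;

            /* Generic per-period callback replaces cdesc->period_callback */
            txd->callback = my_period_done;
            txd->callback_param = p;

            dmaengine_submit(txd);
            dma_async_issue_pending(chan);

            return 0;
    }

Stopping and freeing collapse into dmaengine_terminate_sync(chan), which covers
what the dw_dma_cyclic_stop()/dw_dma_cyclic_free() pair did here.
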
@@ -1642,7 +1314,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
         if (autocfg) {
                 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
                 void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
-                unsigned int dwc_params = dma_readl_native(addr);
+                unsigned int dwc_params = readl(addr);

                 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
                         dwc_params);
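
The hunk above drops the last user of dma_readl_native(). That helper lived in
drivers/dma/dw/regs.h and, roughly (a reconstruction, not part of this patch),
selected big-endian MMIO accessors only when CONFIG_DW_DMAC_BIG_ENDIAN_IO was
set, which in practice meant AVR32 platforms; everywhere else it was already
plain readl(), so the substitution is behavior-preserving:

    /* Roughly as it appeared in drivers/dma/dw/regs.h, for reference only */
    #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
    #define dma_readl_native ioread32be
    #define dma_writel_native iowrite32be
    #else
    #define dma_readl_native readl
    #define dma_writel_native writel
    #endif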