author	Viresh Kumar <viresh.kumar@st.com>	2011-08-05 06:02:43 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2011-09-19 05:43:06 -0400
commit	b7f69d9d4283cfbbf7458962cf9bdba6463b831d (patch)
tree	76e29e885d467486922f3b2fbd2c480a7b5a1b8f
parent	937bb6e4c676fecbfbc1939b942241c3f27bf5d8 (diff)
dmaengine/amba-pl08x: Add support for sg len greater than one for slave transfers
Until now, sg_len greater than one was not supported. This patch adds support for that.

Note: if the peripheral is the flow controller, sg_len still can't be greater than one.

Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/amba-pl08x.c	| 378
-rw-r--r--	include/linux/amba/pl08x.h	|  22
2 files changed, 231 insertions(+), 169 deletions(-)
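
With this change in place, a slave peripheral driver can hand the pl08x engine a scatterlist with more than one entry. The sketch below is illustrative only and not part of this commit: example_submit() is an invented name, and the dmaengine calls shown (device_prep_slave_sg(), dmaengine_submit(), dma_async_issue_pending()) are the standard slave-DMA API of this kernel generation. It assumes the DMAC, not the peripheral, is the flow controller, as the note above requires.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical client sketch: submit a multi-entry scatterlist (sg_len > 1) */
static int example_submit(struct dma_chan *chan, struct scatterlist *sgl,
			  unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	/* sg_len > 1 is now accepted; pl08x builds one pl08x_sg per entry */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */
	return 0;
}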
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index cd8df7f5b5c8..2c390d1b9cad 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -352,7 +352,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *txdi;
 		list_for_each_entry(txdi, &plchan->pend_list, node) {
-			bytes += txdi->len;
+			struct pl08x_sg *dsg;
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				bytes += dsg->len;
 		}
 	}
 
@@ -567,8 +569,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
 	u32 cctl, early_bytes = 0;
-	size_t max_bytes_per_lli, total_bytes = 0;
+	size_t max_bytes_per_lli, total_bytes;
 	struct pl08x_lli *llis_va;
+	struct pl08x_sg *dsg;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
 	if (!txd->llis_va) {
@@ -578,13 +581,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 	pl08x->pool_ctr++;
 
-	/* Get the default CCTL */
-	cctl = txd->cctl;
-
 	bd.txd = txd;
-	bd.srcbus.addr = txd->src_addr;
-	bd.dstbus.addr = txd->dst_addr;
 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
+	cctl = txd->cctl;
 
 	/* Find maximum width of the source bus */
 	bd.srcbus.maxwidth =
589 /* Find maximum width of the source bus */ 588 /* Find maximum width of the source bus */
590 bd.srcbus.maxwidth = 589 bd.srcbus.maxwidth =
@@ -596,162 +595,179 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 		PL080_CONTROL_DWIDTH_SHIFT);
 
-	/* Set up the bus widths to the maximum */
-	bd.srcbus.buswidth = bd.srcbus.maxwidth;
-	bd.dstbus.buswidth = bd.dstbus.maxwidth;
+	list_for_each_entry(dsg, &txd->dsg_list, node) {
+		total_bytes = 0;
+		cctl = txd->cctl;
 
-	/* We need to count this down to zero */
-	bd.remainder = txd->len;
+		bd.srcbus.addr = dsg->src_addr;
+		bd.dstbus.addr = dsg->dst_addr;
+		bd.remainder = dsg->len;
+		bd.srcbus.buswidth = bd.srcbus.maxwidth;
+		bd.dstbus.buswidth = bd.dstbus.maxwidth;
 
-	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
-	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
-		bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
-		bd.srcbus.buswidth,
-		bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
-		bd.dstbus.buswidth,
-		bd.remainder);
-	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
-		mbus == &bd.srcbus ? "src" : "dst",
-		sbus == &bd.srcbus ? "src" : "dst");
+		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
+			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+			bd.srcbus.buswidth,
+			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+			bd.dstbus.buswidth,
+			bd.remainder);
+		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+			mbus == &bd.srcbus ? "src" : "dst",
+			sbus == &bd.srcbus ? "src" : "dst");
 
-	/*
-	 * Zero length is only allowed if all these requirements are met:
-	 * - flow controller is peripheral.
-	 * - src.addr is aligned to src.width
-	 * - dst.addr is aligned to dst.width
-	 *
-	 * sg_len == 1 should be true, as there can be two cases here:
-	 * - Memory addresses are contiguous and are not scattered. Here, Only
-	 * one sg will be passed by user driver, with memory address and zero
-	 * length. We pass this to controller and after the transfer it will
-	 * receive the last burst request from peripheral and so transfer
-	 * finishes.
-	 *
-	 * - Memory addresses are scattered and are not contiguous. Here,
-	 * Obviously as DMA controller doesn't know when a lli's transfer gets
-	 * over, it can't load next lli. So in this case, there has to be an
-	 * assumption that only one lli is supported. Thus, we can't have
-	 * scattered addresses.
-	 */
-	if (!bd.remainder) {
-		u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
-			PL080_CONFIG_FLOW_CONTROL_SHIFT;
-		if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
-			(fc <= PL080_FLOW_SRC2DST_SRC))) {
-			dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
-				__func__);
-			return 0;
-		}
-
-		if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
-			(bd.srcbus.addr % bd.srcbus.buswidth)) {
-			dev_err(&pl08x->adev->dev,
-				"%s src & dst address must be aligned to src"
-				" & dst width if peripheral is flow controller",
-				__func__);
-			return 0;
-		}
-
-		cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
-				bd.dstbus.buswidth, 0);
-		pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
-	}
+		/*
+		 * Zero length is only allowed if all these requirements are
+		 * met:
+		 * - flow controller is peripheral.
+		 * - src.addr is aligned to src.width
+		 * - dst.addr is aligned to dst.width
+		 *
+		 * sg_len == 1 should be true, as there can be two cases here:
+		 *
+		 * - Memory addresses are contiguous and are not scattered.
+		 *   Here, Only one sg will be passed by user driver, with
+		 *   memory address and zero length. We pass this to controller
+		 *   and after the transfer it will receive the last burst
+		 *   request from peripheral and so transfer finishes.
+		 *
+		 * - Memory addresses are scattered and are not contiguous.
+		 *   Here, Obviously as DMA controller doesn't know when a lli's
+		 *   transfer gets over, it can't load next lli. So in this
+		 *   case, there has to be an assumption that only one lli is
+		 *   supported. Thus, we can't have scattered addresses.
+		 */
+		if (!bd.remainder) {
+			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+				PL080_CONFIG_FLOW_CONTROL_SHIFT;
+			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+					(fc <= PL080_FLOW_SRC2DST_SRC))) {
+				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+					__func__);
+				return 0;
+			}
 
-	/*
-	 * Send byte by byte for following cases
-	 * - Less than a bus width available
-	 * - until master bus is aligned
-	 */
-	if (bd.remainder < mbus->buswidth)
-		early_bytes = bd.remainder;
-	else if ((mbus->addr) % (mbus->buswidth)) {
-		early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth);
-		if ((bd.remainder - early_bytes) < mbus->buswidth)
-			early_bytes = bd.remainder;
-	}
+			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+					(bd.srcbus.addr % bd.srcbus.buswidth)) {
+				dev_err(&pl08x->adev->dev,
+					"%s src & dst address must be aligned to src"
+					" & dst width if peripheral is flow controller",
+					__func__);
+				return 0;
+			}
 
-	if (early_bytes) {
-		dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs "
-				"(remain 0x%08x)\n", __func__, bd.remainder);
-		prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
-				&total_bytes);
-	}
+			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, 0);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+			break;
+		}
 
-	if (bd.remainder) {
-		/*
-		 * Master now aligned
-		 * - if slave is not then we must set its width down
-		 */
-		if (sbus->addr % sbus->buswidth) {
-			dev_dbg(&pl08x->adev->dev,
-				"%s set down bus width to one byte\n",
-				__func__);
+		/*
+		 * Send byte by byte for following cases
+		 * - Less than a bus width available
+		 * - until master bus is aligned
+		 */
+		if (bd.remainder < mbus->buswidth)
+			early_bytes = bd.remainder;
+		else if ((mbus->addr) % (mbus->buswidth)) {
+			early_bytes = mbus->buswidth - (mbus->addr) %
+				(mbus->buswidth);
+			if ((bd.remainder - early_bytes) < mbus->buswidth)
+				early_bytes = bd.remainder;
+		}
 
-			sbus->buswidth = 1;
+		if (early_bytes) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%s byte width LLIs (remain 0x%08x)\n",
+				__func__, bd.remainder);
+			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+				&total_bytes);
 		}
 
-		/* Bytes transferred = tsize * src width, not MIN(buswidths) */
-		max_bytes_per_lli = bd.srcbus.buswidth *
-			PL080_CONTROL_TRANSFER_SIZE_MASK;
+		if (bd.remainder) {
+			/*
+			 * Master now aligned
+			 * - if slave is not then we must set its width down
+			 */
+			if (sbus->addr % sbus->buswidth) {
+				dev_dbg(&pl08x->adev->dev,
+					"%s set down bus width to one byte\n",
+					__func__);
 
-		/*
-		 * Make largest possible LLIs until less than one bus
-		 * width left
-		 */
-		while (bd.remainder > (mbus->buswidth - 1)) {
-			size_t lli_len, tsize, width;
+				sbus->buswidth = 1;
+			}
 
-			/*
-			 * If enough left try to send max possible,
-			 * otherwise try to send the remainder
-			 */
-			lli_len = min(bd.remainder, max_bytes_per_lli);
+			/*
+			 * Bytes transferred = tsize * src width, not
+			 * MIN(buswidths)
+			 */
+			max_bytes_per_lli = bd.srcbus.buswidth *
+				PL080_CONTROL_TRANSFER_SIZE_MASK;
+			dev_vdbg(&pl08x->adev->dev,
+				"%s max bytes per lli = %zu\n",
+				__func__, max_bytes_per_lli);
 
-			/*
-			 * Check against maximum bus alignment: Calculate actual
-			 * transfer size in relation to bus width and get a
-			 * maximum remainder of the highest bus width - 1
-			 */
-			width = max(mbus->buswidth, sbus->buswidth);
-			lli_len = (lli_len / width) * width;
-			tsize = lli_len / bd.srcbus.buswidth;
+			/*
+			 * Make largest possible LLIs until less than one bus
+			 * width left
+			 */
+			while (bd.remainder > (mbus->buswidth - 1)) {
+				size_t lli_len, tsize, width;
 
-			dev_vdbg(&pl08x->adev->dev,
-				"%s fill lli with single lli chunk of "
-				"size 0x%08zx (remainder 0x%08zx)\n",
-				__func__, lli_len, bd.remainder);
+				/*
+				 * If enough left try to send max possible,
+				 * otherwise try to send the remainder
+				 */
+				lli_len = min(bd.remainder, max_bytes_per_lli);
 
-			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+				/*
+				 * Check against maximum bus alignment:
+				 * Calculate actual transfer size in relation to
+				 * bus width an get a maximum remainder of the
+				 * highest bus width - 1
+				 */
+				width = max(mbus->buswidth, sbus->buswidth);
+				lli_len = (lli_len / width) * width;
+				tsize = lli_len / bd.srcbus.buswidth;
+
+				dev_vdbg(&pl08x->adev->dev,
+					"%s fill lli with single lli chunk of "
+					"size 0x%08zx (remainder 0x%08zx)\n",
+					__func__, lli_len, bd.remainder);
+
+				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
 					bd.dstbus.buswidth, tsize);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl);
-			total_bytes += lli_len;
-		}
+				pl08x_fill_lli_for_desc(&bd, num_llis++,
+						lli_len, cctl);
+				total_bytes += lli_len;
+			}
 
-		/*
-		 * Send any odd bytes
-		 */
-		if (bd.remainder) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundary, send odd bytes (remain %zu)\n",
-				__func__, bd.remainder);
-			prep_byte_width_lli(&bd, &cctl, bd.remainder,
-					num_llis++, &total_bytes);
+			/*
+			 * Send any odd bytes
+			 */
+			if (bd.remainder) {
+				dev_vdbg(&pl08x->adev->dev,
+					"%s align with boundary, send odd bytes (remain %zu)\n",
+					__func__, bd.remainder);
+				prep_byte_width_lli(&bd, &cctl, bd.remainder,
+						num_llis++, &total_bytes);
+			}
 		}
-	}
 
-	if (total_bytes != txd->len) {
-		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
-			__func__, total_bytes, txd->len);
-		return 0;
-	}
+		if (total_bytes != dsg->len) {
+			dev_err(&pl08x->adev->dev,
+				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
+				__func__, total_bytes, dsg->len);
+			return 0;
+		}
 
-	if (num_llis >= MAX_NUM_TSFR_LLIS) {
-		dev_err(&pl08x->adev->dev,
-			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
-			__func__, (u32) MAX_NUM_TSFR_LLIS);
-		return 0;
+		if (num_llis >= MAX_NUM_TSFR_LLIS) {
+			dev_err(&pl08x->adev->dev,
+				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+				__func__, (u32) MAX_NUM_TSFR_LLIS);
+			return 0;
+		}
 	}
 
 	llis_va = txd->llis_va;
@@ -784,11 +800,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
+	struct pl08x_sg *dsg, *_dsg;
+
 	/* Free the LLI */
 	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
+	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+		list_del(&dsg->node);
+		kfree(dsg);
+	}
+
 	kfree(txd);
 }
 
@@ -1234,6 +1257,7 @@ static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 	txd->tx.flags = flags;
 	txd->tx.tx_submit = pl08x_tx_submit;
 	INIT_LIST_HEAD(&txd->node);
+	INIT_LIST_HEAD(&txd->dsg_list);
 
 	/* Always enable error and terminal interrupts */
 	txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1252,6 +1276,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
+	struct pl08x_sg *dsg;
 	int ret;
 
 	txd = pl08x_get_txd(plchan, flags);
@@ -1261,10 +1286,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 		return NULL;
 	}
 
+	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+	if (!dsg) {
+		pl08x_free_txd(pl08x, txd);
+		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
+				__func__);
+		return NULL;
+	}
+	list_add_tail(&dsg->node, &txd->dsg_list);
+
 	txd->direction = DMA_NONE;
-	txd->src_addr = src;
-	txd->dst_addr = dest;
-	txd->len = len;
+	dsg->src_addr = src;
+	dsg->dst_addr = dest;
+	dsg->len = len;
 
 	/* Set platform data for m2m */
 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1293,19 +1327,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
+	struct pl08x_sg *dsg;
+	struct scatterlist *sg;
+	dma_addr_t slave_addr;
 	int ret, tmp;
 
-	/*
-	 * Current implementation ASSUMES only one sg
-	 */
-	if (sg_len != 1) {
-		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
-			__func__);
-		BUG();
-	}
-
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sgl->length, plchan->name);
 
 	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
@@ -1324,17 +1352,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 * channel target address dynamically at runtime.
 	 */
 	txd->direction = direction;
-	txd->len = sgl->length;
 
 	if (direction == DMA_TO_DEVICE) {
 		txd->cctl = plchan->dst_cctl;
-		txd->src_addr = sgl->dma_address;
-		txd->dst_addr = plchan->dst_addr;
+		slave_addr = plchan->dst_addr;
 	} else if (direction == DMA_FROM_DEVICE) {
 		txd->cctl = plchan->src_cctl;
-		txd->src_addr = plchan->src_addr;
-		txd->dst_addr = sgl->dma_address;
+		slave_addr = plchan->src_addr;
 	} else {
+		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
 			"%s direction unsupported\n", __func__);
 		return NULL;
@@ -1349,6 +1375,26 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 
+	for_each_sg(sgl, sg, sg_len, tmp) {
+		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+		if (!dsg) {
+			pl08x_free_txd(pl08x, txd);
+			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
+					__func__);
+			return NULL;
+		}
+		list_add_tail(&dsg->node, &txd->dsg_list);
+
+		dsg->len = sg_dma_len(sg);
+		if (direction == DMA_TO_DEVICE) {
+			dsg->src_addr = sg_phys(sg);
+			dsg->dst_addr = slave_addr;
+		} else {
+			dsg->src_addr = slave_addr;
+			dsg->dst_addr = sg_phys(sg);
+		}
+	}
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
@@ -1452,22 +1498,28 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
 	struct device *dev = txd->tx.chan->device->dev;
+	struct pl08x_sg *dsg;
 
 	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
-		else
-			dma_unmap_page(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		}
 	}
 	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 		else
-			dma_unmap_page(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 	}
 }
 
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index a22662c93981..9eabffbc4e50 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -106,12 +106,24 @@ struct pl08x_phy_chan {
 };
 
 /**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
  * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
  * @tx: async tx descriptor
  * @node: node for txd list for channels
- * @src_addr: src address of txd
- * @dst_addr: dst address of txd
- * @len: transfer len in bytes
+ * @dsg_list: list of children sg's
  * @direction: direction of transfer
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
@@ -121,10 +133,8 @@ struct pl08x_phy_chan {
 struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
+	struct list_head dsg_list;
 	enum dma_data_direction direction;
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
-	size_t len;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
 	/* Default cctl value for LLIs */
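
In short, after this patch a pl08x_txd no longer carries a single src_addr/dst_addr/len triple; it owns a dsg_list of pl08x_sg entries, one per scatterlist element. A minimal sketch of the resulting walk, mirroring the pl08x_getbytes_chan() hunk above:

	struct pl08x_sg *dsg;
	size_t bytes = 0;

	/* sum the per-sg lengths hanging off one descriptor */
	list_for_each_entry(dsg, &txd->dsg_list, node)
		bytes += dsg->len;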