Diffstat (limited to 'drivers/dma/xgene-dma.c')
 drivers/dma/xgene-dma.c | 46 +++++++++++++++++-----------------------------
 1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d52d126..8d57b1b12e41 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)		(((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET		0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
 	return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-	u32 __iomem *cmd_base = ring->cmd_base;
-	u32 ring_state = ioread32(&cmd_base[1]);
-
-	return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
 				     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
 	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-				   struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+				    struct xgene_dma_desc_sw *desc_sw)
 {
+	struct xgene_dma_ring *ring = &chan->tx_ring;
 	struct xgene_dma_desc_hw *desc_hw;
 
-	/* Check if can push more descriptor to hw for execution */
-	if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-		return -EBUSY;
-
 	/* Get hw descriptor from DMA tx ring */
 	desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
 	}
 
+	/* Increment the pending transaction count */
+	chan->pending += ((desc_sw->flags &
+			XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
 	/* Notify the hw that we have descriptor ready for execution */
 	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
 		  2 : 1, ring->cmd);
-
-	return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-	int ret;
 
 	/*
 	 * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 		if (chan->pending >= chan->max_outstanding)
 			return;
 
-		ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-		if (ret)
-			return;
+		xgene_chan_xfer_request(chan, desc_sw);
 
 		/*
 		 * Delete this element from ld pending queue and append it to
 		 * ld running queue
 		 */
 		list_move_tail(&desc_sw->node, &chan->ld_running);
-
-		/* Increment the pending transaction count */
-		chan->pending++;
 	}
 }
 
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
-		chan->pending--;
+		chan->pending -= ((desc_sw->flags &
+				XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
 		/*
 		 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
 				     struct xgene_dma_ring *ring,
 				     enum xgene_dma_ring_cfgsize cfgsize)
 {
+	int ret;
+
 	/* Setup DMA ring descriptor variables */
 	ring->pdma = chan->pdma;
 	ring->cfgsize = cfgsize;
 	ring->num = chan->pdma->ring_num++;
 	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-	ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-	if (ring->size <= 0)
-		return ring->size;
+	ret = xgene_dma_get_ring_size(chan, cfgsize);
+	if (ret <= 0)
+		return ret;
+	ring->size = ret;
 
 	/* Allocate memory for DMA ring descriptor */
 	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
 		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
 	/* Set the max outstanding request possible to this channel */
-	chan->max_outstanding = rx_ring->slots;
+	chan->max_outstanding = tx_ring->slots;
 
 	return ret;
 }
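
Note on the change: the diff drops the xgene_dma_ring_desc_cnt() readback of the ring state register and instead tracks outstanding work purely in software. A 64B descriptor occupies two ring slots, so it is now counted as 2 against chan->pending on submission and completion, and chan->max_outstanding is bounded by the tx ring's slot count rather than the rx ring's. The C program below is a simplified, user-space sketch of that slot accounting only; the struct, function names prefixed demo_, the flag value, and the slot count of 4 are made up for illustration and are not the driver's code.

#include <stdbool.h>
#include <stdio.h>

#define XGENE_DMA_FLAG_64B_DESC	(1 << 0)	/* flag value chosen for the demo */

struct demo_chan {
	unsigned int pending;		/* ring slots currently queued to hardware */
	unsigned int max_outstanding;	/* tx ring slot count */
};

/* Cost of one software descriptor in ring slots: 2 for a 64B descriptor, else 1. */
static unsigned int desc_slot_cost(unsigned int flags)
{
	return (flags & XGENE_DMA_FLAG_64B_DESC) ? 2 : 1;
}

/* Submission path: refuse when the tx ring is full, otherwise charge the slot cost. */
static bool demo_submit(struct demo_chan *chan, unsigned int flags)
{
	if (chan->pending >= chan->max_outstanding)
		return false;	/* leave the descriptor on the pending list */

	chan->pending += desc_slot_cost(flags);
	return true;
}

/* Completion path: release the same number of slots that submission charged. */
static void demo_complete(struct demo_chan *chan, unsigned int flags)
{
	chan->pending -= desc_slot_cost(flags);
}

int main(void)
{
	struct demo_chan chan = { .pending = 0, .max_outstanding = 4 };

	demo_submit(&chan, XGENE_DMA_FLAG_64B_DESC);	/* counts as 2 slots */
	demo_submit(&chan, 0);				/* counts as 1 slot */
	printf("pending = %u of %u\n", chan.pending, chan.max_outstanding);

	demo_complete(&chan, XGENE_DMA_FLAG_64B_DESC);
	printf("pending = %u of %u\n", chan.pending, chan.max_outstanding);
	return 0;
}

Because submission and completion use the same cost function, the counter cannot drift when 64B and 32B descriptors are mixed, which is the invariant the pending += / pending -= hunks above rely on.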