aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/cadence/macb.c
diff options
context:
space:
mode:
authorHarini Katakam <harini.katakam@xilinx.com>2016-08-09 03:45:53 -0400
committerDavid S. Miller <davem@davemloft.net>2016-08-10 20:38:15 -0400
commitfff8019a08b60dce0c7f2858ebe44c5b84ed493b (patch)
tree96d1ebc5b353dff0242452cc08dca08ec44a849e /drivers/net/ethernet/cadence/macb.c
parent054c67d1c82afde13e475cdd8b7117a5e40bebb1 (diff)
net: macb: Add 64 bit addressing support for GEM
This patch adds support for 64 bit addressing and BDs. -> Enable 64 bit addressing in DMACFG register. -> Set DMA mask when design config register shows support for 64 bit addr. -> Add new BD words for higher address when 64 bit DMA support is present. -> Add and update TBQPH and RBQPH for MSB of BD pointers. -> Change extraction and updating of buffer addresses to use 64 bit address. -> In gem_rx extract address in one place instead of two and use a separate flag for RXUSED. Signed-off-by: Harini Katakam <harinik@xilinx.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/cadence/macb.c')
-rw-r--r--drivers/net/ethernet/cadence/macb.c62
1 file changed, 51 insertions, 11 deletions
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 89c0cfa9719f..6b797e301e3f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -541,6 +541,14 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
541 } 541 }
542} 542}
543 543
544static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
545{
546 desc->addr = (u32)addr;
547#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
548 desc->addrh = (u32)(addr >> 32);
549#endif
550}
551
544static void macb_tx_error_task(struct work_struct *work) 552static void macb_tx_error_task(struct work_struct *work)
545{ 553{
546 struct macb_queue *queue = container_of(work, struct macb_queue, 554 struct macb_queue *queue = container_of(work, struct macb_queue,
@@ -621,14 +629,17 @@ static void macb_tx_error_task(struct work_struct *work)
621 629
622 /* Set end of TX queue */ 630 /* Set end of TX queue */
623 desc = macb_tx_desc(queue, 0); 631 desc = macb_tx_desc(queue, 0);
624 desc->addr = 0; 632 macb_set_addr(desc, 0);
625 desc->ctrl = MACB_BIT(TX_USED); 633 desc->ctrl = MACB_BIT(TX_USED);
626 634
627 /* Make descriptor updates visible to hardware */ 635 /* Make descriptor updates visible to hardware */
628 wmb(); 636 wmb();
629 637
630 /* Reinitialize the TX desc queue */ 638 /* Reinitialize the TX desc queue */
631 queue_writel(queue, TBQP, queue->tx_ring_dma); 639 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
640#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
641 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
642#endif
632 /* Make TX ring reflect state of hardware */ 643 /* Make TX ring reflect state of hardware */
633 queue->tx_head = 0; 644 queue->tx_head = 0;
634 queue->tx_tail = 0; 645 queue->tx_tail = 0;
@@ -750,7 +761,7 @@ static void gem_rx_refill(struct macb *bp)
750 761
751 if (entry == RX_RING_SIZE - 1) 762 if (entry == RX_RING_SIZE - 1)
752 paddr |= MACB_BIT(RX_WRAP); 763 paddr |= MACB_BIT(RX_WRAP);
753 bp->rx_ring[entry].addr = paddr; 764 macb_set_addr(&(bp->rx_ring[entry]), paddr);
754 bp->rx_ring[entry].ctrl = 0; 765 bp->rx_ring[entry].ctrl = 0;
755 766
756 /* properly align Ethernet header */ 767 /* properly align Ethernet header */
@@ -798,7 +809,9 @@ static int gem_rx(struct macb *bp, int budget)
798 int count = 0; 809 int count = 0;
799 810
800 while (count < budget) { 811 while (count < budget) {
801 u32 addr, ctrl; 812 u32 ctrl;
813 dma_addr_t addr;
814 bool rxused;
802 815
803 entry = macb_rx_ring_wrap(bp->rx_tail); 816 entry = macb_rx_ring_wrap(bp->rx_tail);
804 desc = &bp->rx_ring[entry]; 817 desc = &bp->rx_ring[entry];
@@ -806,10 +819,14 @@ static int gem_rx(struct macb *bp, int budget)
806 /* Make hw descriptor updates visible to CPU */ 819 /* Make hw descriptor updates visible to CPU */
807 rmb(); 820 rmb();
808 821
809 addr = desc->addr; 822 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
823 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
824#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
825 addr |= ((u64)(desc->addrh) << 32);
826#endif
810 ctrl = desc->ctrl; 827 ctrl = desc->ctrl;
811 828
812 if (!(addr & MACB_BIT(RX_USED))) 829 if (!rxused)
813 break; 830 break;
814 831
815 bp->rx_tail++; 832 bp->rx_tail++;
@@ -835,7 +852,6 @@ static int gem_rx(struct macb *bp, int budget)
835 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 852 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
836 853
837 skb_put(skb, len); 854 skb_put(skb, len);
838 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
839 dma_unmap_single(&bp->pdev->dev, addr, 855 dma_unmap_single(&bp->pdev->dev, addr,
840 bp->rx_buffer_size, DMA_FROM_DEVICE); 856 bp->rx_buffer_size, DMA_FROM_DEVICE);
841 857
@@ -1299,7 +1315,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1299 ctrl |= MACB_BIT(TX_WRAP); 1315 ctrl |= MACB_BIT(TX_WRAP);
1300 1316
1301 /* Set TX buffer descriptor */ 1317 /* Set TX buffer descriptor */
1302 desc->addr = tx_skb->mapping; 1318 macb_set_addr(desc, tx_skb->mapping);
1303 /* desc->addr must be visible to hardware before clearing 1319 /* desc->addr must be visible to hardware before clearing
1304 * 'TX_USED' bit in desc->ctrl. 1320 * 'TX_USED' bit in desc->ctrl.
1305 */ 1321 */
@@ -1422,6 +1438,9 @@ static void gem_free_rx_buffers(struct macb *bp)
1422 1438
1423 desc = &bp->rx_ring[i]; 1439 desc = &bp->rx_ring[i];
1424 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1440 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1441#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1442 addr |= ((u64)(desc->addrh) << 32);
1443#endif
1425 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1444 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1426 DMA_FROM_DEVICE); 1445 DMA_FROM_DEVICE);
1427 dev_kfree_skb_any(skb); 1446 dev_kfree_skb_any(skb);
@@ -1547,7 +1566,7 @@ static void gem_init_rings(struct macb *bp)
1547 1566
1548 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1567 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1549 for (i = 0; i < TX_RING_SIZE; i++) { 1568 for (i = 0; i < TX_RING_SIZE; i++) {
1550 queue->tx_ring[i].addr = 0; 1569 macb_set_addr(&(queue->tx_ring[i]), 0);
1551 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1570 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
1552 } 1571 }
1553 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1572 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
@@ -1694,6 +1713,10 @@ static void macb_configure_dma(struct macb *bp)
1694 dmacfg |= GEM_BIT(TXCOEN); 1713 dmacfg |= GEM_BIT(TXCOEN);
1695 else 1714 else
1696 dmacfg &= ~GEM_BIT(TXCOEN); 1715 dmacfg &= ~GEM_BIT(TXCOEN);
1716
1717#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1718 dmacfg |= GEM_BIT(ADDR64);
1719#endif
1697 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 1720 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1698 dmacfg); 1721 dmacfg);
1699 gem_writel(bp, DMACFG, dmacfg); 1722 gem_writel(bp, DMACFG, dmacfg);
@@ -1739,9 +1762,15 @@ static void macb_init_hw(struct macb *bp)
1739 macb_configure_dma(bp); 1762 macb_configure_dma(bp);
1740 1763
1741 /* Initialize TX and RX buffers */ 1764 /* Initialize TX and RX buffers */
1742 macb_writel(bp, RBQP, bp->rx_ring_dma); 1765 macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
1766#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1767 macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
1768#endif
1743 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1769 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1744 queue_writel(queue, TBQP, queue->tx_ring_dma); 1770 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
1771#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1772 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
1773#endif
1745 1774
1746 /* Enable interrupts */ 1775 /* Enable interrupts */
1747 queue_writel(queue, IER, 1776 queue_writel(queue, IER,
@@ -2379,6 +2408,9 @@ static int macb_init(struct platform_device *pdev)
2379 queue->IDR = GEM_IDR(hw_q - 1); 2408 queue->IDR = GEM_IDR(hw_q - 1);
2380 queue->IMR = GEM_IMR(hw_q - 1); 2409 queue->IMR = GEM_IMR(hw_q - 1);
2381 queue->TBQP = GEM_TBQP(hw_q - 1); 2410 queue->TBQP = GEM_TBQP(hw_q - 1);
2411#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2412 queue->TBQPH = GEM_TBQPH(hw_q -1);
2413#endif
2382 } else { 2414 } else {
2383 /* queue0 uses legacy registers */ 2415 /* queue0 uses legacy registers */
2384 queue->ISR = MACB_ISR; 2416 queue->ISR = MACB_ISR;
@@ -2386,6 +2418,9 @@ static int macb_init(struct platform_device *pdev)
2386 queue->IDR = MACB_IDR; 2418 queue->IDR = MACB_IDR;
2387 queue->IMR = MACB_IMR; 2419 queue->IMR = MACB_IMR;
2388 queue->TBQP = MACB_TBQP; 2420 queue->TBQP = MACB_TBQP;
2421#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2422 queue->TBQPH = MACB_TBQPH;
2423#endif
2389 } 2424 }
2390 2425
2391 /* get irq: here we use the linux queue index, not the hardware 2426 /* get irq: here we use the linux queue index, not the hardware
@@ -2935,6 +2970,11 @@ static int macb_probe(struct platform_device *pdev)
2935 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 2970 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
2936 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 2971 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
2937 2972
2973#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2974 if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
2975 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
2976#endif
2977
2938 spin_lock_init(&bp->lock); 2978 spin_lock_init(&bp->lock);
2939 2979
2940 /* setup capabilities */ 2980 /* setup capabilities */