author		Harini Katakam <harini.katakam@xilinx.com>	2018-07-06 02:48:58 -0400
committer	David S. Miller <davem@davemloft.net>	2018-07-07 07:54:25 -0400
commit		404cd086f29e867fc99f1174e8f3246a4ea14b7b
tree		628216d7ad81abce9949d91f036135669367074b /drivers/net/ethernet/cadence/macb_main.c
parent		e50b770ea5c9eff0013e8ae714d20182ed50d5e6
net: macb: Allocate valid memory for TX and RX BD prefetch
The GEM in ZynqMP, and most IP versions greater than r1p07, support TX and RX
BD prefetch. The number of BDs that can be prefetched is a
hardware-configurable parameter; for ZynqMP it is 4. When the GEM DMA accesses
the last BD in the ring, even before that BD is processed and the WRAP bit is
noticed, it will have prefetched BDs beyond the end of the ring. These are
never processed, but the memory after the last BD must still be accessible.
In particular, when an SMMU is in use, locations immediately after the last
BD may have no translation table entries, triggering HRESP errors. Hence
always allocate extra BDs to accommodate the prefetch.

The TX/RX BD prefetch depth for any given SoC version is
2 ^ (corresponding field in design config 10 register), where the value of
this field is >= 1.

Added a capability flag so that older IP versions, which have neither DCFG10
nor this prefetch capability, are not affected.

Signed-off-by: Harini Katakam <harini.katakam@xilinx.com>
Reviewed-by: Claudiu Beznea <claudiu.beznea@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
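A quick sanity check of the formula above: the driver computes the padding as
(2 << (val - 1)) descriptors, which for val >= 1 is exactly 2^val; on ZynqMP
the DCFG10 fields read back as 2, giving the stated 4 prefetched BDs. A
minimal userspace sketch of that arithmetic (the helper name and the 16-byte
descriptor size are assumptions for illustration, not the driver's code):

#include <stdio.h>

/* Extra bytes needed to pad a BD ring so that HW prefetch past the last
 * descriptor still lands in valid, mapped memory. 'dcfg10_field' stands in
 * for the RXBD_RDBUFF/TXBD_RDBUFF field of design config register 10. */
static unsigned int bd_prefetch_bytes(unsigned int dcfg10_field,
                                      unsigned int desc_size)
{
        if (!dcfg10_field)
                return 0;
        /* (2 << (n - 1)) == 2^n for n >= 1: the number of BDs the
         * controller may prefetch beyond the WRAP descriptor. */
        return (2u << (dcfg10_field - 1)) * desc_size;
}

int main(void)
{
        /* ZynqMP: field = 2 -> 2^2 = 4 BDs; with an assumed 16-byte
         * descriptor that is 64 extra bytes per ring. */
        printf("%u\n", bd_prefetch_bytes(2, 16));       /* prints 64 */
        return 0;
}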
Diffstat (limited to 'drivers/net/ethernet/cadence/macb_main.c')
-rw-r--r--	drivers/net/ethernet/cadence/macb_main.c | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2d5d0d110151..a6c911bb5ce2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1811,6 +1811,7 @@ static void macb_free_consistent(struct macb *bp)
 {
         struct macb_queue *queue;
         unsigned int q;
+        int size;

         bp->macbgem_ops.mog_free_rx_buffers(bp);

@@ -1818,12 +1819,14 @@ static void macb_free_consistent(struct macb *bp)
                 kfree(queue->tx_skb);
                 queue->tx_skb = NULL;
                 if (queue->tx_ring) {
-                        dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
+                        size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+                        dma_free_coherent(&bp->pdev->dev, size,
                                           queue->tx_ring, queue->tx_ring_dma);
                         queue->tx_ring = NULL;
                 }
                 if (queue->rx_ring) {
-                        dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
+                        size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+                        dma_free_coherent(&bp->pdev->dev, size,
                                           queue->rx_ring, queue->rx_ring_dma);
                         queue->rx_ring = NULL;
                 }
@@ -1873,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp)
         int size;

         for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-                size = TX_RING_BYTES(bp);
+                size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
                 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                     &queue->tx_ring_dma,
                                                     GFP_KERNEL);
@@ -1889,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp)
                 if (!queue->tx_skb)
                         goto out_err;

-                size = RX_RING_BYTES(bp);
+                size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
                 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                     &queue->rx_ring_dma, GFP_KERNEL);
                 if (!queue->rx_ring)
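The padding is applied symmetrically on purpose: dma_free_coherent() must be
passed the same size that was handed to dma_alloc_coherent(), so
macb_free_consistent() recomputes TX_RING_BYTES(bp)/RX_RING_BYTES(bp) plus
the prefetch bytes rather than the bare ring size. A hedged userspace analogy
of that pairing (simplified types, not the driver's structures):

#include <stdlib.h>

/* The ring is over-allocated by 'prefetch_bytes'; software never touches
 * the tail, it only exists so HW prefetch reads hit mapped memory. */
struct ring {
        void   *desc;
        size_t  ring_bytes;     /* descriptors actually used by software */
        size_t  prefetch_bytes; /* tail padding read only by HW prefetch */
};

static int ring_alloc(struct ring *r)
{
        r->desc = calloc(1, r->ring_bytes + r->prefetch_bytes);
        return r->desc ? 0 : -1;
}

static void ring_free(struct ring *r)
{
        /* free() takes no size, but dma_free_coherent() does: pass
         * ring_bytes + prefetch_bytes there, never ring_bytes alone. */
        free(r->desc);
        r->desc = NULL;
}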
@@ -3796,7 +3799,7 @@ static const struct macb_config np4_config = {
 static const struct macb_config zynqmp_config = {
         .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
                         MACB_CAPS_JUMBO |
-                        MACB_CAPS_GEM_HAS_PTP,
+                        MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
         .dma_burst_length = 16,
         .clk_init = macb_clk_init,
         .init = macb_init,
@@ -3857,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev)
         void __iomem *mem;
         const char *mac;
         struct macb *bp;
-        int err;
+        int err, val;

         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3946,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev)
         else
                 dev->max_mtu = ETH_DATA_LEN;

+        if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
+                val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
+                if (val)
+                        bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
+                                                macb_dma_desc_get_size(bp);
+
+                val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
+                if (val)
+                        bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
+                                                macb_dma_desc_get_size(bp);
+        }
+
         mac = of_get_mac_address(np);
         if (mac) {
                 ether_addr_copy(bp->dev->dev_addr, mac);
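For readers without the macb headers at hand, the probe-time hunk above is a
capability test plus two bit-field extractions from DCFG10, gated so that
bp->rx_bd_rd_prefetch and bp->tx_bd_rd_prefetch stay zero on IP versions
without the MACB_CAPS_BD_RD_PREFETCH flag. A self-contained approximation —
the field offsets and widths below are assumptions for illustration, not
values taken from the GEM databook:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for GEM_BFEXT(RXBD_RDBUFF, ...) and
 * GEM_BFEXT(TXBD_RDBUFF, ...); the real offsets/widths are defined in
 * drivers/net/ethernet/cadence/macb.h. */
#define RXBD_RDBUFF_OFFSET      8
#define RXBD_RDBUFF_SIZE        4
#define TXBD_RDBUFF_OFFSET      12
#define TXBD_RDBUFF_SIZE        4

static unsigned int bfext(uint32_t reg, unsigned int off, unsigned int sz)
{
        return (reg >> off) & ((1u << sz) - 1);
}

int main(void)
{
        uint32_t dcfg10 = 0x00002200;   /* hypothetical DCFG10 readback */
        unsigned int rx = bfext(dcfg10, RXBD_RDBUFF_OFFSET, RXBD_RDBUFF_SIZE);
        unsigned int tx = bfext(dcfg10, TXBD_RDBUFF_OFFSET, TXBD_RDBUFF_SIZE);

        /* As in the patch: only pad when the field is non-zero. */
        if (rx)
                printf("rx prefetch: %u BDs\n", 2u << (rx - 1));
        if (tx)
                printf("tx prefetch: %u BDs\n", 2u << (tx - 1));
        return 0;
}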