author		David S. Miller <davem@davemloft.net>	2015-03-27 17:26:21 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-27 17:26:21 -0400
commit		7145074b374dd90a4512fe52c062b336b724f276 (patch)
tree		1ffcc5ab7dd245e0f828c34b27fad2adcf00ff18
parent		19cc2dec8ab0fd258b023be1835aa8d710232877 (diff)
parent		4055eaefb3603a2a55305c81292379922a742131 (diff)
Merge branch 'bcmgenet-next'
Petri Gynther says:

====================
net: bcmgenet: multiple Rx queues support

Final patch set to add support for multiple Rx queues:
1. remove priv->int0_mask and priv->int1_mask
2. modify Tx ring int_enable and int_disable vectors
3. simplify bcmgenet_init_dma()
4. tweak init_umac()
5. rework Tx NAPI code
6. rework Rx NAPI code
7. add support for multiple Rx queues
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
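The thread running through the whole series is per-ring NAPI: every Tx and Rx ring embeds its own napi_struct, and the poll callback recovers its ring with container_of() instead of going through one device-wide NAPI context in priv. A minimal standalone sketch of that pattern (plain userspace C with illustrative names, not the kernel code):

/* Sketch of the per-ring NAPI pattern; container_of() is redefined
 * here so the example compiles outside the kernel.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_ctx {
	int weight;
};

struct rx_ring {
	unsigned int index;
	struct napi_ctx napi;	/* embedded: one NAPI context per ring */
};

/* The poll callback is handed only the napi context, yet it can
 * locate the ring that contains it.
 */
static int rx_poll(struct napi_ctx *napi)
{
	struct rx_ring *ring = container_of(napi, struct rx_ring, napi);

	return printf("polling ring %u\n", ring->index);
}

int main(void)
{
	struct rx_ring rings[2] = { { .index = 0 }, { .index = 16 } };

	rx_poll(&rings[0].napi);
	rx_poll(&rings[1].napi);
	return 0;
}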
-rw-r--r--	drivers/net/ethernet/broadcom/genet/bcmgenet.c	380
-rw-r--r--	drivers/net/ethernet/broadcom/genet/bcmgenet.h	20
2 files changed, 278 insertions, 122 deletions
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index c38d5429e27a..31e14079e1d7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -964,36 +964,58 @@ static void bcmgenet_free_cb(struct enet_cb *cb)
 	dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
-						  struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
 {
-	bcmgenet_intrl2_0_writel(priv,
+	bcmgenet_intrl2_0_writel(ring->priv,
+				 UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE,
+				 INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv,
+				 UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE,
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv,
+				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+				 INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv,
+				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv,
 				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
 				 INTRL2_CPU_MASK_SET);
 }
 
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
-						 struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
 {
-	bcmgenet_intrl2_0_writel(priv,
+	bcmgenet_intrl2_0_writel(ring->priv,
 				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
 				 INTRL2_CPU_MASK_CLEAR);
 }
 
-static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
-					       struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
 {
-	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
 				 INTRL2_CPU_MASK_CLEAR);
-	priv->int1_mask &= ~(1 << ring->index);
 }
 
-static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
-						struct bcmgenet_tx_ring *ring)
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
 {
-	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
 				 INTRL2_CPU_MASK_SET);
-	priv->int1_mask |= (1 << ring->index);
 }
 
 /* Unlocked version of the reclaim routine */
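These helpers rely on the INTRL2 block's paired MASK_SET/MASK_CLEAR registers: writing a 1 to MASK_SET masks (disables) that interrupt bit, and writing a 1 to MASK_CLEAR unmasks (enables) it, so each ring toggles only its own bit with a single write and no read-modify-write of shared state. That is also why the priv->int1_mask shadow copies above can be dropped. A toy model of the idiom (userspace C; the register is a stand-in):

#include <stdint.h>
#include <stdio.h>

#define UMAC_IRQ1_RX_INTR_SHIFT 16

static uint32_t cpu_mask;	/* stands in for the INTRL2 mask register */

static void intrl2_mask_set(uint32_t bits)   { cpu_mask |= bits; }
static void intrl2_mask_clear(uint32_t bits) { cpu_mask &= ~bits; }

int main(void)
{
	unsigned int ring_index = 3;

	/* disable Rx ring 3's interrupt: set only its mask bit */
	intrl2_mask_set(1u << (UMAC_IRQ1_RX_INTR_SHIFT + ring_index));
	printf("mask after disable: 0x%08x\n", cpu_mask);

	/* re-enable it from the poll path: clear only its mask bit */
	intrl2_mask_clear(1u << (UMAC_IRQ1_RX_INTR_SHIFT + ring_index));
	printf("mask after enable:  0x%08x\n", cpu_mask);
	return 0;
}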
@@ -1085,7 +1107,7 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
 
 	if (work_done == 0) {
 		napi_complete(napi);
-		ring->int_enable(ring->priv, ring);
+		ring->int_enable(ring);
 
 		return 0;
 	}
@@ -1396,11 +1418,10 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
 /* bcmgenet_desc_rx - descriptor based rx process.
  * this could be called from bottom half, or from NAPI polling method.
  */
-static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
-				     unsigned int index,
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 				     unsigned int budget)
 {
-	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
+	struct bcmgenet_priv *priv = ring->priv;
 	struct net_device *dev = priv->dev;
 	struct enet_cb *cb;
 	struct sk_buff *skb;
@@ -1412,7 +1433,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 	unsigned int discards;
 	unsigned int chksum_ok = 0;
 
-	p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX);
+	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
 
 	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
 		   DMA_P_INDEX_DISCARD_CNT_MASK;
@@ -1425,7 +1446,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		/* Clear HW register when we reach 75% of maximum 0xFFFF */
 		if (ring->old_discards >= 0xC000) {
 			ring->old_discards = 0;
-			bcmgenet_rdma_ring_writel(priv, index, 0,
+			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
 						  RDMA_PROD_INDEX);
 		}
 	}
@@ -1533,7 +1554,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		dev->stats.multicast++;
 
 		/* Notify kernel */
-		napi_gro_receive(&priv->napi, skb);
+		napi_gro_receive(&ring->napi, skb);
 		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
 
 next:
@@ -1544,12 +1565,29 @@ next:
 		ring->read_ptr = ring->cb_ptr;
 
 		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
-		bcmgenet_rdma_ring_writel(priv, index, ring->c_index, RDMA_CONS_INDEX);
+		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
 	}
 
 	return rxpktprocessed;
 }
 
+/* Rx NAPI polling method */
+static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_rx_ring *ring = container_of(napi,
+			struct bcmgenet_rx_ring, napi);
+	unsigned int work_done;
+
+	work_done = bcmgenet_desc_rx(ring, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		ring->int_enable(ring);
+	}
+
+	return work_done;
+}
+
 /* Assign skb to RX DMA descriptor. */
 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
 				     struct bcmgenet_rx_ring *ring)
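The new bcmgenet_rx_poll() follows the standard NAPI budget contract: a poll that consumes less than its budget calls napi_complete() and re-enables the ring interrupt; a poll that uses the whole budget stays scheduled and is called again. A runnable toy model of that loop (userspace C, packet counts invented):

#include <stdio.h>

/* pretend to process up to 'budget' of 'pending' packets */
static unsigned int desc_rx(unsigned int pending, unsigned int budget)
{
	return pending < budget ? pending : budget;
}

static int rx_poll(unsigned int *pending, unsigned int budget)
{
	unsigned int work_done = desc_rx(*pending, budget);

	*pending -= work_done;
	if (work_done < budget)
		printf("done: napi_complete + ring->int_enable\n");
	else
		printf("budget exhausted: stay scheduled\n");
	return (int)work_done;
}

int main(void)
{
	unsigned int pending = 100;

	while (pending)
		rx_poll(&pending, 64);	/* weight of 64, as in the driver */
	return 0;
}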
@@ -1658,8 +1696,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 {
 	struct device *kdev = &priv->pdev->dev;
 	int ret;
-	u32 reg, cpu_mask_clear;
-	int index;
+	u32 reg;
+	u32 int0_enable = 0;
+	u32 int1_enable = 0;
+	int i;
 
 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1686,15 +1726,17 @@ static int init_umac(struct bcmgenet_priv *priv)
 
 	bcmgenet_intr_disable(priv);
 
-	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
+	/* Enable Rx default queue 16 interrupts */
+	int0_enable |= (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE);
 
-	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+	/* Enable Tx default queue 16 interrupts */
+	int0_enable |= (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE);
 
 	/* Monitor cable plug/unplugged event for internal PHY */
 	if (phy_is_internal(priv->phydev)) {
-		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+		int0_enable |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
 	} else if (priv->ext_phy) {
-		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+		int0_enable |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
 	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 		reg = bcmgenet_bp_mc_get(priv);
 		reg |= BIT(priv->hw_params->bp_in_en_shift);
@@ -1709,13 +1751,18 @@ static int init_umac(struct bcmgenet_priv *priv)
 
 	/* Enable MDIO interrupts on GENET v3+ */
 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
-		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+
+	/* Enable Rx priority queue interrupts */
+	for (i = 0; i < priv->hw_params->rx_queues; ++i)
+		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
 
-	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+	/* Enable Tx priority queue interrupts */
+	for (i = 0; i < priv->hw_params->tx_queues; ++i)
+		int1_enable |= (1 << i);
 
-	for (index = 0; index < priv->hw_params->tx_queues; index++)
-		bcmgenet_intrl2_1_writel(priv, (1 << index),
-					 INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
 	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");
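init_umac() now accumulates the INTRL2_0 and INTRL2_1 enable bits into local masks and issues a single MASK_CLEAR write per block. On INTRL2_1, the Tx priority queues occupy bits 0..15 and the Rx priority queues start at bit 16 (UMAC_IRQ1_RX_INTR_SHIFT, added in the header hunk below). A small standalone check of the resulting mask (queue counts are made up):

#include <stdint.h>
#include <stdio.h>

#define UMAC_IRQ1_RX_INTR_SHIFT 16

int main(void)
{
	unsigned int tx_queues = 4, rx_queues = 4;	/* example counts */
	uint32_t int1_enable = 0;
	unsigned int i;

	for (i = 0; i < rx_queues; ++i)
		int1_enable |= 1u << (UMAC_IRQ1_RX_INTR_SHIFT + i);
	for (i = 0; i < tx_queues; ++i)
		int1_enable |= 1u << i;

	/* expect 0x000f000f: Rx rings in bits 16-19, Tx rings in 0-3 */
	printf("int1_enable = 0x%08x\n", int1_enable);
	return 0;
}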
@@ -1734,7 +1781,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 
 	spin_lock_init(&ring->lock);
 	ring->priv = priv;
-	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
 	ring->index = index;
 	if (index == DESC_INDEX) {
 		ring->queue = 0;
@@ -1778,17 +1824,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 				  TDMA_WRITE_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
 				  DMA_END_ADDR);
-
-	napi_enable(&ring->napi);
-}
-
-static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
-				  unsigned int index)
-{
-	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
-
-	napi_disable(&ring->napi);
-	netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
@@ -1800,7 +1835,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
 	u32 words_per_bd = WORDS_PER_BD(priv);
 	int ret;
 
+	ring->priv = priv;
 	ring->index = index;
+	if (index == DESC_INDEX) {
+		ring->int_enable = bcmgenet_rx_ring16_int_enable;
+		ring->int_disable = bcmgenet_rx_ring16_int_disable;
+	} else {
+		ring->int_enable = bcmgenet_rx_ring_int_enable;
+		ring->int_disable = bcmgenet_rx_ring_int_disable;
+	}
 	ring->cbs = priv->rx_cbs + start_ptr;
 	ring->size = size;
 	ring->c_index = 0;
@@ -1836,6 +1879,62 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
 	return ret;
 }
 
+static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+}
+
+static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		napi_enable(&ring->napi);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		napi_disable(&ring->napi);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		netif_napi_del(&ring->napi);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	netif_napi_del(&ring->napi);
+}
+
 /* Initialize Tx queues
  *
  * Queues 0-3 are priority-based, each one has 32 descriptors,
@@ -1896,6 +1995,9 @@ static void bcmgenet_init_tx_queues(struct net_device *dev)
 	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
 	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
 
+	/* Initialize Tx NAPI */
+	bcmgenet_init_tx_napi(priv);
+
 	/* Enable Tx queues */
 	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
 
@@ -1905,6 +2007,62 @@ static void bcmgenet_init_tx_queues(struct net_device *dev)
 	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
 }
 
+static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+}
+
+static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		napi_enable(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		napi_disable(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		netif_napi_del(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	netif_napi_del(&ring->napi);
+}
+
 /* Initialize Rx queues
  *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
@@ -1954,6 +2112,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
 	ring_cfg |= (1 << DESC_INDEX);
 	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
 
+	/* Initialize Rx NAPI */
+	bcmgenet_init_rx_napi(priv);
+
 	/* Enable rings */
 	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
 
@@ -2037,12 +2198,8 @@ static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
-	int i;
-
-	bcmgenet_fini_tx_ring(priv, DESC_INDEX);
-
-	for (i = 0; i < priv->hw_params->tx_queues; i++)
-		bcmgenet_fini_tx_ring(priv, i);
+	bcmgenet_fini_rx_napi(priv);
+	bcmgenet_fini_tx_napi(priv);
 
 	__bcmgenet_fini_dma(priv);
 }
@@ -2056,9 +2213,6 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 
 	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
 
-	/* Init rDma */
-	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
-
 	/* Initialize common Rx ring structures */
 	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
 	priv->num_rx_bds = TOTAL_DESC;
@@ -2072,25 +2226,13 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
 	}
 
-	/* Initialize Rx queues */
-	ret = bcmgenet_init_rx_queues(priv->dev);
-	if (ret) {
-		netdev_err(priv->dev, "failed to initialize Rx queues\n");
-		bcmgenet_free_rx_buffers(priv);
-		kfree(priv->rx_cbs);
-		return ret;
-	}
-
-	/* Init tDma */
-	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
-
 	/* Initialize common TX ring structures */
 	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
 	priv->num_tx_bds = TOTAL_DESC;
 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
 			       GFP_KERNEL);
 	if (!priv->tx_cbs) {
-		__bcmgenet_fini_dma(priv);
+		kfree(priv->rx_cbs);
 		return -ENOMEM;
 	}
 
@@ -2099,28 +2241,26 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
 	}
 
-	/* Initialize Tx queues */
-	bcmgenet_init_tx_queues(priv->dev);
-
-	return 0;
-}
-
-/* NAPI polling method*/
-static int bcmgenet_poll(struct napi_struct *napi, int budget)
-{
-	struct bcmgenet_priv *priv = container_of(napi,
-			struct bcmgenet_priv, napi);
-	unsigned int work_done;
-
-	work_done = bcmgenet_desc_rx(priv, DESC_INDEX, budget);
-
-	if (work_done < budget) {
-		napi_complete(napi);
-		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
-					 INTRL2_CPU_MASK_CLEAR);
-	}
-
-	return work_done;
-}
+	/* Init rDma */
+	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+	/* Initialize Rx queues */
+	ret = bcmgenet_init_rx_queues(priv->dev);
+	if (ret) {
+		netdev_err(priv->dev, "failed to initialize Rx queues\n");
+		bcmgenet_free_rx_buffers(priv);
+		kfree(priv->rx_cbs);
+		kfree(priv->tx_cbs);
+		return ret;
+	}
+
+	/* Init tDma */
+	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+	/* Initialize Tx queues */
+	bcmgenet_init_tx_queues(priv->dev);
+
+	return 0;
+}
 
 /* Interrupt bottom half */
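The reordering above also tightens error unwinding in bcmgenet_init_dma(): both control-block arrays now exist before any queue is brought up, so the Rx-queue failure path can release them with plain kfree() rather than calling __bcmgenet_fini_dma() on a half-initialized device. The flow, reduced to an allocation/unwind skeleton (standalone C, names illustrative):

#include <stdlib.h>

struct cb { int dummy; };

static int init_rx_queues(void) { return 0; }	/* stand-in */

static int init_dma(struct cb **rx_cbs, struct cb **tx_cbs, size_t n)
{
	*rx_cbs = calloc(n, sizeof(**rx_cbs));
	if (!*rx_cbs)
		return -1;

	*tx_cbs = calloc(n, sizeof(**tx_cbs));
	if (!*tx_cbs) {
		free(*rx_cbs);	/* mirrors the kfree(priv->rx_cbs) path */
		return -1;
	}

	if (init_rx_queues()) {
		free(*rx_cbs);	/* error path can now free both arrays */
		free(*tx_cbs);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct cb *rx, *tx;

	if (init_dma(&rx, &tx, 256))
		return 1;
	free(rx);
	free(tx);
	return 0;
}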
@@ -2147,50 +2287,66 @@ static void bcmgenet_irq_task(struct work_struct *work)
 	}
 }
 
-/* bcmgenet_isr1: interrupt handler for ring buffer. */
+/* bcmgenet_isr1: handle Rx and Tx priority queues */
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
-	struct bcmgenet_tx_ring *ring;
+	struct bcmgenet_rx_ring *rx_ring;
+	struct bcmgenet_tx_ring *tx_ring;
 	unsigned int index;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq1_stat =
 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+
 	/* clear interrupts */
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
 
-	/* Check the MBDONE interrupts.
-	 * packet is done, reclaim descriptors
-	 */
+	/* Check Rx priority queue interrupts */
+	for (index = 0; index < priv->hw_params->rx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+			continue;
+
+		rx_ring = &priv->rx_rings[index];
+
+		if (likely(napi_schedule_prep(&rx_ring->napi))) {
+			rx_ring->int_disable(rx_ring);
+			__napi_schedule(&rx_ring->napi);
+		}
+	}
+
+	/* Check Tx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
 		if (!(priv->irq1_stat & BIT(index)))
 			continue;
 
-		ring = &priv->tx_rings[index];
+		tx_ring = &priv->tx_rings[index];
 
-		if (likely(napi_schedule_prep(&ring->napi))) {
-			ring->int_disable(priv, ring);
-			__napi_schedule(&ring->napi);
+		if (likely(napi_schedule_prep(&tx_ring->napi))) {
+			tx_ring->int_disable(tx_ring);
+			__napi_schedule(&tx_ring->napi);
 		}
 	}
 
 	return IRQ_HANDLED;
 }
 
-/* bcmgenet_isr0: Handle various interrupts. */
+/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_rx_ring *rx_ring;
+	struct bcmgenet_tx_ring *tx_ring;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq0_stat =
 		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+
 	/* clear interrupts */
 	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 
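Both ISRs use the same per-ring idiom: napi_schedule_prep() atomically claims the NAPI instance, and only the winner masks the ring's interrupt and schedules the poll, so a racing interrupt cannot double-schedule the same ring. A standalone model of that claim/disable/schedule sequence (C11 atomics standing in for the kernel's NAPI state machine):

#include <stdatomic.h>
#include <stdio.h>

struct ring {
	atomic_flag napi_sched;	/* set while a poll is pending/running */
	int irq_enabled;
};

/* returns nonzero only for the first caller, like the kernel helper */
static int napi_schedule_prep(struct ring *r)
{
	return !atomic_flag_test_and_set(&r->napi_sched);
}

static void isr(struct ring *r)
{
	if (napi_schedule_prep(r)) {
		r->irq_enabled = 0;	/* ring->int_disable(ring) */
		printf("poll scheduled, irq masked\n");
	} else {
		printf("poll already pending, nothing to do\n");
	}
}

int main(void)
{
	struct ring r = { .napi_sched = ATOMIC_FLAG_INIT, .irq_enabled = 1 };

	isr(&r);	/* first interrupt schedules the poll */
	isr(&r);	/* second is a no-op while poll is pending */
	return 0;
}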
@@ -2198,25 +2354,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2198 "IRQ=0x%x\n", priv->irq0_stat); 2354 "IRQ=0x%x\n", priv->irq0_stat);
2199 2355
2200 if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { 2356 if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
2201 /* We use NAPI(software interrupt throttling, if 2357 rx_ring = &priv->rx_rings[DESC_INDEX];
2202 * Rx Descriptor throttling is not used. 2358
2203 * Disable interrupt, will be enabled in the poll method. 2359 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2204 */ 2360 rx_ring->int_disable(rx_ring);
2205 if (likely(napi_schedule_prep(&priv->napi))) { 2361 __napi_schedule(&rx_ring->napi);
2206 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
2207 INTRL2_CPU_MASK_SET);
2208 __napi_schedule(&priv->napi);
2209 } 2362 }
2210 } 2363 }
2211 if (priv->irq0_stat &
2212 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
2213 struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
2214 2364
2215 if (likely(napi_schedule_prep(&ring->napi))) { 2365 if (priv->irq0_stat & (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
2216 ring->int_disable(priv, ring); 2366 tx_ring = &priv->tx_rings[DESC_INDEX];
2217 __napi_schedule(&ring->napi); 2367
2368 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2369 tx_ring->int_disable(tx_ring);
2370 __napi_schedule(&tx_ring->napi);
2218 } 2371 }
2219 } 2372 }
2373
2220 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | 2374 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2221 UMAC_IRQ_PHY_DET_F | 2375 UMAC_IRQ_PHY_DET_F |
2222 UMAC_IRQ_LINK_UP | 2376 UMAC_IRQ_LINK_UP |
@@ -2463,7 +2617,8 @@ static void bcmgenet_netif_start(struct net_device *dev)
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 
 	/* Start the network engine */
-	napi_enable(&priv->napi);
+	bcmgenet_enable_rx_napi(priv);
+	bcmgenet_enable_tx_napi(priv);
 
 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
 
@@ -2568,10 +2723,10 @@ static void bcmgenet_netif_stop(struct net_device *dev)
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 
 	netif_tx_stop_all_queues(dev);
-	napi_disable(&priv->napi);
 	phy_stop(priv->phydev);
-
 	bcmgenet_intr_disable(priv);
+	bcmgenet_disable_rx_napi(priv);
+	bcmgenet_disable_tx_napi(priv);
 
 	/* Wait for pending work items to complete. Since interrupts are
 	 * disabled no new work will be scheduled.
@@ -2972,7 +3127,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	dev->watchdog_timeo = 2 * HZ;
 	dev->ethtool_ops = &bcmgenet_ethtool_ops;
 	dev->netdev_ops = &bcmgenet_netdev_ops;
-	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
 
 	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7a59879d441f..a834da1dfe4c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -310,6 +310,11 @@ struct bcmgenet_mib_counters {
 #define UMAC_IRQ_MDIO_DONE		(1 << 23)
 #define UMAC_IRQ_MDIO_ERROR		(1 << 24)
 
+/* INTRL2 instance 1 definitions */
+#define UMAC_IRQ1_TX_INTR_MASK		0xFFFF
+#define UMAC_IRQ1_RX_INTR_MASK		0xFFFF
+#define UMAC_IRQ1_RX_INTR_SHIFT		16
+
 /* Register block offsets */
 #define GENET_SYS_OFF			0x0000
 #define GENET_GR_BRIDGE_OFF		0x0040
@@ -535,14 +540,13 @@ struct bcmgenet_tx_ring {
 	unsigned int	prod_index;	/* Tx ring producer index SW copy */
 	unsigned int	cb_ptr;		/* Tx ring initial CB ptr */
 	unsigned int	end_ptr;	/* Tx ring end CB ptr */
-	void (*int_enable)(struct bcmgenet_priv *priv,
-			   struct bcmgenet_tx_ring *);
-	void (*int_disable)(struct bcmgenet_priv *priv,
-			    struct bcmgenet_tx_ring *);
+	void (*int_enable)(struct bcmgenet_tx_ring *);
+	void (*int_disable)(struct bcmgenet_tx_ring *);
 	struct bcmgenet_priv *priv;
 };
 
 struct bcmgenet_rx_ring {
+	struct napi_struct napi;	/* Rx NAPI struct */
 	unsigned int	index;		/* Rx ring index */
 	struct enet_cb	*cbs;		/* Rx ring buffer control block */
 	unsigned int	size;		/* Rx ring size */
@@ -551,6 +555,9 @@ struct bcmgenet_rx_ring {
 	unsigned int	cb_ptr;		/* Rx ring initial CB ptr */
 	unsigned int	end_ptr;	/* Rx ring end CB ptr */
 	unsigned int	old_discards;
+	void (*int_enable)(struct bcmgenet_rx_ring *);
+	void (*int_disable)(struct bcmgenet_rx_ring *);
+	struct bcmgenet_priv *priv;
 };
 
 /* device context */
@@ -558,11 +565,6 @@ struct bcmgenet_priv {
 	void __iomem *base;
 	enum bcmgenet_version version;
 	struct net_device *dev;
-	u32 int0_mask;
-	u32 int1_mask;
-
-	/* NAPI for descriptor based rx */
-	struct napi_struct napi ____cacheline_aligned;
 
 	/* transmit variables */
 	void __iomem *tx_bds;