Diffstat (limited to 'drivers/net/ethernet/broadcom/genet/bcmgenet.c')

 -rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 122
 1 file changed, 92 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46bc389..6befde61c203 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
 	BCMGENET_STAT_MIB_TX,
 	BCMGENET_STAT_RUNT,
 	BCMGENET_STAT_MISC,
+	BCMGENET_STAT_SOFT,
 };
 
 struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 
 #define STAT_GENET_MISC(str, m, offset) { \
 	.stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 			UMAC_RBUF_OVFL_CNT),
 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
-	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 		s = &bcmgenet_gstrings_stats[i];
 		switch (s->type) {
 		case BCMGENET_STAT_NETDEV:
+		case BCMGENET_STAT_SOFT:
 			continue;
 		case BCMGENET_STAT_MIB_RX:
 		case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 }
 
 /* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
-				  struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
+	unsigned int pkts_compl = 0;
 	unsigned int bds_compl;
 	unsigned int c_index;
 
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		tx_cb_ptr = ring->cbs + last_c_index;
 		bds_compl = 0;
 		if (tx_cb_ptr->skb) {
+			pkts_compl++;
 			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		last_c_index &= (num_tx_bds - 1);
 	}
 
-	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
-		ring->int_disable(priv, ring);
-
-	if (netif_tx_queue_stopped(txq))
-		netif_tx_wake_queue(txq);
+	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		if (netif_tx_queue_stopped(txq))
+			netif_tx_wake_queue(txq);
+	}
 
 	ring->c_index = c_index;
+
+	return pkts_compl;
 }
 
-static void bcmgenet_tx_reclaim(struct net_device *dev,
-				struct bcmgenet_tx_ring *ring)
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
+					struct bcmgenet_tx_ring *ring)
 {
+	unsigned int released;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ring->lock, flags);
-	__bcmgenet_tx_reclaim(dev, ring);
+	released = __bcmgenet_tx_reclaim(dev, ring);
 	spin_unlock_irqrestore(&ring->lock, flags);
+
+	return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_tx_ring *ring =
+		container_of(napi, struct bcmgenet_tx_ring, napi);
+	unsigned int work_done = 0;
+
+	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+	if (work_done == 0) {
+		napi_complete(napi);
+		ring->int_enable(ring->priv, ring);
+
+		return 0;
+	}
+
+	return budget;
 }
 
 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	bcmgenet_tdma_ring_writel(priv, ring->index,
 				  ring->prod_index, TDMA_PROD_INDEX);
 
-	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
 		netif_tx_stop_queue(txq);
-		ring->int_enable(priv, ring);
-	}
 
 out:
 	spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 	struct device *kdev = &priv->pdev->dev;
 	int ret;
 	u32 reg, cpu_mask_clear;
+	int index;
 
 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 
 	bcmgenet_intr_disable(priv);
 
-	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
 
 	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 
 	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
+	for (index = 0; index < priv->hw_params->tx_queues; index++)
+		bcmgenet_intrl2_1_writel(priv, (1 << index),
+					 INTRL2_CPU_MASK_CLEAR);
+
 	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");
 
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 	unsigned int first_bd;
 
 	spin_lock_init(&ring->lock);
+	ring->priv = priv;
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
 	ring->index = index;
 	if (index == DESC_INDEX) {
 		ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 				  TDMA_WRITE_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
 				  DMA_END_ADDR);
+
+	napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+				  unsigned int index)
+{
+	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+	napi_disable(&ring->napi);
+	netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 	return ret;
 }
 
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
 	int i;
 
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 	kfree(priv->tx_cbs);
 }
 
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+	int i;
+
+	bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+	for (i = 0; i < priv->hw_params->tx_queues; i++)
+		bcmgenet_fini_tx_ring(priv, i);
+
+	__bcmgenet_fini_dma(priv);
+}
+
 /* init_edma: Initialize DMA control register */
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
 			       GFP_KERNEL);
 	if (!priv->tx_cbs) {
-		bcmgenet_fini_dma(priv);
+		__bcmgenet_fini_dma(priv);
 		return -ENOMEM;
 	}
 
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
 					     struct bcmgenet_priv, napi);
 	unsigned int work_done;
 
-	/* tx reclaim */
-	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
 	work_done = bcmgenet_desc_rx(priv, budget);
 
 	/* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_tx_ring *ring;
 	unsigned int index;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq1_stat =
 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
-		~priv->int1_mask;
+		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 	/* clear interrupts */
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
 	/* Check the MBDONE interrupts.
 	 * packet is done, reclaim descriptors
 	 */
-	if (priv->irq1_stat & 0x0000ffff) {
-		index = 0;
-		for (index = 0; index < 16; index++) {
-			if (priv->irq1_stat & (1 << index))
-				bcmgenet_tx_reclaim(priv->dev,
-						    &priv->tx_rings[index]);
+	for (index = 0; index < priv->hw_params->tx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(index)))
+			continue;
+
+		ring = &priv->tx_rings[index];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
 		}
 	}
+
 	return IRQ_HANDLED;
 }
 
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 	}
 	if (priv->irq0_stat &
 			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-		/* Tx reclaim */
-		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
+		}
 	}
 	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
 				UMAC_IRQ_PHY_DET_F |
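
For readers unfamiliar with the pattern the patch adopts: the hard interrupt handler now only masks the ring's TX-done interrupt and schedules NAPI, while the per-ring poll callback reclaims completed descriptors and re-enables the interrupt once the ring is quiet. The sketch below is a minimal, self-contained illustration of that handshake, not bcmgenet code; the my_* names, the int_enable/int_disable callbacks and the stubbed my_tx_reclaim() helper are placeholders.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Illustrative per-ring context; not the bcmgenet_tx_ring layout. */
struct my_tx_ring {
	struct napi_struct napi;
	struct net_device *ndev;
	void (*int_enable)(struct my_tx_ring *ring);	/* unmask ring IRQ */
	void (*int_disable)(struct my_tx_ring *ring);	/* mask ring IRQ */
};

/* Stub: a real driver would walk the ring's consumer index here and
 * return the number of completed packets (cf. __bcmgenet_tx_reclaim).
 */
static unsigned int my_tx_reclaim(struct net_device *dev,
				  struct my_tx_ring *ring)
{
	return 0;
}

/* Hard IRQ: mask further TX-done interrupts and defer work to NAPI. */
static irqreturn_t my_tx_isr(int irq, void *dev_id)
{
	struct my_tx_ring *ring = dev_id;

	if (likely(napi_schedule_prep(&ring->napi))) {
		ring->int_disable(ring);
		__napi_schedule(&ring->napi);
	}

	return IRQ_HANDLED;
}

/* NAPI poll: reclaim descriptors; once idle, complete and re-arm the IRQ. */
static int my_tx_poll(struct napi_struct *napi, int budget)
{
	struct my_tx_ring *ring = container_of(napi, struct my_tx_ring, napi);
	unsigned int work_done;

	work_done = my_tx_reclaim(ring->ndev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);
		return 0;
	}

	return budget;
}

The poll callback would be registered per ring with netif_napi_add(dev, &ring->napi, my_tx_poll, 64) and enabled with napi_enable(), mirroring what bcmgenet_init_tx_ring() does above.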
