| author | Petri Gynther <pgynther@google.com> | 2015-03-25 15:35:16 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-03-27 17:26:16 -0400 |
| commit | 4055eaefb3603a2a55305c81292379922a742131 (patch) | |
| tree | 1ffcc5ab7dd245e0f828c34b27fad2adcf00ff18 /drivers | |
| parent | 3ab113399b633bacb500a903d2f96f25ded2226c (diff) | |
net: bcmgenet: add support for multiple Rx queues
Add support for multiple Rx queues:
1. Add NAPI context per Rx queue
2. Modify Rx interrupt and Rx NAPI code to handle multiple Rx queues
Signed-off-by: Petri Gynther <pgynther@google.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
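
As an orientation aid, here is a minimal, self-contained sketch (not the driver code itself) of the per-ring int_enable/int_disable dispatch this patch introduces: the default Rx queue (ring 16, DESC_INDEX) is masked through INTRL2 instance 0, while each priority Rx queue owns one bit in INTRL2 instance 1 above UMAC_IRQ1_RX_INTR_SHIFT. Register writes are stubbed with printf, and the UMAC_IRQ_RXDMA_* bit values below are illustrative placeholders, not the real bcmgenet.h definitions.

```c
/* Standalone sketch of the per-Rx-ring interrupt mask/unmask dispatch. */
#include <stdio.h>

#define DESC_INDEX              16          /* default (ring 16) Rx queue */
#define UMAC_IRQ1_RX_INTR_SHIFT 16          /* from bcmgenet.h */
#define UMAC_IRQ_RXDMA_BDONE    (1 << 13)   /* illustrative value only */
#define UMAC_IRQ_RXDMA_PDONE    (1 << 14)   /* illustrative value only */

struct rx_ring {
	unsigned int index;
	void (*int_enable)(struct rx_ring *ring);
	void (*int_disable)(struct rx_ring *ring);
};

/* Default Rx queue: masked/unmasked through INTRL2 instance 0 */
static void ring16_int_enable(struct rx_ring *ring)
{
	printf("INTRL2_0 MASK_CLEAR 0x%x (ring %u)\n",
	       UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE, ring->index);
}

static void ring16_int_disable(struct rx_ring *ring)
{
	printf("INTRL2_0 MASK_SET 0x%x (ring %u)\n",
	       UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE, ring->index);
}

/* Priority Rx queues: one bit per ring in INTRL2 instance 1 */
static void ring_int_enable(struct rx_ring *ring)
{
	printf("INTRL2_1 MASK_CLEAR bit %u (ring %u)\n",
	       UMAC_IRQ1_RX_INTR_SHIFT + ring->index, ring->index);
}

static void ring_int_disable(struct rx_ring *ring)
{
	printf("INTRL2_1 MASK_SET bit %u (ring %u)\n",
	       UMAC_IRQ1_RX_INTR_SHIFT + ring->index, ring->index);
}

/* Mirrors the callback selection done in bcmgenet_init_rx_ring() */
static void init_rx_ring(struct rx_ring *ring, unsigned int index)
{
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = ring16_int_enable;
		ring->int_disable = ring16_int_disable;
	} else {
		ring->int_enable = ring_int_enable;
		ring->int_disable = ring_int_disable;
	}
}

int main(void)
{
	struct rx_ring prio, def;

	init_rx_ring(&prio, 0);
	init_rx_ring(&def, DESC_INDEX);

	/* ISR path masks the ring's interrupt; NAPI poll unmasks it */
	prio.int_disable(&prio);
	prio.int_enable(&prio);
	def.int_disable(&def);
	def.int_enable(&def);
	return 0;
}
```

In the driver itself, bcmgenet_isr0()/bcmgenet_isr1() call int_disable() before scheduling NAPI, and bcmgenet_rx_poll() calls int_enable() once the budget is not exhausted; main() above mimics that mask/unmask sequence.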
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  170
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h   12
2 files changed, 137 insertions, 45 deletions
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index dc3b1faf6bbd..31e14079e1d7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -964,6 +964,34 @@ static void bcmgenet_free_cb(struct enet_cb *cb)
 	dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
+static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv,
+				 UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE,
+				 INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv,
+				 UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE,
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv,
+				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+				 INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv,
+				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
 static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
 {
 	bcmgenet_intrl2_0_writel(ring->priv,
@@ -1390,11 +1418,10 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
 /* bcmgenet_desc_rx - descriptor based rx process.
  * this could be called from bottom half, or from NAPI polling method.
  */
-static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
-				     unsigned int index,
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 				     unsigned int budget)
 {
-	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
+	struct bcmgenet_priv *priv = ring->priv;
 	struct net_device *dev = priv->dev;
 	struct enet_cb *cb;
 	struct sk_buff *skb;
@@ -1406,7 +1433,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 	unsigned int discards;
 	unsigned int chksum_ok = 0;
 
-	p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX);
+	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
 
 	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
 		   DMA_P_INDEX_DISCARD_CNT_MASK;
@@ -1419,7 +1446,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		/* Clear HW register when we reach 75% of maximum 0xFFFF */
 		if (ring->old_discards >= 0xC000) {
 			ring->old_discards = 0;
-			bcmgenet_rdma_ring_writel(priv, index, 0,
+			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
 						  RDMA_PROD_INDEX);
 		}
 	}
@@ -1527,7 +1554,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		dev->stats.multicast++;
 
 		/* Notify kernel */
-		napi_gro_receive(&priv->napi, skb);
+		napi_gro_receive(&ring->napi, skb);
 		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
 
 next:
@@ -1538,7 +1565,7 @@ next:
 			ring->read_ptr = ring->cb_ptr;
 
 		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
-		bcmgenet_rdma_ring_writel(priv, index, ring->c_index, RDMA_CONS_INDEX);
+		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
 	}
 
 	return rxpktprocessed;
@@ -1547,17 +1574,15 @@ next:
 /* Rx NAPI polling method */
 static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
 {
-	struct bcmgenet_priv *priv = container_of(napi,
-			struct bcmgenet_priv, napi);
+	struct bcmgenet_rx_ring *ring = container_of(napi,
+			struct bcmgenet_rx_ring, napi);
 	unsigned int work_done;
 
-	work_done = bcmgenet_desc_rx(priv, DESC_INDEX, budget);
+	work_done = bcmgenet_desc_rx(ring, budget);
 
 	if (work_done < budget) {
 		napi_complete(napi);
-		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE |
-					 UMAC_IRQ_RXDMA_PDONE,
-					 INTRL2_CPU_MASK_CLEAR);
+		ring->int_enable(ring);
 	}
 
 	return work_done;
@@ -1728,6 +1753,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
 		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
 
+	/* Enable Rx priority queue interrupts */
+	for (i = 0; i < priv->hw_params->rx_queues; ++i)
+		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
+
 	/* Enable Tx priority queue interrupts */
 	for (i = 0; i < priv->hw_params->tx_queues; ++i)
 		int1_enable |= (1 << i);
@@ -1806,7 +1835,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
 	u32 words_per_bd = WORDS_PER_BD(priv);
 	int ret;
 
+	ring->priv = priv;
 	ring->index = index;
+	if (index == DESC_INDEX) {
+		ring->int_enable = bcmgenet_rx_ring16_int_enable;
+		ring->int_disable = bcmgenet_rx_ring16_int_disable;
+	} else {
+		ring->int_enable = bcmgenet_rx_ring_int_enable;
+		ring->int_disable = bcmgenet_rx_ring_int_disable;
+	}
 	ring->cbs = priv->rx_cbs + start_ptr;
 	ring->size = size;
 	ring->c_index = 0;
@@ -1972,22 +2009,58 @@ static void bcmgenet_init_tx_queues(struct net_device *dev)
 
 static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
 {
-	netif_napi_add(priv->dev, &priv->napi, bcmgenet_rx_poll, 64);
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
 }
 
 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
 {
-	napi_enable(&priv->napi);
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		napi_enable(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	napi_enable(&ring->napi);
 }
 
 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
 {
-	napi_disable(&priv->napi);
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		napi_disable(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	napi_disable(&ring->napi);
 }
 
 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
 {
-	netif_napi_del(&priv->napi);
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		netif_napi_del(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	netif_napi_del(&ring->napi);
 }
 
 /* Initialize Rx queues
@@ -2214,50 +2287,66 @@ static void bcmgenet_irq_task(struct work_struct *work)
 	}
 }
 
-/* bcmgenet_isr1: interrupt handler for ring buffer. */
+/* bcmgenet_isr1: handle Rx and Tx priority queues */
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
-	struct bcmgenet_tx_ring *ring;
+	struct bcmgenet_rx_ring *rx_ring;
+	struct bcmgenet_tx_ring *tx_ring;
 	unsigned int index;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq1_stat =
 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+
 	/* clear interrupts */
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
 
-	/* Check the MBDONE interrupts.
-	 * packet is done, reclaim descriptors
-	 */
+	/* Check Rx priority queue interrupts */
+	for (index = 0; index < priv->hw_params->rx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+			continue;
+
+		rx_ring = &priv->rx_rings[index];
+
+		if (likely(napi_schedule_prep(&rx_ring->napi))) {
+			rx_ring->int_disable(rx_ring);
+			__napi_schedule(&rx_ring->napi);
+		}
+	}
+
+	/* Check Tx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
 		if (!(priv->irq1_stat & BIT(index)))
 			continue;
 
-		ring = &priv->tx_rings[index];
+		tx_ring = &priv->tx_rings[index];
 
-		if (likely(napi_schedule_prep(&ring->napi))) {
-			ring->int_disable(ring);
-			__napi_schedule(&ring->napi);
+		if (likely(napi_schedule_prep(&tx_ring->napi))) {
+			tx_ring->int_disable(tx_ring);
+			__napi_schedule(&tx_ring->napi);
 		}
 	}
 
 	return IRQ_HANDLED;
 }
 
-/* bcmgenet_isr0: Handle various interrupts. */
+/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_rx_ring *rx_ring;
+	struct bcmgenet_tx_ring *tx_ring;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq0_stat =
 		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+
 	/* clear interrupts */
 	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 
@@ -2265,26 +2354,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 		  "IRQ=0x%x\n", priv->irq0_stat);
 
 	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
-		/* We use NAPI(software interrupt throttling, if
-		 * Rx Descriptor throttling is not used.
-		 * Disable interrupt, will be enabled in the poll method.
-		 */
-		if (likely(napi_schedule_prep(&priv->napi))) {
-			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE |
-						 UMAC_IRQ_RXDMA_PDONE,
-						 INTRL2_CPU_MASK_SET);
-			__napi_schedule(&priv->napi);
+		rx_ring = &priv->rx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&rx_ring->napi))) {
+			rx_ring->int_disable(rx_ring);
+			__napi_schedule(&rx_ring->napi);
 		}
 	}
-	if (priv->irq0_stat &
-			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
 
-		if (likely(napi_schedule_prep(&ring->napi))) {
-			ring->int_disable(ring);
-			__napi_schedule(&ring->napi);
+	if (priv->irq0_stat & (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+		tx_ring = &priv->tx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&tx_ring->napi))) {
+			tx_ring->int_disable(tx_ring);
+			__napi_schedule(&tx_ring->napi);
 		}
 	}
+
 	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
 			       UMAC_IRQ_PHY_DET_F |
 			       UMAC_IRQ_LINK_UP |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7b11e7a7e153..a834da1dfe4c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -310,6 +310,11 @@ struct bcmgenet_mib_counters {
 #define UMAC_IRQ_MDIO_DONE		(1 << 23)
 #define UMAC_IRQ_MDIO_ERROR		(1 << 24)
 
+/* INTRL2 instance 1 definitions */
+#define UMAC_IRQ1_TX_INTR_MASK		0xFFFF
+#define UMAC_IRQ1_RX_INTR_MASK		0xFFFF
+#define UMAC_IRQ1_RX_INTR_SHIFT		16
+
 /* Register block offsets */
 #define GENET_SYS_OFF			0x0000
 #define GENET_GR_BRIDGE_OFF		0x0040
@@ -541,6 +546,7 @@ struct bcmgenet_tx_ring {
 };
 
 struct bcmgenet_rx_ring {
+	struct napi_struct napi;	/* Rx NAPI struct */
 	unsigned int	index;		/* Rx ring index */
 	struct enet_cb	*cbs;		/* Rx ring buffer control block */
 	unsigned int	size;		/* Rx ring size */
@@ -549,6 +555,9 @@ struct bcmgenet_rx_ring {
 	unsigned int	cb_ptr;		/* Rx ring initial CB ptr */
 	unsigned int	end_ptr;	/* Rx ring end CB ptr */
 	unsigned int	old_discards;
+	void (*int_enable)(struct bcmgenet_rx_ring *);
+	void (*int_disable)(struct bcmgenet_rx_ring *);
+	struct bcmgenet_priv *priv;
 };
 
 /* device context */
554 | /* device context */ | 563 | /* device context */ |
@@ -557,9 +566,6 @@ struct bcmgenet_priv { | |||
557 | enum bcmgenet_version version; | 566 | enum bcmgenet_version version; |
558 | struct net_device *dev; | 567 | struct net_device *dev; |
559 | 568 | ||
560 | /* NAPI for descriptor based rx */ | ||
561 | struct napi_struct napi ____cacheline_aligned; | ||
562 | |||
563 | /* transmit variables */ | 569 | /* transmit variables */ |
564 | void __iomem *tx_bds; | 570 | void __iomem *tx_bds; |
565 | struct enet_cb *tx_cbs; | 571 | struct enet_cb *tx_cbs; |