Diffstat (limited to 'drivers/net/ethernet')
62 files changed, 939 insertions, 352 deletions
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 29554992215a..2349ea970255 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
 {
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int rxcsum, rxvlan, rxvlan_filter;
+	netdev_features_t rxcsum, rxvlan, rxvlan_filter;

	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
@@ -1598,7 +1598,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, put_len, max_len;
-	int received = 0;
+	unsigned int received = 0;
+	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

@@ -1608,7 +1609,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
-	while (received < budget) {
+	while (packet_count < budget) {
		DBGPR(" cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
@@ -1662,7 +1663,7 @@ read_again:
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
-			continue;
+			goto next_packet;
		}

		if (!context) {
@@ -1677,7 +1678,7 @@ read_again:
				}

				dev_kfree_skb(skb);
-				continue;
+				goto next_packet;
			}
			memcpy(skb_tail_pointer(skb), rdata->skb->data,
			       put_len);
@@ -1694,7 +1695,7 @@ read_again:

		/* Stray Context Descriptor? */
		if (!skb)
-			continue;
+			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
@@ -1705,7 +1706,7 @@ read_again:
		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
-			continue;
+			goto next_packet;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
@@ -1739,6 +1740,9 @@ read_again:

		netdev->last_rx = jiffies;
		napi_gro_receive(&pdata->napi, skb);
+
+next_packet:
+		packet_count++;
	}

	/* Check if we need to save state before leaving */
@@ -1752,9 +1756,9 @@ read_again:
		rdata->state.error = error;
	}

-	DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

-	return received;
+	return packet_count;
 }

 static int xgbe_poll(struct napi_struct *napi, int budget)
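
The xgbe hunks above are a standard NAPI budget-accounting fix: every path that drops a packet used to `continue` without bumping the counter compared against `budget`, so a burst of bad or stray descriptors could keep the loop spinning past its quota. Below is a minimal sketch of the corrected pattern in isolation; the channel type and helpers (`example_fetch`, `example_bad`, `example_deliver`) are hypothetical stand-ins, not the driver's actual API.

    /* Sketch of NAPI budget accounting; helpers are hypothetical. */
    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    struct example_channel;
    static struct sk_buff *example_fetch(struct example_channel *ch);
    static bool example_bad(const struct sk_buff *skb);
    static void example_deliver(struct example_channel *ch, struct sk_buff *skb);

    static int example_rx_poll(struct example_channel *ch, int budget)
    {
        int packet_count = 0;

        while (packet_count < budget) {
            struct sk_buff *skb = example_fetch(ch);

            if (!skb)
                break;                  /* ring empty: stop early */

            if (example_bad(skb)) {
                dev_kfree_skb(skb);
                goto next_packet;       /* drops still consume budget */
            }

            example_deliver(ch, skb);
    next_packet:
            packet_count++;             /* count every completed descriptor */
        }

        return packet_count;    /* < budget lets NAPI re-arm interrupts */
    }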
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 63ea1941e973..7ba83ffb08ac 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
 }

-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+		return false;
+
+	if (ioread32(p->ring_csr_addr + SRST_ADDR))
+		return false;
+
+	return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
	u32 val;

+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
	clk_prepare_enable(pdata->clk);
	clk_disable_unprepare(pdata->clk);
	clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+	return 0;
 }

 static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
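
The new xgene_ring_mgr_init() is a readiness probe: before the driver resets and programs the ethernet block, it checks that the shared ring manager's clock is enabled and that the block is out of reset, and reset() now reports -ENODEV instead of silently proceeding. A condensed sketch of that probe pattern, reusing the offsets the hunk adds (CLKEN 0xc208, SRST 0xc200) but with everything else hypothetical:

    /* Sketch: probe shared-block readiness before touching the hardware. */
    #include <linux/io.h>

    static bool example_ring_mgr_ready(void __iomem *ring_csr)
    {
        if (!ioread32(ring_csr + 0xc208))   /* clock not yet enabled */
            return false;

        if (ioread32(ring_csr + 0xc200))    /* block still held in reset */
            return false;

        return true;
    }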
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 38558584080e..ec45f3256f0e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
 #define BLOCK_ETH_MAC_OFFSET 0x0000
 #define BLOCK_ETH_MAC_CSR_OFFSET 0x2800

+#define CLKEN_ADDR 0xc208
+#define SRST_ADDR 0xc200
+
 #define MAC_ADDR_REG_OFFSET 0x00
 #define MAC_COMMAND_REG_OFFSET 0x04
 #define MAC_WRITE_REG_OFFSET 0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,

 int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);

 extern struct xgene_mac_ops xgene_gmac_ops;
 extern struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3c208cc6f6bb..123669696184 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
-	u8 cpu_bufnum = 0, eth_bufnum = 0;
-	u8 bp_bufnum = 0x20;
-	u16 ring_id, ring_num = 0;
+	u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+	u8 bp_bufnum = START_BP_BUFNUM;
+	u16 ring_id, ring_num = START_RING_NUM;
	int ret;

	/* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
	u16 dst_ring_num;
	int ret;

-	pdata->port_ops->reset(pdata);
+	ret = pdata->port_ops->reset(pdata);
+	if (ret)
+		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)

	return ret;
err:
+	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
 }
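
Making the xgene reset() op return int only helps if callers check it; xgene_enet_init_hw() now does, and the probe error path also gains the missing unregister_netdev() so a failed init fully unwinds register_netdev(). The shape of the change, reduced to a sketch with hypothetical types:

    /* Sketch: an ops hook that used to return void now reports failure,
     * and its caller propagates the error (hypothetical types).
     */
    struct example_pdata;
    static int example_create_desc_rings(struct example_pdata *pdata);

    struct example_port_ops {
        int (*reset)(struct example_pdata *pdata);  /* was void */
    };

    static int example_init_hw(struct example_pdata *pdata,
                               const struct example_port_ops *ops)
    {
        int ret;

        ret = ops->reset(pdata);    /* may now be -ENODEV */
        if (ret)
            return ret;             /* don't build rings the manager can't back */

        return example_create_desc_rings(pdata);
    }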
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 874e5a01161f..f9958fae6ffd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
 #define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF 64
 #define NUM_BUFPOOL 32
+#define START_ETH_BUFNUM 2
+#define START_BP_BUFNUM 0x22
+#define START_RING_NUM 8

 #define PHY_POLL_LINK_ON (10 * HZ)
 #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
 };

 struct xgene_port_ops {
-	void (*reset)(struct xgene_enet_pdata *pdata);
+	int (*reset)(struct xgene_enet_pdata *pdata);
	void (*cle_bypass)(struct xgene_enet_pdata *pdata,
			   u32 dst_ring_num, u16 bufpool_id);
	void (*shutdown)(struct xgene_enet_pdata *pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index e6d24c210198..f5d4f68c288c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
 {
	struct net_device *ndev = p->ndev;
	u32 data;
-	int i;
+	int i = 0;

	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
-	for (i = 0; i < 10 && data != ~0U ; i++) {
+	do {
		usleep_range(100, 110);
		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
-	}
-
-	if (data != ~0U) {
-		netdev_err(ndev, "Failed to release memory from shutdown\n");
-		return -ENODEV;
-	}
+		if (data == ~0U)
+			return 0;
+	} while (++i < 10);

-	return 0;
+	netdev_err(ndev, "Failed to release memory from shutdown\n");
+	return -ENODEV;
 }

 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
@@ -313,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
	xgene_sgmac_rxtx(p, TX_EN, false);
 }

-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
 {
+	if (!xgene_ring_mgr_init(p))
+		return -ENODEV;
+
	clk_prepare_enable(p->clk);
	clk_disable_unprepare(p->clk);
	clk_prepare_enable(p->clk);

	xgene_enet_ecc_init(p);
	xgene_enet_config_ring_if_assoc(p);
+
+	return 0;
 }

 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 67d07206b3c7..a18a9d1f1143 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
 }

-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
	clk_prepare_enable(pdata->clk);
	clk_disable_unprepare(pdata->clk);
	clk_prepare_enable(pdata->clk);

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);
+
+	return 0;
 }

 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9ae36979bdee..531bb7c57531 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
-	p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

+	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+	 * fail, so by checking this pointer we know whether the TX ring was
+	 * fully initialized or not.
+	 */
+	if (!ring->cbs)
+		return;
+
	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
	ring->cbs = NULL;

	if (ring->desc_dma) {
-		dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
@@ -1397,6 +1406,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
	/* Enable NAPI */
	napi_enable(&priv->napi);

+	/* Enable RX interrupt and TX ring full interrupt */
+	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1511,6 @@ static int bcm_sysport_open(struct net_device *dev)
	if (ret)
		goto out_free_rx_ring;

-	/* Enable RX interrupt and TX ring full interrupt */
-	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
@@ -1858,6 +1867,8 @@ static int bcm_sysport_resume(struct device *d)
	if (!netif_running(dev))
		return 0;

+	umac_reset(priv);
+
	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
@@ -1885,9 +1896,6 @@ static int bcm_sysport_resume(struct device *d)

	netif_device_attach(dev);

-	/* Enable RX interrupt and TX ring full interrupt */
-	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

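
The SYSTEMPORT descriptor fix replaces a magic length of 1 with the real descriptor size in both dma_zalloc_coherent() and dma_free_coherent(); the two calls must always agree, and undersizing the allocation lets the hardware DMA past the end of the buffer. Reduced to a sketch, with a hypothetical descriptor layout standing in for the driver's struct dma_desc:

    /* Sketch: size coherent DMA buffers with sizeof(), and free with the
     * same size used to allocate. Descriptor layout is hypothetical.
     */
    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    struct example_desc {
        u32 addr_status_len;
        u32 addr_lo;
    };

    static void *example_alloc_desc(struct device *kdev, dma_addr_t *dma)
    {
        /* dma_zalloc_coherent() was current in this kernel era */
        return dma_zalloc_coherent(kdev, sizeof(struct example_desc),
                                   dma, GFP_KERNEL);
    }

    static void example_free_desc(struct device *kdev, void *cpu,
                                  dma_addr_t dma)
    {
        dma_free_coherent(kdev, sizeof(struct example_desc), cpu, dma);
    }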
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 23f23c97c2ad..f05fab65d78a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

-		rcu_read_lock();
		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
-			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
@@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
			}
		}
		csk_put(csk);
-		rcu_read_unlock();
		rc = 0;
	}
 }
@@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
	cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
-	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
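
Both cnic hunks hinge on the distinction between rcu_access_pointer() and rcu_dereference(): the former merely reads the pointer value (for example to test it against NULL) and needs no RCU read-side critical section, while the latter is for pointers that will actually be dereferenced and must sit inside rcu_read_lock()/rcu_read_unlock() or be protected by the updater's lock. A minimal sketch of the two uses, with a hypothetical ops registry:

    /* Sketch: NULL-testing vs. dereferencing an RCU-protected pointer. */
    #include <linux/rcupdate.h>

    struct example_ops {
        void (*handler)(void);
    };

    static struct example_ops __rcu *example_ops_ptr;

    static bool example_ops_present(void)
    {
        /* value-only check: no rcu_read_lock() required */
        return rcu_access_pointer(example_ops_ptr) != NULL;
    }

    static void example_ops_invoke(void)
    {
        struct example_ops *ops;

        rcu_read_lock();
        ops = rcu_dereference(example_ops_ptr); /* will be dereferenced */
        if (ops)
            ops->handler();
        rcu_read_unlock();
    }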
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fdc9ec09e453..da1a2500c91c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
		goto err_irq0;
	}

+	/* Re-configure the port multiplexer towards the PHY device */
+	bcmgenet_mii_config(priv->dev, false);
+
+	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+			   priv->phy_interface);
+
	bcmgenet_netif_start(dev);

	return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)

	bcmgenet_netif_stop(dev);

+	/* Really kill the PHY state machine and disconnect from it */
+	phy_disconnect(priv->phydev);
+
	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
-	bcmgenet_mii_config(priv->dev);
+	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbf524ea3b19..31b2da5f9b82 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);

 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);

 /* Wake-on-LAN routines */
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ff799a9f801..933cd7e7cd33 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
 /* setup netdev link state when PHY link status change and
  * update UMAC and RGMII block when link up
  */
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
 {
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
 }

-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
		return -EINVAL;
	}

-	dev_info(kdev, "configuring instance for %s\n", phy_name);
+	if (init)
+		dev_info(kdev, "configuring instance for %s\n", phy_name);

	return 0;
 }
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
	 * PHY speed which is needed for bcmgenet_mii_config() to configure
	 * things appropriately.
	 */
-	ret = bcmgenet_mii_config(dev);
+	ret = bcmgenet_mii_config(dev, true);
	if (ret) {
		phy_disconnect(priv->phydev);
		return ret;
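
Together, the GENET changes make the PHY lifetime symmetric with the interface state: open() reprograms the port multiplexer and attaches via phy_connect_direct(), close() calls phy_disconnect() so the phylib state machine really stops, and the one-time "configuring instance" print is gated behind the new init flag. The attach/detach symmetry in a sketch, with a hypothetical priv layout and link handler:

    /* Sketch: attach the PHY on open and detach it on close so the phylib
     * state machine never outlives the interface (hypothetical priv).
     */
    #include <linux/netdevice.h>
    #include <linux/phy.h>

    struct example_priv {
        struct phy_device *phydev;
        phy_interface_t phy_interface;
    };

    static void example_link_change(struct net_device *dev);

    static int example_open(struct net_device *dev)
    {
        struct example_priv *priv = netdev_priv(dev);
        int ret;

        ret = phy_connect_direct(dev, priv->phydev, example_link_change,
                                 priv->phy_interface);
        if (ret)
            return ret;

        phy_start(priv->phydev);
        return 0;
    }

    static int example_close(struct net_device *dev)
    {
        struct example_priv *priv = netdev_priv(dev);

        phy_stop(priv->phydev);
        phy_disconnect(priv->phydev);   /* stops the PHY state machine */
        return 0;
    }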
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index dbb41c1923e6..77f8f836cbbe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

-		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+		if (tnapi->prodring.rx_std &&
+		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 8edf0f5bd679..4fe33606f372 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -60,6 +60,43 @@ void cxgb4_dcb_version_init(struct net_device *dev)
	dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
 }

+static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	struct port_dcb_info *dcb = &pi->dcb;
+	struct dcb_app app;
+	int i, err;
+
+	/* zero priority implies remove */
+	app.priority = 0;
+
+	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+		/* Check if app list is exhausted */
+		if (!dcb->app_priority[i].protocolid)
+			break;
+
+		app.protocol = dcb->app_priority[i].protocolid;
+
+		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+			app.priority = dcb->app_priority[i].user_prio_map;
+			app.selector = dcb->app_priority[i].sel_field + 1;
+			err = dcb_ieee_delapp(dev, &app);
+		} else {
+			app.selector = !!(dcb->app_priority[i].sel_field);
+			err = dcb_setapp(dev, &app);
+		}
+
+		if (err) {
+			dev_err(adap->pdev_dev,
+				"Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+				dcb_ver_array[dcb->dcb_version], app.selector,
+				app.protocol, -err);
+			break;
+		}
+	}
+}
+
 /* Finite State machine for Data Center Bridging.
  */
 void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,14 +117,17 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
			/* we're going to use Host DCB */
			dcb->state = CXGB4_DCB_STATE_HOST;
			dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
-			dcb->enabled = 1;
			break;
		}

		case CXGB4_DCB_INPUT_FW_ENABLED: {
			/* we're going to use Firmware DCB */
			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
-			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+			dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+			if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+				dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+			else
+				dcb->supported |= DCB_CAP_DCBX_VER_CEE;
			break;
		}

@@ -145,6 +185,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
			 * state. We need to reset back to a ground state
			 * of incomplete.
			 */
+			cxgb4_dcb_cleanup_apps(dev);
			cxgb4_dcb_state_init(dev);
			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -349,6 +390,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
 {
	struct port_info *pi = netdev2pinfo(dev);

+	/* If DCBx is host-managed, dcb is enabled by outside lldp agents */
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
+		pi->dcb.enabled = enabled;
+		return 0;
+	}
+
	/* Firmware doesn't provide any mechanism to control the DCB state.
	 */
	if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
@@ -394,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
		*up_tc_map = (1 << tc);

	/* prio_type is link strict */
-	*prio_type = 0x2;
+	if (*pgid != 0xF)
+		*prio_type = 0x2;
 }

 static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
				u8 *prio_type, u8 *pgid, u8 *bw_per,
				u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 1);
 }


@@ -409,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
				u8 *prio_type, u8 *pgid, u8 *bw_per,
				u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 0);
 }

 static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -419,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
+	int fw_tc = 7 - tc;
	u32 _pgid;
	int err;

@@ -437,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
	}

	_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
-	_pgid &= ~(0xF << (tc * 4));
-	_pgid |= pgid << (tc * 4);
+	_pgid &= ~(0xF << (fw_tc * 4));
+	_pgid |= pgid << (fw_tc * 4);
	pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -551,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
	    priority >= CXGB4_MAX_PRIORITY)
		*pfccfg = 0;
	else
-		*pfccfg = (pi->dcb.pfcen >> priority) & 1;
+		*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
 }

 /* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -576,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
	pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;

	if (pfccfg)
-		pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+		pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
	else
-		pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+		pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -833,11 +886,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,

 /* Return whether IEEE Data Center Bridging has been negotiated.
  */
-static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev)
+static inline int
+cxgb4_ieee_negotiation_complete(struct net_device *dev,
+				enum cxgb4_dcb_fw_msgs dcb_subtype)
 {
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

+	if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+		return 0;
+
	return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
		(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
@@ -850,7 +908,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
 {
	int prio;

-	if (!cxgb4_ieee_negotiation_complete(dev))
+	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
		return -EINVAL;
	if (!(app->selector && app->protocol))
		return -EINVAL;
@@ -872,7 +930,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 {
	int ret;

-	if (!cxgb4_ieee_negotiation_complete(dev))
+	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
		return -EINVAL;
	if (!(app->selector && app->protocol))
		return -EINVAL;
@@ -1024,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
	pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);

	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
-		pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF;
+		pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;

	INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
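
Most of the cxgb4_dcb.c hunks correct the same ordering bug: the firmware stores priority/traffic-class 0 in the most-significant bit or nibble of its 8-entry fields, while the kernel DCB API indexes from 0 at the LSB, hence the recurring 7 - tc and 7 - priority translations. For example, enabling PFC for priority 3 must set bit 1 << (7 - 3) = 0x10. A standalone model of the index flip:

    /* Standalone model of the firmware's MSB-first priority/TC fields. */
    #include <stdint.h>
    #include <stdio.h>

    static int fw_index(int os_index)
    {
        return 7 - os_index;    /* 8 entries, entry 0 at the MSB */
    }

    static uint8_t pfc_enable(uint8_t pfcen, int priority)
    {
        return pfcen | (uint8_t)(1u << fw_index(priority));
    }

    static uint32_t pgid_set(uint32_t pgid_reg, int tc, uint32_t pgid)
    {
        int shift = fw_index(tc) * 4;   /* one nibble per traffic class */

        pgid_reg &= ~(0xFu << shift);
        return pgid_reg | (pgid << shift);
    }

    int main(void)
    {
        printf("pfcen=%#x\n", pfc_enable(0, 3));    /* prints 0x10 */
        printf("pgid=%#x\n", pgid_set(0, 0, 0x3));  /* prints 0x30000000 */
        return 0;
    }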
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f60070f2519..279873cb6e3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
 #ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

-	return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+	if (!pi->dcb.enabled)
+		return 0;
+
+	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
 #else
	return 0;
 #endif
@@ -2438,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
		SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
-		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
		v |= SUPPORTED_FIBRE;
-	else if (type == FW_PORT_TYPE_BP40_BA)
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_BP40_BA)
		v |= SUPPORTED_40000baseSR4_Full;

	if (caps & FW_PORT_CAP_ANEG)
@@ -6610,6 +6618,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
+	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 5e1b314e11af..39f2b13e66c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
	struct sge *s = &adap->sge;
-	u32 sge_control, sge_conm_ctrl;
+	u32 sge_control, sge_control2, sge_conm_ctrl;
+	unsigned int ingpadboundary, ingpackboundary;
	int ret, egress_threshold;

	/*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
	sge_control = t4_read_reg(adap, SGE_CONTROL);
	s->pktshift = PKTSHIFT_GET(sge_control);
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
-	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
-			    X_INGPADBOUNDARY_SHIFT);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary. T5 introduced the ability to specify these
+	 * separately. The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+			       X_INGPADBOUNDARY_SHIFT);
+	if (is_t4(adap->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}

	if (adap->flags & USING_SOFT_PARAMS)
		ret = t4_sge_init_soft(adap);
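
The t4_sge_init() rework decodes the free-list alignment from two register fields: on T4 the padding boundary alone (1 << (field + 5)), on T5 the maximum of the padding boundary and the packing boundary, where a packing field of 0 is special-cased to mean 16 bytes rather than 1 << 5. Worked example: padding field 0 gives 32 bytes, packing field 1 gives 64, so fl_align is 64. A standalone model of the computation:

    /* Standalone model of the fl_align decode above; the +5 shifts mirror
     * X_INGPADBOUNDARY_SHIFT and INGPACKBOUNDARY_SHIFT_X.
     */
    #include <stdio.h>

    static unsigned int decode_fl_align(unsigned int pad_field,
                                        unsigned int pack_field, int is_t4)
    {
        unsigned int pad = 1u << (pad_field + 5);
        unsigned int pack;

        if (is_t4)
            return pad;

        /* on T5, packing field 0 is special-cased to 16 bytes */
        pack = pack_field ? 1u << (pack_field + 5) : 16;
        return pad > pack ? pad : pack;
    }

    int main(void)
    {
        printf("T4: %u\n", decode_fl_align(0, 0, 1));   /* prints 32 */
        printf("T5: %u\n", decode_fl_align(0, 1, 0));   /* prints 64 */
        return 0;
    }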
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9d9d74e4f09..163a2a14948c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
		     HOSTPAGESIZEPF6(sge_hps) |
		     HOSTPAGESIZEPF7(sge_hps));

-	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY_MASK |
-			 EGRSTATUSPAGESIZE_MASK,
-			 INGPADBOUNDARY(fl_align_log - 5) |
-			 EGRSTATUSPAGESIZE(stat_len != 64));
-
+	if (is_t4(adap->params.chip)) {
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(fl_align_log - 5) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+	} else {
+		/* T5 introduced the separation of the Free List Padding and
+		 * Packing Boundaries. Thus, we can select a smaller Padding
+		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
+		 * Bandwidth, and use a Packing Boundary which is large enough
+		 * to avoid false sharing between CPUs, etc.
+		 *
+		 * For the PCI Link, the smaller the Padding Boundary the
+		 * better. For the Memory Controller, a smaller Padding
+		 * Boundary is better until we cross under the Memory Line
+		 * Size (the minimum unit of transfer to/from Memory). If we
+		 * have a Padding Boundary which is smaller than the Memory
+		 * Line Size, that'll involve a Read-Modify-Write cycle on the
+		 * Memory Controller which is never good. For T5 the smallest
+		 * Padding Boundary which we can select is 32 bytes which is
+		 * larger than any known Memory Controller Line Size so we'll
+		 * use that.
+		 *
+		 * T5 has a different interpretation of the "0" value for the
+		 * Packing Boundary. This corresponds to 16 bytes instead of
+		 * the expected 32 bytes. We never have a Packing Boundary
+		 * less than 32 bytes so we can't use that special value but
+		 * on the other hand, if we wanted 32 bytes, the best we can
+		 * really do is 64 bytes.
+		 */
+		if (fl_align <= 32) {
+			fl_align = 64;
+			fl_align_log = 6;
+		}
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+		t4_set_reg_field(adap, SGE_CONTROL2_A,
+				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+				 INGPACKBOUNDARY_V(fl_align_log -
+						   INGPACKBOUNDARY_SHIFT_X));
+	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1024db5dc13..8d2de1006b08 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
 #define X_INGPADBOUNDARY_SHIFT 5

 #define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A 0x1124
 #define DCASYSTYPE 0x00080000U
 #define RXPKTCPLMODE_MASK 0x00040000U
 #define RXPKTCPLMODE_SHIFT 18
@@ -106,6 +107,7 @@
 #define PKTSHIFT_SHIFT 10
 #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
 #define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define INGPCIEBOUNDARY_32B_X 0
 #define INGPCIEBOUNDARY_MASK 0x00000380U
 #define INGPCIEBOUNDARY_SHIFT 7
 #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
 #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
 #define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
			       >> INGPADBOUNDARY_SHIFT)
+#define INGPACKBOUNDARY_16B_X 0
+#define INGPACKBOUNDARY_SHIFT_X 5
+
+#define INGPACKBOUNDARY_S 16
+#define INGPACKBOUNDARY_M 0x7U
+#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
+#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
+			      & INGPACKBOUNDARY_M)
 #define EGRPCIEBOUNDARY_MASK 0x0000000eU
 #define EGRPCIEBOUNDARY_SHIFT 1
 #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
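
The new INGPACKBOUNDARY_{S,M,V,G} defines follow the shift/mask/insert/extract naming convention used by later cxgb4 register headers: _S is the field's bit offset, _M the unshifted mask, _V(x) positions a value into the field, and _G(x) pulls the field back out of a register word. The same pattern in miniature, with the same geometry (a 3-bit field at bit 16):

    /* Miniature of the _S/_M/_V/_G accessor convention shown above. */
    #include <stdio.h>

    #define FIELD_S     16
    #define FIELD_M     0x7U
    #define FIELD_V(x)  ((x) << FIELD_S)
    #define FIELD_G(x)  (((x) >> FIELD_S) & FIELD_M)

    int main(void)
    {
        unsigned int reg = FIELD_V(0x5U);   /* 0x5 << 16 = 0x50000 */

        printf("reg=%#x field=%#x\n", reg, FIELD_G(reg));
        return 0;
    }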
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
	u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
	u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */

+	/* Decoded Adapter Parameters.
+	 */
+	u32 fl_pg_order; /* large page allocation size */
+	u32 stat_len; /* length of status page at ring end */
+	u32 pktshift; /* padding between CPL & packet data */
+	u32 fl_align; /* response queue message alignment */
+	u32 fl_starve_thres; /* Free List starvation threshold */
+
	/*
	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
	 * The absolute Queue IDs are in a compact range which start at a
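
These new struct sge fields replace the file-scope FL_PG_ORDER/STAT_LEN/PKTSHIFT/FL_ALIGN globals that cxgb4vf/sge.c deletes below: module-wide statics share a single set of decoded parameters across every adapter the driver binds, which breaks as soon as, say, a T4 and a T5 VF with different alignments coexist in one system. The refactor in miniature, with hypothetical names:

    /* Sketch: decoded per-device parameters live in per-adapter state,
     * not in module-wide statics (hypothetical types and names).
     */
    #include <linux/types.h>
    #include <linux/mm.h>   /* PAGE_SIZE */

    struct example_sge {
        u32 fl_pg_order;    /* was: static u32 FL_PG_ORDER */
        u32 fl_align;       /* was: static u32 FL_ALIGN */
    };

    struct example_adapter {
        struct example_sge sge;
    };

    static u32 example_buf_size(const struct example_adapter *adapter,
                                bool large)
    {
        const struct example_sge *s = &adapter->sge;

        return large ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE;
    }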
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index bfa398d91826..0b42bddaf284 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
	CH_DEVICE(0x480d), /* T480-cr */
	CH_DEVICE(0x480e), /* T440-lp-cr */
	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
+	CH_DEVICE(0x4881),
+	CH_DEVICE(0x4882),
+	CH_DEVICE(0x4883),
+	CH_DEVICE(0x4884),
+	CH_DEVICE(0x4885),
+	CH_DEVICE(0x4886),
+	CH_DEVICE(0x4887),
+	CH_DEVICE(0x4888),
	CH_DEVICE(0x5801), /* T520-cr */
	CH_DEVICE(0x5802), /* T522-cr */
	CH_DEVICE(0x5803), /* T540-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 85036e6b42c4..fdd078d7d82c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -51,14 +51,6 @@ | |||
51 | #include "../cxgb4/t4_msg.h" | 51 | #include "../cxgb4/t4_msg.h" |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Decoded Adapter Parameters. | ||
55 | */ | ||
56 | static u32 FL_PG_ORDER; /* large page allocation size */ | ||
57 | static u32 STAT_LEN; /* length of status page at ring end */ | ||
58 | static u32 PKTSHIFT; /* padding between CPL and packet data */ | ||
59 | static u32 FL_ALIGN; /* response queue message alignment */ | ||
60 | |||
61 | /* | ||
62 | * Constants ... | 54 | * Constants ... |
63 | */ | 55 | */ |
64 | enum { | 56 | enum { |
@@ -102,12 +94,6 @@ enum { | |||
102 | MAX_TIMER_TX_RECLAIM = 100, | 94 | MAX_TIMER_TX_RECLAIM = 100, |
103 | 95 | ||
104 | /* | 96 | /* |
105 | * An FL with <= FL_STARVE_THRES buffers is starving and a periodic | ||
106 | * timer will attempt to refill it. | ||
107 | */ | ||
108 | FL_STARVE_THRES = 4, | ||
109 | |||
110 | /* | ||
111 | * Suspend an Ethernet TX queue with fewer available descriptors than | 97 | * Suspend an Ethernet TX queue with fewer available descriptors than |
112 | * this. We always want to have room for a maximum sized packet: | 98 | * this. We always want to have room for a maximum sized packet: |
113 | * inline immediate data + MAX_SKB_FRAGS. This is the same as | 99 | * inline immediate data + MAX_SKB_FRAGS. This is the same as |
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl) | |||
264 | 250 | ||
265 | /** | 251 | /** |
266 | * fl_starving - return whether a Free List is starving. | 252 | * fl_starving - return whether a Free List is starving. |
253 | * @adapter: pointer to the adapter | ||
267 | * @fl: the Free List | 254 | * @fl: the Free List |
268 | * | 255 | * |
269 | * Tests specified Free List to see whether the number of buffers | 256 | * Tests specified Free List to see whether the number of buffers |
270 | * available to the hardware has falled below our "starvation" | 257 | * available to the hardware has falled below our "starvation" |
271 | * threshold. | 258 | * threshold. |
272 | */ | 259 | */ |
273 | static inline bool fl_starving(const struct sge_fl *fl) | 260 | static inline bool fl_starving(const struct adapter *adapter, |
261 | const struct sge_fl *fl) | ||
274 | { | 262 | { |
275 | return fl->avail - fl->pend_cred <= FL_STARVE_THRES; | 263 | const struct sge *s = &adapter->sge; |
264 | |||
265 | return fl->avail - fl->pend_cred <= s->fl_starve_thres; | ||
276 | } | 266 | } |
277 | 267 | ||
278 | /** | 268 | /** |
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter, | |||
457 | 447 | ||
458 | /** | 448 | /** |
459 | * get_buf_size - return the size of an RX Free List buffer. | 449 | * get_buf_size - return the size of an RX Free List buffer. |
450 | * @adapter: pointer to the associated adapter | ||
460 | * @sdesc: pointer to the software buffer descriptor | 451 | * @sdesc: pointer to the software buffer descriptor |
461 | */ | 452 | */ |
462 | static inline int get_buf_size(const struct rx_sw_desc *sdesc) | 453 | static inline int get_buf_size(const struct adapter *adapter, |
454 | const struct rx_sw_desc *sdesc) | ||
463 | { | 455 | { |
464 | return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) | 456 | const struct sge *s = &adapter->sge; |
465 | ? (PAGE_SIZE << FL_PG_ORDER) | 457 | |
466 | : PAGE_SIZE; | 458 | return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) |
459 | ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); | ||
467 | } | 460 | } |
468 | 461 | ||
469 | /** | 462 | /** |
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) | |||
483 | 476 | ||
484 | if (is_buf_mapped(sdesc)) | 477 | if (is_buf_mapped(sdesc)) |
485 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), | 478 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), |
486 | get_buf_size(sdesc), PCI_DMA_FROMDEVICE); | 479 | get_buf_size(adapter, sdesc), |
480 | PCI_DMA_FROMDEVICE); | ||
487 | put_page(sdesc->page); | 481 | put_page(sdesc->page); |
488 | sdesc->page = NULL; | 482 | sdesc->page = NULL; |
489 | if (++fl->cidx == fl->size) | 483 | if (++fl->cidx == fl->size) |
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) | |||
511 | 505 | ||
512 | if (is_buf_mapped(sdesc)) | 506 | if (is_buf_mapped(sdesc)) |
513 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), | 507 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), |
514 | get_buf_size(sdesc), PCI_DMA_FROMDEVICE); | 508 | get_buf_size(adapter, sdesc), |
509 | PCI_DMA_FROMDEVICE); | ||
515 | sdesc->page = NULL; | 510 | sdesc->page = NULL; |
516 | if (++fl->cidx == fl->size) | 511 | if (++fl->cidx == fl->size) |
517 | fl->cidx = 0; | 512 | fl->cidx = 0; |
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz) | |||
589 | static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | 584 | static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, |
590 | int n, gfp_t gfp) | 585 | int n, gfp_t gfp) |
591 | { | 586 | { |
587 | struct sge *s = &adapter->sge; | ||
592 | struct page *page; | 588 | struct page *page; |
593 | dma_addr_t dma_addr; | 589 | dma_addr_t dma_addr; |
594 | unsigned int cred = fl->avail; | 590 | unsigned int cred = fl->avail; |
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | |||
608 | * If we don't support large pages, drop directly into the small page | 604 | * If we don't support large pages, drop directly into the small page |
609 | * allocation code. | 605 | * allocation code. |
610 | */ | 606 | */ |
611 | if (FL_PG_ORDER == 0) | 607 | if (s->fl_pg_order == 0) |
612 | goto alloc_small_pages; | 608 | goto alloc_small_pages; |
613 | 609 | ||
614 | while (n) { | 610 | while (n) { |
615 | page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, | 611 | page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, |
616 | FL_PG_ORDER); | 612 | s->fl_pg_order); |
617 | if (unlikely(!page)) { | 613 | if (unlikely(!page)) { |
618 | /* | 614 | /* |
619 | * We've failed in our attempt to allocate a "large | 615 | * We've failed in our attempt to allocate a "large |
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | |||
623 | fl->large_alloc_failed++; | 619 | fl->large_alloc_failed++; |
624 | break; | 620 | break; |
625 | } | 621 | } |
626 | poison_buf(page, PAGE_SIZE << FL_PG_ORDER); | 622 | poison_buf(page, PAGE_SIZE << s->fl_pg_order); |
627 | 623 | ||
628 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, | 624 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, |
629 | PAGE_SIZE << FL_PG_ORDER, | 625 | PAGE_SIZE << s->fl_pg_order, |
630 | PCI_DMA_FROMDEVICE); | 626 | PCI_DMA_FROMDEVICE); |
631 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { | 627 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { |
632 | /* | 628 | /* |
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | |||
637 | * because DMA mapping resources are typically | 633 | * because DMA mapping resources are typically |
638 | * critical resources once they become scarce. | 634 | * critical resources once they become scarce. |
639 | */ | 635 | */ |
640 | __free_pages(page, FL_PG_ORDER); | 636 | __free_pages(page, s->fl_pg_order); |
641 | goto out; | 637 | goto out; |
642 | } | 638 | } |
643 | dma_addr |= RX_LARGE_BUF; | 639 | dma_addr |= RX_LARGE_BUF; |
@@ -693,7 +689,7 @@ out: | |||
693 | fl->pend_cred += cred; | 689 | fl->pend_cred += cred; |
694 | ring_fl_db(adapter, fl); | 690 | ring_fl_db(adapter, fl); |
695 | 691 | ||
696 | if (unlikely(fl_starving(fl))) { | 692 | if (unlikely(fl_starving(adapter, fl))) { |
697 | smp_wmb(); | 693 | smp_wmb(); |
698 | set_bit(fl->cntxt_id, adapter->sge.starving_fl); | 694 | set_bit(fl->cntxt_id, adapter->sge.starving_fl); |
699 | } | 695 | } |
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl) | |||
1468 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | 1464 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, |
1469 | const struct cpl_rx_pkt *pkt) | 1465 | const struct cpl_rx_pkt *pkt) |
1470 | { | 1466 | { |
1467 | struct adapter *adapter = rxq->rspq.adapter; | ||
1468 | struct sge *s = &adapter->sge; | ||
1471 | int ret; | 1469 | int ret; |
1472 | struct sk_buff *skb; | 1470 | struct sk_buff *skb; |
1473 | 1471 | ||
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | |||
1478 | return; | 1476 | return; |
1479 | } | 1477 | } |
1480 | 1478 | ||
1481 | copy_frags(skb, gl, PKTSHIFT); | 1479 | copy_frags(skb, gl, s->pktshift); |
1482 | skb->len = gl->tot_len - PKTSHIFT; | 1480 | skb->len = gl->tot_len - s->pktshift; |
1483 | skb->data_len = skb->len; | 1481 | skb->data_len = skb->len; |
1484 | skb->truesize += skb->data_len; | 1482 | skb->truesize += skb->data_len; |
1485 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1483 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |||
1516 | bool csum_ok = pkt->csum_calc && !pkt->err_vec && | 1514 | bool csum_ok = pkt->csum_calc && !pkt->err_vec && |
1517 | (rspq->netdev->features & NETIF_F_RXCSUM); | 1515 | (rspq->netdev->features & NETIF_F_RXCSUM); |
1518 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); | 1516 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1517 | struct adapter *adapter = rspq->adapter; | ||
1518 | struct sge *s = &adapter->sge; | ||
1519 | 1519 | ||
1520 | /* | 1520 | /* |
1521 | * If this is a good TCP packet and we have Generic Receive Offload | 1521 | * If this is a good TCP packet and we have Generic Receive Offload |
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |||
1537 | rxq->stats.rx_drops++; | 1537 | rxq->stats.rx_drops++; |
1538 | return 0; | 1538 | return 0; |
1539 | } | 1539 | } |
1540 | __skb_pull(skb, PKTSHIFT); | 1540 | __skb_pull(skb, s->pktshift); |
1541 | skb->protocol = eth_type_trans(skb, rspq->netdev); | 1541 | skb->protocol = eth_type_trans(skb, rspq->netdev); |
1542 | skb_record_rx_queue(skb, rspq->idx); | 1542 | skb_record_rx_queue(skb, rspq->idx); |
1543 | rxq->stats.pkts++; | 1543 | rxq->stats.pkts++; |
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq) | |||
1648 | static int process_responses(struct sge_rspq *rspq, int budget) | 1648 | static int process_responses(struct sge_rspq *rspq, int budget) |
1649 | { | 1649 | { |
1650 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); | 1650 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1651 | struct adapter *adapter = rspq->adapter; | ||
1652 | struct sge *s = &adapter->sge; | ||
1651 | int budget_left = budget; | 1653 | int budget_left = budget; |
1652 | 1654 | ||
1653 | while (likely(budget_left)) { | 1655 | while (likely(budget_left)) { |
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) | |||
1697 | BUG_ON(frag >= MAX_SKB_FRAGS); | 1699 | BUG_ON(frag >= MAX_SKB_FRAGS); |
1698 | BUG_ON(rxq->fl.avail == 0); | 1700 | BUG_ON(rxq->fl.avail == 0); |
1699 | sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; | 1701 | sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; |
1700 | bufsz = get_buf_size(sdesc); | 1702 | bufsz = get_buf_size(adapter, sdesc); |
1701 | fp->page = sdesc->page; | 1703 | fp->page = sdesc->page; |
1702 | fp->offset = rspq->offset; | 1704 | fp->offset = rspq->offset; |
1703 | fp->size = min(bufsz, len); | 1705 | fp->size = min(bufsz, len); |
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) | |||
1726 | */ | 1728 | */ |
1727 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); | 1729 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); |
1728 | if (likely(ret == 0)) | 1730 | if (likely(ret == 0)) |
1729 | rspq->offset += ALIGN(fp->size, FL_ALIGN); | 1731 | rspq->offset += ALIGN(fp->size, s->fl_align); |
1730 | else | 1732 | else |
1731 | restore_rx_bufs(&gl, &rxq->fl, frag); | 1733 | restore_rx_bufs(&gl, &rxq->fl, frag); |
1732 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { | 1734 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { |
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data) | |||
1963 | * schedule napi but the FL is no longer starving. | 1965 | * schedule napi but the FL is no longer starving. |
1964 | * No biggie. | 1966 | * No biggie. |
1965 | */ | 1967 | */ |
1966 | if (fl_starving(fl)) { | 1968 | if (fl_starving(adapter, fl)) { |
1967 | struct sge_eth_rxq *rxq; | 1969 | struct sge_eth_rxq *rxq; |
1968 | 1970 | ||
1969 | rxq = container_of(fl, struct sge_eth_rxq, fl); | 1971 | rxq = container_of(fl, struct sge_eth_rxq, fl); |
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
2047 | int intr_dest, | 2049 | int intr_dest, |
2048 | struct sge_fl *fl, rspq_handler_t hnd) | 2050 | struct sge_fl *fl, rspq_handler_t hnd) |
2049 | { | 2051 | { |
2052 | struct sge *s = &adapter->sge; | ||
2050 | struct port_info *pi = netdev_priv(dev); | 2053 | struct port_info *pi = netdev_priv(dev); |
2051 | struct fw_iq_cmd cmd, rpl; | 2054 | struct fw_iq_cmd cmd, rpl; |
2052 | int ret, iqandst, flsz = 0; | 2055 | int ret, iqandst, flsz = 0; |
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
2117 | fl->size = roundup(fl->size, FL_PER_EQ_UNIT); | 2120 | fl->size = roundup(fl->size, FL_PER_EQ_UNIT); |
2118 | fl->desc = alloc_ring(adapter->pdev_dev, fl->size, | 2121 | fl->desc = alloc_ring(adapter->pdev_dev, fl->size, |
2119 | sizeof(__be64), sizeof(struct rx_sw_desc), | 2122 | sizeof(__be64), sizeof(struct rx_sw_desc), |
2120 | &fl->addr, &fl->sdesc, STAT_LEN); | 2123 | &fl->addr, &fl->sdesc, s->stat_len); |
2121 | if (!fl->desc) { | 2124 | if (!fl->desc) { |
2122 | ret = -ENOMEM; | 2125 | ret = -ENOMEM; |
2123 | goto err; | 2126 | goto err; |
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
2129 | * free list ring) in Egress Queue Units. | 2132 | * free list ring) in Egress Queue Units. |
2130 | */ | 2133 | */ |
2131 | flsz = (fl->size / FL_PER_EQ_UNIT + | 2134 | flsz = (fl->size / FL_PER_EQ_UNIT + |
2132 | STAT_LEN / EQ_UNIT); | 2135 | s->stat_len / EQ_UNIT); |
2133 | 2136 | ||
2134 | /* | 2137 | /* |
2135 | * Fill in all the relevant firmware Ingress Queue Command | 2138 | * Fill in all the relevant firmware Ingress Queue Command |
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2217 | struct net_device *dev, struct netdev_queue *devq, | 2220 | struct net_device *dev, struct netdev_queue *devq, |
2218 | unsigned int iqid) | 2221 | unsigned int iqid) |
2219 | { | 2222 | { |
2223 | struct sge *s = &adapter->sge; | ||
2220 | int ret, nentries; | 2224 | int ret, nentries; |
2221 | struct fw_eq_eth_cmd cmd, rpl; | 2225 | struct fw_eq_eth_cmd cmd, rpl; |
2222 | struct port_info *pi = netdev_priv(dev); | 2226 | struct port_info *pi = netdev_priv(dev); |
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2225 | * Calculate the size of the hardware TX Queue (including the Status | 2229 | * Calculate the size of the hardware TX Queue (including the Status |
2226 | * Page on the end of the TX Queue) in units of TX Descriptors. | 2230 | * Page on the end of the TX Queue) in units of TX Descriptors. |
2227 | */ | 2231 | */ |
2228 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | 2232 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
2229 | 2233 | ||
2230 | /* | 2234 | /* |
2231 | * Allocate the hardware ring for the TX ring (with space for its | 2235 | * Allocate the hardware ring for the TX ring (with space for its |
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2234 | txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, | 2238 | txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, |
2235 | sizeof(struct tx_desc), | 2239 | sizeof(struct tx_desc), |
2236 | sizeof(struct tx_sw_desc), | 2240 | sizeof(struct tx_sw_desc), |
2237 | &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); | 2241 | &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); |
2238 | if (!txq->q.desc) | 2242 | if (!txq->q.desc) |
2239 | return -ENOMEM; | 2243 | return -ENOMEM; |
2240 | 2244 | ||
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2307 | */ | 2311 | */ |
2308 | static void free_txq(struct adapter *adapter, struct sge_txq *tq) | 2312 | static void free_txq(struct adapter *adapter, struct sge_txq *tq) |
2309 | { | 2313 | { |
2314 | struct sge *s = &adapter->sge; | ||
2315 | |||
2310 | dma_free_coherent(adapter->pdev_dev, | 2316 | dma_free_coherent(adapter->pdev_dev, |
2311 | tq->size * sizeof(*tq->desc) + STAT_LEN, | 2317 | tq->size * sizeof(*tq->desc) + s->stat_len, |
2312 | tq->desc, tq->phys_addr); | 2318 | tq->desc, tq->phys_addr); |
2313 | tq->cntxt_id = 0; | 2319 | tq->cntxt_id = 0; |
2314 | tq->sdesc = NULL; | 2320 | tq->sdesc = NULL; |
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq) | |||
2322 | static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, | 2328 | static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, |
2323 | struct sge_fl *fl) | 2329 | struct sge_fl *fl) |
2324 | { | 2330 | { |
2331 | struct sge *s = &adapter->sge; | ||
2325 | unsigned int flid = fl ? fl->cntxt_id : 0xffff; | 2332 | unsigned int flid = fl ? fl->cntxt_id : 0xffff; |
2326 | 2333 | ||
2327 | t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, | 2334 | t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, |
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, | |||
2337 | if (fl) { | 2344 | if (fl) { |
2338 | free_rx_bufs(adapter, fl, fl->avail); | 2345 | free_rx_bufs(adapter, fl, fl->avail); |
2339 | dma_free_coherent(adapter->pdev_dev, | 2346 | dma_free_coherent(adapter->pdev_dev, |
2340 | fl->size * sizeof(*fl->desc) + STAT_LEN, | 2347 | fl->size * sizeof(*fl->desc) + s->stat_len, |
2341 | fl->desc, fl->addr); | 2348 | fl->desc, fl->addr); |
2342 | kfree(fl->sdesc); | 2349 | kfree(fl->sdesc); |
2343 | fl->sdesc = NULL; | 2350 | fl->sdesc = NULL; |
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter) | |||
2423 | u32 fl0 = sge_params->sge_fl_buffer_size[0]; | 2430 | u32 fl0 = sge_params->sge_fl_buffer_size[0]; |
2424 | u32 fl1 = sge_params->sge_fl_buffer_size[1]; | 2431 | u32 fl1 = sge_params->sge_fl_buffer_size[1]; |
2425 | struct sge *s = &adapter->sge; | 2432 | struct sge *s = &adapter->sge; |
2433 | unsigned int ingpadboundary, ingpackboundary; | ||
2426 | 2434 | ||
2427 | /* | 2435 | /* |
2428 | * Start by vetting the basic SGE parameters which have been set up by | 2436 | * Start by vetting the basic SGE parameters which have been set up by |
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter) | |||
2443 | * Now translate the adapter parameters into our internal forms. | 2451 | * Now translate the adapter parameters into our internal forms. |
2444 | */ | 2452 | */ |
2445 | if (fl1) | 2453 | if (fl1) |
2446 | FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; | 2454 | s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; |
2447 | STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) | 2455 | s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) |
2448 | ? 128 : 64); | 2456 | ? 128 : 64); |
2449 | PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); | 2457 | s->pktshift = PKTSHIFT_GET(sge_params->sge_control); |
2450 | FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + | 2458 | |
2451 | SGE_INGPADBOUNDARY_SHIFT); | 2459 | /* T4 uses a single control field to specify both the PCIe Padding and |
2460 | * Packing Boundary. T5 introduced the ability to specify these | ||
2461 | * separately. The actual Ingress Packet Data alignment boundary | ||
2462 | * within Packed Buffer Mode is the maximum of these two | ||
2463 | * specifications. (Note that it makes no real practical sense to | ||
2464 | * have the Padding Boundary be larger than the Packing Boundary but you | ||
2465 | * could set the chip up that way and, in fact, legacy T4 code would | ||
2466 | * end up doing this because it would initialize the Padding Boundary and | ||
2467 | * leave the Packing Boundary initialized to 0 (16 bytes).) | ||
2468 | */ | ||
2469 | ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + | ||
2470 | X_INGPADBOUNDARY_SHIFT); | ||
2471 | if (is_t4(adapter->params.chip)) { | ||
2472 | s->fl_align = ingpadboundary; | ||
2473 | } else { | ||
2474 | /* T5 has a different interpretation of one of the PCIe Packing | ||
2475 | * Boundary values. | ||
2476 | */ | ||
2477 | ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); | ||
2478 | if (ingpackboundary == INGPACKBOUNDARY_16B_X) | ||
2479 | ingpackboundary = 16; | ||
2480 | else | ||
2481 | ingpackboundary = 1 << (ingpackboundary + | ||
2482 | INGPACKBOUNDARY_SHIFT_X); | ||
2483 | |||
2484 | s->fl_align = max(ingpadboundary, ingpackboundary); | ||
2485 | } | ||
2486 | |||
2487 | /* A FL with <= fl_starve_thres buffers is starving and a periodic | ||
2488 | * timer will attempt to refill it. This needs to be larger than the | ||
2489 | * SGE's Egress Congestion Threshold. If it isn't, then we can get | ||
2490 | * stuck waiting for new packets while the SGE is waiting for us to | ||
2491 | * give it more Free List entries. (Note that the SGE's Egress | ||
2492 | * Congestion Threshold is in units of 2 Free List pointers.) | ||
2493 | */ | ||
2494 | s->fl_starve_thres | ||
2495 | = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; | ||
2452 | 2496 | ||
2453 | /* | 2497 | /* |
2454 | * Set up tasklet timers. | 2498 | * Set up tasklet timers. |
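The two comments above boil down to a few lines of arithmetic; an illustrative sketch of both derivations, with the chip test and register fields passed in as plain values (the real code extracts them with accessor macros):

/* Effective ingress packet alignment: T4 only has the Padding Boundary;
 * T5 and later must also honor the separately programmed Packing
 * Boundary, so the stricter (larger) of the two wins.
 */
static unsigned int fl_align_sketch(bool is_t4, unsigned int ingpadboundary,
				    unsigned int ingpackboundary)
{
	return is_t4 ? ingpadboundary
		     : max(ingpadboundary, ingpackboundary);
}

/* Starvation threshold: the SGE's Egress Congestion Threshold counts in
 * units of 2 Free List pointers, so double it and add one so we refill
 * strictly before the SGE stalls waiting on us.
 */
static unsigned int fl_starve_thres_sketch(unsigned int egrthreshold)
{
	return egrthreshold * 2 + 1;
}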
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 95df61dcb4ce..4b6a6d14d86d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | |||
@@ -134,11 +134,13 @@ struct dev_params { | |||
134 | */ | 134 | */ |
135 | struct sge_params { | 135 | struct sge_params { |
136 | u32 sge_control; /* padding, boundaries, lengths, etc. */ | 136 | u32 sge_control; /* padding, boundaries, lengths, etc. */ |
137 | u32 sge_control2; /* T5: more of the same */ | ||
137 | u32 sge_host_page_size; /* RDMA page sizes */ | 138 | u32 sge_host_page_size; /* RDMA page sizes */ |
138 | u32 sge_queues_per_page; /* RDMA queues/page */ | 139 | u32 sge_queues_per_page; /* RDMA queues/page */ |
139 | u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ | 140 | u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ |
140 | u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ | 141 | u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ |
141 | u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ | 142 | u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ |
143 | u32 sge_congestion_control; /* congestion thresholds, etc. */ | ||
142 | u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ | 144 | u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ |
143 | u32 sge_timer_value_2_and_3; | 145 | u32 sge_timer_value_2_and_3; |
144 | u32 sge_timer_value_4_and_5; | 146 | u32 sge_timer_value_4_and_5; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index e984fdc48ba2..1e896b923234 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter) | |||
468 | sge_params->sge_timer_value_2_and_3 = vals[5]; | 468 | sge_params->sge_timer_value_2_and_3 = vals[5]; |
469 | sge_params->sge_timer_value_4_and_5 = vals[6]; | 469 | sge_params->sge_timer_value_4_and_5 = vals[6]; |
470 | 470 | ||
471 | /* T4 uses a single control field to specify both the PCIe Padding and | ||
472 | * Packing Boundary. T5 introduced the ability to specify these | ||
473 | * separately with the Padding Boundary in SGE_CONTROL and the Packing | ||
474 | * Boundary in SGE_CONTROL2. So for T5 and later we need to grab | ||
475 | * SGE_CONTROL2 in order to determine how ingress packet data will be | ||
476 | * laid out in Packed Buffer Mode. Unfortunately, older versions of | ||
477 | * the firmware won't let us retrieve SGE_CONTROL2 so if we get a | ||
478 | * failure grabbing it we throw an error since we can't figure out the | ||
479 | * right value. | ||
480 | */ | ||
481 | if (!is_t4(adapter->params.chip)) { | ||
482 | params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | | ||
483 | FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A)); | ||
484 | v = t4vf_query_params(adapter, 1, params, vals); | ||
485 | if (v != FW_SUCCESS) { | ||
486 | dev_err(adapter->pdev_dev, | ||
487 | "Unable to get SGE Control2; " | ||
488 | "probably old firmware.\n"); | ||
489 | return v; | ||
490 | } | ||
491 | sge_params->sge_control2 = vals[0]; | ||
492 | } | ||
493 | |||
471 | params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | | 494 | params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | |
472 | FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); | 495 | FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); |
473 | v = t4vf_query_params(adapter, 1, params, vals); | 496 | params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | |
497 | FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL)); | ||
498 | v = t4vf_query_params(adapter, 2, params, vals); | ||
474 | if (v) | 499 | if (v) |
475 | return v; | 500 | return v; |
476 | sge_params->sge_ingress_rx_threshold = vals[0]; | 501 | sge_params->sge_ingress_rx_threshold = vals[0]; |
502 | sge_params->sge_congestion_control = vals[1]; | ||
477 | 503 | ||
478 | return 0; | 504 | return 0; |
479 | } | 505 | } |
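A VF cannot read SGE registers directly; every value above comes back through a firmware mailbox call, and independent registers can share one round trip. A sketch of the batching pattern (the wrapper is hypothetical; the macros and t4vf_query_params() mirror the hunk above):

static int query_two_regs_sketch(struct adapter *adapter,
				 u32 reg0, u32 reg1, u32 *vals)
{
	u32 params[2];

	params[0] = FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		    FW_PARAMS_PARAM_XYZ(reg0);
	params[1] = FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		    FW_PARAMS_PARAM_XYZ(reg1);

	/* One mailbox transaction fills both slots of vals[] */
	return t4vf_query_params(adapter, 2, params, vals);
}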
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index 69dfd3c9e529..0be6850be8a2 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c | |||
@@ -86,7 +86,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) | |||
86 | int i; | 86 | int i; |
87 | 87 | ||
88 | enic_rfs_timer_stop(enic); | 88 | enic_rfs_timer_stop(enic); |
89 | spin_lock(&enic->rfs_h.lock); | 89 | spin_lock_bh(&enic->rfs_h.lock); |
90 | enic->rfs_h.free = 0; | 90 | enic->rfs_h.free = 0; |
91 | for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { | 91 | for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { |
92 | struct hlist_head *hhead; | 92 | struct hlist_head *hhead; |
@@ -100,7 +100,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) | |||
100 | kfree(n); | 100 | kfree(n); |
101 | } | 101 | } |
102 | } | 102 | } |
103 | spin_unlock(&enic->rfs_h.lock); | 103 | spin_unlock_bh(&enic->rfs_h.lock); |
104 | } | 104 | } |
105 | 105 | ||
106 | struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id) | 106 | struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id) |
@@ -128,7 +128,7 @@ void enic_flow_may_expire(unsigned long data) | |||
128 | bool res; | 128 | bool res; |
129 | int j; | 129 | int j; |
130 | 130 | ||
131 | spin_lock(&enic->rfs_h.lock); | 131 | spin_lock_bh(&enic->rfs_h.lock); |
132 | for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { | 132 | for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { |
133 | struct hlist_head *hhead; | 133 | struct hlist_head *hhead; |
134 | struct hlist_node *tmp; | 134 | struct hlist_node *tmp; |
@@ -148,7 +148,7 @@ void enic_flow_may_expire(unsigned long data) | |||
148 | } | 148 | } |
149 | } | 149 | } |
150 | } | 150 | } |
151 | spin_unlock(&enic->rfs_h.lock); | 151 | spin_unlock_bh(&enic->rfs_h.lock); |
152 | mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); | 152 | mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); |
153 | } | 153 | } |
154 | 154 | ||
@@ -183,7 +183,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, | |||
183 | return -EPROTONOSUPPORT; | 183 | return -EPROTONOSUPPORT; |
184 | 184 | ||
185 | tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; | 185 | tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; |
186 | spin_lock(&enic->rfs_h.lock); | 186 | spin_lock_bh(&enic->rfs_h.lock); |
187 | n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); | 187 | n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); |
188 | 188 | ||
189 | if (n) { /* entry already present */ | 189 | if (n) { /* entry already present */ |
@@ -277,7 +277,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, | |||
277 | } | 277 | } |
278 | 278 | ||
279 | ret_unlock: | 279 | ret_unlock: |
280 | spin_unlock(&enic->rfs_h.lock); | 280 | spin_unlock_bh(&enic->rfs_h.lock); |
281 | return res; | 281 | return res; |
282 | } | 282 | } |
283 | 283 | ||
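The switch to the _bh lock variants is forced by enic_flow_may_expire(): it is a timer callback, so it takes rfs_h.lock from softirq context, and a process-context taker on the same CPU must block softirqs first or the timer can deadlock against the interrupted critical section. A minimal sketch of the rule (structure layout assumed from the hunks):

static void rfs_table_touch_sketch(struct enic *enic)
{
	spin_lock_bh(&enic->rfs_h.lock);	/* softirqs off on this CPU */
	/* ... add or expire entries in the RFS hash table ... */
	spin_unlock_bh(&enic->rfs_h.lock);
}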
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 929bfe70080a..73cf1653a4a3 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) | |||
940 | struct vnic_rq_buf *buf = rq->to_use; | 940 | struct vnic_rq_buf *buf = rq->to_use; |
941 | 941 | ||
942 | if (buf->os_buf) { | 942 | if (buf->os_buf) { |
943 | buf = buf->next; | 943 | enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, |
944 | rq->to_use = buf; | 944 | buf->len); |
945 | rq->ring.desc_avail--; | ||
946 | if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { | ||
947 | /* Adding write memory barrier prevents compiler and/or | ||
948 | * CPU reordering, thus avoiding descriptor posting | ||
949 | * before descriptor is initialized. Otherwise, hardware | ||
950 | * can read stale descriptor fields. | ||
951 | */ | ||
952 | wmb(); | ||
953 | iowrite32(buf->index, &rq->ctrl->posted_index); | ||
954 | } | ||
955 | 945 | ||
956 | return 0; | 946 | return 0; |
957 | } | 947 | } |
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
1037 | enic->rq_truncated_pkts++; | 1027 | enic->rq_truncated_pkts++; |
1038 | } | 1028 | } |
1039 | 1029 | ||
1030 | pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, | ||
1031 | PCI_DMA_FROMDEVICE); | ||
1040 | dev_kfree_skb_any(skb); | 1032 | dev_kfree_skb_any(skb); |
1033 | buf->os_buf = NULL; | ||
1041 | 1034 | ||
1042 | return; | 1035 | return; |
1043 | } | 1036 | } |
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
1088 | /* Buffer overflow | 1081 | /* Buffer overflow |
1089 | */ | 1082 | */ |
1090 | 1083 | ||
1084 | pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, | ||
1085 | PCI_DMA_FROMDEVICE); | ||
1091 | dev_kfree_skb_any(skb); | 1086 | dev_kfree_skb_any(skb); |
1087 | buf->os_buf = NULL; | ||
1092 | } | 1088 | } |
1093 | } | 1089 | } |
1094 | 1090 | ||
@@ -1674,13 +1670,13 @@ static int enic_stop(struct net_device *netdev) | |||
1674 | 1670 | ||
1675 | enic_dev_disable(enic); | 1671 | enic_dev_disable(enic); |
1676 | 1672 | ||
1677 | local_bh_disable(); | ||
1678 | for (i = 0; i < enic->rq_count; i++) { | 1673 | for (i = 0; i < enic->rq_count; i++) { |
1679 | napi_disable(&enic->napi[i]); | 1674 | napi_disable(&enic->napi[i]); |
1675 | local_bh_disable(); | ||
1680 | while (!enic_poll_lock_napi(&enic->rq[i])) | 1676 | while (!enic_poll_lock_napi(&enic->rq[i])) |
1681 | mdelay(1); | 1677 | mdelay(1); |
1678 | local_bh_enable(); | ||
1682 | } | 1679 | } |
1683 | local_bh_enable(); | ||
1684 | 1680 | ||
1685 | netif_carrier_off(netdev); | 1681 | netif_carrier_off(netdev); |
1686 | netif_tx_disable(netdev); | 1682 | netif_tx_disable(netdev); |
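The enic_stop() reshuffle is a sleep-context fix: napi_disable() may sleep, so it cannot run under local_bh_disable(); only the busy-wait for each queue's poll lock needs bottom halves off. A sketch of the corrected per-queue order:

static void stop_rx_queue_sketch(struct enic *enic, unsigned int i)
{
	napi_disable(&enic->napi[i]);	/* may sleep: BHs must stay enabled */

	local_bh_disable();		/* fence out the busy-poll path */
	while (!enic_poll_lock_napi(&enic->rq[i]))
		mdelay(1);
	local_bh_enable();
}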
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9a18e7930b31..597c463e384d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) | |||
4309 | return -EOPNOTSUPP; | 4309 | return -EOPNOTSUPP; |
4310 | 4310 | ||
4311 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); | 4311 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); |
4312 | if (!br_spec) | ||
4313 | return -EINVAL; | ||
4312 | 4314 | ||
4313 | nla_for_each_nested(attr, br_spec, rem) { | 4315 | nla_for_each_nested(attr, br_spec, rem) { |
4314 | if (nla_type(attr) != IFLA_BRIDGE_MODE) | 4316 | if (nla_type(attr) != IFLA_BRIDGE_MODE) |
4315 | continue; | 4317 | continue; |
4316 | 4318 | ||
4319 | if (nla_len(attr) < sizeof(mode)) | ||
4320 | return -EINVAL; | ||
4321 | |||
4317 | mode = nla_get_u16(attr); | 4322 | mode = nla_get_u16(attr); |
4318 | if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) | 4323 | if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) |
4319 | return -EINVAL; | 4324 | return -EINVAL; |
@@ -4421,6 +4426,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, | |||
4421 | "Disabled VxLAN offloads for UDP port %d\n", | 4426 | "Disabled VxLAN offloads for UDP port %d\n", |
4422 | be16_to_cpu(port)); | 4427 | be16_to_cpu(port)); |
4423 | } | 4428 | } |
4429 | |||
4430 | static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) | ||
4431 | { | ||
4432 | return vxlan_gso_check(skb); | ||
4433 | } | ||
4424 | #endif | 4434 | #endif |
4425 | 4435 | ||
4426 | static const struct net_device_ops be_netdev_ops = { | 4436 | static const struct net_device_ops be_netdev_ops = { |
@@ -4450,6 +4460,7 @@ static const struct net_device_ops be_netdev_ops = { | |||
4450 | #ifdef CONFIG_BE2NET_VXLAN | 4460 | #ifdef CONFIG_BE2NET_VXLAN |
4451 | .ndo_add_vxlan_port = be_add_vxlan_port, | 4461 | .ndo_add_vxlan_port = be_add_vxlan_port, |
4452 | .ndo_del_vxlan_port = be_del_vxlan_port, | 4462 | .ndo_del_vxlan_port = be_del_vxlan_port, |
4463 | .ndo_gso_check = be_gso_check, | ||
4453 | #endif | 4464 | #endif |
4454 | }; | 4465 | }; |
4455 | 4466 | ||
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 81b96cf87574..3dca494797bd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len) | |||
298 | return bufaddr; | 298 | return bufaddr; |
299 | } | 299 | } |
300 | 300 | ||
301 | static void swap_buffer2(void *dst_buf, void *src_buf, int len) | ||
302 | { | ||
303 | int i; | ||
304 | unsigned int *src = src_buf; | ||
305 | unsigned int *dst = dst_buf; | ||
306 | |||
307 | for (i = 0; i < len; i += 4, src++, dst++) | ||
308 | *dst = swab32p(src); | ||
309 | } | ||
310 | |||
301 | static void fec_dump(struct net_device *ndev) | 311 | static void fec_dump(struct net_device *ndev) |
302 | { | 312 | { |
303 | struct fec_enet_private *fep = netdev_priv(ndev); | 313 | struct fec_enet_private *fep = netdev_priv(ndev); |
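swap_buffer2() lets the copybreak path byte-swap while it copies instead of copying and then swapping in place -- one pass over the packet instead of two. A standalone sketch of the same loop (length assumed 4-byte padded, as in the driver):

#include <linux/swab.h>

static void swap_copy_sketch(void *dst_buf, const void *src_buf, int len)
{
	const u32 *src = src_buf;
	u32 *dst = dst_buf;
	int i;

	for (i = 0; i < len; i += 4)
		*dst++ = swab32p(src++);	/* load and byte-reverse each word */
}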
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff | |||
1307 | } | 1317 | } |
1308 | 1318 | ||
1309 | static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, | 1319 | static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, |
1310 | struct bufdesc *bdp, u32 length) | 1320 | struct bufdesc *bdp, u32 length, bool swap) |
1311 | { | 1321 | { |
1312 | struct fec_enet_private *fep = netdev_priv(ndev); | 1322 | struct fec_enet_private *fep = netdev_priv(ndev); |
1313 | struct sk_buff *new_skb; | 1323 | struct sk_buff *new_skb; |
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, | |||
1322 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, | 1332 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, |
1323 | FEC_ENET_RX_FRSIZE - fep->rx_align, | 1333 | FEC_ENET_RX_FRSIZE - fep->rx_align, |
1324 | DMA_FROM_DEVICE); | 1334 | DMA_FROM_DEVICE); |
1325 | memcpy(new_skb->data, (*skb)->data, length); | 1335 | if (!swap) |
1336 | memcpy(new_skb->data, (*skb)->data, length); | ||
1337 | else | ||
1338 | swap_buffer2(new_skb->data, (*skb)->data, length); | ||
1326 | *skb = new_skb; | 1339 | *skb = new_skb; |
1327 | 1340 | ||
1328 | return true; | 1341 | return true; |
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1352 | u16 vlan_tag; | 1365 | u16 vlan_tag; |
1353 | int index = 0; | 1366 | int index = 0; |
1354 | bool is_copybreak; | 1367 | bool is_copybreak; |
1368 | bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME; | ||
1355 | 1369 | ||
1356 | #ifdef CONFIG_M532x | 1370 | #ifdef CONFIG_M532x |
1357 | flush_cache_all(); | 1371 | flush_cache_all(); |
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1415 | * include that when passing upstream as it messes up | 1429 | * include that when passing upstream as it messes up |
1416 | * bridging applications. | 1430 | * bridging applications. |
1417 | */ | 1431 | */ |
1418 | is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); | 1432 | is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, |
1433 | need_swap); | ||
1419 | if (!is_copybreak) { | 1434 | if (!is_copybreak) { |
1420 | skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); | 1435 | skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); |
1421 | if (unlikely(!skb_new)) { | 1436 | if (unlikely(!skb_new)) { |
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1430 | prefetch(skb->data - NET_IP_ALIGN); | 1445 | prefetch(skb->data - NET_IP_ALIGN); |
1431 | skb_put(skb, pkt_len - 4); | 1446 | skb_put(skb, pkt_len - 4); |
1432 | data = skb->data; | 1447 | data = skb->data; |
1433 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | 1448 | if (!is_copybreak && need_swap) |
1434 | swap_buffer(data, pkt_len); | 1449 | swap_buffer(data, pkt_len); |
1435 | 1450 | ||
1436 | /* Extract the enhanced buffer descriptor */ | 1451 | /* Extract the enhanced buffer descriptor */ |
@@ -1581,7 +1596,8 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1581 | complete(&fep->mdio_done); | 1596 | complete(&fep->mdio_done); |
1582 | } | 1597 | } |
1583 | 1598 | ||
1584 | fec_ptp_check_pps_event(fep); | 1599 | if (fep->ptp_clock) |
1600 | fec_ptp_check_pps_event(fep); | ||
1585 | 1601 | ||
1586 | return ret; | 1602 | return ret; |
1587 | } | 1603 | } |
@@ -3342,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev) | |||
3342 | netif_device_detach(ndev); | 3358 | netif_device_detach(ndev); |
3343 | netif_tx_unlock_bh(ndev); | 3359 | netif_tx_unlock_bh(ndev); |
3344 | fec_stop(ndev); | 3360 | fec_stop(ndev); |
3361 | fec_enet_clk_enable(ndev, false); | ||
3362 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); | ||
3345 | } | 3363 | } |
3346 | rtnl_unlock(); | 3364 | rtnl_unlock(); |
3347 | 3365 | ||
3348 | fec_enet_clk_enable(ndev, false); | ||
3349 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); | ||
3350 | |||
3351 | if (fep->reg_phy) | 3366 | if (fep->reg_phy) |
3352 | regulator_disable(fep->reg_phy); | 3367 | regulator_disable(fep->reg_phy); |
3353 | 3368 | ||
@@ -3366,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev) | |||
3366 | return ret; | 3381 | return ret; |
3367 | } | 3382 | } |
3368 | 3383 | ||
3369 | pinctrl_pm_select_default_state(&fep->pdev->dev); | ||
3370 | ret = fec_enet_clk_enable(ndev, true); | ||
3371 | if (ret) | ||
3372 | goto failed_clk; | ||
3373 | |||
3374 | rtnl_lock(); | 3384 | rtnl_lock(); |
3375 | if (netif_running(ndev)) { | 3385 | if (netif_running(ndev)) { |
3386 | pinctrl_pm_select_default_state(&fep->pdev->dev); | ||
3387 | ret = fec_enet_clk_enable(ndev, true); | ||
3388 | if (ret) { | ||
3389 | rtnl_unlock(); | ||
3390 | goto failed_clk; | ||
3391 | } | ||
3376 | fec_restart(ndev); | 3392 | fec_restart(ndev); |
3377 | netif_tx_lock_bh(ndev); | 3393 | netif_tx_lock_bh(ndev); |
3378 | netif_device_attach(ndev); | 3394 | netif_device_attach(ndev); |
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 3d4e08be1709..b34214e2df5f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c | |||
@@ -341,6 +341,9 @@ static void restart(struct net_device *dev) | |||
341 | FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ | 341 | FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ |
342 | } | 342 | } |
343 | 343 | ||
344 | /* Restore multicast and promiscuous settings */ | ||
345 | set_multicast_list(dev); | ||
346 | |||
344 | /* | 347 | /* |
345 | * Enable interrupts we wish to service. | 348 | * Enable interrupts we wish to service. |
346 | */ | 349 | */ |
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index f30411f0701f..7a184e8816a4 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c | |||
@@ -355,6 +355,9 @@ static void restart(struct net_device *dev) | |||
355 | if (fep->phydev->duplex) | 355 | if (fep->phydev->duplex) |
356 | S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); | 356 | S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); |
357 | 357 | ||
358 | /* Restore multicast and promiscuous settings */ | ||
359 | set_multicast_list(dev); | ||
360 | |||
358 | S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); | 361 | S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); |
359 | } | 362 | } |
360 | 363 | ||
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 5f6aded512f5..24f3986cfae2 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c | |||
@@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1075 | NETIF_F_HW_CSUM | | 1075 | NETIF_F_HW_CSUM | |
1076 | NETIF_F_SG); | 1076 | NETIF_F_SG); |
1077 | 1077 | ||
1078 | netdev->priv_flags |= IFF_UNICAST_FLT; | 1078 | /* Do not set IFF_UNICAST_FLT for VMware's 82545EM */ |
1079 | if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || | ||
1080 | hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) | ||
1081 | netdev->priv_flags |= IFF_UNICAST_FLT; | ||
1079 | 1082 | ||
1080 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); | 1083 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); |
1081 | 1084 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ed5f1c15fb0f..c3a7f4a4b775 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) | |||
6151 | I40E_GL_MDET_TX_PF_NUM_SHIFT; | 6151 | I40E_GL_MDET_TX_PF_NUM_SHIFT; |
6152 | u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> | 6152 | u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> |
6153 | I40E_GL_MDET_TX_VF_NUM_SHIFT; | 6153 | I40E_GL_MDET_TX_VF_NUM_SHIFT; |
6154 | u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >> | 6154 | u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> |
6155 | I40E_GL_MDET_TX_EVENT_SHIFT; | 6155 | I40E_GL_MDET_TX_EVENT_SHIFT; |
6156 | u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> | 6156 | u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> |
6157 | I40E_GL_MDET_TX_QUEUE_SHIFT; | 6157 | I40E_GL_MDET_TX_QUEUE_SHIFT; |
@@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) | |||
6165 | if (reg & I40E_GL_MDET_RX_VALID_MASK) { | 6165 | if (reg & I40E_GL_MDET_RX_VALID_MASK) { |
6166 | u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> | 6166 | u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> |
6167 | I40E_GL_MDET_RX_FUNCTION_SHIFT; | 6167 | I40E_GL_MDET_RX_FUNCTION_SHIFT; |
6168 | u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >> | 6168 | u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> |
6169 | I40E_GL_MDET_RX_EVENT_SHIFT; | 6169 | I40E_GL_MDET_RX_EVENT_SHIFT; |
6170 | u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> | 6170 | u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> |
6171 | I40E_GL_MDET_RX_QUEUE_SHIFT; | 6171 | I40E_GL_MDET_RX_QUEUE_SHIFT; |
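The i40e change fixes a classic field-extraction slip: masking with the *_SHIFT constant instead of the *_MASK one. The correct idiom, with purely illustrative field values:

#define EVENT_MASK	0x0000ff00	/* illustrative register field */
#define EVENT_SHIFT	8

static inline u8 event_get_sketch(u32 reg)
{
	/* Mask first, then shift down.  Masking with EVENT_SHIFT (0x8)
	 * would isolate a single unrelated bit and, after the shift,
	 * always extract 0 -- exactly the bug being fixed. */
	return (reg & EVENT_MASK) >> EVENT_SHIFT;
}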
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a21b14495ebd..487cd9c4ac0d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) | |||
1012 | /* igb_get_stats64() might access the rings on this vector, | 1012 | /* igb_get_stats64() might access the rings on this vector, |
1013 | * we must wait a grace period before freeing it. | 1013 | * we must wait a grace period before freeing it. |
1014 | */ | 1014 | */ |
1015 | kfree_rcu(q_vector, rcu); | 1015 | if (q_vector) |
1016 | kfree_rcu(q_vector, rcu); | ||
1016 | } | 1017 | } |
1017 | 1018 | ||
1018 | /** | 1019 | /** |
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter) | |||
1792 | adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; | 1793 | adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; |
1793 | 1794 | ||
1794 | for (i = 0; i < adapter->num_q_vectors; i++) { | 1795 | for (i = 0; i < adapter->num_q_vectors; i++) { |
1795 | napi_synchronize(&(adapter->q_vector[i]->napi)); | 1796 | if (adapter->q_vector[i]) { |
1796 | napi_disable(&(adapter->q_vector[i]->napi)); | 1797 | napi_synchronize(&adapter->q_vector[i]->napi); |
1798 | napi_disable(&adapter->q_vector[i]->napi); | ||
1799 | } | ||
1797 | } | 1800 | } |
1798 | 1801 | ||
1799 | 1802 | ||
@@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) | |||
3717 | int i; | 3720 | int i; |
3718 | 3721 | ||
3719 | for (i = 0; i < adapter->num_tx_queues; i++) | 3722 | for (i = 0; i < adapter->num_tx_queues; i++) |
3720 | igb_free_tx_resources(adapter->tx_ring[i]); | 3723 | if (adapter->tx_ring[i]) |
3724 | igb_free_tx_resources(adapter->tx_ring[i]); | ||
3721 | } | 3725 | } |
3722 | 3726 | ||
3723 | void igb_unmap_and_free_tx_resource(struct igb_ring *ring, | 3727 | void igb_unmap_and_free_tx_resource(struct igb_ring *ring, |
@@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) | |||
3782 | int i; | 3786 | int i; |
3783 | 3787 | ||
3784 | for (i = 0; i < adapter->num_tx_queues; i++) | 3788 | for (i = 0; i < adapter->num_tx_queues; i++) |
3785 | igb_clean_tx_ring(adapter->tx_ring[i]); | 3789 | if (adapter->tx_ring[i]) |
3790 | igb_clean_tx_ring(adapter->tx_ring[i]); | ||
3786 | } | 3791 | } |
3787 | 3792 | ||
3788 | /** | 3793 | /** |
@@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) | |||
3819 | int i; | 3824 | int i; |
3820 | 3825 | ||
3821 | for (i = 0; i < adapter->num_rx_queues; i++) | 3826 | for (i = 0; i < adapter->num_rx_queues; i++) |
3822 | igb_free_rx_resources(adapter->rx_ring[i]); | 3827 | if (adapter->rx_ring[i]) |
3828 | igb_free_rx_resources(adapter->rx_ring[i]); | ||
3823 | } | 3829 | } |
3824 | 3830 | ||
3825 | /** | 3831 | /** |
@@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter) | |||
3874 | int i; | 3880 | int i; |
3875 | 3881 | ||
3876 | for (i = 0; i < adapter->num_rx_queues; i++) | 3882 | for (i = 0; i < adapter->num_rx_queues; i++) |
3877 | igb_clean_rx_ring(adapter->rx_ring[i]); | 3883 | if (adapter->rx_ring[i]) |
3884 | igb_clean_rx_ring(adapter->rx_ring[i]); | ||
3878 | } | 3885 | } |
3879 | 3886 | ||
3880 | /** | 3887 | /** |
@@ -6537,6 +6544,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, | |||
6537 | if (unlikely(page_to_nid(page) != numa_node_id())) | 6544 | if (unlikely(page_to_nid(page) != numa_node_id())) |
6538 | return false; | 6545 | return false; |
6539 | 6546 | ||
6547 | if (unlikely(page->pfmemalloc)) | ||
6548 | return false; | ||
6549 | |||
6540 | #if (PAGE_SIZE < 8192) | 6550 | #if (PAGE_SIZE < 8192) |
6541 | /* if we are only owner of page we can reuse it */ | 6551 | /* if we are only owner of page we can reuse it */ |
6542 | if (unlikely(page_count(page) != 1)) | 6552 | if (unlikely(page_count(page) != 1)) |
@@ -6603,7 +6613,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, | |||
6603 | memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); | 6613 | memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); |
6604 | 6614 | ||
6605 | /* we can reuse buffer as-is, just make sure it is local */ | 6615 | /* we can reuse buffer as-is, just make sure it is local */ |
6606 | if (likely(page_to_nid(page) == numa_node_id())) | 6616 | if (likely((page_to_nid(page) == numa_node_id()) && |
6617 | !page->pfmemalloc)) | ||
6607 | return true; | 6618 | return true; |
6608 | 6619 | ||
6609 | /* this page cannot be reused so discard it */ | 6620 | /* this page cannot be reused so discard it */ |
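Both igb hunks enforce the same rule: pages dipped from the emergency pfmemalloc reserve must be returned to the allocator, never recycled into the RX ring. A sketch of the combined reuse test (in this era the flag lived directly on struct page):

static bool can_reuse_rx_page_sketch(struct page *page)
{
	if (page_to_nid(page) != numa_node_id())
		return false;	/* remote NUMA node: do not recycle */

	if (page->pfmemalloc)
		return false;	/* emergency-reserve page: give it back */

	return true;
}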
@@ -7400,6 +7411,8 @@ static int igb_resume(struct device *dev) | |||
7400 | pci_restore_state(pdev); | 7411 | pci_restore_state(pdev); |
7401 | pci_save_state(pdev); | 7412 | pci_save_state(pdev); |
7402 | 7413 | ||
7414 | if (!pci_device_is_present(pdev)) | ||
7415 | return -ENODEV; | ||
7403 | err = pci_enable_device_mem(pdev); | 7416 | err = pci_enable_device_mem(pdev); |
7404 | if (err) { | 7417 | if (err) { |
7405 | dev_err(&pdev->dev, | 7418 | dev_err(&pdev->dev, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 3ce4a258f945..0ae038b9af90 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | |||
@@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
342 | if (old == advertised) | 342 | if (old == advertised) |
343 | return err; | 343 | return err; |
344 | /* this sets the link speed and restarts auto-neg */ | 344 | /* this sets the link speed and restarts auto-neg */ |
345 | while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) | ||
346 | usleep_range(1000, 2000); | ||
347 | |||
345 | hw->mac.autotry_restart = true; | 348 | hw->mac.autotry_restart = true; |
346 | err = hw->mac.ops.setup_link(hw, advertised, true); | 349 | err = hw->mac.ops.setup_link(hw, advertised, true); |
347 | if (err) { | 350 | if (err) { |
348 | e_info(probe, "setup link failed with code %d\n", err); | 351 | e_info(probe, "setup link failed with code %d\n", err); |
349 | hw->mac.ops.setup_link(hw, old, true); | 352 | hw->mac.ops.setup_link(hw, old, true); |
350 | } | 353 | } |
354 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); | ||
351 | } else { | 355 | } else { |
352 | /* in this case we currently only support 10Gb/FULL */ | 356 | /* in this case we currently only support 10Gb/FULL */ |
353 | u32 speed = ethtool_cmd_speed(ecmd); | 357 | u32 speed = ethtool_cmd_speed(ecmd); |
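The ethtool path now serializes against SFP initialization with an adapter state bit used as a sleeping try-lock: whoever sets the bit first owns link setup, everyone else polls. A sketch of the pattern around a reduced reconfigure step:

static void set_link_sketch(struct ixgbe_adapter *adapter, u32 advertised)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Claim link-setup ownership; poll (sleeping) until we get it */
	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		usleep_range(1000, 2000);

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, advertised, true);

	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}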
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fec5212d4337..cc51554c9e99 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev) | |||
3936 | * if SR-IOV and VMDQ are disabled - otherwise ensure | 3936 | * if SR-IOV and VMDQ are disabled - otherwise ensure |
3937 | * that hardware VLAN filters remain enabled. | 3937 | * that hardware VLAN filters remain enabled. |
3938 | */ | 3938 | */ |
3939 | if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | | 3939 | if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | |
3940 | IXGBE_FLAG_SRIOV_ENABLED))) | 3940 | IXGBE_FLAG_SRIOV_ENABLED)) |
3941 | vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); | 3941 | vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); |
3942 | } else { | 3942 | } else { |
3943 | if (netdev->flags & IFF_ALLMULTI) { | 3943 | if (netdev->flags & IFF_ALLMULTI) { |
@@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) | |||
4321 | IXGBE_CB(skb)->page_released = false; | 4321 | IXGBE_CB(skb)->page_released = false; |
4322 | } | 4322 | } |
4323 | dev_kfree_skb(skb); | 4323 | dev_kfree_skb(skb); |
4324 | rx_buffer->skb = NULL; | ||
4324 | } | 4325 | } |
4325 | rx_buffer->skb = NULL; | ||
4326 | if (rx_buffer->dma) | 4326 | if (rx_buffer->dma) |
4327 | dma_unmap_page(dev, rx_buffer->dma, | 4327 | dma_unmap_page(dev, rx_buffer->dma, |
4328 | ixgbe_rx_pg_size(rx_ring), | 4328 | ixgbe_rx_pg_size(rx_ring), |
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, | |||
7669 | return -EOPNOTSUPP; | 7669 | return -EOPNOTSUPP; |
7670 | 7670 | ||
7671 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); | 7671 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); |
7672 | if (!br_spec) | ||
7673 | return -EINVAL; | ||
7672 | 7674 | ||
7673 | nla_for_each_nested(attr, br_spec, rem) { | 7675 | nla_for_each_nested(attr, br_spec, rem) { |
7674 | __u16 mode; | 7676 | __u16 mode; |
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, | |||
7677 | if (nla_type(attr) != IFLA_BRIDGE_MODE) | 7679 | if (nla_type(attr) != IFLA_BRIDGE_MODE) |
7678 | continue; | 7680 | continue; |
7679 | 7681 | ||
7682 | if (nla_len(attr) < sizeof(mode)) | ||
7683 | return -EINVAL; | ||
7684 | |||
7680 | mode = nla_get_u16(attr); | 7685 | mode = nla_get_u16(attr); |
7681 | if (mode == BRIDGE_MODE_VEPA) { | 7686 | if (mode == BRIDGE_MODE_VEPA) { |
7682 | reg = 0; | 7687 | reg = 0; |
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7979 | int i, err, pci_using_dac, expected_gts; | 7984 | int i, err, pci_using_dac, expected_gts; |
7980 | unsigned int indices = MAX_TX_QUEUES; | 7985 | unsigned int indices = MAX_TX_QUEUES; |
7981 | u8 part_str[IXGBE_PBANUM_LENGTH]; | 7986 | u8 part_str[IXGBE_PBANUM_LENGTH]; |
7987 | bool disable_dev = false; | ||
7982 | #ifdef IXGBE_FCOE | 7988 | #ifdef IXGBE_FCOE |
7983 | u16 device_caps; | 7989 | u16 device_caps; |
7984 | #endif | 7990 | #endif |
@@ -8369,13 +8375,14 @@ err_sw_init: | |||
8369 | iounmap(adapter->io_addr); | 8375 | iounmap(adapter->io_addr); |
8370 | kfree(adapter->mac_table); | 8376 | kfree(adapter->mac_table); |
8371 | err_ioremap: | 8377 | err_ioremap: |
8378 | disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); | ||
8372 | free_netdev(netdev); | 8379 | free_netdev(netdev); |
8373 | err_alloc_etherdev: | 8380 | err_alloc_etherdev: |
8374 | pci_release_selected_regions(pdev, | 8381 | pci_release_selected_regions(pdev, |
8375 | pci_select_bars(pdev, IORESOURCE_MEM)); | 8382 | pci_select_bars(pdev, IORESOURCE_MEM)); |
8376 | err_pci_reg: | 8383 | err_pci_reg: |
8377 | err_dma: | 8384 | err_dma: |
8378 | if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) | 8385 | if (!adapter || disable_dev) |
8379 | pci_disable_device(pdev); | 8386 | pci_disable_device(pdev); |
8380 | return err; | 8387 | return err; |
8381 | } | 8388 | } |
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev) | |||
8393 | { | 8400 | { |
8394 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); | 8401 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
8395 | struct net_device *netdev = adapter->netdev; | 8402 | struct net_device *netdev = adapter->netdev; |
8403 | bool disable_dev; | ||
8396 | 8404 | ||
8397 | ixgbe_dbg_adapter_exit(adapter); | 8405 | ixgbe_dbg_adapter_exit(adapter); |
8398 | 8406 | ||
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev) | |||
8442 | e_dev_info("complete\n"); | 8450 | e_dev_info("complete\n"); |
8443 | 8451 | ||
8444 | kfree(adapter->mac_table); | 8452 | kfree(adapter->mac_table); |
8453 | disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); | ||
8445 | free_netdev(netdev); | 8454 | free_netdev(netdev); |
8446 | 8455 | ||
8447 | pci_disable_pcie_error_reporting(pdev); | 8456 | pci_disable_pcie_error_reporting(pdev); |
8448 | 8457 | ||
8449 | if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) | 8458 | if (disable_dev) |
8450 | pci_disable_device(pdev); | 8459 | pci_disable_device(pdev); |
8451 | } | 8460 | } |
8452 | 8461 | ||
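Both ixgbe hunks cure the same use-after-free: the adapter structure lives inside the netdev's private area, so adapter->state must not be touched after free_netdev(). The test_and_set_bit() result is therefore sampled first and consulted afterwards; a sketch:

static void teardown_sketch(struct pci_dev *pdev, struct net_device *netdev,
			    struct ixgbe_adapter *adapter)
{
	/* Sample the bit while adapter (inside netdev) is still valid */
	bool disable_dev = !test_and_set_bit(__IXGBE_DISABLED,
					     &adapter->state);

	free_netdev(netdev);		/* adapter memory is gone here */

	if (disable_dev)		/* safe: uses the saved answer */
		pci_disable_device(pdev);
}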
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index d47b19f27c35..28b81ae09b5a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | |||
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, | |||
635 | **/ | 635 | **/ |
636 | s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) | 636 | s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) |
637 | { | 637 | { |
638 | s32 status; | ||
639 | u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; | 638 | u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; |
640 | bool autoneg = false; | 639 | bool autoneg = false; |
641 | ixgbe_link_speed speed; | 640 | ixgbe_link_speed speed; |
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) | |||
700 | 699 | ||
701 | hw->phy.ops.write_reg(hw, MDIO_CTRL1, | 700 | hw->phy.ops.write_reg(hw, MDIO_CTRL1, |
702 | MDIO_MMD_AN, autoneg_reg); | 701 | MDIO_MMD_AN, autoneg_reg); |
703 | 702 | return 0; | |
704 | return status; | ||
705 | } | 703 | } |
706 | 704 | ||
707 | /** | 705 | /** |
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index b151a949f352..d44560d1d268 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
1047 | int tx_index; | 1047 | int tx_index; |
1048 | struct tx_desc *desc; | 1048 | struct tx_desc *desc; |
1049 | u32 cmd_sts; | 1049 | u32 cmd_sts; |
1050 | struct sk_buff *skb; | ||
1051 | 1050 | ||
1052 | tx_index = txq->tx_used_desc; | 1051 | tx_index = txq->tx_used_desc; |
1053 | desc = &txq->tx_desc_area[tx_index]; | 1052 | desc = &txq->tx_desc_area[tx_index]; |
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
1066 | reclaimed++; | 1065 | reclaimed++; |
1067 | txq->tx_desc_count--; | 1066 | txq->tx_desc_count--; |
1068 | 1067 | ||
1069 | skb = NULL; | 1068 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) |
1070 | if (cmd_sts & TX_LAST_DESC) | 1069 | dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, |
1071 | skb = __skb_dequeue(&txq->tx_skb); | 1070 | desc->byte_cnt, DMA_TO_DEVICE); |
1071 | |||
1072 | if (cmd_sts & TX_ENABLE_INTERRUPT) { | ||
1073 | struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); | ||
1074 | |||
1075 | if (!WARN_ON(!skb)) | ||
1076 | dev_kfree_skb(skb); | ||
1077 | } | ||
1072 | 1078 | ||
1073 | if (cmd_sts & ERROR_SUMMARY) { | 1079 | if (cmd_sts & ERROR_SUMMARY) { |
1074 | netdev_info(mp->dev, "tx error\n"); | 1080 | netdev_info(mp->dev, "tx error\n"); |
1075 | mp->dev->stats.tx_errors++; | 1081 | mp->dev->stats.tx_errors++; |
1076 | } | 1082 | } |
1077 | 1083 | ||
1078 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) | ||
1079 | dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, | ||
1080 | desc->byte_cnt, DMA_TO_DEVICE); | ||
1081 | dev_kfree_skb(skb); | ||
1082 | } | 1084 | } |
1083 | 1085 | ||
1084 | __netif_tx_unlock_bh(nq); | 1086 | __netif_tx_unlock_bh(nq); |
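With TSO a single skb can span several TX descriptors, so the old "unmap and free on TX_LAST_DESC" logic leaked DMA mappings for the other descriptors. The rework unmaps every non-TSO-header descriptor and frees the skb exactly once, on the descriptor that was armed to interrupt. The per-descriptor rule, as a sketch:

static void reclaim_one_desc_sketch(struct mv643xx_eth_private *mp,
				    struct tx_queue *txq,
				    struct tx_desc *desc, u32 cmd_sts)
{
	/* TSO header slots live in a coherent buffer: never unmapped */
	if (!IS_TSO_HEADER(txq, desc->buf_ptr))
		dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
				 desc->byte_cnt, DMA_TO_DEVICE);

	/* The skb rides on the descriptor that asked for an interrupt */
	if (cmd_sts & TX_ENABLE_INTERRUPT) {
		struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);

		if (!WARN_ON(!skb))
			dev_kfree_skb(skb);
	}
}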
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index ece83f101526..fdf3e382e464 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, | |||
1692 | { | 1692 | { |
1693 | struct mvpp2_prs_entry *pe; | 1693 | struct mvpp2_prs_entry *pe; |
1694 | int tid_aux, tid; | 1694 | int tid_aux, tid; |
1695 | int ret = 0; | ||
1695 | 1696 | ||
1696 | pe = mvpp2_prs_vlan_find(priv, tpid, ai); | 1697 | pe = mvpp2_prs_vlan_find(priv, tpid, ai); |
1697 | 1698 | ||
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, | |||
1723 | break; | 1724 | break; |
1724 | } | 1725 | } |
1725 | 1726 | ||
1726 | if (tid <= tid_aux) | 1727 | if (tid <= tid_aux) { |
1727 | return -EINVAL; | 1728 | ret = -EINVAL; |
1729 | goto error; | ||
1730 | } | ||
1728 | 1731 | ||
1729 | memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); | 1732 | memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); |
1730 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); | 1733 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); |
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, | |||
1756 | 1759 | ||
1757 | mvpp2_prs_hw_write(priv, pe); | 1760 | mvpp2_prs_hw_write(priv, pe); |
1758 | 1761 | ||
1762 | error: | ||
1759 | kfree(pe); | 1763 | kfree(pe); |
1760 | 1764 | ||
1761 | return 0; | 1765 | return ret; |
1762 | } | 1766 | } |
1763 | 1767 | ||
1764 | /* Get first free double vlan ai number */ | 1768 | /* Get first free double vlan ai number */ |
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, | |||
1821 | unsigned int port_map) | 1825 | unsigned int port_map) |
1822 | { | 1826 | { |
1823 | struct mvpp2_prs_entry *pe; | 1827 | struct mvpp2_prs_entry *pe; |
1824 | int tid_aux, tid, ai; | 1828 | int tid_aux, tid, ai, ret = 0; |
1825 | 1829 | ||
1826 | pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); | 1830 | pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); |
1827 | 1831 | ||
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, | |||
1838 | 1842 | ||
1839 | /* Set ai value for new double vlan entry */ | 1843 | /* Set ai value for new double vlan entry */ |
1840 | ai = mvpp2_prs_double_vlan_ai_free_get(priv); | 1844 | ai = mvpp2_prs_double_vlan_ai_free_get(priv); |
1841 | if (ai < 0) | 1845 | if (ai < 0) { |
1842 | return ai; | 1846 | ret = ai; |
1847 | goto error; | ||
1848 | } | ||
1843 | 1849 | ||
1844 | /* Get first single/triple vlan tid */ | 1850 | /* Get first single/triple vlan tid */ |
1845 | for (tid_aux = MVPP2_PE_FIRST_FREE_TID; | 1851 | for (tid_aux = MVPP2_PE_FIRST_FREE_TID; |
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, | |||
1859 | break; | 1865 | break; |
1860 | } | 1866 | } |
1861 | 1867 | ||
1862 | if (tid >= tid_aux) | 1868 | if (tid >= tid_aux) { |
1863 | return -ERANGE; | 1869 | ret = -ERANGE; |
1870 | goto error; | ||
1871 | } | ||
1864 | 1872 | ||
1865 | memset(pe, 0, sizeof(struct mvpp2_prs_entry)); | 1873 | memset(pe, 0, sizeof(struct mvpp2_prs_entry)); |
1866 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); | 1874 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); |
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, | |||
1887 | mvpp2_prs_tcam_port_map_set(pe, port_map); | 1895 | mvpp2_prs_tcam_port_map_set(pe, port_map); |
1888 | mvpp2_prs_hw_write(priv, pe); | 1896 | mvpp2_prs_hw_write(priv, pe); |
1889 | 1897 | ||
1898 | error: | ||
1890 | kfree(pe); | 1899 | kfree(pe); |
1891 | return 0; | 1900 | return ret; |
1892 | } | 1901 | } |
1893 | 1902 | ||
1894 | /* IPv4 header parsing for fragmentation and L4 offset */ | 1903 | /* IPv4 header parsing for fragmentation and L4 offset */ |
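Editor's note: both mvpp2 VLAN-add paths above allocate a parser entry and previously returned straight out of the middle on -EINVAL/-ERANGE, leaking it; the fix funnels every exit through one label so kfree(pe) always runs. A minimal generic sketch of the pattern, where validate_entry() and program_entry() are hypothetical stand-ins:

    int add_parser_entry(struct mvpp2 *priv)
    {
            struct mvpp2_prs_entry *pe;
            int ret = 0;

            pe = kzalloc(sizeof(*pe), GFP_KERNEL);
            if (!pe)
                    return -ENOMEM;

            if (!validate_entry(priv, pe)) {   /* hypothetical check */
                    ret = -EINVAL;
                    goto error;                /* no early return: no leak */
            }

            program_entry(priv, pe);           /* hypothetical HW write */

    error:
            kfree(pe);     /* single exit frees on success and failure */
            return ret;
    }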
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f3032fec8fce..4d69e382b4e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev) | |||
1693 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); | 1693 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); |
1694 | 1694 | ||
1695 | #ifdef CONFIG_MLX4_EN_VXLAN | 1695 | #ifdef CONFIG_MLX4_EN_VXLAN |
1696 | if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) | 1696 | if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) |
1697 | vxlan_get_rx_port(dev); | 1697 | vxlan_get_rx_port(dev); |
1698 | #endif | 1698 | #endif |
1699 | priv->port_up = true; | 1699 | priv->port_up = true; |
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work) | |||
2281 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, | 2281 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, |
2282 | VXLAN_STEER_BY_OUTER_MAC, 1); | 2282 | VXLAN_STEER_BY_OUTER_MAC, 1); |
2283 | out: | 2283 | out: |
2284 | if (ret) | 2284 | if (ret) { |
2285 | en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); | 2285 | en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); |
2286 | return; | ||
2287 | } | ||
2288 | |||
2289 | /* set offloads */ | ||
2290 | priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | | ||
2291 | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; | ||
2292 | priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2293 | priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2286 | } | 2294 | } |
2287 | 2295 | ||
2288 | static void mlx4_en_del_vxlan_offloads(struct work_struct *work) | 2296 | static void mlx4_en_del_vxlan_offloads(struct work_struct *work) |
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) | |||
2290 | int ret; | 2298 | int ret; |
2291 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | 2299 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, |
2292 | vxlan_del_task); | 2300 | vxlan_del_task); |
2301 | /* unset offloads */ | ||
2302 | priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | | ||
2303 | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); | ||
2304 | priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; | ||
2305 | priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; | ||
2293 | 2306 | ||
2294 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, | 2307 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, |
2295 | VXLAN_STEER_BY_OUTER_MAC, 0); | 2308 | VXLAN_STEER_BY_OUTER_MAC, 0); |
@@ -2342,6 +2355,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev, | |||
2342 | 2355 | ||
2343 | queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); | 2356 | queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); |
2344 | } | 2357 | } |
2358 | |||
2359 | static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) | ||
2360 | { | ||
2361 | return vxlan_gso_check(skb); | ||
2362 | } | ||
2345 | #endif | 2363 | #endif |
2346 | 2364 | ||
2347 | static const struct net_device_ops mlx4_netdev_ops = { | 2365 | static const struct net_device_ops mlx4_netdev_ops = { |
@@ -2373,6 +2391,7 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
2373 | #ifdef CONFIG_MLX4_EN_VXLAN | 2391 | #ifdef CONFIG_MLX4_EN_VXLAN |
2374 | .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, | 2392 | .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, |
2375 | .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, | 2393 | .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, |
2394 | .ndo_gso_check = mlx4_en_gso_check, | ||
2376 | #endif | 2395 | #endif |
2377 | }; | 2396 | }; |
2378 | 2397 | ||
@@ -2403,6 +2422,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
2403 | .ndo_rx_flow_steer = mlx4_en_filter_rfs, | 2422 | .ndo_rx_flow_steer = mlx4_en_filter_rfs, |
2404 | #endif | 2423 | #endif |
2405 | .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, | 2424 | .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, |
2425 | #ifdef CONFIG_MLX4_EN_VXLAN | ||
2426 | .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, | ||
2427 | .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, | ||
2428 | .ndo_gso_check = mlx4_en_gso_check, | ||
2429 | #endif | ||
2406 | }; | 2430 | }; |
2407 | 2431 | ||
2408 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | 2432 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, |
@@ -2568,13 +2592,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2568 | if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) | 2592 | if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) |
2569 | dev->priv_flags |= IFF_UNICAST_FLT; | 2593 | dev->priv_flags |= IFF_UNICAST_FLT; |
2570 | 2594 | ||
2571 | if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { | ||
2572 | dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | | ||
2573 | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; | ||
2574 | dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2575 | dev->features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2576 | } | ||
2577 | |||
2578 | mdev->pndev[port] = dev; | 2595 | mdev->pndev[port] = dev; |
2579 | 2596 | ||
2580 | netif_carrier_off(dev); | 2597 | netif_carrier_off(dev); |
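Editor's note on the mlx4_en netdev hunks: the VXLAN offload feature bits are now flipped only after firmware accepts the steering configuration (instead of unconditionally at netdev init), the master-function netdev ops gains the same add/del port and GSO-check callbacks as the slave ops, and .ndo_gso_check lets the stack ask whether a given GSO skb is offloadable. A consolidated sketch of the guarded enable, using the driver's names:

    /* Advertise tunnel offloads only once mlx4_SET_PORT_VXLAN() has
     * succeeded; on failure nothing is advertised to the stack. */
    ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                              VXLAN_STEER_BY_OUTER_MAC, 1);
    if (ret) {
            en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
            return;
    }

    priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                                  NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
    priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
    priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;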
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 34c137878545..454d9fea640e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
836 | * whether LSO is used */ | 836 | * whether LSO is used */ |
837 | tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; | 837 | tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; |
838 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 838 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
839 | tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | | 839 | if (!skb->encapsulation) |
840 | MLX4_WQE_CTRL_TCP_UDP_CSUM); | 840 | tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | |
841 | MLX4_WQE_CTRL_TCP_UDP_CSUM); | ||
842 | else | ||
843 | tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); | ||
841 | ring->tx_csum++; | 844 | ring->tx_csum++; |
842 | } | 845 | } |
843 | 846 | ||
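Editor's note: for an encapsulated (VXLAN) packet the WQE may only request the outer IP checksum; also setting the TCP/UDP checksum flag would make the HW checksum the wrong (outer) L4 header. The hunk reassembled for readability:

    if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
            if (!skb->encapsulation)
                    /* plain packet: offload both IP and L4 checksums */
                    tx_desc->ctrl.srcrb_flags |=
                            cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
                                        MLX4_WQE_CTRL_TCP_UDP_CSUM);
            else
                    /* tunneled packet: outer IP checksum only */
                    tx_desc->ctrl.srcrb_flags |=
                            cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
            ring->tx_csum++;
    }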
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index a49c9d11d8a5..49290a405903 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev, | |||
1026 | pr_cont("\n"); | 1026 | pr_cont("\n"); |
1027 | } | 1027 | } |
1028 | } | 1028 | } |
1029 | synchronize_irq(eq->irq); | ||
1029 | 1030 | ||
1030 | mlx4_mtt_cleanup(dev, &eq->mtt); | 1031 | mlx4_mtt_cleanup(dev, &eq->mtt); |
1031 | for (i = 0; i < npages; ++i) | 1032 | for (i = 0; i < npages; ++i) |
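Editor's note: the single added line closes a use-after-free window, since the EQ's pages are about to be returned while the interrupt handler for that vector may still be running on another CPU. synchronize_irq() blocks until any in-flight handler has finished. The general pattern, with free_eq_resources() as a hypothetical stand-in:

    /* The IRQ stays requested (the vector may serve other users), so
     * free_irq() cannot be relied on to quiesce it.  Wait out any
     * handler currently running, then free what the handler reads. */
    synchronize_irq(eq->irq);
    free_eq_resources(eq);      /* hypothetical: MTT, pages, buffers */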
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index ca0f98c95105..872843179f44 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c | |||
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, | |||
955 | cur->ib.dst_gid_msk); | 955 | cur->ib.dst_gid_msk); |
956 | break; | 956 | break; |
957 | 957 | ||
958 | case MLX4_NET_TRANS_RULE_ID_VXLAN: | ||
959 | len += snprintf(buf + len, BUF_SIZE - len, | ||
960 | "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); | ||
961 | break; | ||
958 | case MLX4_NET_TRANS_RULE_ID_IPV6: | 962 | case MLX4_NET_TRANS_RULE_ID_IPV6: |
959 | break; | 963 | break; |
960 | 964 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 5d2498dcf536..cd5cf6d957c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, | |||
1546 | 1546 | ||
1547 | switch (op) { | 1547 | switch (op) { |
1548 | case RES_OP_RESERVE: | 1548 | case RES_OP_RESERVE: |
1549 | count = get_param_l(&in_param); | 1549 | count = get_param_l(&in_param) & 0xffffff; |
1550 | align = get_param_h(&in_param); | 1550 | align = get_param_h(&in_param); |
1551 | err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); | 1551 | err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); |
1552 | if (err) | 1552 | if (err) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ed53291468f3..ad2c96a02a53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, | |||
374 | snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", | 374 | snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", |
375 | name, pci_name(dev->pdev)); | 375 | name, pci_name(dev->pdev)); |
376 | eq->eqn = out.eq_number; | 376 | eq->eqn = out.eq_number; |
377 | eq->irqn = vecidx; | ||
378 | eq->dev = dev; | ||
379 | eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; | ||
377 | err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, | 380 | err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, |
378 | eq->name, eq); | 381 | eq->name, eq); |
379 | if (err) | 382 | if (err) |
380 | goto err_eq; | 383 | goto err_eq; |
381 | 384 | ||
382 | eq->irqn = vecidx; | ||
383 | eq->dev = dev; | ||
384 | eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; | ||
385 | |||
386 | err = mlx5_debug_eq_add(dev, eq); | 385 | err = mlx5_debug_eq_add(dev, eq); |
387 | if (err) | 386 | if (err) |
388 | goto err_irq; | 387 | goto err_irq; |
@@ -420,6 +419,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
420 | if (err) | 419 | if (err) |
421 | mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", | 420 | mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", |
422 | eq->eqn); | 421 | eq->eqn); |
422 | synchronize_irq(table->msix_arr[eq->irqn].vector); | ||
423 | mlx5_buf_free(dev, &eq->buf); | 423 | mlx5_buf_free(dev, &eq->buf); |
424 | 424 | ||
425 | return err; | 425 | return err; |
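Editor's note: the mlx5 EQ hunks fix both ends of the same race. The handler may fire as soon as request_irq() returns, so eq->irqn, eq->dev and eq->doorbell must be valid beforehand; on destroy, synchronize_irq() on the MSI-X vector ensures no handler still references the buffer being freed. The safe create-side ordering, with the driver's names:

    /* Publish every field the IRQ handler reads *before* the handler
     * can possibly run. */
    eq->eqn      = out.eq_number;
    eq->irqn     = vecidx;
    eq->dev      = dev;
    eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;

    err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler,
                      0, eq->name, eq);   /* may fire immediately */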
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3d8e8e489b2d..71b10b210792 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev, | |||
864 | dev->profile = &profile[prof_sel]; | 864 | dev->profile = &profile[prof_sel]; |
865 | dev->event = mlx5_core_event; | 865 | dev->event = mlx5_core_event; |
866 | 866 | ||
867 | INIT_LIST_HEAD(&priv->ctx_list); | ||
868 | spin_lock_init(&priv->ctx_lock); | ||
867 | err = mlx5_dev_init(dev, pdev); | 869 | err = mlx5_dev_init(dev, pdev); |
868 | if (err) { | 870 | if (err) { |
869 | dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); | 871 | dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); |
870 | goto out; | 872 | goto out; |
871 | } | 873 | } |
872 | 874 | ||
873 | INIT_LIST_HEAD(&priv->ctx_list); | ||
874 | spin_lock_init(&priv->ctx_lock); | ||
875 | err = mlx5_register_device(dev); | 875 | err = mlx5_register_device(dev); |
876 | if (err) { | 876 | if (err) { |
877 | dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); | 877 | dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); |
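Editor's note: same ordering theme as the EQ fix above. mlx5_dev_init() can already deliver events that walk priv->ctx_list, so the list head and its lock are now initialized first:

    /* Initialize everything a callback might touch before the call
     * that can trigger the callback. */
    INIT_LIST_HEAD(&priv->ctx_list);
    spin_lock_init(&priv->ctx_lock);

    err = mlx5_dev_init(dev, pdev);   /* may deliver events right away */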
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 0b2a1ccd276d..613037584d08 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work) | |||
2762 | if (test_bit(__NX_RESETTING, &adapter->state)) | 2762 | if (test_bit(__NX_RESETTING, &adapter->state)) |
2763 | goto reschedule; | 2763 | goto reschedule; |
2764 | 2764 | ||
2765 | if (test_bit(__NX_DEV_UP, &adapter->state)) { | 2765 | if (test_bit(__NX_DEV_UP, &adapter->state) && |
2766 | !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) { | ||
2766 | if (!adapter->has_link_events) { | 2767 | if (!adapter->has_link_events) { |
2767 | 2768 | ||
2768 | netxen_nic_handle_phy_intr(adapter); | 2769 | netxen_nic_handle_phy_intr(adapter); |
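Editor's note: the extra condition stops the poll worker from driving the PHY by hand on firmware that already delivers link notifications, which would otherwise fight the event path. The guard, reassembled:

    /* Poll the PHY only when the device is up AND the firmware cannot
     * send link-change events itself. */
    if (test_bit(__NX_DEV_UP, &adapter->state) &&
        !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
            if (!adapter->has_link_events)
                    netxen_nic_handle_phy_intr(adapter);
    }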
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index f5e29f7bdae3..a913b3ad2f89 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -503,6 +503,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, | |||
503 | 503 | ||
504 | adapter->flags |= QLCNIC_DEL_VXLAN_PORT; | 504 | adapter->flags |= QLCNIC_DEL_VXLAN_PORT; |
505 | } | 505 | } |
506 | |||
507 | static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) | ||
508 | { | ||
509 | return vxlan_gso_check(skb); | ||
510 | } | ||
506 | #endif | 511 | #endif |
507 | 512 | ||
508 | static const struct net_device_ops qlcnic_netdev_ops = { | 513 | static const struct net_device_ops qlcnic_netdev_ops = { |
@@ -526,6 +531,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { | |||
526 | #ifdef CONFIG_QLCNIC_VXLAN | 531 | #ifdef CONFIG_QLCNIC_VXLAN |
527 | .ndo_add_vxlan_port = qlcnic_add_vxlan_port, | 532 | .ndo_add_vxlan_port = qlcnic_add_vxlan_port, |
528 | .ndo_del_vxlan_port = qlcnic_del_vxlan_port, | 533 | .ndo_del_vxlan_port = qlcnic_del_vxlan_port, |
534 | .ndo_gso_check = qlcnic_gso_check, | ||
529 | #endif | 535 | #endif |
530 | #ifdef CONFIG_NET_POLL_CONTROLLER | 536 | #ifdef CONFIG_NET_POLL_CONTROLLER |
531 | .ndo_poll_controller = qlcnic_poll_controller, | 537 | .ndo_poll_controller = qlcnic_poll_controller, |
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index f3a47147937d..9a49f42ac2ba 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig | |||
@@ -5,7 +5,6 @@ | |||
5 | config NET_VENDOR_QUALCOMM | 5 | config NET_VENDOR_QUALCOMM |
6 | bool "Qualcomm devices" | 6 | bool "Qualcomm devices" |
7 | default y | 7 | default y |
8 | depends on SPI_MASTER && OF_GPIO | ||
9 | ---help--- | 8 | ---help--- |
10 | If you have a network (Ethernet) card belonging to this class, say Y | 9 | If you have a network (Ethernet) card belonging to this class, say Y |
11 | and read the Ethernet-HOWTO, available from | 10 | and read the Ethernet-HOWTO, available from |
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM | |||
20 | 19 | ||
21 | config QCA7000 | 20 | config QCA7000 |
22 | tristate "Qualcomm Atheros QCA7000 support" | 21 | tristate "Qualcomm Atheros QCA7000 support" |
23 | depends on SPI_MASTER && OF_GPIO | 22 | depends on SPI_MASTER && OF |
24 | ---help--- | 23 | ---help--- |
25 | This SPI protocol driver supports the Qualcomm Atheros QCA7000. | 24 | This SPI protocol driver supports the Qualcomm Atheros QCA7000. |
26 | 25 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 60e9c2cd051e..b5db6b3f939f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev) | |||
917 | return ret; | 917 | return ret; |
918 | } | 918 | } |
919 | 919 | ||
920 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
921 | static void sh_eth_set_receive_align(struct sk_buff *skb) | 920 | static void sh_eth_set_receive_align(struct sk_buff *skb) |
922 | { | 921 | { |
923 | int reserve; | 922 | uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); |
924 | 923 | ||
925 | reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); | ||
926 | if (reserve) | 924 | if (reserve) |
927 | skb_reserve(skb, reserve); | 925 | skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); |
928 | } | 926 | } |
929 | #else | ||
930 | static void sh_eth_set_receive_align(struct sk_buff *skb) | ||
931 | { | ||
932 | skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); | ||
933 | } | ||
934 | #endif | ||
935 | 927 | ||
936 | 928 | ||
937 | /* CPU <-> EDMAC endian convert */ | 929 | /* CPU <-> EDMAC endian convert */ |
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1119 | struct sh_eth_txdesc *txdesc = NULL; | 1111 | struct sh_eth_txdesc *txdesc = NULL; |
1120 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; | 1112 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; |
1121 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; | 1113 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; |
1114 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | ||
1122 | 1115 | ||
1123 | mdp->cur_rx = 0; | 1116 | mdp->cur_rx = 0; |
1124 | mdp->cur_tx = 0; | 1117 | mdp->cur_tx = 0; |
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1131 | for (i = 0; i < mdp->num_rx_ring; i++) { | 1124 | for (i = 0; i < mdp->num_rx_ring; i++) { |
1132 | /* skb */ | 1125 | /* skb */ |
1133 | mdp->rx_skbuff[i] = NULL; | 1126 | mdp->rx_skbuff[i] = NULL; |
1134 | skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); | 1127 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1135 | mdp->rx_skbuff[i] = skb; | 1128 | mdp->rx_skbuff[i] = skb; |
1136 | if (skb == NULL) | 1129 | if (skb == NULL) |
1137 | break; | 1130 | break; |
1138 | dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, | ||
1139 | DMA_FROM_DEVICE); | ||
1140 | sh_eth_set_receive_align(skb); | 1131 | sh_eth_set_receive_align(skb); |
1141 | 1132 | ||
1142 | /* RX descriptor */ | 1133 | /* RX descriptor */ |
1143 | rxdesc = &mdp->rx_ring[i]; | 1134 | rxdesc = &mdp->rx_ring[i]; |
1135 | /* The size of the buffer is a multiple of 16 bytes. */ | ||
1136 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | ||
1137 | dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, | ||
1138 | DMA_FROM_DEVICE); | ||
1144 | rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); | 1139 | rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); |
1145 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1140 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
1146 | 1141 | ||
1147 | /* The size of the buffer is 16 byte boundary. */ | ||
1148 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | ||
1149 | /* Rx descriptor address set */ | 1142 | /* Rx descriptor address set */ |
1150 | if (i == 0) { | 1143 | if (i == 0) { |
1151 | sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); | 1144 | sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); |
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1397 | struct sk_buff *skb; | 1390 | struct sk_buff *skb; |
1398 | u16 pkt_len = 0; | 1391 | u16 pkt_len = 0; |
1399 | u32 desc_status; | 1392 | u32 desc_status; |
1393 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | ||
1400 | 1394 | ||
1401 | rxdesc = &mdp->rx_ring[entry]; | 1395 | rxdesc = &mdp->rx_ring[entry]; |
1402 | while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { | 1396 | while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { |
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1448 | if (mdp->cd->rpadir) | 1442 | if (mdp->cd->rpadir) |
1449 | skb_reserve(skb, NET_IP_ALIGN); | 1443 | skb_reserve(skb, NET_IP_ALIGN); |
1450 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, | 1444 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, |
1451 | mdp->rx_buf_sz, | 1445 | ALIGN(mdp->rx_buf_sz, 16), |
1452 | DMA_FROM_DEVICE); | 1446 | DMA_FROM_DEVICE); |
1453 | skb_put(skb, pkt_len); | 1447 | skb_put(skb, pkt_len); |
1454 | skb->protocol = eth_type_trans(skb, ndev); | 1448 | skb->protocol = eth_type_trans(skb, ndev); |
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1468 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | 1462 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
1469 | 1463 | ||
1470 | if (mdp->rx_skbuff[entry] == NULL) { | 1464 | if (mdp->rx_skbuff[entry] == NULL) { |
1471 | skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); | 1465 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1472 | mdp->rx_skbuff[entry] = skb; | 1466 | mdp->rx_skbuff[entry] = skb; |
1473 | if (skb == NULL) | 1467 | if (skb == NULL) |
1474 | break; /* Better luck next round. */ | 1468 | break; /* Better luck next round. */ |
1475 | dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, | ||
1476 | DMA_FROM_DEVICE); | ||
1477 | sh_eth_set_receive_align(skb); | 1469 | sh_eth_set_receive_align(skb); |
1470 | dma_map_single(&ndev->dev, skb->data, | ||
1471 | rxdesc->buffer_length, DMA_FROM_DEVICE); | ||
1478 | 1472 | ||
1479 | skb_checksum_none_assert(skb); | 1473 | skb_checksum_none_assert(skb); |
1480 | rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); | 1474 | rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); |
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev) | |||
2042 | if (ret) | 2036 | if (ret) |
2043 | goto out_free_irq; | 2037 | goto out_free_irq; |
2044 | 2038 | ||
2039 | mdp->is_opened = 1; | ||
2040 | |||
2045 | return ret; | 2041 | return ret; |
2046 | 2042 | ||
2047 | out_free_irq: | 2043 | out_free_irq: |
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2131 | return NETDEV_TX_OK; | 2127 | return NETDEV_TX_OK; |
2132 | } | 2128 | } |
2133 | 2129 | ||
2130 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) | ||
2131 | { | ||
2132 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
2133 | |||
2134 | if (sh_eth_is_rz_fast_ether(mdp)) | ||
2135 | return &ndev->stats; | ||
2136 | |||
2137 | if (!mdp->is_opened) | ||
2138 | return &ndev->stats; | ||
2139 | |||
2140 | ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); | ||
2141 | sh_eth_write(ndev, 0, TROCR); /* (write clear) */ | ||
2142 | ndev->stats.collisions += sh_eth_read(ndev, CDCR); | ||
2143 | sh_eth_write(ndev, 0, CDCR); /* (write clear) */ | ||
2144 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); | ||
2145 | sh_eth_write(ndev, 0, LCCR); /* (write clear) */ | ||
2146 | |||
2147 | if (sh_eth_is_gether(mdp)) { | ||
2148 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); | ||
2149 | sh_eth_write(ndev, 0, CERCR); /* (write clear) */ | ||
2150 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); | ||
2151 | sh_eth_write(ndev, 0, CEECR); /* (write clear) */ | ||
2152 | } else { | ||
2153 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); | ||
2154 | sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ | ||
2155 | } | ||
2156 | |||
2157 | return &ndev->stats; | ||
2158 | } | ||
2159 | |||
2134 | /* device close function */ | 2160 | /* device close function */ |
2135 | static int sh_eth_close(struct net_device *ndev) | 2161 | static int sh_eth_close(struct net_device *ndev) |
2136 | { | 2162 | { |
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev) | |||
2145 | sh_eth_write(ndev, 0, EDTRR); | 2171 | sh_eth_write(ndev, 0, EDTRR); |
2146 | sh_eth_write(ndev, 0, EDRRR); | 2172 | sh_eth_write(ndev, 0, EDRRR); |
2147 | 2173 | ||
2174 | sh_eth_get_stats(ndev); | ||
2148 | /* PHY Disconnect */ | 2175 | /* PHY Disconnect */ |
2149 | if (mdp->phydev) { | 2176 | if (mdp->phydev) { |
2150 | phy_stop(mdp->phydev); | 2177 | phy_stop(mdp->phydev); |
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev) | |||
2163 | 2190 | ||
2164 | pm_runtime_put_sync(&mdp->pdev->dev); | 2191 | pm_runtime_put_sync(&mdp->pdev->dev); |
2165 | 2192 | ||
2166 | return 0; | 2193 | mdp->is_opened = 0; |
2167 | } | ||
2168 | |||
2169 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) | ||
2170 | { | ||
2171 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
2172 | |||
2173 | if (sh_eth_is_rz_fast_ether(mdp)) | ||
2174 | return &ndev->stats; | ||
2175 | 2194 | ||
2176 | pm_runtime_get_sync(&mdp->pdev->dev); | 2195 | return 0; |
2177 | |||
2178 | ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); | ||
2179 | sh_eth_write(ndev, 0, TROCR); /* (write clear) */ | ||
2180 | ndev->stats.collisions += sh_eth_read(ndev, CDCR); | ||
2181 | sh_eth_write(ndev, 0, CDCR); /* (write clear) */ | ||
2182 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); | ||
2183 | sh_eth_write(ndev, 0, LCCR); /* (write clear) */ | ||
2184 | if (sh_eth_is_gether(mdp)) { | ||
2185 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); | ||
2186 | sh_eth_write(ndev, 0, CERCR); /* (write clear) */ | ||
2187 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); | ||
2188 | sh_eth_write(ndev, 0, CEECR); /* (write clear) */ | ||
2189 | } else { | ||
2190 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); | ||
2191 | sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ | ||
2192 | } | ||
2193 | pm_runtime_put_sync(&mdp->pdev->dev); | ||
2194 | |||
2195 | return &ndev->stats; | ||
2196 | } | 2196 | } |
2197 | 2197 | ||
2198 | /* ioctl to device function */ | 2198 | /* ioctl to device function */ |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index b37c427144ee..22301bf9c21d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -162,9 +162,9 @@ enum { | |||
162 | 162 | ||
163 | /* Driver's parameters */ | 163 | /* Driver's parameters */ |
164 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 164 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
165 | #define SH4_SKB_RX_ALIGN 32 | 165 | #define SH_ETH_RX_ALIGN 32 |
166 | #else | 166 | #else |
167 | #define SH2_SH3_SKB_RX_ALIGN 2 | 167 | #define SH_ETH_RX_ALIGN 2 |
168 | #endif | 168 | #endif |
169 | 169 | ||
170 | /* Register's bits | 170 | /* Register's bits |
@@ -522,6 +522,7 @@ struct sh_eth_private { | |||
522 | 522 | ||
523 | unsigned no_ether_link:1; | 523 | unsigned no_ether_link:1; |
524 | unsigned ether_link_active_low:1; | 524 | unsigned ether_link_active_low:1; |
525 | unsigned is_opened:1; | ||
525 | }; | 526 | }; |
526 | 527 | ||
527 | static inline void sh_eth_soft_swap(char *src, int len) | 528 | static inline void sh_eth_soft_swap(char *src, int len) |
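Editor's note on the sh_eth hunks: the two per-CPU alignment helpers collapse into one, keyed off a single SH_ETH_RX_ALIGN constant (32 on SH4/SHMOBILE, 2 elsewhere), and the reserve is computed from the actual misalignment of skb->data rather than assumed. Consolidated view:

    static void sh_eth_set_receive_align(struct sk_buff *skb)
    {
            /* bytes needed to reach the next SH_ETH_RX_ALIGN boundary */
            uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

            if (reserve)
                    skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
    }

    /* The matching allocations grow by SH_ETH_RX_ALIGN - 1 bytes
     * (skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1) so the
     * shift above can never run past the buffer's end, and the DMA
     * mapping now covers rxdesc->buffer_length, the 16-byte-aligned
     * size the hardware actually uses. */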
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 002d4cdc319f..a77f05ce8325 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx) | |||
180 | EFX_MAX_CHANNELS, | 180 | EFX_MAX_CHANNELS, |
181 | resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / | 181 | resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / |
182 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); | 182 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); |
183 | BUG_ON(efx->max_channels == 0); | 183 | if (WARN_ON(efx->max_channels == 0)) |
184 | return -EIO; | ||
184 | 185 | ||
185 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | 186 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); |
186 | if (!nic_data) | 187 | if (!nic_data) |
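Editor's note: BUG_ON() panics the whole machine over what is, at probe time, just a mis-sized BAR; WARN_ON() keeps the diagnostic backtrace but lets the probe fail cleanly:

    /* Recoverable sanity check in a probe path: log a warning and
     * fail the probe instead of halting the kernel. */
    if (WARN_ON(efx->max_channels == 0))
            return -EIO;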
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index ee84a90e371c..aaf2987512b5 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c | |||
@@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |||
343 | unsigned short dma_flags; | 343 | unsigned short dma_flags; |
344 | int i = 0; | 344 | int i = 0; |
345 | 345 | ||
346 | EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); | ||
347 | |||
348 | if (skb_shinfo(skb)->gso_size) | 346 | if (skb_shinfo(skb)->gso_size) |
349 | return efx_enqueue_skb_tso(tx_queue, skb); | 347 | return efx_enqueue_skb_tso(tx_queue, skb); |
350 | 348 | ||
@@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | |||
1258 | /* Find the packet protocol and sanity-check it */ | 1256 | /* Find the packet protocol and sanity-check it */ |
1259 | state.protocol = efx_tso_check_protocol(skb); | 1257 | state.protocol = efx_tso_check_protocol(skb); |
1260 | 1258 | ||
1261 | EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); | ||
1262 | |||
1263 | rc = tso_start(&state, efx, skb); | 1259 | rc = tso_start(&state, efx, skb); |
1264 | if (rc) | 1260 | if (rc) |
1265 | goto mem_err; | 1261 | goto mem_err; |
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 5e94d00b96b3..6cc3cf6f17c8 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -81,6 +81,7 @@ static const char version[] = | |||
81 | #include <linux/workqueue.h> | 81 | #include <linux/workqueue.h> |
82 | #include <linux/of.h> | 82 | #include <linux/of.h> |
83 | #include <linux/of_device.h> | 83 | #include <linux/of_device.h> |
84 | #include <linux/of_gpio.h> | ||
84 | 85 | ||
85 | #include <linux/netdevice.h> | 86 | #include <linux/netdevice.h> |
86 | #include <linux/etherdevice.h> | 87 | #include <linux/etherdevice.h> |
@@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = { | |||
2188 | {}, | 2189 | {}, |
2189 | }; | 2190 | }; |
2190 | MODULE_DEVICE_TABLE(of, smc91x_match); | 2191 | MODULE_DEVICE_TABLE(of, smc91x_match); |
2192 | |||
2193 | /** | ||
2194 | * try_toggle_control_gpio - configure a gpio if it exists | ||
2195 | */ | ||
2196 | static int try_toggle_control_gpio(struct device *dev, | ||
2197 | struct gpio_desc **desc, | ||
2198 | const char *name, int index, | ||
2199 | int value, unsigned int nsdelay) | ||
2200 | { | ||
2201 | struct gpio_desc *gpio = *desc; | ||
2202 | int res; | ||
2203 | |||
2204 | gpio = devm_gpiod_get_index(dev, name, index); | ||
2205 | if (IS_ERR(gpio)) { | ||
2206 | if (PTR_ERR(gpio) == -ENOENT) { | ||
2207 | *desc = NULL; | ||
2208 | return 0; | ||
2209 | } | ||
2210 | |||
2211 | return PTR_ERR(gpio); | ||
2212 | } | ||
2213 | res = gpiod_direction_output(gpio, !value); | ||
2214 | if (res) { | ||
2215 | dev_err(dev, "unable to toggle gpio %s: %i\n", name, res); | ||
2216 | devm_gpiod_put(dev, gpio); | ||
2217 | gpio = NULL; | ||
2218 | return res; | ||
2219 | } | ||
2220 | if (nsdelay) | ||
2221 | usleep_range(nsdelay, 2 * nsdelay); | ||
2222 | gpiod_set_value_cansleep(gpio, value); | ||
2223 | *desc = gpio; | ||
2224 | |||
2225 | return 0; | ||
2226 | } | ||
2191 | #endif | 2227 | #endif |
2192 | 2228 | ||
2193 | /* | 2229 | /* |
@@ -2207,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2207 | const struct of_device_id *match = NULL; | 2243 | const struct of_device_id *match = NULL; |
2208 | struct smc_local *lp; | 2244 | struct smc_local *lp; |
2209 | struct net_device *ndev; | 2245 | struct net_device *ndev; |
2210 | struct resource *res, *ires; | 2246 | struct resource *res; |
2211 | unsigned int __iomem *addr; | 2247 | unsigned int __iomem *addr; |
2212 | unsigned long irq_flags = SMC_IRQ_FLAGS; | 2248 | unsigned long irq_flags = SMC_IRQ_FLAGS; |
2249 | unsigned long irq_resflags; | ||
2213 | int ret; | 2250 | int ret; |
2214 | 2251 | ||
2215 | ndev = alloc_etherdev(sizeof(struct smc_local)); | 2252 | ndev = alloc_etherdev(sizeof(struct smc_local)); |
@@ -2237,6 +2274,28 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2237 | struct device_node *np = pdev->dev.of_node; | 2274 | struct device_node *np = pdev->dev.of_node; |
2238 | u32 val; | 2275 | u32 val; |
2239 | 2276 | ||
2277 | /* Optional pwrdwn GPIO configured? */ | ||
2278 | ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, | ||
2279 | "power", 0, 0, 100); | ||
2280 | if (ret) | ||
2281 | return ret; | ||
2282 | |||
2283 | /* | ||
2284 | * Optional reset GPIO configured? Minimum 100 ns reset needed | ||
2285 | * according to LAN91C96 datasheet page 14. | ||
2286 | */ | ||
2287 | ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, | ||
2288 | "reset", 0, 0, 100); | ||
2289 | if (ret) | ||
2290 | return ret; | ||
2291 | |||
2292 | /* | ||
2293 | * Need to wait for optional EEPROM to load, max 750 us according | ||
2294 | * to LAN91C96 datasheet page 55. | ||
2295 | */ | ||
2296 | if (lp->reset_gpio) | ||
2297 | usleep_range(750, 1000); | ||
2298 | |||
2240 | /* Combination of IO widths supported, default to 16-bit */ | 2299 | /* Combination of IO widths supported, default to 16-bit */ |
2241 | if (!of_property_read_u32(np, "reg-io-width", &val)) { | 2300 | if (!of_property_read_u32(np, "reg-io-width", &val)) { |
2242 | if (val & 1) | 2301 | if (val & 1) |
@@ -2279,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2279 | goto out_free_netdev; | 2338 | goto out_free_netdev; |
2280 | } | 2339 | } |
2281 | 2340 | ||
2282 | ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 2341 | ndev->irq = platform_get_irq(pdev, 0); |
2283 | if (!ires) { | 2342 | if (ndev->irq <= 0) { |
2284 | ret = -ENODEV; | 2343 | ret = -ENODEV; |
2285 | goto out_release_io; | 2344 | goto out_release_io; |
2286 | } | 2345 | } |
2287 | 2346 | /* | |
2288 | ndev->irq = ires->start; | 2347 | * If this platform does not specify any special irqflags, or if |
2289 | 2348 | * the resource supplies a trigger, override the irqflags with | |
2290 | if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) | 2349 | * the trigger flags from the resource. |
2291 | irq_flags = ires->flags & IRQF_TRIGGER_MASK; | 2350 | */ |
2351 | irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); | ||
2352 | if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) | ||
2353 | irq_flags = irq_resflags & IRQF_TRIGGER_MASK; | ||
2292 | 2354 | ||
2293 | ret = smc_request_attrib(pdev, ndev); | 2355 | ret = smc_request_attrib(pdev, ndev); |
2294 | if (ret) | 2356 | if (ret) |
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 47dce918eb0f..2a38dacbbd27 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h | |||
@@ -298,6 +298,9 @@ struct smc_local { | |||
298 | struct sk_buff *pending_tx_skb; | 298 | struct sk_buff *pending_tx_skb; |
299 | struct tasklet_struct tx_task; | 299 | struct tasklet_struct tx_task; |
300 | 300 | ||
301 | struct gpio_desc *power_gpio; | ||
302 | struct gpio_desc *reset_gpio; | ||
303 | |||
301 | /* version/revision of the SMC91x chip */ | 304 | /* version/revision of the SMC91x chip */ |
302 | int version; | 305 | int version; |
303 | 306 | ||
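Editor's note: the new smc91x helper treats a missing GPIO as a non-event, since devm_gpiod_get_index() returning -ENOENT just means the device tree does not describe that pin. A usage sketch for the probe path (values as in the hunks above; the 750 us figure is the LAN91C96 datasheet's maximum EEPROM load time):

    /* Pulse the optional power and reset lines, then give an optional
     * EEPROM time to reload before touching the chip. */
    ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
                                  "power", 0, 0, 100);
    if (ret)
            return ret;

    ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
                                  "reset", 0, 0, 100);
    if (ret)
            return ret;

    if (lp->reset_gpio)
            usleep_range(750, 1000);    /* EEPROM load, max 750 us */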
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index affb29da353e..77ed74561e5f 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) | |||
1342 | spin_unlock(&pdata->mac_lock); | 1342 | spin_unlock(&pdata->mac_lock); |
1343 | } | 1343 | } |
1344 | 1344 | ||
1345 | static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) | ||
1346 | { | ||
1347 | int rc = 0; | ||
1348 | |||
1349 | if (!pdata->phy_dev) | ||
1350 | return rc; | ||
1351 | |||
1352 | /* If the internal PHY is in General Power-Down mode, everything | ||
1353 | * except the management interface is powered down and stays that way | ||
1354 | * as long as PHY register bit 0.11 is HIGH. | ||
1355 | * | ||
1356 | * In that case, clear bit 0.11 so the PHY powers up and its | ||
1357 | * registers become accessible. | ||
1358 | */ | ||
1359 | rc = phy_read(pdata->phy_dev, MII_BMCR); | ||
1360 | if (rc < 0) { | ||
1361 | SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); | ||
1362 | return rc; | ||
1363 | } | ||
1364 | |||
1365 | /* If the PHY general power-down bit is not set, there is no need to | ||
1366 | * disable General Power-Down mode. | ||
1367 | */ | ||
1368 | if (rc & BMCR_PDOWN) { | ||
1369 | rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); | ||
1370 | if (rc < 0) { | ||
1371 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); | ||
1372 | return rc; | ||
1373 | } | ||
1374 | |||
1375 | usleep_range(1000, 1500); | ||
1376 | } | ||
1377 | |||
1378 | return 0; | ||
1379 | } | ||
1380 | |||
1345 | static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) | 1381 | static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) |
1346 | { | 1382 | { |
1347 | int rc = 0; | 1383 | int rc = 0; |
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) | |||
1356 | return rc; | 1392 | return rc; |
1357 | } | 1393 | } |
1358 | 1394 | ||
1359 | /* | 1395 | /* Only disable if energy detect mode is already enabled */ |
1360 | * If energy is detected the PHY is already awake so is not necessary | 1396 | if (rc & MII_LAN83C185_EDPWRDOWN) { |
1361 | * to disable the energy detect power-down mode. | ||
1362 | */ | ||
1363 | if ((rc & MII_LAN83C185_EDPWRDOWN) && | ||
1364 | !(rc & MII_LAN83C185_ENERGYON)) { | ||
1365 | /* Disable energy detect mode for this SMSC Transceivers */ | 1397 | /* Disable energy detect mode for this SMSC Transceivers */ |
1366 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, | 1398 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, |
1367 | rc & (~MII_LAN83C185_EDPWRDOWN)); | 1399 | rc & (~MII_LAN83C185_EDPWRDOWN)); |
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) | |||
1370 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); | 1402 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); |
1371 | return rc; | 1403 | return rc; |
1372 | } | 1404 | } |
1373 | 1405 | /* Allow PHY to wakeup */ | |
1374 | mdelay(1); | 1406 | mdelay(2); |
1375 | } | 1407 | } |
1376 | 1408 | ||
1377 | return 0; | 1409 | return 0; |
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) | |||
1393 | 1425 | ||
1394 | /* Only enable if energy detect mode is already disabled */ | 1426 | /* Only enable if energy detect mode is already disabled */ |
1395 | if (!(rc & MII_LAN83C185_EDPWRDOWN)) { | 1427 | if (!(rc & MII_LAN83C185_EDPWRDOWN)) { |
1396 | mdelay(100); | ||
1397 | /* Enable energy detect mode for this SMSC Transceivers */ | 1428 | /* Enable energy detect mode for this SMSC Transceivers */ |
1398 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, | 1429 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, |
1399 | rc | MII_LAN83C185_EDPWRDOWN); | 1430 | rc | MII_LAN83C185_EDPWRDOWN); |
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) | |||
1402 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); | 1433 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); |
1403 | return rc; | 1434 | return rc; |
1404 | } | 1435 | } |
1405 | |||
1406 | mdelay(1); | ||
1407 | } | 1436 | } |
1408 | return 0; | 1437 | return 0; |
1409 | } | 1438 | } |
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) | |||
1415 | int ret; | 1444 | int ret; |
1416 | 1445 | ||
1417 | /* | 1446 | /* |
1447 | * Make sure to power-up the PHY chip before doing a reset, otherwise | ||
1448 | * the reset fails. | ||
1449 | */ | ||
1450 | ret = smsc911x_phy_general_power_up(pdata); | ||
1451 | if (ret) { | ||
1452 | SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip"); | ||
1453 | return ret; | ||
1454 | } | ||
1455 | |||
1456 | /* | ||
1418 | * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that | 1457 | * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that |
1419 | * are initialized in an Energy Detect Power-Down mode that prevents | 1458 |
1420 | * the MAC chip from being software reset. So we have to wake up the PHY | 1459 |
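Editor's note: the smsc911x soft reset fails when the internal PHY sits in General Power-Down, so the new helper clears BMCR_PDOWN through the always-alive management interface before resetting the MAC. The core of the wake-up step, using the driver's names:

    /* Only the PHY's management interface is guaranteed alive in
     * General Power-Down, so plain MDIO access is used to clear
     * BMCR_PDOWN (register bit 0.11) before the MAC soft reset. */
    int rc = phy_read(pdata->phy_dev, MII_BMCR);
    if (rc < 0)
            return rc;

    if (rc & BMCR_PDOWN) {
            rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
            if (rc < 0)
                    return rc;
            usleep_range(1000, 1500);   /* let the PHY power up */
    }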
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6f77a46c7e2c..18c46bb0f3bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) | |||
276 | bool stmmac_eee_init(struct stmmac_priv *priv) | 276 | bool stmmac_eee_init(struct stmmac_priv *priv) |
277 | { | 277 | { |
278 | char *phy_bus_name = priv->plat->phy_bus_name; | 278 | char *phy_bus_name = priv->plat->phy_bus_name; |
279 | unsigned long flags; | ||
279 | bool ret = false; | 280 | bool ret = false; |
280 | 281 | ||
281 | /* Using PCS we cannot deal with the PHY registers at this stage | 282 |
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
300 | * changed). | 301 | * changed). |
301 | * In that case the driver disables its own timers. | 302 |
302 | */ | 303 | */ |
304 | spin_lock_irqsave(&priv->lock, flags); | ||
303 | if (priv->eee_active) { | 305 | if (priv->eee_active) { |
304 | pr_debug("stmmac: disable EEE\n"); | 306 | pr_debug("stmmac: disable EEE\n"); |
305 | del_timer_sync(&priv->eee_ctrl_timer); | 307 | del_timer_sync(&priv->eee_ctrl_timer); |
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
307 | tx_lpi_timer); | 309 | tx_lpi_timer); |
308 | } | 310 | } |
309 | priv->eee_active = 0; | 311 | priv->eee_active = 0; |
312 | spin_unlock_irqrestore(&priv->lock, flags); | ||
310 | goto out; | 313 | goto out; |
311 | } | 314 | } |
312 | /* Activate the EEE and start timers */ | 315 | /* Activate the EEE and start timers */ |
316 | spin_lock_irqsave(&priv->lock, flags); | ||
313 | if (!priv->eee_active) { | 317 | if (!priv->eee_active) { |
314 | priv->eee_active = 1; | 318 | priv->eee_active = 1; |
315 | init_timer(&priv->eee_ctrl_timer); | 319 | init_timer(&priv->eee_ctrl_timer); |
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
325 | /* Set HW EEE according to the speed */ | 329 | /* Set HW EEE according to the speed */ |
326 | priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); | 330 | priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); |
327 | 331 | ||
328 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); | ||
329 | |||
330 | ret = true; | 332 | ret = true; |
333 | spin_unlock_irqrestore(&priv->lock, flags); | ||
334 | |||
335 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); | ||
331 | } | 336 | } |
332 | out: | 337 | out: |
333 | return ret; | 338 | return ret; |
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev) | |||
760 | if (new_state && netif_msg_link(priv)) | 765 | if (new_state && netif_msg_link(priv)) |
761 | phy_print_status(phydev); | 766 | phy_print_status(phydev); |
762 | 767 | ||
768 | spin_unlock_irqrestore(&priv->lock, flags); | ||
769 | |||
763 | /* At this stage, it could be needed to setup the EEE or adjust some | 770 | /* At this stage, it could be needed to setup the EEE or adjust some |
764 | * MAC related HW registers. | 771 | * MAC related HW registers. |
765 | */ | 772 | */ |
766 | priv->eee_enabled = stmmac_eee_init(priv); | 773 | priv->eee_enabled = stmmac_eee_init(priv); |
767 | |||
768 | spin_unlock_irqrestore(&priv->lock, flags); | ||
769 | } | 774 | } |
770 | 775 | ||
771 | /** | 776 | /** |
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) | |||
959 | } | 964 | } |
960 | 965 | ||
961 | static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | 966 | static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, |
962 | int i) | 967 | int i, gfp_t flags) |
963 | { | 968 | { |
964 | struct sk_buff *skb; | 969 | struct sk_buff *skb; |
965 | 970 | ||
966 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, | 971 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, |
967 | GFP_KERNEL); | 972 | flags); |
968 | if (!skb) { | 973 | if (!skb) { |
969 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); | 974 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); |
970 | return -ENOMEM; | 975 | return -ENOMEM; |
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) | |||
1006 | * and allocates the socket buffers. It supports the chained and ring | 1011 |
1007 | * modes. | 1012 | * modes. |
1008 | */ | 1013 | */ |
1009 | static int init_dma_desc_rings(struct net_device *dev) | 1014 | static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) |
1010 | { | 1015 | { |
1011 | int i; | 1016 | int i; |
1012 | struct stmmac_priv *priv = netdev_priv(dev); | 1017 | struct stmmac_priv *priv = netdev_priv(dev); |
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
1041 | else | 1046 | else |
1042 | p = priv->dma_rx + i; | 1047 | p = priv->dma_rx + i; |
1043 | 1048 | ||
1044 | ret = stmmac_init_rx_buffers(priv, p, i); | 1049 | ret = stmmac_init_rx_buffers(priv, p, i, flags); |
1045 | if (ret) | 1050 | if (ret) |
1046 | goto err_init_rx_buffers; | 1051 | goto err_init_rx_buffers; |
1047 | 1052 | ||
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev) | |||
1647 | struct stmmac_priv *priv = netdev_priv(dev); | 1652 | struct stmmac_priv *priv = netdev_priv(dev); |
1648 | int ret; | 1653 | int ret; |
1649 | 1654 | ||
1650 | ret = init_dma_desc_rings(dev); | ||
1651 | if (ret < 0) { | ||
1652 | pr_err("%s: DMA descriptors initialization failed\n", __func__); | ||
1653 | return ret; | ||
1654 | } | ||
1655 | /* DMA initialization and SW reset */ | 1655 | /* DMA initialization and SW reset */ |
1656 | ret = stmmac_init_dma_engine(priv); | 1656 | ret = stmmac_init_dma_engine(priv); |
1657 | if (ret < 0) { | 1657 | if (ret < 0) { |
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev) | |||
1705 | } | 1705 | } |
1706 | priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; | 1706 | priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; |
1707 | 1707 | ||
1708 | priv->eee_enabled = stmmac_eee_init(priv); | ||
1709 | |||
1710 | stmmac_init_tx_coalesce(priv); | ||
1711 | |||
1712 | if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { | 1708 | if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { |
1713 | priv->rx_riwt = MAX_DMA_RIWT; | 1709 | priv->rx_riwt = MAX_DMA_RIWT; |
1714 | priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); | 1710 | priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); |
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev) | |||
1761 | goto dma_desc_error; | 1757 | goto dma_desc_error; |
1762 | } | 1758 | } |
1763 | 1759 | ||
1760 | ret = init_dma_desc_rings(dev, GFP_KERNEL); | ||
1761 | if (ret < 0) { | ||
1762 | pr_err("%s: DMA descriptors initialization failed\n", __func__); | ||
1763 | goto init_error; | ||
1764 | } | ||
1765 | |||
1764 | ret = stmmac_hw_setup(dev); | 1766 | ret = stmmac_hw_setup(dev); |
1765 | if (ret < 0) { | 1767 | if (ret < 0) { |
1766 | pr_err("%s: Hw setup failed\n", __func__); | 1768 | pr_err("%s: Hw setup failed\n", __func__); |
1767 | goto init_error; | 1769 | goto init_error; |
1768 | } | 1770 | } |
1769 | 1771 | ||
1772 | stmmac_init_tx_coalesce(priv); | ||
1773 | |||
1770 | if (priv->phydev) | 1774 | if (priv->phydev) |
1771 | phy_start(priv->phydev); | 1775 | phy_start(priv->phydev); |
1772 | 1776 | ||
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1894 | unsigned int nopaged_len = skb_headlen(skb); | 1898 | unsigned int nopaged_len = skb_headlen(skb); |
1895 | unsigned int enh_desc = priv->plat->enh_desc; | 1899 | unsigned int enh_desc = priv->plat->enh_desc; |
1896 | 1900 | ||
1901 | spin_lock(&priv->tx_lock); | ||
1902 | |||
1897 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { | 1903 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { |
1904 | spin_unlock(&priv->tx_lock); | ||
1898 | if (!netif_queue_stopped(dev)) { | 1905 | if (!netif_queue_stopped(dev)) { |
1899 | netif_stop_queue(dev); | 1906 | netif_stop_queue(dev); |
1900 | /* This is a hard error, log it. */ | 1907 | /* This is a hard error, log it. */ |
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1903 | return NETDEV_TX_BUSY; | 1910 | return NETDEV_TX_BUSY; |
1904 | } | 1911 | } |
1905 | 1912 | ||
1906 | spin_lock(&priv->tx_lock); | ||
1907 | |||
1908 | if (priv->tx_path_in_lpi_mode) | 1913 | if (priv->tx_path_in_lpi_mode) |
1909 | stmmac_disable_eee_mode(priv); | 1914 | stmmac_disable_eee_mode(priv); |
1910 | 1915 | ||
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2025 | return NETDEV_TX_OK; | 2030 | return NETDEV_TX_OK; |
2026 | 2031 | ||
2027 | dma_map_err: | 2032 | dma_map_err: |
2033 | spin_unlock(&priv->tx_lock); | ||
2028 | dev_err(priv->device, "Tx dma map failed\n"); | 2034 | dev_err(priv->device, "Tx dma map failed\n"); |
2029 | dev_kfree_skb(skb); | 2035 | dev_kfree_skb(skb); |
2030 | priv->dev->stats.tx_dropped++; | 2036 | priv->dev->stats.tx_dropped++; |
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev) | |||
2281 | { | 2287 | { |
2282 | struct stmmac_priv *priv = netdev_priv(dev); | 2288 | struct stmmac_priv *priv = netdev_priv(dev); |
2283 | 2289 | ||
2284 | spin_lock(&priv->lock); | ||
2285 | priv->hw->mac->set_filter(priv->hw, dev); | 2290 | priv->hw->mac->set_filter(priv->hw, dev); |
2286 | spin_unlock(&priv->lock); | ||
2287 | } | 2291 | } |
2288 | 2292 | ||
2289 | /** | 2293 | /** |
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev) | |||
2950 | stmmac_set_mac(priv->ioaddr, false); | 2954 | stmmac_set_mac(priv->ioaddr, false); |
2951 | pinctrl_pm_select_sleep_state(priv->device); | 2955 | pinctrl_pm_select_sleep_state(priv->device); |
2952 | /* Disable clock in case of PWM is off */ | 2956 | /* Disable clock in case of PWM is off */ |
2953 | clk_disable_unprepare(priv->stmmac_clk); | 2957 | clk_disable(priv->stmmac_clk); |
2954 | } | 2958 | } |
2955 | spin_unlock_irqrestore(&priv->lock, flags); | 2959 | spin_unlock_irqrestore(&priv->lock, flags); |
2956 | 2960 | ||
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev) | |||
2982 | } else { | 2986 | } else { |
2983 | pinctrl_pm_select_default_state(priv->device); | 2987 | pinctrl_pm_select_default_state(priv->device); |
2984 | /* enable the clk previously disabled */ | 2988 |
2985 | clk_prepare_enable(priv->stmmac_clk); | 2989 | clk_enable(priv->stmmac_clk); |
2986 | /* reset the phy so that it's ready */ | 2990 | /* reset the phy so that it's ready */ |
2987 | if (priv->mii) | 2991 | if (priv->mii) |
2988 | stmmac_mdio_reset(priv->mii); | 2992 | stmmac_mdio_reset(priv->mii); |
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev) | |||
2990 | 2994 | ||
2991 | netif_device_attach(ndev); | 2995 | netif_device_attach(ndev); |
2992 | 2996 | ||
2997 | init_dma_desc_rings(ndev, GFP_ATOMIC); | ||
2993 | stmmac_hw_setup(ndev); | 2998 | stmmac_hw_setup(ndev); |
2999 | stmmac_init_tx_coalesce(priv); | ||
2994 | 3000 | ||
2995 | napi_enable(&priv->napi); | 3001 | napi_enable(&priv->napi); |
2996 | 3002 | ||
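Editor's note: several independent stmmac fixes land here. stmmac_eee_init() now takes priv->lock itself (so adjust_link drops the lock before calling it), descriptor-ring setup moves out of stmmac_hw_setup() so resume can redo it with GFP_ATOMIC, tx_lock is taken before the ring-availability test, and suspend/resume switch to clk_disable()/clk_enable() so the clock's prepare count survives. The xmit locking fix as a sketch:

    /* The free-descriptor check must run under tx_lock, otherwise two
     * concurrent xmit paths can both pass the test and overrun the
     * ring; every exit (busy, error, ok) releases the lock. */
    spin_lock(&priv->tx_lock);

    if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
            spin_unlock(&priv->tx_lock);
            if (!netif_queue_stopped(dev))
                    netif_stop_queue(dev);
            return NETDEV_TX_BUSY;
    }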
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 655a23bbc451..e17a970eaf2b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
@@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg; | |||
33 | static void stmmac_default_data(void) | 33 | static void stmmac_default_data(void) |
34 | { | 34 | { |
35 | memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); | 35 | memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); |
36 | |||
36 | plat_dat.bus_id = 1; | 37 | plat_dat.bus_id = 1; |
37 | plat_dat.phy_addr = 0; | 38 | plat_dat.phy_addr = 0; |
38 | plat_dat.interface = PHY_INTERFACE_MODE_GMII; | 39 | plat_dat.interface = PHY_INTERFACE_MODE_GMII; |
@@ -47,6 +48,12 @@ static void stmmac_default_data(void) | |||
47 | dma_cfg.pbl = 32; | 48 | dma_cfg.pbl = 32; |
48 | dma_cfg.burst_len = DMA_AXI_BLEN_256; | 49 | dma_cfg.burst_len = DMA_AXI_BLEN_256; |
49 | plat_dat.dma_cfg = &dma_cfg; | 50 | plat_dat.dma_cfg = &dma_cfg; |
51 | |||
52 | /* Set default value for multicast hash bins */ | ||
53 | plat_dat.multicast_filter_bins = HASH_TABLE_SIZE; | ||
54 | |||
55 | /* Set default value for unicast filter entries */ | ||
56 | plat_dat.unicast_filter_entries = 1; | ||
50 | } | 57 | } |
51 | 58 | ||
52 | /** | 59 | /** |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index db56fa7ce8f9..58a1a0a423d4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, | |||
177 | */ | 177 | */ |
178 | plat->maxmtu = JUMBO_LEN; | 178 | plat->maxmtu = JUMBO_LEN; |
179 | 179 | ||
180 | /* Set default value for multicast hash bins */ | ||
181 | plat->multicast_filter_bins = HASH_TABLE_SIZE; | ||
182 | |||
183 | /* Set default value for unicast filter entries */ | ||
184 | plat->unicast_filter_entries = 1; | ||
185 | |||
186 | /* | 180 | /* |
187 | * Currently only the properties needed on SPEAr600 | 181 | * Currently only the properties needed on SPEAr600 |
188 | * are provided. All other properties should be added | 182 | * are provided. All other properties should be added |
@@ -270,16 +264,23 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
270 | return PTR_ERR(addr); | 264 | return PTR_ERR(addr); |
271 | 265 | ||
272 | plat_dat = dev_get_platdata(&pdev->dev); | 266 | plat_dat = dev_get_platdata(&pdev->dev); |
273 | if (pdev->dev.of_node) { | 267 | |
274 | if (!plat_dat) | 268 | if (!plat_dat) |
275 | plat_dat = devm_kzalloc(&pdev->dev, | 269 | plat_dat = devm_kzalloc(&pdev->dev, |
276 | sizeof(struct plat_stmmacenet_data), | 270 | sizeof(struct plat_stmmacenet_data), |
277 | GFP_KERNEL); | 271 | GFP_KERNEL); |
278 | if (!plat_dat) { | 272 | if (!plat_dat) { |
279 | pr_err("%s: ERROR: no memory", __func__); | 273 | pr_err("%s: ERROR: no memory", __func__); |
280 | return -ENOMEM; | 274 | return -ENOMEM; |
281 | } | 275 | } |
276 | |||
277 | /* Set default value for multicast hash bins */ | ||
278 | plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; | ||
282 | 279 | ||
280 | /* Set default value for unicast filter entries */ | ||
281 | plat_dat->unicast_filter_entries = 1; | ||
282 | |||
283 | if (pdev->dev.of_node) { | ||
283 | ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); | 284 | ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); |
284 | if (ret) { | 285 | if (ret) { |
285 | pr_err("%s: main dt probe failed", __func__); | 286 | pr_err("%s: main dt probe failed", __func__); |
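
The stmmac_pci and stmmac_platform hunks are two halves of one fix. The filter defaults used to be set only inside stmmac_probe_config_dt(), so PCI probes and non-DT platform probes were left with multicast_filter_bins == 0 and no unicast filter entries. After the change, every probe path starts from the same defaults. A sketch of the resulting stmmac_pltfr_probe() flow, condensed from the hunk with error handling abbreviated:

	plat_dat = dev_get_platdata(&pdev->dev);
	if (!plat_dat)
		plat_dat = devm_kzalloc(&pdev->dev,
					sizeof(struct plat_stmmacenet_data),
					GFP_KERNEL);
	if (!plat_dat)
		return -ENOMEM;

	/* defaults now apply to DT and non-DT probes alike */
	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
	plat_dat->unicast_filter_entries = 1;

	if (pdev->dev.of_node)
		ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);

Note the defaults are written unconditionally, so they also overwrite anything a board file put in externally supplied platform data; presumably acceptable because nothing appears to set those fields itself.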
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 72c8525d5457..9c014803b03b 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c | |||
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp) | |||
1262 | HMD(("init rxring, ")); | 1262 | HMD(("init rxring, ")); |
1263 | for (i = 0; i < RX_RING_SIZE; i++) { | 1263 | for (i = 0; i < RX_RING_SIZE; i++) { |
1264 | struct sk_buff *skb; | 1264 | struct sk_buff *skb; |
1265 | u32 mapping; | ||
1265 | 1266 | ||
1266 | skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); | 1267 | skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); |
1267 | if (!skb) { | 1268 | if (!skb) { |
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp) | |||
1272 | 1273 | ||
1273 | /* Because we reserve afterwards. */ | 1274 | /* Because we reserve afterwards. */ |
1274 | skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); | 1275 | skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); |
1276 | mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, | ||
1277 | DMA_FROM_DEVICE); | ||
1278 | if (dma_mapping_error(hp->dma_dev, mapping)) { | ||
1279 | dev_kfree_skb_any(skb); | ||
1280 | hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); | ||
1281 | continue; | ||
1282 | } | ||
1275 | hme_write_rxd(hp, &hb->happy_meal_rxd[i], | 1283 | hme_write_rxd(hp, &hb->happy_meal_rxd[i], |
1276 | (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), | 1284 | (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), |
1277 | dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, | 1285 | mapping); |
1278 | DMA_FROM_DEVICE)); | ||
1279 | skb_reserve(skb, RX_OFFSET); | 1286 | skb_reserve(skb, RX_OFFSET); |
1280 | } | 1287 | } |
1281 | 1288 | ||
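
Every sunhme hunk in this commit applies the same idiom: capture the handle returned by dma_map_single() or skb_frag_dma_map() and validate it with dma_mapping_error() before the hardware ever sees it. A failed mapping is not guaranteed to return zero or any other sentinel, so dma_mapping_error() is the only portable check. The generic shape, with dev/buf/len as placeholders rather than sunhme names:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		/* recover without touching the hardware; in sunhme's RX
		 * ring this means freeing the skb and writing the
		 * descriptor as (0, 0), leaving RXFLAG_OWN clear so the
		 * NIC never DMAs into an unmapped address */
	}

In the ring initializer above a failed slot is simply skipped; ring setup continues with the remaining entries instead of failing outright.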
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) | |||
2020 | skb = hp->rx_skbs[elem]; | 2027 | skb = hp->rx_skbs[elem]; |
2021 | if (len > RX_COPY_THRESHOLD) { | 2028 | if (len > RX_COPY_THRESHOLD) { |
2022 | struct sk_buff *new_skb; | 2029 | struct sk_buff *new_skb; |
2030 | u32 mapping; | ||
2023 | 2031 | ||
2024 | /* Now refill the entry, if we can. */ | 2032 | /* Now refill the entry, if we can. */ |
2025 | new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); | 2033 | new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); |
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) | |||
2027 | drops++; | 2035 | drops++; |
2028 | goto drop_it; | 2036 | goto drop_it; |
2029 | } | 2037 | } |
2038 | skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); | ||
2039 | mapping = dma_map_single(hp->dma_dev, new_skb->data, | ||
2040 | RX_BUF_ALLOC_SIZE, | ||
2041 | DMA_FROM_DEVICE); | ||
2042 | if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { | ||
2043 | dev_kfree_skb_any(new_skb); | ||
2044 | drops++; | ||
2045 | goto drop_it; | ||
2046 | } | ||
2047 | |||
2030 | dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); | 2048 | dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); |
2031 | hp->rx_skbs[elem] = new_skb; | 2049 | hp->rx_skbs[elem] = new_skb; |
2032 | skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); | ||
2033 | hme_write_rxd(hp, this, | 2050 | hme_write_rxd(hp, this, |
2034 | (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), | 2051 | (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), |
2035 | dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, | 2052 | mapping); |
2036 | DMA_FROM_DEVICE)); | ||
2037 | skb_reserve(new_skb, RX_OFFSET); | 2053 | skb_reserve(new_skb, RX_OFFSET); |
2038 | 2054 | ||
2039 | /* Trim the original skb for the netif. */ | 2055 | /* Trim the original skb for the netif. */ |
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev) | |||
2248 | netif_wake_queue(dev); | 2264 | netif_wake_queue(dev); |
2249 | } | 2265 | } |
2250 | 2266 | ||
2267 | static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping, | ||
2268 | u32 first_len, u32 first_entry, u32 entry) | ||
2269 | { | ||
2270 | struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; | ||
2271 | |||
2272 | dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE); | ||
2273 | |||
2274 | first_entry = NEXT_TX(first_entry); | ||
2275 | while (first_entry != entry) { | ||
2276 | struct happy_meal_txd *this = &txbase[first_entry]; | ||
2277 | u32 addr, len; | ||
2278 | |||
2279 | addr = hme_read_desc32(hp, &this->tx_addr); | ||
2280 | len = hme_read_desc32(hp, &this->tx_flags); | ||
2281 | len &= TXFLAG_SIZE; | ||
2282 | dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE); | ||
2283 | } | ||
2284 | } | ||
2285 | |||
2251 | static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, | 2286 | static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, |
2252 | struct net_device *dev) | 2287 | struct net_device *dev) |
2253 | { | 2288 | { |
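
One caution about unmap_partial_tx_skb() as committed: the cleanup loop reads and unmaps the descriptor at first_entry but never advances it, so if two or more fragments were mapped before a later fragment failed, the while loop cannot terminate. Mapping failures are rare, which is presumably why this path goes unexercised; a corrected sketch advances the index on each pass:

	first_entry = NEXT_TX(first_entry);
	while (first_entry != entry) {
		struct happy_meal_txd *this = &txbase[first_entry];
		u32 addr, len;

		addr = hme_read_desc32(hp, &this->tx_addr);
		len = hme_read_desc32(hp, &this->tx_flags) & TXFLAG_SIZE;
		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
		first_entry = NEXT_TX(first_entry);	/* the advance the hunk lacks */
	}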
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, | |||
2284 | 2319 | ||
2285 | len = skb->len; | 2320 | len = skb->len; |
2286 | mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); | 2321 | mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); |
2322 | if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) | ||
2323 | goto out_dma_error; | ||
2287 | tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); | 2324 | tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); |
2288 | hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], | 2325 | hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], |
2289 | (tx_flags | (len & TXFLAG_SIZE)), | 2326 | (tx_flags | (len & TXFLAG_SIZE)), |
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, | |||
2299 | first_len = skb_headlen(skb); | 2336 | first_len = skb_headlen(skb); |
2300 | first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, | 2337 | first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, |
2301 | DMA_TO_DEVICE); | 2338 | DMA_TO_DEVICE); |
2339 | if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping))) | ||
2340 | goto out_dma_error; | ||
2302 | entry = NEXT_TX(entry); | 2341 | entry = NEXT_TX(entry); |
2303 | 2342 | ||
2304 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 2343 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, | |||
2308 | len = skb_frag_size(this_frag); | 2347 | len = skb_frag_size(this_frag); |
2309 | mapping = skb_frag_dma_map(hp->dma_dev, this_frag, | 2348 | mapping = skb_frag_dma_map(hp->dma_dev, this_frag, |
2310 | 0, len, DMA_TO_DEVICE); | 2349 | 0, len, DMA_TO_DEVICE); |
2350 | if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { | ||
2351 | unmap_partial_tx_skb(hp, first_mapping, first_len, | ||
2352 | first_entry, entry); | ||
2353 | goto out_dma_error; | ||
2354 | } | ||
2311 | this_txflags = tx_flags; | 2355 | this_txflags = tx_flags; |
2312 | if (frag == skb_shinfo(skb)->nr_frags - 1) | 2356 | if (frag == skb_shinfo(skb)->nr_frags - 1) |
2313 | this_txflags |= TXFLAG_EOP; | 2357 | this_txflags |= TXFLAG_EOP; |
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, | |||
2333 | 2377 | ||
2334 | tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); | 2378 | tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); |
2335 | return NETDEV_TX_OK; | 2379 | return NETDEV_TX_OK; |
2380 | |||
2381 | out_dma_error: | ||
2382 | hp->tx_skbs[hp->tx_new] = NULL; | ||
2383 | spin_unlock_irq(&hp->happy_lock); | ||
2384 | |||
2385 | dev_kfree_skb_any(skb); | ||
2386 | dev->stats.tx_dropped++; | ||
2387 | return NETDEV_TX_OK; | ||
2336 | } | 2388 | } |
2337 | 2389 | ||
2338 | static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) | 2390 | static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) |
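
The new out_dma_error path in happy_meal_start_xmit() drops the packet rather than requeueing it. Returning NETDEV_TX_OK tells the core the skb was consumed; NETDEV_TX_BUSY would make the queueing layer retry an skb whose mapping would most likely fail again. The tx_skbs slot is cleared before unlocking because it was populated earlier in the function (hp->tx_new still indexes it), and the TX-completion path must not later try to free the dropped skb. Annotated:

	out_dma_error:
		hp->tx_skbs[hp->tx_new] = NULL; /* undo the earlier slot claim */
		spin_unlock_irq(&hp->happy_lock);
		dev_kfree_skb_any(skb);         /* _any: callable from any context */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;            /* consumed (dropped), not busy */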
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 952e1e4764b7..c560f9aeb55d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -129,9 +129,9 @@ do { \ | |||
129 | #define CPSW_VLAN_AWARE BIT(1) | 129 | #define CPSW_VLAN_AWARE BIT(1) |
130 | #define CPSW_ALE_VLAN_AWARE 1 | 130 | #define CPSW_ALE_VLAN_AWARE 1 |
131 | 131 | ||
132 | #define CPSW_FIFO_NORMAL_MODE (0 << 15) | 132 | #define CPSW_FIFO_NORMAL_MODE (0 << 16) |
133 | #define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) | 133 | #define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) |
134 | #define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) | 134 | #define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) |
135 | 135 | ||
136 | #define CPSW_INTPACEEN (0x3f << 16) | 136 | #define CPSW_INTPACEEN (0x3f << 16) |
137 | #define CPSW_INTPRESCALE_MASK (0x7FF << 0) | 137 | #define CPSW_INTPRESCALE_MASK (0x7FF << 0) |
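
The CPSW_FIFO_* change is a plain shift fix: the FIFO mode select field sits at bit 16 of the per-port TX_IN_CTL register (bits 17:16, if the hardware layout is as this fix implies), so the old << 15 definitions programmed the wrong bits and dual-MAC FIFO mode was never actually selected. An illustrative write, with the register accessor left hypothetical since the call site is not part of this hunk:

	/* bits 17:16 = 01b selects dual-MAC FIFO mode (assumed layout) */
	writel(CPSW_FIFO_DUAL_MAC_MODE, tx_in_ctl_reg);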
@@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) | |||
591 | if (enable) { | 591 | if (enable) { |
592 | unsigned long timeout = jiffies + HZ; | 592 | unsigned long timeout = jiffies + HZ; |
593 | 593 | ||
594 | /* Disable Learn for all ports */ | 594 | /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */ |
595 | for (i = 0; i < priv->data.slaves; i++) { | 595 | for (i = 0; i <= priv->data.slaves; i++) { |
596 | cpsw_ale_control_set(ale, i, | 596 | cpsw_ale_control_set(ale, i, |
597 | ALE_PORT_NOLEARN, 1); | 597 | ALE_PORT_NOLEARN, 1); |
598 | cpsw_ale_control_set(ale, i, | 598 | cpsw_ale_control_set(ale, i, |
@@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) | |||
616 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); | 616 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); |
617 | dev_dbg(&ndev->dev, "promiscuity enabled\n"); | 617 | dev_dbg(&ndev->dev, "promiscuity enabled\n"); |
618 | } else { | 618 | } else { |
619 | /* Flood All Unicast Packets to Host port */ | 619 | /* Don't Flood All Unicast Packets to Host port */ |
620 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); | 620 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); |
621 | 621 | ||
622 | /* Enable Learn for all ports */ | 622 | /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */ |
623 | for (i = 0; i < priv->data.slaves; i++) { | 623 | for (i = 0; i <= priv->data.slaves; i++) { |
624 | cpsw_ale_control_set(ale, i, | 624 | cpsw_ale_control_set(ale, i, |
625 | ALE_PORT_NOLEARN, 0); | 625 | ALE_PORT_NOLEARN, 0); |
626 | cpsw_ale_control_set(ale, i, | 626 | cpsw_ale_control_set(ale, i, |
@@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) | |||
638 | if (ndev->flags & IFF_PROMISC) { | 638 | if (ndev->flags & IFF_PROMISC) { |
639 | /* Enable promiscuous mode */ | 639 | /* Enable promiscuous mode */ |
640 | cpsw_set_promiscious(ndev, true); | 640 | cpsw_set_promiscious(ndev, true); |
641 | cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI); | ||
641 | return; | 642 | return; |
642 | } else { | 643 | } else { |
643 | /* Disable promiscuous mode */ | 644 | /* Disable promiscuous mode */ |
644 | cpsw_set_promiscious(ndev, false); | 645 | cpsw_set_promiscious(ndev, false); |
645 | } | 646 | } |
646 | 647 | ||
648 | /* Restore allmulti on vlans if necessary */ | ||
649 | cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); | ||
650 | |||
647 | /* Clear all mcast from ALE */ | 651 | /* Clear all mcast from ALE */ |
648 | cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); | 652 | cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); |
649 | 653 | ||
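
The reworked cpsw_ndo_set_rx_mode() keeps the ALE allmulti state coherent across promiscuous transitions. Entering promiscuous mode forces unregistered-multicast flooding on and returns early; any other rx-mode change restores the setting from the interface flags before the multicast table is rebuilt. A sketch of the ordering:

	if (ndev->flags & IFF_PROMISC) {
		cpsw_set_promiscious(ndev, true);
		/* the constant is used as a truthy value here: promiscuous
		 * mode implies seeing all multicast as well */
		cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI);
		return;
	}
	cpsw_set_promiscious(ndev, false);
	cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);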
@@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) | |||
1149 | const int port = priv->host_port; | 1153 | const int port = priv->host_port; |
1150 | u32 reg; | 1154 | u32 reg; |
1151 | int i; | 1155 | int i; |
1156 | int unreg_mcast_mask; | ||
1152 | 1157 | ||
1153 | reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : | 1158 | reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : |
1154 | CPSW2_PORT_VLAN; | 1159 | CPSW2_PORT_VLAN; |
@@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) | |||
1158 | for (i = 0; i < priv->data.slaves; i++) | 1163 | for (i = 0; i < priv->data.slaves; i++) |
1159 | slave_write(priv->slaves + i, vlan, reg); | 1164 | slave_write(priv->slaves + i, vlan, reg); |
1160 | 1165 | ||
1166 | if (priv->ndev->flags & IFF_ALLMULTI) | ||
1167 | unreg_mcast_mask = ALE_ALL_PORTS; | ||
1168 | else | ||
1169 | unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; | ||
1170 | |||
1161 | cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, | 1171 | cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, |
1162 | ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, | 1172 | ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, |
1163 | (ALE_PORT_1 | ALE_PORT_2) << port); | 1173 | unreg_mcast_mask << port); |
1164 | } | 1174 | } |
1165 | 1175 | ||
1166 | static void cpsw_init_host_port(struct cpsw_priv *priv) | 1176 | static void cpsw_init_host_port(struct cpsw_priv *priv) |
@@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, | |||
1620 | unsigned short vid) | 1630 | unsigned short vid) |
1621 | { | 1631 | { |
1622 | int ret; | 1632 | int ret; |
1633 | int unreg_mcast_mask; | ||
1634 | |||
1635 | if (priv->ndev->flags & IFF_ALLMULTI) | ||
1636 | unreg_mcast_mask = ALE_ALL_PORTS; | ||
1637 | else | ||
1638 | unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; | ||
1623 | 1639 | ||
1624 | ret = cpsw_ale_add_vlan(priv->ale, vid, | 1640 | ret = cpsw_ale_add_vlan(priv->ale, vid, |
1625 | ALE_ALL_PORTS << priv->host_port, | 1641 | ALE_ALL_PORTS << priv->host_port, |
1626 | 0, ALE_ALL_PORTS << priv->host_port, | 1642 | 0, ALE_ALL_PORTS << priv->host_port, |
1627 | (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); | 1643 | unreg_mcast_mask << priv->host_port); |
1628 | if (ret != 0) | 1644 | if (ret != 0) |
1629 | return ret; | 1645 | return ret; |
1630 | 1646 | ||
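
Both VLAN hunks, in cpsw_add_default_vlan() and cpsw_add_vlan_ale_entry(), pick the unregistered-multicast member mask the same way: with IFF_ALLMULTI set, unknown multicast floods to every port including the host (ALE_ALL_PORTS covers port 0); otherwise only the two slave ports receive it, keeping the CPU out of the flood path. The shared selection as a sketch:

	int unreg_mcast_mask;

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;           /* host port included */
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; /* slaves only */

	cpsw_ale_add_vlan(priv->ale, vid, ALE_ALL_PORTS << priv->host_port,
			  0, ALE_ALL_PORTS << priv->host_port,
			  unreg_mcast_mask << priv->host_port);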
@@ -2006,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
2006 | parp = of_get_property(slave_node, "phy_id", &lenp); | 2022 | parp = of_get_property(slave_node, "phy_id", &lenp); |
2007 | if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { | 2023 | if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { |
2008 | dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); | 2024 | dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); |
2009 | return -EINVAL; | 2025 | goto no_phy_slave; |
2010 | } | 2026 | } |
2011 | mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); | 2027 | mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); |
2012 | phyid = be32_to_cpup(parp+1); | 2028 | phyid = be32_to_cpup(parp+1); |
@@ -2019,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
2019 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), | 2035 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), |
2020 | PHY_ID_FMT, mdio->name, phyid); | 2036 | PHY_ID_FMT, mdio->name, phyid); |
2021 | 2037 | ||
2038 | slave_data->phy_if = of_get_phy_mode(slave_node); | ||
2039 | if (slave_data->phy_if < 0) { | ||
2040 | dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", | ||
2041 | i); | ||
2042 | return slave_data->phy_if; | ||
2043 | } | ||
2044 | |||
2045 | no_phy_slave: | ||
2022 | mac_addr = of_get_mac_address(slave_node); | 2046 | mac_addr = of_get_mac_address(slave_node); |
2023 | if (mac_addr) { | 2047 | if (mac_addr) { |
2024 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); | 2048 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); |
@@ -2030,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
2030 | return ret; | 2054 | return ret; |
2031 | } | 2055 | } |
2032 | } | 2056 | } |
2033 | |||
2034 | slave_data->phy_if = of_get_phy_mode(slave_node); | ||
2035 | if (slave_data->phy_if < 0) { | ||
2036 | dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", | ||
2037 | i); | ||
2038 | return slave_data->phy_if; | ||
2039 | } | ||
2040 | |||
2041 | if (data->dual_emac) { | 2057 | if (data->dual_emac) { |
2042 | if (of_property_read_u32(slave_node, "dual_emac_res_vlan", | 2058 | if (of_property_read_u32(slave_node, "dual_emac_res_vlan", |
2043 | &prop)) { | 2059 | &prop)) { |
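
The cpsw_probe_dt() reshuffle softens a failure mode: a slave node without a usable phy_id property no longer aborts the whole probe with -EINVAL; the code jumps to the new no_phy_slave label and still reads the MAC address, so a PHY-less slave remains usable. The phy-mode lookup moves ahead of the label because it is only meaningful when a PHY exists. Skeleton of the new flow:

	parp = of_get_property(slave_node, "phy_id", &lenp);
	if (!parp || lenp != sizeof(void *) * 2)
		goto no_phy_slave;              /* non-fatal: slave without PHY */
	/* resolve the mdio node and build slave_data->phy_id here */
	slave_data->phy_if = of_get_phy_mode(slave_node);
	if (slave_data->phy_if < 0)
		return slave_data->phy_if;      /* PHY present but mode malformed */
no_phy_slave:
	mac_addr = of_get_mac_address(slave_node);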
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0579b2243bb6..097ebe7077ac 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c | |||
@@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) | |||
443 | return 0; | 443 | return 0; |
444 | } | 444 | } |
445 | 445 | ||
446 | void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) | ||
447 | { | ||
448 | u32 ale_entry[ALE_ENTRY_WORDS]; | ||
449 | int type, idx; | ||
450 | int unreg_mcast = 0; | ||
451 | |||
452 | /* Only bother doing the work if the setting is actually changing */ | ||
453 | if (ale->allmulti == allmulti) | ||
454 | return; | ||
455 | |||
456 | /* Remember the new setting to check against next time */ | ||
457 | ale->allmulti = allmulti; | ||
458 | |||
459 | for (idx = 0; idx < ale->params.ale_entries; idx++) { | ||
460 | cpsw_ale_read(ale, idx, ale_entry); | ||
461 | type = cpsw_ale_get_entry_type(ale_entry); | ||
462 | if (type != ALE_TYPE_VLAN) | ||
463 | continue; | ||
464 | |||
465 | unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); | ||
466 | if (allmulti) | ||
467 | unreg_mcast |= 1; | ||
468 | else | ||
469 | unreg_mcast &= ~1; | ||
470 | cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); | ||
471 | cpsw_ale_write(ale, idx, ale_entry); | ||
472 | } | ||
473 | } | ||
474 | |||
446 | struct ale_control_info { | 475 | struct ale_control_info { |
447 | const char *name; | 476 | const char *name; |
448 | int offset, port_offset; | 477 | int offset, port_offset; |
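
cpsw_ale_set_allmulti() works by rewriting the unregistered-multicast member mask of every VLAN entry in the ALE table; bit 0 of that mask is the host port, so setting or clearing it toggles whether unknown multicast reaches the CPU while leaving the slave-port bits alone. The ale->allmulti field added in cpsw_ale.h caches the last value so a no-op call skips the full table walk. The core of the walk, annotated:

	unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
	if (allmulti)
		unreg_mcast |= 1;       /* bit 0: flood to host port 0 */
	else
		unreg_mcast &= ~1;      /* stop flooding to the host */
	cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);

Since the argument is only compared with the cached state and tested for truth, callers may pass any non-zero value, which is why the promiscuous path above gets away with passing the IFF_ALLMULTI constant itself.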
@@ -756,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale) | |||
756 | { | 785 | { |
757 | if (!ale) | 786 | if (!ale) |
758 | return -EINVAL; | 787 | return -EINVAL; |
759 | cpsw_ale_stop(ale); | ||
760 | cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); | 788 | cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); |
761 | kfree(ale); | 789 | kfree(ale); |
762 | return 0; | 790 | return 0; |
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 31cf43cab42e..c0d4127aa549 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h | |||
@@ -27,6 +27,7 @@ struct cpsw_ale { | |||
27 | struct cpsw_ale_params params; | 27 | struct cpsw_ale_params params; |
28 | struct timer_list timer; | 28 | struct timer_list timer; |
29 | unsigned long ageout; | 29 | unsigned long ageout; |
30 | int allmulti; | ||
30 | }; | 31 | }; |
31 | 32 | ||
32 | enum cpsw_ale_control { | 33 | enum cpsw_ale_control { |
@@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, | |||
103 | int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, | 104 | int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, |
104 | int reg_mcast, int unreg_mcast); | 105 | int reg_mcast, int unreg_mcast); |
105 | int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); | 106 | int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); |
107 | void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti); | ||
106 | 108 | ||
107 | int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); | 109 | int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); |
108 | int cpsw_ale_control_set(struct cpsw_ale *ale, int port, | 110 | int cpsw_ale_control_set(struct cpsw_ale *ale, int port, |
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index ab92f67da035..4a4388b813ac 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c | |||
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, | |||
264 | 264 | ||
265 | switch (ptp_class & PTP_CLASS_PMASK) { | 265 | switch (ptp_class & PTP_CLASS_PMASK) { |
266 | case PTP_CLASS_IPV4: | 266 | case PTP_CLASS_IPV4: |
267 | offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; | 267 | offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; |
268 | break; | 268 | break; |
269 | case PTP_CLASS_IPV6: | 269 | case PTP_CLASS_IPV6: |
270 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; | 270 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; |
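
The cpts change fixes timestamp matching for VLAN-tagged PTP over IPv4. IPV4_HLEN(p), from ptp_classify.h, reads the IP header's IHL field at a fixed Ethernet-header offset from p, while by the time this switch runs, offset already includes VLAN_HLEN for tagged frames; reading IHL relative to data therefore pulls bytes out of the VLAN tag and yields a garbage UDP payload offset, so the sequence ID is compared at the wrong location. A sketch of the corrected arithmetic, assuming (as the fix implies) that cpts_match() applies the VLAN adjustment before this switch:

	unsigned int offset = 0;
	u8 *data = skb->data;

	if (ptp_class & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;    /* IP header shifted by the tag */

	switch (ptp_class & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		/* read IHL from the real IP header position */
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; /* fixed-size header */
		break;
	}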