Diffstat (limited to 'drivers/net/ethernet')
28 files changed, 265 insertions(+), 77 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 29554992215a..2349ea970255 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int rxcsum, rxvlan, rxvlan_filter;
+	netdev_features_t rxcsum, rxvlan, rxvlan_filter;
 
 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
@@ -1598,7 +1598,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	struct skb_shared_hwtstamps *hwtstamps;
 	unsigned int incomplete, error, context_next, context;
 	unsigned int len, put_len, max_len;
-	int received = 0;
+	unsigned int received = 0;
+	int packet_count = 0;
 
 	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
 
@@ -1608,7 +1609,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 	packet = &ring->packet_data;
-	while (received < budget) {
+	while (packet_count < budget) {
 		DBGPR(" cur = %d\n", ring->cur);
 
 		/* First time in loop see if we need to restore state */
@@ -1662,7 +1663,7 @@ read_again:
 			if (packet->errors)
 				DBGPR("Error in received packet\n");
 			dev_kfree_skb(skb);
-			continue;
+			goto next_packet;
 		}
 
 		if (!context) {
@@ -1677,7 +1678,7 @@ read_again:
 				}
 
 				dev_kfree_skb(skb);
-				continue;
+				goto next_packet;
 			}
 			memcpy(skb_tail_pointer(skb), rdata->skb->data,
 			       put_len);
@@ -1694,7 +1695,7 @@ read_again:
 
 		/* Stray Context Descriptor? */
 		if (!skb)
-			continue;
+			goto next_packet;
 
 		/* Be sure we don't exceed the configured MTU */
 		max_len = netdev->mtu + ETH_HLEN;
@@ -1705,7 +1706,7 @@ read_again:
 		if (skb->len > max_len) {
 			DBGPR("packet length exceeds configured MTU\n");
 			dev_kfree_skb(skb);
-			continue;
+			goto next_packet;
 		}
 
 #ifdef XGMAC_ENABLE_RX_PKT_DUMP
@@ -1739,6 +1740,9 @@ read_again:
 
 		netdev->last_rx = jiffies;
 		napi_gro_receive(&pdata->napi, skb);
+
+next_packet:
+		packet_count++;
 	}
 
 	/* Check if we need to save state before leaving */
@@ -1752,9 +1756,9 @@ read_again:
 		rdata->state.error = error;
 	}
 
-	DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
 
-	return received;
+	return packet_count;
 }
 
 static int xgbe_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index e6d24c210198..c22f32622fa9 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
 {
 	struct net_device *ndev = p->ndev;
 	u32 data;
-	int i;
+	int i = 0;
 
 	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
-	for (i = 0; i < 10 && data != ~0U ; i++) {
+	do {
 		usleep_range(100, 110);
 		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
-	}
+		if (data == ~0U)
+			return 0;
+	} while (++i < 10);
 
-	if (data != ~0U) {
-		netdev_err(ndev, "Failed to release memory from shutdown\n");
-		return -ENODEV;
-	}
-
-	return 0;
+	netdev_err(ndev, "Failed to release memory from shutdown\n");
+	return -ENODEV;
 }
 
 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9ae36979bdee..3a6778a667f4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1397,6 +1397,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
 	/* Enable NAPI */
 	napi_enable(&priv->napi);
 
+	/* Enable RX interrupt and TX ring full interrupt */
+	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
 	phy_start(priv->phydev);
 
 	/* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1502,6 @@ static int bcm_sysport_open(struct net_device *dev)
 	if (ret)
 		goto out_free_rx_ring;
 
-	/* Enable RX interrupt and TX ring full interrupt */
-	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
 	/* Turn on TDMA */
 	ret = tdma_enable_set(priv, 1);
 	if (ret)
@@ -1858,6 +1858,8 @@ static int bcm_sysport_resume(struct device *d)
 	if (!netif_running(dev))
 		return 0;
 
+	umac_reset(priv);
+
 	/* We may have been suspended and never received a WOL event that
 	 * would turn off MPD detection, take care of that now
 	 */
@@ -1885,9 +1887,6 @@ static int bcm_sysport_resume(struct device *d)
 
 	netif_device_attach(dev);
 
-	/* Enable RX interrupt and TX ring full interrupt */
-	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
 	/* RX pipe enable */
 	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
 
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 23f23c97c2ad..f05fab65d78a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 		if (l5_cid >= MAX_CM_SK_TBL_SZ)
 			break;
 
-		rcu_read_lock();
 		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
 			rc = -ENODEV;
-			rcu_read_unlock();
 			break;
 		}
 		csk = &cp->csk_tbl[l5_cid];
@@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 				}
 			}
 			csk_put(csk);
-			rcu_read_unlock();
 			rc = 0;
 		}
 	}
@@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 	cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 
 	mutex_lock(&cnic_lock);
-	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 		cnic_put(dev);
 	} else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 8edf0f5bd679..6fe300e316c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -60,6 +60,42 @@ void cxgb4_dcb_version_init(struct net_device *dev)
 	dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
 }
 
+static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	struct port_dcb_info *dcb = &pi->dcb;
+	struct dcb_app app;
+	int i, err;
+
+	/* zero priority implies remove */
+	app.priority = 0;
+
+	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+		/* Check if app list is exhausted */
+		if (!dcb->app_priority[i].protocolid)
+			break;
+
+		app.protocol = dcb->app_priority[i].protocolid;
+
+		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+			app.selector = dcb->app_priority[i].sel_field + 1;
+			err = dcb_ieee_setapp(dev, &app);
+		} else {
+			app.selector = !!(dcb->app_priority[i].sel_field);
+			err = dcb_setapp(dev, &app);
+		}
+
+		if (err) {
+			dev_err(adap->pdev_dev,
+				"Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+				dcb_ver_array[dcb->dcb_version], app.selector,
+				app.protocol, -err);
+			break;
+		}
+	}
+}
+
 /* Finite State machine for Data Center Bridging.
  */
 void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,7 +116,6 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
 			/* we're going to use Host DCB */
 			dcb->state = CXGB4_DCB_STATE_HOST;
 			dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
-			dcb->enabled = 1;
 			break;
 		}
 
@@ -145,6 +180,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
 			 * state. We need to reset back to a ground state
 			 * of incomplete.
 			 */
+			cxgb4_dcb_cleanup_apps(dev);
 			cxgb4_dcb_state_init(dev);
 			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
 			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -349,6 +385,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
 {
 	struct port_info *pi = netdev2pinfo(dev);
 
+	/* If DCBx is host-managed, dcb is enabled by outside lldp agents */
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
+		pi->dcb.enabled = enabled;
+		return 0;
+	}
+
 	/* Firmware doesn't provide any mechanism to control the DCB state.
 	 */
 	if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
@@ -833,11 +875,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
 
 /* Return whether IEEE Data Center Bridging has been negotiated.
  */
-static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev)
+static inline int
+cxgb4_ieee_negotiation_complete(struct net_device *dev,
+				enum cxgb4_dcb_fw_msgs dcb_subtype)
 {
 	struct port_info *pi = netdev2pinfo(dev);
 	struct port_dcb_info *dcb = &pi->dcb;
 
+	if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+		return 0;
+
 	return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
 		(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
@@ -850,7 +897,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
 {
 	int prio;
 
-	if (!cxgb4_ieee_negotiation_complete(dev))
+	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
 		return -EINVAL;
 	if (!(app->selector && app->protocol))
 		return -EINVAL;
@@ -872,7 +919,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 {
 	int ret;
 
-	if (!cxgb4_ieee_negotiation_complete(dev))
+	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
 		return -EINVAL;
 	if (!(app->selector && app->protocol))
 		return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f60070f2519..8520d5529df8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
 #ifdef CONFIG_CHELSIO_T4_DCB
 	struct port_info *pi = netdev_priv(dev);
 
-	return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+	if (!pi->dcb.enabled)
+		return 0;
+
+	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
 #else
 	return 0;
 #endif
@@ -6610,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->tid_release_lock);
+	spin_lock_init(&adapter->win0_lock);
 
 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
 	INIT_WORK(&adapter->db_full_task, process_db_full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index bfa398d91826..0b42bddaf284 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
 	CH_DEVICE(0x480d),	/* T480-cr */
 	CH_DEVICE(0x480e),	/* T440-lp-cr */
 	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
+	CH_DEVICE(0x4881),
+	CH_DEVICE(0x4882),
+	CH_DEVICE(0x4883),
+	CH_DEVICE(0x4884),
+	CH_DEVICE(0x4885),
+	CH_DEVICE(0x4886),
+	CH_DEVICE(0x4887),
+	CH_DEVICE(0x4888),
 	CH_DEVICE(0x5801),	/* T520-cr */
 	CH_DEVICE(0x5802),	/* T522-cr */
 	CH_DEVICE(0x5803),	/* T540-cr */
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 69dfd3c9e529..0be6850be8a2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -86,7 +86,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
 	int i;
 
 	enic_rfs_timer_stop(enic);
-	spin_lock(&enic->rfs_h.lock);
+	spin_lock_bh(&enic->rfs_h.lock);
 	enic->rfs_h.free = 0;
 	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
 		struct hlist_head *hhead;
@@ -100,7 +100,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
 			kfree(n);
 		}
 	}
-	spin_unlock(&enic->rfs_h.lock);
+	spin_unlock_bh(&enic->rfs_h.lock);
 }
 
 struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
@@ -128,7 +128,7 @@ void enic_flow_may_expire(unsigned long data)
 	bool res;
 	int j;
 
-	spin_lock(&enic->rfs_h.lock);
+	spin_lock_bh(&enic->rfs_h.lock);
 	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
 		struct hlist_head *hhead;
 		struct hlist_node *tmp;
@@ -148,7 +148,7 @@ void enic_flow_may_expire(unsigned long data)
 			}
 		}
 	}
-	spin_unlock(&enic->rfs_h.lock);
+	spin_unlock_bh(&enic->rfs_h.lock);
 	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
 }
 
@@ -183,7 +183,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		return -EPROTONOSUPPORT;
 
 	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
-	spin_lock(&enic->rfs_h.lock);
+	spin_lock_bh(&enic->rfs_h.lock);
 	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
 
 	if (n) { /* entry already present */
@@ -277,7 +277,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	}
 
 ret_unlock:
-	spin_unlock(&enic->rfs_h.lock);
+	spin_unlock_bh(&enic->rfs_h.lock);
 	return res;
 }
 
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 929bfe70080a..180e53fa628f 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1674,13 +1674,13 @@ static int enic_stop(struct net_device *netdev)
 
 	enic_dev_disable(enic);
 
-	local_bh_disable();
 	for (i = 0; i < enic->rq_count; i++) {
 		napi_disable(&enic->napi[i]);
+		local_bh_disable();
 		while (!enic_poll_lock_napi(&enic->rq[i]))
 			mdelay(1);
+		local_bh_enable();
 	}
-	local_bh_enable();
 
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 81b96cf87574..50a851db2852 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1581,7 +1581,8 @@ fec_enet_interrupt(int irq, void *dev_id)
 		complete(&fep->mdio_done);
 	}
 
-	fec_ptp_check_pps_event(fep);
+	if (fep->ptp_clock)
+		fec_ptp_check_pps_event(fep);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 3d4e08be1709..b34214e2df5f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -341,6 +341,9 @@ static void restart(struct net_device *dev)
 		FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
 	}
 
+	/* Restore multicast and promiscuous settings */
+	set_multicast_list(dev);
+
 	/*
 	 * Enable interrupts we wish to service.
 	 */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index f30411f0701f..7a184e8816a4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -355,6 +355,9 @@ static void restart(struct net_device *dev)
 	if (fep->phydev->duplex)
 		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
 
+	/* Restore multicast and promiscuous settings */
+	set_multicast_list(dev);
+
 	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
 }
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5f6aded512f5..24f3986cfae2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   NETIF_F_HW_CSUM |
 			   NETIF_F_SG);
 
-	netdev->priv_flags |= IFF_UNICAST_FLT;
+	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
+	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
+	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
+		netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ed5f1c15fb0f..c3a7f4a4b775 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
 		u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
-		u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
+		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
 				I40E_GL_MDET_TX_EVENT_SHIFT;
 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
 				I40E_GL_MDET_TX_QUEUE_SHIFT;
@@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
-		u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
+		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
 				I40E_GL_MDET_RX_EVENT_SHIFT;
 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
 				I40E_GL_MDET_RX_QUEUE_SHIFT;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a21b14495ebd..a2d72a87cbde 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6537,6 +6537,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 	if (unlikely(page_to_nid(page) != numa_node_id()))
 		return false;
 
+	if (unlikely(page->pfmemalloc))
+		return false;
+
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
 	if (unlikely(page_count(page) != 1))
@@ -6603,7 +6606,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* we can reuse buffer as-is, just make sure it is local */
-		if (likely(page_to_nid(page) == numa_node_id()))
+		if (likely((page_to_nid(page) == numa_node_id()) &&
+			   !page->pfmemalloc))
 			return true;
 
 		/* this page cannot be reused so discard it */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3ce4a258f945..0ae038b9af90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
 		if (old == advertised)
 			return err;
 		/* this sets the link speed and restarts auto-neg */
+		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+			usleep_range(1000, 2000);
+
 		hw->mac.autotry_restart = true;
 		err = hw->mac.ops.setup_link(hw, advertised, true);
 		if (err) {
 			e_info(probe, "setup link failed with code %d\n", err);
 			hw->mac.ops.setup_link(hw, old, true);
 		}
+		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 	} else {
 		/* in this case we currently only support 10Gb/FULL */
 		u32 speed = ethtool_cmd_speed(ecmd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fec5212d4337..d2df4e3d1032 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 				IXGBE_CB(skb)->page_released = false;
 			}
 			dev_kfree_skb(skb);
+			rx_buffer->skb = NULL;
 		}
-		rx_buffer->skb = NULL;
 		if (rx_buffer->dma)
 			dma_unmap_page(dev, rx_buffer->dma,
 				       ixgbe_rx_pg_size(rx_ring),
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 34c137878545..454d9fea640e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * whether LSO is used */
 	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
-							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
+		if (!skb->encapsulation)
+			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+								 MLX4_WQE_CTRL_TCP_UDP_CSUM);
+		else
+			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
 		ring->tx_csum++;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index a49c9d11d8a5..49290a405903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 			pr_cont("\n");
 		}
 	}
+	synchronize_irq(eq->irq);
 
 	mlx4_mtt_cleanup(dev, &eq->mtt);
 	for (i = 0; i < npages; ++i)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ca0f98c95105..872843179f44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
 			  cur->ib.dst_gid_msk);
 		break;
 
+	case MLX4_NET_TRANS_RULE_ID_VXLAN:
+		len += snprintf(buf + len, BUF_SIZE - len,
+				"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
+		break;
 	case MLX4_NET_TRANS_RULE_ID_IPV6:
 		break;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ed53291468f3..a278238a2db6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -420,6 +420,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 	if (err)
 		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
 			       eq->eqn);
+	synchronize_irq(table->msix_arr[eq->irqn].vector);
 	mlx5_buf_free(dev, &eq->buf);
 
 	return err;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ee84a90e371c..aaf2987512b5 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	unsigned short dma_flags;
 	int i = 0;
 
-	EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
 	if (skb_shinfo(skb)->gso_size)
 		return efx_enqueue_skb_tso(tx_queue, skb);
 
@@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	/* Find the packet protocol and sanity-check it */
 	state.protocol = efx_tso_check_protocol(skb);
 
-	EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
 	rc = tso_start(&state, efx, skb);
 	if (rc)
 		goto mem_err;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5e94d00b96b3..2c62208077fe 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
 #include <linux/workqueue.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_gpio.h>
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, smc91x_match);
+
+/**
+ * of_try_set_control_gpio - configure a gpio if it exists
+ */
+static int try_toggle_control_gpio(struct device *dev,
+				   struct gpio_desc **desc,
+				   const char *name, int index,
+				   int value, unsigned int nsdelay)
+{
+	struct gpio_desc *gpio = *desc;
+	int res;
+
+	gpio = devm_gpiod_get_index(dev, name, index);
+	if (IS_ERR(gpio)) {
+		if (PTR_ERR(gpio) == -ENOENT) {
+			*desc = NULL;
+			return 0;
+		}
+
+		return PTR_ERR(gpio);
+	}
+	res = gpiod_direction_output(gpio, !value);
+	if (res) {
+		dev_err(dev, "unable to toggle gpio %s: %i\n", name, res);
+		devm_gpiod_put(dev, gpio);
+		gpio = NULL;
+		return res;
+	}
+	if (nsdelay)
+		usleep_range(nsdelay, 2 * nsdelay);
+	gpiod_set_value_cansleep(gpio, value);
+	*desc = gpio;
+
+	return 0;
+}
 #endif
 
 /*
@@ -2237,6 +2273,28 @@ static int smc_drv_probe(struct platform_device *pdev)
 		struct device_node *np = pdev->dev.of_node;
 		u32 val;
 
+		/* Optional pwrdwn GPIO configured? */
+		ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
+					      "power", 0, 0, 100);
+		if (ret)
+			return ret;
+
+		/*
+		 * Optional reset GPIO configured? Minimum 100 ns reset needed
+		 * according to LAN91C96 datasheet page 14.
+		 */
+		ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
+					      "reset", 0, 0, 100);
+		if (ret)
+			return ret;
+
+		/*
+		 * Need to wait for optional EEPROM to load, max 750 us according
+		 * to LAN91C96 datasheet page 55.
+		 */
+		if (lp->reset_gpio)
+			usleep_range(750, 1000);
+
 		/* Combination of IO widths supported, default to 16-bit */
 		if (!of_property_read_u32(np, "reg-io-width", &val)) {
 			if (val & 1)
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 47dce918eb0f..2a38dacbbd27 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -298,6 +298,9 @@ struct smc_local {
 	struct sk_buff *pending_tx_skb;
 	struct tasklet_struct tx_task;
 
+	struct gpio_desc *power_gpio;
+	struct gpio_desc *reset_gpio;
+
 	/* version/revision of the SMC91x chip */
 	int	version;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 655a23bbc451..e17a970eaf2b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg;
 static void stmmac_default_data(void)
 {
 	memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+
 	plat_dat.bus_id = 1;
 	plat_dat.phy_addr = 0;
 	plat_dat.interface = PHY_INTERFACE_MODE_GMII;
@@ -47,6 +48,12 @@ static void stmmac_default_data(void)
 	dma_cfg.pbl = 32;
 	dma_cfg.burst_len = DMA_AXI_BLEN_256;
 	plat_dat.dma_cfg = &dma_cfg;
+
+	/* Set default value for multicast hash bins */
+	plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
+
+	/* Set default value for unicast filter entries */
+	plat_dat.unicast_filter_entries = 1;
 }
 
 /**
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 952e1e4764b7..d8794488f80a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 	if (enable) {
 		unsigned long timeout = jiffies + HZ;
 
-		/* Disable Learn for all ports */
-		for (i = 0; i < priv->data.slaves; i++) {
+		/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */
+		for (i = 0; i <= priv->data.slaves; i++) {
 			cpsw_ale_control_set(ale, i,
 					     ALE_PORT_NOLEARN, 1);
 			cpsw_ale_control_set(ale, i,
@@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
 		dev_dbg(&ndev->dev, "promiscuity enabled\n");
 	} else {
-		/* Flood All Unicast Packets to Host port */
+		/* Don't Flood All Unicast Packets to Host port */
 		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
 
-		/* Enable Learn for all ports */
-		for (i = 0; i < priv->data.slaves; i++) {
+		/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */
+		for (i = 0; i <= priv->data.slaves; i++) {
 			cpsw_ale_control_set(ale, i,
 					     ALE_PORT_NOLEARN, 0);
 			cpsw_ale_control_set(ale, i,
@@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 	if (ndev->flags & IFF_PROMISC) {
 		/* Enable promiscuous mode */
 		cpsw_set_promiscious(ndev, true);
+		cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI);
 		return;
 	} else {
 		/* Disable promiscuous mode */
 		cpsw_set_promiscious(ndev, false);
 	}
 
+	/* Restore allmulti on vlans if necessary */
+	cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
+
 	/* Clear all mcast from ALE */
 	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
 
@@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
 	const int port = priv->host_port;
 	u32 reg;
 	int i;
+	int unreg_mcast_mask;
 
 	reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
 	       CPSW2_PORT_VLAN;
@@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
 	for (i = 0; i < priv->data.slaves; i++)
 		slave_write(priv->slaves + i, vlan, reg);
 
+	if (priv->ndev->flags & IFF_ALLMULTI)
+		unreg_mcast_mask = ALE_ALL_PORTS;
+	else
+		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+
 	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
 			  ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
-			  (ALE_PORT_1 | ALE_PORT_2) << port);
+			  unreg_mcast_mask << port);
 }
 
 static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
 					  unsigned short vid)
 {
 	int ret;
+	int unreg_mcast_mask;
+
+	if (priv->ndev->flags & IFF_ALLMULTI)
+		unreg_mcast_mask = ALE_ALL_PORTS;
+	else
+		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
 
 	ret = cpsw_ale_add_vlan(priv->ale, vid,
 				ALE_ALL_PORTS << priv->host_port,
 				0, ALE_ALL_PORTS << priv->host_port,
-				(ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+				unreg_mcast_mask << priv->host_port);
 	if (ret != 0)
 		return ret;
 
@@ -2006,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		parp = of_get_property(slave_node, "phy_id", &lenp);
 		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
 			dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
-			return -EINVAL;
+			goto no_phy_slave;
 		}
 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 		phyid = be32_to_cpup(parp+1);
@@ -2019,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
 			 PHY_ID_FMT, mdio->name, phyid);
 
+		slave_data->phy_if = of_get_phy_mode(slave_node);
+		if (slave_data->phy_if < 0) {
+			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+				i);
+			return slave_data->phy_if;
+		}
+
+no_phy_slave:
 		mac_addr = of_get_mac_address(slave_node);
 		if (mac_addr) {
 			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
@@ -2030,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 				return ret;
 			}
 		}
-
-		slave_data->phy_if = of_get_phy_mode(slave_node);
-		if (slave_data->phy_if < 0) {
-			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
-				i);
-			return slave_data->phy_if;
-		}
-
 		if (data->dual_emac) {
 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
 						 &prop)) {
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0579b2243bb6..3ae83879a75f 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
 	return 0;
 }
 
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS];
+	int type, idx;
+	int unreg_mcast = 0;
+
+	/* Only bother doing the work if the setting is actually changing */
+	if (ale->allmulti == allmulti)
+		return;
+
+	/* Remember the new setting to check against next time */
+	ale->allmulti = allmulti;
+
+	for (idx = 0; idx < ale->params.ale_entries; idx++) {
+		cpsw_ale_read(ale, idx, ale_entry);
+		type = cpsw_ale_get_entry_type(ale_entry);
+		if (type != ALE_TYPE_VLAN)
+			continue;
+
+		unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
+		if (allmulti)
+			unreg_mcast |= 1;
+		else
+			unreg_mcast &= ~1;
+		cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+		cpsw_ale_write(ale, idx, ale_entry);
+	}
+}
+
 struct ale_control_info {
 	const char *name;
 	int offset, port_offset;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 31cf43cab42e..c0d4127aa549 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -27,6 +27,7 @@ struct cpsw_ale {
 	struct cpsw_ale_params params;
 	struct timer_list timer;
 	unsigned long ageout;
+	int allmulti;
 };
 
 enum cpsw_ale_control {
@@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
 		      int reg_mcast, int unreg_mcast);
 int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
 
 int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
 int cpsw_ale_control_set(struct cpsw_ale *ale, int port,