Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139cp.c | 46
-rw-r--r--  drivers/net/atl1c/atl1c_ethtool.c | 8
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 23
-rw-r--r--  drivers/net/atl1e/atl1e_ethtool.c | 3
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 12
-rw-r--r--  drivers/net/atlx/atl1.c | 17
-rw-r--r--  drivers/net/atlx/atl2.c | 14
-rw-r--r--  drivers/net/bcm63xx_enet.c | 4
-rw-r--r--  drivers/net/benet/be.h | 31
-rw-r--r--  drivers/net/benet/be_cmds.c | 107
-rw-r--r--  drivers/net/benet/be_cmds.h | 21
-rw-r--r--  drivers/net/benet/be_ethtool.c | 99
-rw-r--r--  drivers/net/benet/be_hw.h | 4
-rw-r--r--  drivers/net/benet/be_main.c | 187
-rw-r--r--  drivers/net/bnx2.c | 165
-rw-r--r--  drivers/net/bnx2.h | 3
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 13
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c | 22
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.h | 8
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 44
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 5
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/cnic.c | 62
-rw-r--r--  drivers/net/cnic.h | 1
-rw-r--r--  drivers/net/cnic_if.h | 6
-rw-r--r--  drivers/net/cris/eth_v10.c | 4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 25
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 17
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 15
-rw-r--r--  drivers/net/enic/Makefile | 2
-rw-r--r--  drivers/net/enic/enic.h | 5
-rw-r--r--  drivers/net/enic/enic_dev.c | 62
-rw-r--r--  drivers/net/enic/enic_dev.h | 7
-rw-r--r--  drivers/net/enic/enic_main.c | 270
-rw-r--r--  drivers/net/enic/enic_pp.c | 264
-rw-r--r--  drivers/net/enic/enic_pp.h | 27
-rw-r--r--  drivers/net/enic/enic_res.c | 4
-rw-r--r--  drivers/net/enic/vnic_dev.c | 97
-rw-r--r--  drivers/net/enic/vnic_dev.h | 6
-rw-r--r--  drivers/net/enic/vnic_devcmd.h | 57
-rw-r--r--  drivers/net/enic/vnic_vic.c | 5
-rw-r--r--  drivers/net/enic/vnic_vic.h | 13
-rw-r--r--  drivers/net/ewrk3.c | 56
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 2
-rw-r--r--  drivers/net/gianfar.h | 17
-rw-r--r--  drivers/net/gianfar_ethtool.c | 52
-rw-r--r--  drivers/net/ibmlana.c | 3
-rw-r--r--  drivers/net/jme.c | 77
-rw-r--r--  drivers/net/jme.h | 2
-rw-r--r--  drivers/net/ksz884x.c | 73
-rw-r--r--  drivers/net/macb.c | 3
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 102
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 3
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 55
-rw-r--r--  drivers/net/niu.c | 34
-rw-r--r--  drivers/net/niu.h | 1
-rw-r--r--  drivers/net/pcnet32.c | 74
-rw-r--r--  drivers/net/qla3xxx.c | 2
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 224
-rw-r--r--  drivers/net/qlcnic/qlcnic_ctx.c | 135
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 149
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c | 82
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c | 58
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 446
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c | 38
-rw-r--r--  drivers/net/r8169.c | 404
-rw-r--r--  drivers/net/s2io.c | 86
-rw-r--r--  drivers/net/s2io.h | 3
-rw-r--r--  drivers/net/sfc/efx.c | 20
-rw-r--r--  drivers/net/sfc/ethtool.c | 106
-rw-r--r--  drivers/net/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/sfc/nic.c | 6
-rw-r--r--  drivers/net/sfc/rx.c | 3
-rw-r--r--  drivers/net/skge.c | 86
-rw-r--r--  drivers/net/skge.h | 1
-rw-r--r--  drivers/net/sky2.c | 25
-rw-r--r--  drivers/net/smsc911x.c | 292
-rw-r--r--  drivers/net/smsc911x.h | 22
-rw-r--r--  drivers/net/stmmac/stmmac_ethtool.c | 14
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 83
-rw-r--r--  drivers/net/sungem.c | 3
-rw-r--r--  drivers/net/sunhme.c | 6
-rw-r--r--  drivers/net/tg3.c | 499
-rw-r--r--  drivers/net/tg3.h | 67
-rw-r--r--  drivers/net/ucc_geth_ethtool.c | 1
-rw-r--r--  drivers/net/usb/Kconfig | 2
-rw-r--r--  drivers/net/usb/plusb.c | 32
-rw-r--r--  drivers/net/usb/smsc75xx.c | 124
-rw-r--r--  drivers/net/usb/smsc95xx.c | 83
-rw-r--r--  drivers/net/veth.c | 45
-rw-r--r--  drivers/net/via-rhine.c | 2
-rw-r--r--  drivers/net/via-velocity.c | 7
-rw-r--r--  drivers/net/virtio_net.c | 46
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c | 25
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 106
98 files changed, 2991 insertions(+), 2691 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index dd16e83933a2..10c45051caea 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -758,8 +758,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
758 758
759 entry = cp->tx_head; 759 entry = cp->tx_head;
760 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 760 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
761 if (dev->features & NETIF_F_TSO) 761 mss = skb_shinfo(skb)->gso_size;
762 mss = skb_shinfo(skb)->gso_size;
763 762
764 if (skb_shinfo(skb)->nr_frags == 0) { 763 if (skb_shinfo(skb)->nr_frags == 0) {
765 struct cp_desc *txd = &cp->tx_ring[entry]; 764 struct cp_desc *txd = &cp->tx_ring[entry];
@@ -1416,32 +1415,23 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
1416 cp->msg_enable = value; 1415 cp->msg_enable = value;
1417} 1416}
1418 1417
1419static u32 cp_get_rx_csum(struct net_device *dev) 1418static int cp_set_features(struct net_device *dev, u32 features)
1420{ 1419{
1421 struct cp_private *cp = netdev_priv(dev); 1420 struct cp_private *cp = netdev_priv(dev);
1422 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0; 1421 unsigned long flags;
1423}
1424 1422
1425static int cp_set_rx_csum(struct net_device *dev, u32 data) 1423 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1426{ 1424 return 0;
1427 struct cp_private *cp = netdev_priv(dev);
1428 u16 cmd = cp->cpcmd, newcmd;
1429 1425
1430 newcmd = cmd; 1426 spin_lock_irqsave(&cp->lock, flags);
1431 1427
1432 if (data) 1428 if (features & NETIF_F_RXCSUM)
1433 newcmd |= RxChkSum; 1429 cp->cpcmd |= RxChkSum;
1434 else 1430 else
1435 newcmd &= ~RxChkSum; 1431 cp->cpcmd &= ~RxChkSum;
1436
1437 if (newcmd != cmd) {
1438 unsigned long flags;
1439 1432
1440 spin_lock_irqsave(&cp->lock, flags); 1433 cpw16_f(CpCmd, cp->cpcmd);
1441 cp->cpcmd = newcmd; 1434 spin_unlock_irqrestore(&cp->lock, flags);
1442 cpw16_f(CpCmd, newcmd);
1443 spin_unlock_irqrestore(&cp->lock, flags);
1444 }
1445 1435
1446 return 0; 1436 return 0;
1447} 1437}
@@ -1554,11 +1544,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
1554 .get_link = ethtool_op_get_link, 1544 .get_link = ethtool_op_get_link,
1555 .get_msglevel = cp_get_msglevel, 1545 .get_msglevel = cp_get_msglevel,
1556 .set_msglevel = cp_set_msglevel, 1546 .set_msglevel = cp_set_msglevel,
1557 .get_rx_csum = cp_get_rx_csum,
1558 .set_rx_csum = cp_set_rx_csum,
1559 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1560 .set_sg = ethtool_op_set_sg,
1561 .set_tso = ethtool_op_set_tso,
1562 .get_regs = cp_get_regs, 1547 .get_regs = cp_get_regs,
1563 .get_wol = cp_get_wol, 1548 .get_wol = cp_get_wol,
1564 .set_wol = cp_set_wol, 1549 .set_wol = cp_set_wol,
@@ -1831,6 +1816,7 @@ static const struct net_device_ops cp_netdev_ops = {
1831 .ndo_do_ioctl = cp_ioctl, 1816 .ndo_do_ioctl = cp_ioctl,
1832 .ndo_start_xmit = cp_start_xmit, 1817 .ndo_start_xmit = cp_start_xmit,
1833 .ndo_tx_timeout = cp_tx_timeout, 1818 .ndo_tx_timeout = cp_tx_timeout,
1819 .ndo_set_features = cp_set_features,
1834#if CP_VLAN_TAG_USED 1820#if CP_VLAN_TAG_USED
1835 .ndo_vlan_rx_register = cp_vlan_rx_register, 1821 .ndo_vlan_rx_register = cp_vlan_rx_register,
1836#endif 1822#endif
@@ -1934,6 +1920,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1934 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | 1920 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1935 PCIMulRW | RxChkSum | CpRxOn | CpTxOn; 1921 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1936 1922
1923 dev->features |= NETIF_F_RXCSUM;
1924 dev->hw_features |= NETIF_F_RXCSUM;
1925
1937 regs = ioremap(pciaddr, CP_REGS_SIZE); 1926 regs = ioremap(pciaddr, CP_REGS_SIZE);
1938 if (!regs) { 1927 if (!regs) {
1939 rc = -EIO; 1928 rc = -EIO;
@@ -1966,9 +1955,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1966 if (pci_using_dac) 1955 if (pci_using_dac)
1967 dev->features |= NETIF_F_HIGHDMA; 1956 dev->features |= NETIF_F_HIGHDMA;
1968 1957
1969#if 0 /* disabled by default until verified */ 1958 /* disabled by default until verified */
1970 dev->features |= NETIF_F_TSO; 1959 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
1971#endif
1972 1960
1973 dev->irq = pdev->irq; 1961 dev->irq = pdev->irq;
1974 1962
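
The 8139cp conversion above is the recurring pattern in this series: the old ethtool get/set_rx_csum, set_sg and set_tso hooks go away, the probe path advertises user-toggleable bits in netdev->hw_features, and a single ndo_set_features callback reacts to whatever the core decides to flip. A minimal sketch of that shape follows; it is not taken from any one driver in this diff, and struct my_priv and my_hw_set_rx_csum() are hypothetical placeholders.

#include <linux/netdevice.h>

struct my_priv {
	void __iomem *regs;		/* hypothetical register window */
};

static void my_hw_set_rx_csum(struct my_priv *priv, bool enable)
{
	/* hypothetical: program the device's RX checksum-offload bit */
}

static int my_set_features(struct net_device *dev, u32 features)
{
	struct my_priv *priv = netdev_priv(dev);
	u32 changed = dev->features ^ features;

	/* only touch hardware for bits that actually changed */
	if (changed & NETIF_F_RXCSUM)
		my_hw_set_rx_csum(priv, !!(features & NETIF_F_RXCSUM));

	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_set_features	= my_set_features,
	/* ... the usual open/stop/xmit hooks ... */
};

At probe time such a driver sets dev->hw_features |= NETIF_F_RXCSUM and dev->features |= NETIF_F_RXCSUM, so the offload is enabled by default but can still be turned off with ethtool -K, as 8139cp now does above.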
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 7c521508313c..3af5a336a5af 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -113,11 +113,6 @@ static int atl1c_set_settings(struct net_device *netdev,
113 return 0; 113 return 0;
114} 114}
115 115
116static u32 atl1c_get_tx_csum(struct net_device *netdev)
117{
118 return (netdev->features & NETIF_F_HW_CSUM) != 0;
119}
120
121static u32 atl1c_get_msglevel(struct net_device *netdev) 116static u32 atl1c_get_msglevel(struct net_device *netdev)
122{ 117{
123 struct atl1c_adapter *adapter = netdev_priv(netdev); 118 struct atl1c_adapter *adapter = netdev_priv(netdev);
@@ -307,9 +302,6 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
307 .get_link = ethtool_op_get_link, 302 .get_link = ethtool_op_get_link,
308 .get_eeprom_len = atl1c_get_eeprom_len, 303 .get_eeprom_len = atl1c_get_eeprom_len,
309 .get_eeprom = atl1c_get_eeprom, 304 .get_eeprom = atl1c_get_eeprom,
310 .get_tx_csum = atl1c_get_tx_csum,
311 .get_sg = ethtool_op_get_sg,
312 .set_sg = ethtool_op_set_sg,
313}; 305};
314 306
315void atl1c_set_ethtool_ops(struct net_device *netdev) 307void atl1c_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 7d9d5067a65c..894d485bf5bc 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -484,6 +484,15 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
484 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 484 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
485 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; 485 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
486} 486}
487
488static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
489{
490 if (netdev->mtu > MAX_TSO_FRAME_SIZE)
491 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
492
493 return features;
494}
495
487/* 496/*
488 * atl1c_change_mtu - Change the Maximum Transfer Unit 497 * atl1c_change_mtu - Change the Maximum Transfer Unit
489 * @netdev: network interface device structure 498 * @netdev: network interface device structure
@@ -510,14 +519,8 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
510 netdev->mtu = new_mtu; 519 netdev->mtu = new_mtu;
511 adapter->hw.max_frame_size = new_mtu; 520 adapter->hw.max_frame_size = new_mtu;
512 atl1c_set_rxbufsize(adapter, netdev); 521 atl1c_set_rxbufsize(adapter, netdev);
513 if (new_mtu > MAX_TSO_FRAME_SIZE) {
514 adapter->netdev->features &= ~NETIF_F_TSO;
515 adapter->netdev->features &= ~NETIF_F_TSO6;
516 } else {
517 adapter->netdev->features |= NETIF_F_TSO;
518 adapter->netdev->features |= NETIF_F_TSO6;
519 }
520 atl1c_down(adapter); 522 atl1c_down(adapter);
523 netdev_update_features(netdev);
521 atl1c_up(adapter); 524 atl1c_up(adapter);
522 clear_bit(__AT_RESETTING, &adapter->flags); 525 clear_bit(__AT_RESETTING, &adapter->flags);
523 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) { 526 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
@@ -2585,6 +2588,7 @@ static const struct net_device_ops atl1c_netdev_ops = {
2585 .ndo_set_mac_address = atl1c_set_mac_addr, 2588 .ndo_set_mac_address = atl1c_set_mac_addr,
2586 .ndo_set_multicast_list = atl1c_set_multi, 2589 .ndo_set_multicast_list = atl1c_set_multi,
2587 .ndo_change_mtu = atl1c_change_mtu, 2590 .ndo_change_mtu = atl1c_change_mtu,
2591 .ndo_fix_features = atl1c_fix_features,
2588 .ndo_do_ioctl = atl1c_ioctl, 2592 .ndo_do_ioctl = atl1c_ioctl,
2589 .ndo_tx_timeout = atl1c_tx_timeout, 2593 .ndo_tx_timeout = atl1c_tx_timeout,
2590 .ndo_get_stats = atl1c_get_stats, 2594 .ndo_get_stats = atl1c_get_stats,
@@ -2605,12 +2609,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2605 atl1c_set_ethtool_ops(netdev); 2609 atl1c_set_ethtool_ops(netdev);
2606 2610
2607 /* TODO: add when ready */ 2611 /* TODO: add when ready */
2608 netdev->features = NETIF_F_SG | 2612 netdev->hw_features = NETIF_F_SG |
2609 NETIF_F_HW_CSUM | 2613 NETIF_F_HW_CSUM |
2610 NETIF_F_HW_VLAN_TX | 2614 NETIF_F_HW_VLAN_TX |
2611 NETIF_F_HW_VLAN_RX |
2612 NETIF_F_TSO | 2615 NETIF_F_TSO |
2613 NETIF_F_TSO6; 2616 NETIF_F_TSO6;
2617 netdev->features = netdev->hw_features |
2618 NETIF_F_HW_VLAN_RX;
2614 return 0; 2619 return 0;
2615} 2620}
2616 2621
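
In the atl1c change above, change_mtu no longer toggles TSO by hand; the constraint is expressed once in ndo_fix_features and any path that changes it (here the MTU) just asks the core to re-evaluate. A generic sketch of that idiom, with MY_MAX_TSO_MTU as an assumed placeholder limit:

#include <linux/netdevice.h>

#define MY_MAX_TSO_MTU	6144		/* assumed hardware segmentation limit */

/* Called by the core whenever features may need re-checking: strip TSO
 * when the current MTU exceeds what the hardware can segment. */
static u32 my_fix_features(struct net_device *dev, u32 features)
{
	if (dev->mtu > MY_MAX_TSO_MTU)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static int my_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	netdev_update_features(dev);	/* re-runs my_fix_features() */
	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_change_mtu		= my_change_mtu,
	.ndo_fix_features	= my_fix_features,
};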
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 1209297433b8..47783749d9fa 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -382,9 +382,6 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
382 .get_eeprom_len = atl1e_get_eeprom_len, 382 .get_eeprom_len = atl1e_get_eeprom_len,
383 .get_eeprom = atl1e_get_eeprom, 383 .get_eeprom = atl1e_get_eeprom,
384 .set_eeprom = atl1e_set_eeprom, 384 .set_eeprom = atl1e_set_eeprom,
385 .set_tx_csum = ethtool_op_set_tx_hw_csum,
386 .set_sg = ethtool_op_set_sg,
387 .set_tso = ethtool_op_set_tso,
388}; 385};
389 386
390void atl1e_set_ethtool_ops(struct net_device *netdev) 387void atl1e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index b0a71e2f28a9..9900ca1d8eda 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1927,11 +1927,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
1927 * reschedule our watchdog timer */ 1927 * reschedule our watchdog timer */
1928 set_bit(__AT_DOWN, &adapter->flags); 1928 set_bit(__AT_DOWN, &adapter->flags);
1929 1929
1930#ifdef NETIF_F_LLTX
1931 netif_stop_queue(netdev); 1930 netif_stop_queue(netdev);
1932#else
1933 netif_tx_disable(netdev);
1934#endif
1935 1931
1936 /* reset MAC to disable all RX/TX */ 1932 /* reset MAC to disable all RX/TX */
1937 atl1e_reset_hw(&adapter->hw); 1933 atl1e_reset_hw(&adapter->hw);
@@ -2223,10 +2219,10 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2223 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2219 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2224 atl1e_set_ethtool_ops(netdev); 2220 atl1e_set_ethtool_ops(netdev);
2225 2221
2226 netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | 2222 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
2227 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2223 NETIF_F_HW_VLAN_TX;
2228 netdev->features |= NETIF_F_LLTX; 2224 netdev->features = netdev->hw_features |
2229 netdev->features |= NETIF_F_TSO; 2225 NETIF_F_HW_VLAN_RX | NETIF_F_LLTX;
2230 2226
2231 return 0; 2227 return 0;
2232} 2228}
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 67f40b9c16ed..98334a1f0c5c 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2572,7 +2572,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
2572{ 2572{
2573 struct net_device *netdev = adapter->netdev; 2573 struct net_device *netdev = adapter->netdev;
2574 int err; 2574 int err;
2575 int irq_flags = IRQF_SAMPLE_RANDOM; 2575 int irq_flags = 0;
2576 2576
2577 /* hardware has been reset, we need to reload some things */ 2577 /* hardware has been reset, we need to reload some things */
2578 atlx_set_multi(netdev); 2578 atlx_set_multi(netdev);
@@ -2986,6 +2986,11 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2986 netdev->features |= NETIF_F_SG; 2986 netdev->features |= NETIF_F_SG;
2987 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 2987 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2988 2988
2989 netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO;
2990
2991 /* is this valid? see atl1_setup_mac_ctrl() */
2992 netdev->features |= NETIF_F_RXCSUM;
2993
2989 /* 2994 /*
2990 * patch for some L1 of old version, 2995 * patch for some L1 of old version,
2991 * the final version of L1 may not need these 2996 * the final version of L1 may not need these
@@ -3595,12 +3600,6 @@ static int atl1_set_pauseparam(struct net_device *netdev,
3595 return 0; 3600 return 0;
3596} 3601}
3597 3602
3598/* FIXME: is this right? -- CHS */
3599static u32 atl1_get_rx_csum(struct net_device *netdev)
3600{
3601 return 1;
3602}
3603
3604static void atl1_get_strings(struct net_device *netdev, u32 stringset, 3603static void atl1_get_strings(struct net_device *netdev, u32 stringset,
3605 u8 *data) 3604 u8 *data)
3606{ 3605{
@@ -3668,13 +3667,9 @@ static const struct ethtool_ops atl1_ethtool_ops = {
3668 .set_ringparam = atl1_set_ringparam, 3667 .set_ringparam = atl1_set_ringparam,
3669 .get_pauseparam = atl1_get_pauseparam, 3668 .get_pauseparam = atl1_get_pauseparam,
3670 .set_pauseparam = atl1_set_pauseparam, 3669 .set_pauseparam = atl1_set_pauseparam,
3671 .get_rx_csum = atl1_get_rx_csum,
3672 .set_tx_csum = ethtool_op_set_tx_hw_csum,
3673 .get_link = ethtool_op_get_link, 3670 .get_link = ethtool_op_get_link,
3674 .set_sg = ethtool_op_set_sg,
3675 .get_strings = atl1_get_strings, 3671 .get_strings = atl1_get_strings,
3676 .nway_reset = atl1_nway_reset, 3672 .nway_reset = atl1_nway_reset,
3677 .get_ethtool_stats = atl1_get_ethtool_stats, 3673 .get_ethtool_stats = atl1_get_ethtool_stats,
3678 .get_sset_count = atl1_get_sset_count, 3674 .get_sset_count = atl1_get_sset_count,
3679 .set_tso = ethtool_op_set_tso,
3680}; 3675};
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index e3cbf45dc612..b75aa295d37e 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1411,9 +1411,8 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
1411 1411
1412 err = -EIO; 1412 err = -EIO;
1413 1413
1414#ifdef NETIF_F_HW_VLAN_TX 1414 netdev->hw_features = NETIF_F_SG;
1415 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 1415 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
1416#endif
1417 1416
1418 /* Init PHY as early as possible due to power saving issue */ 1417 /* Init PHY as early as possible due to power saving issue */
1419 atl2_phy_init(&adapter->hw); 1418 atl2_phy_init(&adapter->hw);
@@ -1840,11 +1839,6 @@ static int atl2_set_settings(struct net_device *netdev,
1840 return 0; 1839 return 0;
1841} 1840}
1842 1841
1843static u32 atl2_get_tx_csum(struct net_device *netdev)
1844{
1845 return (netdev->features & NETIF_F_HW_CSUM) != 0;
1846}
1847
1848static u32 atl2_get_msglevel(struct net_device *netdev) 1842static u32 atl2_get_msglevel(struct net_device *netdev)
1849{ 1843{
1850 return 0; 1844 return 0;
@@ -2112,12 +2106,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
2112 .get_eeprom_len = atl2_get_eeprom_len, 2106 .get_eeprom_len = atl2_get_eeprom_len,
2113 .get_eeprom = atl2_get_eeprom, 2107 .get_eeprom = atl2_get_eeprom,
2114 .set_eeprom = atl2_set_eeprom, 2108 .set_eeprom = atl2_set_eeprom,
2115 .get_tx_csum = atl2_get_tx_csum,
2116 .get_sg = ethtool_op_get_sg,
2117 .set_sg = ethtool_op_set_sg,
2118#ifdef NETIF_F_TSO
2119 .get_tso = ethtool_op_get_tso,
2120#endif
2121}; 2109};
2122 2110
2123static void atl2_set_ethtool_ops(struct net_device *netdev) 2111static void atl2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index c48104b08861..e68ffe622e69 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -839,8 +839,8 @@ static int bcm_enet_open(struct net_device *dev)
839 if (ret) 839 if (ret)
840 goto out_phy_disconnect; 840 goto out_phy_disconnect;
841 841
842 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 842 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
843 IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev); 843 dev->name, dev);
844 if (ret) 844 if (ret)
845 goto out_freeirq; 845 goto out_freeirq;
846 846
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 66823eded7a3..a0b4743d7224 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,15 +84,14 @@ static inline char *nic_name(struct pci_dev *pdev)
84#define MCC_CQ_LEN 256 84#define MCC_CQ_LEN 256
85 85
86#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */ 86#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
87#define BE_MAX_MSIX_VECTORS (MAX_RSS_QS + 1 + 1)/* RSS qs + 1 def Rx + Tx */ 87#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
88#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
88#define BE_NAPI_WEIGHT 64 89#define BE_NAPI_WEIGHT 64
89#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 90#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
90#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 91#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
91 92
92#define FW_VER_LEN 32 93#define FW_VER_LEN 32
93 94
94#define BE_MAX_VF 32
95
96struct be_dma_mem { 95struct be_dma_mem {
97 void *va; 96 void *va;
98 dma_addr_t dma; 97 dma_addr_t dma;
@@ -276,7 +275,7 @@ struct be_adapter {
276 spinlock_t mcc_cq_lock; 275 spinlock_t mcc_cq_lock;
277 276
278 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS]; 277 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
279 bool msix_enabled; 278 u32 num_msix_vec;
280 bool isr_registered; 279 bool isr_registered;
281 280
282 /* TX Rings */ 281 /* TX Rings */
@@ -287,7 +286,7 @@ struct be_adapter {
287 u32 cache_line_break[8]; 286 u32 cache_line_break[8];
288 287
289 /* Rx rings */ 288 /* Rx rings */
290 struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */ 289 struct be_rx_obj rx_obj[MAX_RX_QS];
291 u32 num_rx_qs; 290 u32 num_rx_qs;
292 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 291 u32 big_page_size; /* Compounded page size shared by rx wrbs */
293 292
@@ -308,10 +307,10 @@ struct be_adapter {
308 u16 work_counter; 307 u16 work_counter;
309 308
310 /* Ethtool knobs and info */ 309 /* Ethtool knobs and info */
311 bool rx_csum; /* BE card must perform rx-checksumming */
312 char fw_ver[FW_VER_LEN]; 310 char fw_ver[FW_VER_LEN];
313 u32 if_handle; /* Used to configure filtering */ 311 u32 if_handle; /* Used to configure filtering */
314 u32 pmac_id; /* MAC addr handle used by BE card */ 312 u32 pmac_id; /* MAC addr handle used by BE card */
313 u32 beacon_state; /* for set_phys_id */
315 314
316 bool eeh_err; 315 bool eeh_err;
317 bool link_up; 316 bool link_up;
@@ -334,7 +333,7 @@ struct be_adapter {
334 333
335 bool be3_native; 334 bool be3_native;
336 bool sriov_enabled; 335 bool sriov_enabled;
337 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 336 struct be_vf_cfg *vf_cfg;
338 u8 is_virtfn; 337 u8 is_virtfn;
339 u32 sli_family; 338 u32 sli_family;
340 u8 hba_port_num; 339 u8 hba_port_num;
@@ -351,6 +350,7 @@ struct be_adapter {
351 350
352extern const struct ethtool_ops be_ethtool_ops; 351extern const struct ethtool_ops be_ethtool_ops;
353 352
353#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
354#define tx_stats(adapter) (&adapter->tx_stats) 354#define tx_stats(adapter) (&adapter->tx_stats)
355#define rx_stats(rxo) (&rxo->stats) 355#define rx_stats(rxo) (&rxo->stats)
356 356
@@ -455,18 +455,10 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
455 455
456static inline void be_check_sriov_fn_type(struct be_adapter *adapter) 456static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
457{ 457{
458 u8 data;
459 u32 sli_intf; 458 u32 sli_intf;
460 459
461 if (lancer_chip(adapter)) { 460 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
462 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, 461 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
463 &sli_intf);
464 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
465 } else {
466 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
467 pci_read_config_byte(adapter->pdev, 0xFE, &data);
468 adapter->is_virtfn = (data != 0xAA);
469 }
470} 462}
471 463
472static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 464static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
@@ -482,6 +474,11 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
482 memcpy(mac, adapter->netdev->dev_addr, 3); 474 memcpy(mac, adapter->netdev->dev_addr, 3);
483} 475}
484 476
477static inline bool be_multi_rxq(const struct be_adapter *adapter)
478{
479 return adapter->num_rx_qs > 1;
480}
481
485extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 482extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
486 u16 num_popped); 483 u16 num_popped);
487extern void be_link_status_update(struct be_adapter *adapter, bool link_up); 484extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
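
be.h above drops the hard BE_MAX_VF limit and the fixed vf_cfg array; later in this diff, be_probe() allocates vf_cfg dynamically and be_sriov_enable() clamps num_vfs to the TotalVFs value the device itself advertises. A standalone sketch of that SR-IOV capability query (the my_ prefix is hypothetical):

#include <linux/pci.h>

/* Read the TotalVFs field from the device's SR-IOV extended capability
 * so a user-supplied VF count can be clamped to what the hardware
 * actually supports, instead of hard-coding a driver-wide maximum. */
static u16 my_total_vfs(struct pci_dev *pdev)
{
	int pos;
	u16 nvfs = 0;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nvfs);

	return nvfs;
}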
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e2d825bb94a..11b774a5eaff 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1186,6 +1186,113 @@ err:
1186 return status; 1186 return status;
1187} 1187}
1188 1188
1189/* Uses synchronous mcc */
1190int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1191{
1192 struct be_mcc_wrb *wrb;
1193 struct be_cmd_req_get_fat *req;
1194 int status;
1195
1196 spin_lock_bh(&adapter->mcc_lock);
1197
1198 wrb = wrb_from_mccq(adapter);
1199 if (!wrb) {
1200 status = -EBUSY;
1201 goto err;
1202 }
1203 req = embedded_payload(wrb);
1204
1205 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1206 OPCODE_COMMON_MANAGE_FAT);
1207
1208 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1209 OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
1210 req->fat_operation = cpu_to_le32(QUERY_FAT);
1211 status = be_mcc_notify_wait(adapter);
1212 if (!status) {
1213 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1214 if (log_size && resp->log_size)
1215 *log_size = le32_to_cpu(resp->log_size -
1216 sizeof(u32));
1217 }
1218err:
1219 spin_unlock_bh(&adapter->mcc_lock);
1220 return status;
1221}
1222
1223void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1224{
1225 struct be_dma_mem get_fat_cmd;
1226 struct be_mcc_wrb *wrb;
1227 struct be_cmd_req_get_fat *req;
1228 struct be_sge *sge;
1229 u32 offset = 0, total_size, buf_size, log_offset = sizeof(u32);
1230 int status;
1231
1232 if (buf_len == 0)
1233 return;
1234
1235 total_size = buf_len;
1236
1237 spin_lock_bh(&adapter->mcc_lock);
1238
1239 wrb = wrb_from_mccq(adapter);
1240 if (!wrb) {
1241 status = -EBUSY;
1242 goto err;
1243 }
1244 while (total_size) {
1245 buf_size = min(total_size, (u32)60*1024);
1246 total_size -= buf_size;
1247
1248 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + buf_size;
1249 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1250 get_fat_cmd.size,
1251 &get_fat_cmd.dma);
1252 if (!get_fat_cmd.va) {
1253 status = -ENOMEM;
1254 dev_err(&adapter->pdev->dev,
1255 "Memory allocation failure while retrieving FAT data\n");
1256 goto err;
1257 }
1258 req = get_fat_cmd.va;
1259 sge = nonembedded_sgl(wrb);
1260
1261 be_wrb_hdr_prepare(wrb, get_fat_cmd.size, false, 1,
1262 OPCODE_COMMON_MANAGE_FAT);
1263
1264 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1265 OPCODE_COMMON_MANAGE_FAT, get_fat_cmd.size);
1266
1267 sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.size));
1268 sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1269 sge->len = cpu_to_le32(get_fat_cmd.size);
1270
1271 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1272 req->read_log_offset = cpu_to_le32(log_offset);
1273 req->read_log_length = cpu_to_le32(buf_size);
1274 req->data_buffer_size = cpu_to_le32(buf_size);
1275
1276 status = be_mcc_notify_wait(adapter);
1277 if (!status) {
1278 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1279 memcpy(buf + offset,
1280 resp->data_buffer,
1281 resp->read_log_length);
1282 }
1283 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1284 get_fat_cmd.va,
1285 get_fat_cmd.dma);
1286 if (status)
1287 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1288
1289 offset += buf_size;
1290 log_offset += buf_size;
1291 }
1292err:
1293 spin_unlock_bh(&adapter->mcc_lock);
1294}
1295
1189/* Uses Mbox */ 1296/* Uses Mbox */
1190int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) 1297int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1191{ 1298{
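
The two MCC helpers added above (be_cmd_get_reg_len and be_cmd_get_regs) back the ethtool register-dump hooks that be_ethtool.c wires up later in this diff. The contract between the two callbacks is simply "size first, then fill"; a generic sketch, where my_query_log_size() and my_read_log() stand in for hypothetical device-specific helpers:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* hypothetical device-specific helpers, assumed elsewhere in the driver */
void my_query_log_size(void *priv, u32 *len);
void my_read_log(void *priv, void *buf, u32 len);

/* ethtool core calls get_regs_len() first to size the user buffer,
 * then get_regs(); the driver must never write past regs->len. */
static int my_get_regs_len(struct net_device *dev)
{
	u32 len = 0;

	my_query_log_size(netdev_priv(dev), &len);
	return len;
}

static void my_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *buf)
{
	memset(buf, 0, regs->len);
	my_read_log(netdev_priv(dev), buf, regs->len);
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_regs_len	= my_get_regs_len,
	.get_regs	= my_get_regs,
};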
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 4f254cfaabe2..3fb6e0a3ad7a 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -186,6 +186,7 @@ struct be_mcc_mailbox {
186#define OPCODE_COMMON_NTWK_PMAC_ADD 59 186#define OPCODE_COMMON_NTWK_PMAC_ADD 59
187#define OPCODE_COMMON_NTWK_PMAC_DEL 60 187#define OPCODE_COMMON_NTWK_PMAC_DEL 60
188#define OPCODE_COMMON_FUNCTION_RESET 61 188#define OPCODE_COMMON_FUNCTION_RESET 61
189#define OPCODE_COMMON_MANAGE_FAT 68
189#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 190#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
190#define OPCODE_COMMON_GET_BEACON_STATE 70 191#define OPCODE_COMMON_GET_BEACON_STATE 70
191#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 192#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
@@ -380,6 +381,24 @@ struct be_cmd_resp_cq_create {
380 u16 rsvd0; 381 u16 rsvd0;
381} __packed; 382} __packed;
382 383
384struct be_cmd_req_get_fat {
385 struct be_cmd_req_hdr hdr;
386 u32 fat_operation;
387 u32 read_log_offset;
388 u32 read_log_length;
389 u32 data_buffer_size;
390 u32 data_buffer[1];
391} __packed;
392
393struct be_cmd_resp_get_fat {
394 struct be_cmd_resp_hdr hdr;
395 u32 log_size;
396 u32 read_log_length;
397 u32 rsvd[2];
398 u32 data_buffer[1];
399} __packed;
400
401
383/******************** Create MCCQ ***************************/ 402/******************** Create MCCQ ***************************/
384/* Pseudo amap definition in which each bit of the actual structure is defined 403/* Pseudo amap definition in which each bit of the actual structure is defined
385 * as a byte: used to calculate offset/shift/mask of each field */ 404 * as a byte: used to calculate offset/shift/mask of each field */
@@ -1148,4 +1167,6 @@ extern void be_detect_dump_ue(struct be_adapter *adapter);
1148extern int be_cmd_get_die_temperature(struct be_adapter *adapter); 1167extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1149extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); 1168extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1150extern int be_cmd_check_native_mode(struct be_adapter *adapter); 1169extern int be_cmd_check_native_mode(struct be_adapter *adapter);
1170extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1171extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1151 1172
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index aac248fbd18b..96f5502e0ef7 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -156,6 +156,25 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
156} 156}
157 157
158static int 158static int
159be_get_reg_len(struct net_device *netdev)
160{
161 struct be_adapter *adapter = netdev_priv(netdev);
162 u32 log_size = 0;
163
164 be_cmd_get_reg_len(adapter, &log_size);
165 return log_size;
166}
167
168static void
169be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
170{
171 struct be_adapter *adapter = netdev_priv(netdev);
172
173 memset(buf, 0, regs->len);
174 be_cmd_get_regs(adapter, regs->len, buf);
175}
176
177static int
159be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 178be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
160{ 179{
161 struct be_adapter *adapter = netdev_priv(netdev); 180 struct be_adapter *adapter = netdev_priv(netdev);
@@ -242,25 +261,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
242 return 0; 261 return 0;
243} 262}
244 263
245static u32 be_get_rx_csum(struct net_device *netdev)
246{
247 struct be_adapter *adapter = netdev_priv(netdev);
248
249 return adapter->rx_csum;
250}
251
252static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
253{
254 struct be_adapter *adapter = netdev_priv(netdev);
255
256 if (data)
257 adapter->rx_csum = true;
258 else
259 adapter->rx_csum = false;
260
261 return 0;
262}
263
264static void 264static void
265be_get_ethtool_stats(struct net_device *netdev, 265be_get_ethtool_stats(struct net_device *netdev,
266 struct ethtool_stats *stats, uint64_t *data) 266 struct ethtool_stats *stats, uint64_t *data)
@@ -507,29 +507,33 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
507} 507}
508 508
509static int 509static int
510be_phys_id(struct net_device *netdev, u32 data) 510be_set_phys_id(struct net_device *netdev,
511 enum ethtool_phys_id_state state)
511{ 512{
512 struct be_adapter *adapter = netdev_priv(netdev); 513 struct be_adapter *adapter = netdev_priv(netdev);
513 int status;
514 u32 cur;
515
516 be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
517 514
518 if (cur == BEACON_STATE_ENABLED) 515 switch (state) {
519 return 0; 516 case ETHTOOL_ID_ACTIVE:
517 be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
518 &adapter->beacon_state);
519 return -EINVAL;
520 520
521 if (data < 2) 521 case ETHTOOL_ID_ON:
522 data = 2; 522 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
523 BEACON_STATE_ENABLED);
524 break;
523 525
524 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, 526 case ETHTOOL_ID_OFF:
525 BEACON_STATE_ENABLED); 527 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
526 set_current_state(TASK_INTERRUPTIBLE); 528 BEACON_STATE_DISABLED);
527 schedule_timeout(data*HZ); 529 break;
528 530
529 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, 531 case ETHTOOL_ID_INACTIVE:
530 BEACON_STATE_DISABLED); 532 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
533 adapter->beacon_state);
534 }
531 535
532 return status; 536 return 0;
533} 537}
534 538
535static bool 539static bool
@@ -712,6 +716,18 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
712 return status; 716 return status;
713} 717}
714 718
719static int be_set_flags(struct net_device *netdev, u32 data)
720{
721 struct be_adapter *adapter = netdev_priv(netdev);
722 int rc = -1;
723
724 if (be_multi_rxq(adapter))
725 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXHASH |
726 ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
727
728 return rc;
729}
730
715const struct ethtool_ops be_ethtool_ops = { 731const struct ethtool_ops be_ethtool_ops = {
716 .get_settings = be_get_settings, 732 .get_settings = be_get_settings,
717 .get_drvinfo = be_get_drvinfo, 733 .get_drvinfo = be_get_drvinfo,
@@ -725,18 +741,13 @@ const struct ethtool_ops be_ethtool_ops = {
725 .get_ringparam = be_get_ringparam, 741 .get_ringparam = be_get_ringparam,
726 .get_pauseparam = be_get_pauseparam, 742 .get_pauseparam = be_get_pauseparam,
727 .set_pauseparam = be_set_pauseparam, 743 .set_pauseparam = be_set_pauseparam,
728 .get_rx_csum = be_get_rx_csum,
729 .set_rx_csum = be_set_rx_csum,
730 .get_tx_csum = ethtool_op_get_tx_csum,
731 .set_tx_csum = ethtool_op_set_tx_hw_csum,
732 .get_sg = ethtool_op_get_sg,
733 .set_sg = ethtool_op_set_sg,
734 .get_tso = ethtool_op_get_tso,
735 .set_tso = ethtool_op_set_tso,
736 .get_strings = be_get_stat_strings, 744 .get_strings = be_get_stat_strings,
737 .phys_id = be_phys_id, 745 .set_phys_id = be_set_phys_id,
738 .get_sset_count = be_get_sset_count, 746 .get_sset_count = be_get_sset_count,
739 .get_ethtool_stats = be_get_ethtool_stats, 747 .get_ethtool_stats = be_get_ethtool_stats,
748 .get_regs_len = be_get_reg_len,
749 .get_regs = be_get_regs,
740 .flash_device = be_do_flash, 750 .flash_device = be_do_flash,
741 .self_test = be_self_test, 751 .self_test = be_self_test,
752 .set_flags = be_set_flags,
742}; 753};
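
The old blocking be_phys_id loop above is replaced with ethtool's set_phys_id state machine, in which the core owns the blink timing and the driver only switches the LED. In the conversions in this diff, returning -EINVAL from ETHTOOL_ID_ACTIVE asks the core to drive the blinking itself and call back with ETHTOOL_ID_ON/OFF; ETHTOOL_ID_INACTIVE restores the saved state. A minimal sketch, where struct led_priv and the my_led_*() accessors are hypothetical:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct led_priv {
	u32 saved_led;			/* LED register value to restore */
};

/* hypothetical LED accessors, assumed elsewhere in the driver */
u32 my_led_read(struct led_priv *priv);
void my_led_write(struct led_priv *priv, u32 val);

static int my_set_phys_id(struct net_device *dev,
			  enum ethtool_phys_id_state state)
{
	struct led_priv *priv = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		priv->saved_led = my_led_read(priv);
		return -EINVAL;		/* let the core drive ON/OFF callbacks */

	case ETHTOOL_ID_ON:
		my_led_write(priv, 1);	/* force the identify LED on */
		break;

	case ETHTOOL_ID_OFF:
		my_led_write(priv, 0);
		break;

	case ETHTOOL_ID_INACTIVE:
		my_led_write(priv, priv->saved_led);
		break;
	}

	return 0;
}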
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index d4344a06090b..53d658afea2a 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -155,6 +155,10 @@
155/********** SRIOV VF PCICFG OFFSET ********/ 155/********** SRIOV VF PCICFG OFFSET ********/
156#define SRIOV_VF_PCICFG_OFFSET (4096) 156#define SRIOV_VF_PCICFG_OFFSET (4096)
157 157
158/********** FAT TABLE ********/
159#define RETRIEVE_FAT 0
160#define QUERY_FAT 1
161
158/* Flashrom related descriptors */ 162/* Flashrom related descriptors */
159#define IMAGE_TYPE_FIRMWARE 160 163#define IMAGE_TYPE_FIRMWARE 160
160#define IMAGE_TYPE_BOOTCODE 224 164#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 7cb5a114c733..1bb763cda3ad 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -116,11 +116,6 @@ static char *ue_status_hi_desc[] = {
116 "Unknown" 116 "Unknown"
117}; 117};
118 118
119static inline bool be_multi_rxq(struct be_adapter *adapter)
120{
121 return (adapter->num_rx_qs > 1);
122}
123
124static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 119static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{ 120{
126 struct be_dma_mem *mem = &q->dma_mem; 121 struct be_dma_mem *mem = &q->dma_mem;
@@ -993,9 +988,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
993 struct be_rx_obj *rxo, 988 struct be_rx_obj *rxo,
994 struct be_rx_compl_info *rxcp) 989 struct be_rx_compl_info *rxcp)
995{ 990{
991 struct net_device *netdev = adapter->netdev;
996 struct sk_buff *skb; 992 struct sk_buff *skb;
997 993
998 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 994 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
999 if (unlikely(!skb)) { 995 if (unlikely(!skb)) {
1000 if (net_ratelimit()) 996 if (net_ratelimit())
1001 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 997 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -1005,13 +1001,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1005 1001
1006 skb_fill_rx_data(adapter, rxo, skb, rxcp); 1002 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1007 1003
1008 if (likely(adapter->rx_csum && csum_passed(rxcp))) 1004 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1009 skb->ip_summed = CHECKSUM_UNNECESSARY; 1005 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010 else 1006 else
1011 skb_checksum_none_assert(skb); 1007 skb_checksum_none_assert(skb);
1012 1008
1013 skb->truesize = skb->len + sizeof(struct sk_buff); 1009 skb->truesize = skb->len + sizeof(struct sk_buff);
1014 skb->protocol = eth_type_trans(skb, adapter->netdev); 1010 skb->protocol = eth_type_trans(skb, netdev);
1011 if (adapter->netdev->features & NETIF_F_RXHASH)
1012 skb->rxhash = rxcp->rss_hash;
1013
1015 1014
1016 if (unlikely(rxcp->vlanf)) { 1015 if (unlikely(rxcp->vlanf)) {
1017 if (!adapter->vlan_grp || adapter->vlans_added == 0) { 1016 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
@@ -1072,6 +1071,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1072 skb->data_len = rxcp->pkt_size; 1071 skb->data_len = rxcp->pkt_size;
1073 skb->truesize += rxcp->pkt_size; 1072 skb->truesize += rxcp->pkt_size;
1074 skb->ip_summed = CHECKSUM_UNNECESSARY; 1073 skb->ip_summed = CHECKSUM_UNNECESSARY;
1074 if (adapter->netdev->features & NETIF_F_RXHASH)
1075 skb->rxhash = rxcp->rss_hash;
1075 1076
1076 if (likely(!rxcp->vlanf)) 1077 if (likely(!rxcp->vlanf))
1077 napi_gro_frags(&eq_obj->napi); 1078 napi_gro_frags(&eq_obj->napi);
@@ -1101,8 +1102,14 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1101 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); 1102 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1102 rxcp->pkt_type = 1103 rxcp->pkt_type =
1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); 1104 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1104 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); 1105 rxcp->rss_hash =
1105 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); 1106 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1107 if (rxcp->vlanf) {
1108 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1109 compl);
1110 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1111 compl);
1112 }
1106} 1113}
1107 1114
1108static void be_parse_rx_compl_v0(struct be_adapter *adapter, 1115static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1127,8 +1134,14 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1127 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); 1134 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1128 rxcp->pkt_type = 1135 rxcp->pkt_type =
1129 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); 1136 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1130 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); 1137 rxcp->rss_hash =
1131 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); 1138 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
1139 if (rxcp->vlanf) {
1140 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1141 compl);
1142 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1143 compl);
1144 }
1132} 1145}
1133 1146
1134static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) 1147static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1150,15 +1163,19 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1150 else 1163 else
1151 be_parse_rx_compl_v0(adapter, compl, rxcp); 1164 be_parse_rx_compl_v0(adapter, compl, rxcp);
1152 1165
1153 /* vlanf could be wrongly set in some cards. ignore if vtm is not set */ 1166 if (rxcp->vlanf) {
1154 if ((adapter->function_mode & 0x400) && !rxcp->vtm) 1167 /* vlanf could be wrongly set in some cards.
1155 rxcp->vlanf = 0; 1168 * ignore if vtm is not set */
1169 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1170 rxcp->vlanf = 0;
1156 1171
1157 if (!lancer_chip(adapter)) 1172 if (!lancer_chip(adapter))
1158 rxcp->vid = swab16(rxcp->vid); 1173 rxcp->vid = swab16(rxcp->vid);
1159 1174
1160 if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) 1175 if ((adapter->pvid == rxcp->vid) &&
1161 rxcp->vlanf = 0; 1176 !adapter->vlan_tag[rxcp->vid])
1177 rxcp->vlanf = 0;
1178 }
1162 1179
1163 /* As the compl has been parsed, reset it; we wont touch it again */ 1180 /* As the compl has been parsed, reset it; we wont touch it again */
1164 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0; 1181 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
@@ -1567,12 +1584,31 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
1567 } 1584 }
1568} 1585}
1569 1586
1587static u32 be_num_rxqs_want(struct be_adapter *adapter)
1588{
1589 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1590 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1591 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1592 } else {
1593 dev_warn(&adapter->pdev->dev,
1594 "No support for multiple RX queues\n");
1595 return 1;
1596 }
1597}
1598
1570static int be_rx_queues_create(struct be_adapter *adapter) 1599static int be_rx_queues_create(struct be_adapter *adapter)
1571{ 1600{
1572 struct be_queue_info *eq, *q, *cq; 1601 struct be_queue_info *eq, *q, *cq;
1573 struct be_rx_obj *rxo; 1602 struct be_rx_obj *rxo;
1574 int rc, i; 1603 int rc, i;
1575 1604
1605 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1606 msix_enabled(adapter) ?
1607 adapter->num_msix_vec - 1 : 1);
1608 if (adapter->num_rx_qs != MAX_RX_QS)
1609 dev_warn(&adapter->pdev->dev,
1610 "Can create only %d RX queues", adapter->num_rx_qs);
1611
1576 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1612 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1577 for_all_rx_queues(adapter, rxo, i) { 1613 for_all_rx_queues(adapter, rxo, i) {
1578 rxo->adapter = adapter; 1614 rxo->adapter = adapter;
@@ -1837,6 +1873,9 @@ static void be_worker(struct work_struct *work)
1837 struct be_rx_obj *rxo; 1873 struct be_rx_obj *rxo;
1838 int i; 1874 int i;
1839 1875
1876 if (!adapter->ue_detected && !lancer_chip(adapter))
1877 be_detect_dump_ue(adapter);
1878
1840 /* when interrupts are not yet enabled, just reap any pending 1879 /* when interrupts are not yet enabled, just reap any pending
1841 * mcc completions */ 1880 * mcc completions */
1842 if (!netif_running(adapter->netdev)) { 1881 if (!netif_running(adapter->netdev)) {
@@ -1849,9 +1888,6 @@ static void be_worker(struct work_struct *work)
1849 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); 1888 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1850 } 1889 }
1851 1890
1852 if (!adapter->ue_detected && !lancer_chip(adapter))
1853 be_detect_dump_ue(adapter);
1854
1855 goto reschedule; 1891 goto reschedule;
1856 } 1892 }
1857 1893
@@ -1869,8 +1905,6 @@ static void be_worker(struct work_struct *work)
1869 be_post_rx_frags(rxo, GFP_KERNEL); 1905 be_post_rx_frags(rxo, GFP_KERNEL);
1870 } 1906 }
1871 } 1907 }
1872 if (!adapter->ue_detected && !lancer_chip(adapter))
1873 be_detect_dump_ue(adapter);
1874 1908
1875reschedule: 1909reschedule:
1876 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1910 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -1878,51 +1912,35 @@ reschedule:
1878 1912
1879static void be_msix_disable(struct be_adapter *adapter) 1913static void be_msix_disable(struct be_adapter *adapter)
1880{ 1914{
1881 if (adapter->msix_enabled) { 1915 if (msix_enabled(adapter)) {
1882 pci_disable_msix(adapter->pdev); 1916 pci_disable_msix(adapter->pdev);
1883 adapter->msix_enabled = false; 1917 adapter->num_msix_vec = 0;
1884 }
1885}
1886
1887static int be_num_rxqs_get(struct be_adapter *adapter)
1888{
1889 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1890 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1891 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1892 } else {
1893 dev_warn(&adapter->pdev->dev,
1894 "No support for multiple RX queues\n");
1895 return 1;
1896 } 1918 }
1897} 1919}
1898 1920
1899static void be_msix_enable(struct be_adapter *adapter) 1921static void be_msix_enable(struct be_adapter *adapter)
1900{ 1922{
1901#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */ 1923#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1902 int i, status; 1924 int i, status, num_vec;
1903 1925
1904 adapter->num_rx_qs = be_num_rxqs_get(adapter); 1926 num_vec = be_num_rxqs_want(adapter) + 1;
1905 1927
1906 for (i = 0; i < (adapter->num_rx_qs + 1); i++) 1928 for (i = 0; i < num_vec; i++)
1907 adapter->msix_entries[i].entry = i; 1929 adapter->msix_entries[i].entry = i;
1908 1930
1909 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1931 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1910 adapter->num_rx_qs + 1);
1911 if (status == 0) { 1932 if (status == 0) {
1912 goto done; 1933 goto done;
1913 } else if (status >= BE_MIN_MSIX_VECTORS) { 1934 } else if (status >= BE_MIN_MSIX_VECTORS) {
1935 num_vec = status;
1914 if (pci_enable_msix(adapter->pdev, adapter->msix_entries, 1936 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1915 status) == 0) { 1937 num_vec) == 0)
1916 adapter->num_rx_qs = status - 1;
1917 dev_warn(&adapter->pdev->dev,
1918 "Could alloc only %d MSIx vectors. "
1919 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1920 goto done; 1938 goto done;
1921 }
1922 } 1939 }
1923 return; 1940 return;
1924done: 1941done:
1925 adapter->msix_enabled = true; 1942 adapter->num_msix_vec = num_vec;
1943 return;
1926} 1944}
1927 1945
1928static void be_sriov_enable(struct be_adapter *adapter) 1946static void be_sriov_enable(struct be_adapter *adapter)
@@ -1930,7 +1948,20 @@ static void be_sriov_enable(struct be_adapter *adapter)
1930 be_check_sriov_fn_type(adapter); 1948 be_check_sriov_fn_type(adapter);
1931#ifdef CONFIG_PCI_IOV 1949#ifdef CONFIG_PCI_IOV
1932 if (be_physfn(adapter) && num_vfs) { 1950 if (be_physfn(adapter) && num_vfs) {
1933 int status; 1951 int status, pos;
1952 u16 nvfs;
1953
1954 pos = pci_find_ext_capability(adapter->pdev,
1955 PCI_EXT_CAP_ID_SRIOV);
1956 pci_read_config_word(adapter->pdev,
1957 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1958
1959 if (num_vfs > nvfs) {
1960 dev_info(&adapter->pdev->dev,
1961 "Device supports %d VFs and not %d\n",
1962 nvfs, num_vfs);
1963 num_vfs = nvfs;
1964 }
1934 1965
1935 status = pci_enable_sriov(adapter->pdev, num_vfs); 1966 status = pci_enable_sriov(adapter->pdev, num_vfs);
1936 adapter->sriov_enabled = status ? false : true; 1967 adapter->sriov_enabled = status ? false : true;
@@ -2003,8 +2034,7 @@ err_msix:
2003err: 2034err:
2004 dev_warn(&adapter->pdev->dev, 2035 dev_warn(&adapter->pdev->dev,
2005 "MSIX Request IRQ failed - err %d\n", status); 2036 "MSIX Request IRQ failed - err %d\n", status);
2006 pci_disable_msix(adapter->pdev); 2037 be_msix_disable(adapter);
2007 adapter->msix_enabled = false;
2008 return status; 2038 return status;
2009} 2039}
2010 2040
@@ -2013,7 +2043,7 @@ static int be_irq_register(struct be_adapter *adapter)
2013 struct net_device *netdev = adapter->netdev; 2043 struct net_device *netdev = adapter->netdev;
2014 int status; 2044 int status;
2015 2045
2016 if (adapter->msix_enabled) { 2046 if (msix_enabled(adapter)) {
2017 status = be_msix_register(adapter); 2047 status = be_msix_register(adapter);
2018 if (status == 0) 2048 if (status == 0)
2019 goto done; 2049 goto done;
@@ -2046,7 +2076,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
2046 return; 2076 return;
2047 2077
2048 /* INTx */ 2078 /* INTx */
2049 if (!adapter->msix_enabled) { 2079 if (!msix_enabled(adapter)) {
2050 free_irq(netdev->irq, adapter); 2080 free_irq(netdev->irq, adapter);
2051 goto done; 2081 goto done;
2052 } 2082 }
@@ -2088,7 +2118,7 @@ static int be_close(struct net_device *netdev)
2088 be_cq_notify(adapter, rxo->cq.id, false, 0); 2118 be_cq_notify(adapter, rxo->cq.id, false, 0);
2089 } 2119 }
2090 2120
2091 if (adapter->msix_enabled) { 2121 if (msix_enabled(adapter)) {
2092 vec = be_msix_vec_get(adapter, tx_eq); 2122 vec = be_msix_vec_get(adapter, tx_eq);
2093 synchronize_irq(vec); 2123 synchronize_irq(vec);
2094 2124
@@ -2261,7 +2291,7 @@ static int be_setup(struct be_adapter *adapter)
2261 BE_IF_FLAGS_PASS_L3L4_ERRORS; 2291 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2262 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; 2292 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2263 2293
2264 if (be_multi_rxq(adapter)) { 2294 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2265 cap_flags |= BE_IF_FLAGS_RSS; 2295 cap_flags |= BE_IF_FLAGS_RSS;
2266 en_flags |= BE_IF_FLAGS_RSS; 2296 en_flags |= BE_IF_FLAGS_RSS;
2267 } 2297 }
@@ -2318,7 +2348,6 @@ static int be_setup(struct be_adapter *adapter)
2318 2348
2319 return 0; 2349 return 0;
2320 2350
2321 be_mcc_queues_destroy(adapter);
2322rx_qs_destroy: 2351rx_qs_destroy:
2323 be_rx_queues_destroy(adapter); 2352 be_rx_queues_destroy(adapter);
2324tx_qs_destroy: 2353tx_qs_destroy:
@@ -2599,10 +2628,14 @@ static void be_netdev_init(struct net_device *netdev)
2599 struct be_rx_obj *rxo; 2628 struct be_rx_obj *rxo;
2600 int i; 2629 int i;
2601 2630
2602 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2631 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2603 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2632 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2604 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2633 NETIF_F_HW_VLAN_TX;
2605 NETIF_F_GRO | NETIF_F_TSO6; 2634 if (be_multi_rxq(adapter))
2635 netdev->hw_features |= NETIF_F_RXHASH;
2636
2637 netdev->features |= netdev->hw_features |
2638 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2606 2639
2607 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | 2640 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2608 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2641 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -2612,8 +2645,6 @@ static void be_netdev_init(struct net_device *netdev)
2612 2645
2613 netdev->flags |= IFF_MULTICAST; 2646 netdev->flags |= IFF_MULTICAST;
2614 2647
2615 adapter->rx_csum = true;
2616
2617 /* Default settings for Rx and Tx flow control */ 2648 /* Default settings for Rx and Tx flow control */
2618 adapter->rx_fc = true; 2649 adapter->rx_fc = true;
2619 adapter->tx_fc = true; 2650 adapter->tx_fc = true;
@@ -2807,6 +2838,7 @@ static void __devexit be_remove(struct pci_dev *pdev)
2807 2838
2808 be_ctrl_cleanup(adapter); 2839 be_ctrl_cleanup(adapter);
2809 2840
2841 kfree(adapter->vf_cfg);
2810 be_sriov_disable(adapter); 2842 be_sriov_disable(adapter);
2811 2843
2812 be_msix_disable(adapter); 2844 be_msix_disable(adapter);
@@ -2991,16 +3023,23 @@ static int __devinit be_probe(struct pci_dev *pdev,
2991 } 3023 }
2992 3024
2993 be_sriov_enable(adapter); 3025 be_sriov_enable(adapter);
3026 if (adapter->sriov_enabled) {
3027 adapter->vf_cfg = kcalloc(num_vfs,
3028 sizeof(struct be_vf_cfg), GFP_KERNEL);
3029
3030 if (!adapter->vf_cfg)
3031 goto free_netdev;
3032 }
2994 3033
2995 status = be_ctrl_init(adapter); 3034 status = be_ctrl_init(adapter);
2996 if (status) 3035 if (status)
2997 goto free_netdev; 3036 goto free_vf_cfg;
2998 3037
2999 if (lancer_chip(adapter)) { 3038 if (lancer_chip(adapter)) {
3000 status = lancer_test_and_set_rdy_state(adapter); 3039 status = lancer_test_and_set_rdy_state(adapter);
3001 if (status) { 3040 if (status) {
3002 dev_err(&pdev->dev, "Adapter in non recoverable error\n"); 3041 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3003 goto free_netdev; 3042 goto ctrl_clean;
3004 } 3043 }
3005 } 3044 }
3006 3045
@@ -3062,6 +3101,8 @@ stats_clean:
3062 be_stats_cleanup(adapter); 3101 be_stats_cleanup(adapter);
3063ctrl_clean: 3102ctrl_clean:
3064 be_ctrl_cleanup(adapter); 3103 be_ctrl_cleanup(adapter);
3104free_vf_cfg:
3105 kfree(adapter->vf_cfg);
3065free_netdev: 3106free_netdev:
3066 be_sriov_disable(adapter); 3107 be_sriov_disable(adapter);
3067 free_netdev(netdev); 3108 free_netdev(netdev);
@@ -3146,16 +3187,15 @@ static void be_shutdown(struct pci_dev *pdev)
3146 if (!adapter) 3187 if (!adapter)
3147 return; 3188 return;
3148 3189
3149 if (netif_running(adapter->netdev)) 3190 cancel_delayed_work_sync(&adapter->work);
3150 cancel_delayed_work_sync(&adapter->work);
3151 3191
3152 netif_device_detach(adapter->netdev); 3192 netif_device_detach(adapter->netdev);
3153 3193
3154 be_cmd_reset_function(adapter);
3155
3156 if (adapter->wol) 3194 if (adapter->wol)
3157 be_setup_wol(adapter, true); 3195 be_setup_wol(adapter, true);
3158 3196
3197 be_cmd_reset_function(adapter);
3198
3159 pci_disable_device(pdev); 3199 pci_disable_device(pdev);
3160} 3200}
3161 3201
@@ -3267,13 +3307,6 @@ static int __init be_init_module(void)
3267 rx_frag_size = 2048; 3307 rx_frag_size = 2048;
3268 } 3308 }
3269 3309
3270 if (num_vfs > 32) {
3271 printk(KERN_WARNING DRV_NAME
3272 " : Module param num_vfs must not be greater than 32."
3273 "Using 32\n");
3274 num_vfs = 32;
3275 }
3276
3277 return pci_register_driver(&be_driver); 3310 return pci_register_driver(&be_driver);
3278} 3311}
3279module_init(be_init_module); 3312module_init(be_init_module);
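
be_msix_enable() above relies on the pci_enable_msix() contract of this kernel generation: 0 means all requested vectors were granted, a positive return is the number actually available (so the request can be retried with that count), and a negative value is a hard error. A standalone sketch of that retry idiom, with MY_MIN_VEC as an assumed minimum (one Rx plus one Tx vector):

#include <linux/errno.h>
#include <linux/pci.h>

#define MY_MIN_VEC	2		/* assumed minimum: Rx + Tx */

/* Returns the number of MSI-X vectors enabled, or a negative errno if
 * even the minimum could not be allocated (caller falls back to INTx). */
static int my_enable_msix(struct pci_dev *pdev,
			  struct msix_entry *entries, int want)
{
	int i, ret;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(pdev, entries, want);
	if (ret > 0 && ret >= MY_MIN_VEC) {
		want = ret;		/* only this many are available */
		ret = pci_enable_msix(pdev, entries, want);
	}

	if (ret < 0)
		return ret;		/* hard failure */
	if (ret > 0)
		return -ENOSPC;		/* still short of the minimum */

	return want;
}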
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8e6d618b5305..0a52079bafef 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3174,7 +3174,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3174 } 3174 }
3175 3175
3176 skb_checksum_none_assert(skb); 3176 skb_checksum_none_assert(skb);
3177 if (bp->rx_csum && 3177 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3178 (status & (L2_FHDR_STATUS_TCP_SEGMENT | 3178 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3179 L2_FHDR_STATUS_UDP_DATAGRAM))) { 3179 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3180 3180
@@ -7189,38 +7189,6 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7189 return 0; 7189 return 0;
7190} 7190}
7191 7191
7192static u32
7193bnx2_get_rx_csum(struct net_device *dev)
7194{
7195 struct bnx2 *bp = netdev_priv(dev);
7196
7197 return bp->rx_csum;
7198}
7199
7200static int
7201bnx2_set_rx_csum(struct net_device *dev, u32 data)
7202{
7203 struct bnx2 *bp = netdev_priv(dev);
7204
7205 bp->rx_csum = data;
7206 return 0;
7207}
7208
7209static int
7210bnx2_set_tso(struct net_device *dev, u32 data)
7211{
7212 struct bnx2 *bp = netdev_priv(dev);
7213
7214 if (data) {
7215 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7216 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7217 dev->features |= NETIF_F_TSO6;
7218 } else
7219 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7220 NETIF_F_TSO_ECN);
7221 return 0;
7222}
7223
7224static struct { 7192static struct {
7225 char string[ETH_GSTRING_LEN]; 7193 char string[ETH_GSTRING_LEN];
7226} bnx2_stats_str_arr[] = { 7194} bnx2_stats_str_arr[] = {
@@ -7495,82 +7463,74 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7495} 7463}
7496 7464
7497static int 7465static int
7498bnx2_phys_id(struct net_device *dev, u32 data) 7466bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7499{ 7467{
7500 struct bnx2 *bp = netdev_priv(dev); 7468 struct bnx2 *bp = netdev_priv(dev);
7501 int i;
7502 u32 save;
7503 7469
7504 bnx2_set_power_state(bp, PCI_D0); 7470 switch (state) {
7471 case ETHTOOL_ID_ACTIVE:
7472 bnx2_set_power_state(bp, PCI_D0);
7505 7473
7506 if (data == 0) 7474 bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7507 data = 2; 7475 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7476 return -EINVAL;
7508 7477
7509 save = REG_RD(bp, BNX2_MISC_CFG); 7478 case ETHTOOL_ID_ON:
7510 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); 7479 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7480 BNX2_EMAC_LED_1000MB_OVERRIDE |
7481 BNX2_EMAC_LED_100MB_OVERRIDE |
7482 BNX2_EMAC_LED_10MB_OVERRIDE |
7483 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7484 BNX2_EMAC_LED_TRAFFIC);
7485 break;
7511 7486
7512 for (i = 0; i < (data * 2); i++) { 7487 case ETHTOOL_ID_OFF:
7513 if ((i % 2) == 0) { 7488 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7514 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); 7489 break;
7515 }
7516 else {
7517 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7518 BNX2_EMAC_LED_1000MB_OVERRIDE |
7519 BNX2_EMAC_LED_100MB_OVERRIDE |
7520 BNX2_EMAC_LED_10MB_OVERRIDE |
7521 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7522 BNX2_EMAC_LED_TRAFFIC);
7523 }
7524 msleep_interruptible(500);
7525 if (signal_pending(current))
7526 break;
7527 }
7528 REG_WR(bp, BNX2_EMAC_LED, 0);
7529 REG_WR(bp, BNX2_MISC_CFG, save);
7530 7490
7531 if (!netif_running(dev)) 7491 case ETHTOOL_ID_INACTIVE:
7532 bnx2_set_power_state(bp, PCI_D3hot); 7492 REG_WR(bp, BNX2_EMAC_LED, 0);
7493 REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7494
7495 if (!netif_running(dev))
7496 bnx2_set_power_state(bp, PCI_D3hot);
7497 break;
7498 }
7533 7499
7534 return 0; 7500 return 0;
7535} 7501}
7536 7502
7537static int 7503static u32
7538bnx2_set_tx_csum(struct net_device *dev, u32 data) 7504bnx2_fix_features(struct net_device *dev, u32 features)
7539{ 7505{
7540 struct bnx2 *bp = netdev_priv(dev); 7506 struct bnx2 *bp = netdev_priv(dev);
7541 7507
7542 if (CHIP_NUM(bp) == CHIP_NUM_5709) 7508 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7543 return ethtool_op_set_tx_ipv6_csum(dev, data); 7509 features |= NETIF_F_HW_VLAN_RX;
7544 else 7510
7545 return ethtool_op_set_tx_csum(dev, data); 7511 return features;
7546} 7512}
7547 7513
7548static int 7514static int
7549bnx2_set_flags(struct net_device *dev, u32 data) 7515bnx2_set_features(struct net_device *dev, u32 features)
7550{ 7516{
7551 struct bnx2 *bp = netdev_priv(dev); 7517 struct bnx2 *bp = netdev_priv(dev);
7552 int rc;
7553
7554 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) &&
7555 !(data & ETH_FLAG_RXVLAN))
7556 return -EINVAL;
7557 7518
7558 /* TSO with VLAN tag won't work with current firmware */ 7519 /* TSO with VLAN tag won't work with current firmware */
7559 if (!(data & ETH_FLAG_TXVLAN)) 7520 if (features & NETIF_F_HW_VLAN_TX)
7560 return -EINVAL; 7521 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7561 7522 else
7562 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | 7523 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7563 ETH_FLAG_TXVLAN);
7564 if (rc)
7565 return rc;
7566 7524
7567 if ((!!(data & ETH_FLAG_RXVLAN) != 7525 if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7568 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && 7526 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7569 netif_running(dev)) { 7527 netif_running(dev)) {
7570 bnx2_netif_stop(bp, false); 7528 bnx2_netif_stop(bp, false);
7529 dev->features = features;
7571 bnx2_set_rx_mode(dev); 7530 bnx2_set_rx_mode(dev);
7572 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); 7531 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7573 bnx2_netif_start(bp, false); 7532 bnx2_netif_start(bp, false);
7533 return 1;
7574 } 7534 }
7575 7535
7576 return 0; 7536 return 0;
@@ -7595,18 +7555,11 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
7595 .set_ringparam = bnx2_set_ringparam, 7555 .set_ringparam = bnx2_set_ringparam,
7596 .get_pauseparam = bnx2_get_pauseparam, 7556 .get_pauseparam = bnx2_get_pauseparam,
7597 .set_pauseparam = bnx2_set_pauseparam, 7557 .set_pauseparam = bnx2_set_pauseparam,
7598 .get_rx_csum = bnx2_get_rx_csum,
7599 .set_rx_csum = bnx2_set_rx_csum,
7600 .set_tx_csum = bnx2_set_tx_csum,
7601 .set_sg = ethtool_op_set_sg,
7602 .set_tso = bnx2_set_tso,
7603 .self_test = bnx2_self_test, 7558 .self_test = bnx2_self_test,
7604 .get_strings = bnx2_get_strings, 7559 .get_strings = bnx2_get_strings,
7605 .phys_id = bnx2_phys_id, 7560 .set_phys_id = bnx2_set_phys_id,
7606 .get_ethtool_stats = bnx2_get_ethtool_stats, 7561 .get_ethtool_stats = bnx2_get_ethtool_stats,
7607 .get_sset_count = bnx2_get_sset_count, 7562 .get_sset_count = bnx2_get_sset_count,
7608 .set_flags = bnx2_set_flags,
7609 .get_flags = ethtool_op_get_flags,
7610}; 7563};
7611 7564
7612/* Called with rtnl_lock */ 7565/* Called with rtnl_lock */
@@ -8118,8 +8071,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8118 bp->tx_ring_size = MAX_TX_DESC_CNT; 8071 bp->tx_ring_size = MAX_TX_DESC_CNT;
8119 bnx2_set_rx_ring_size(bp, 255); 8072 bnx2_set_rx_ring_size(bp, 255);
8120 8073
8121 bp->rx_csum = 1;
8122
8123 bp->tx_quick_cons_trip_int = 2; 8074 bp->tx_quick_cons_trip_int = 2;
8124 bp->tx_quick_cons_trip = 20; 8075 bp->tx_quick_cons_trip = 20;
8125 bp->tx_ticks_int = 18; 8076 bp->tx_ticks_int = 18;
@@ -8311,17 +8262,14 @@ static const struct net_device_ops bnx2_netdev_ops = {
8311 .ndo_validate_addr = eth_validate_addr, 8262 .ndo_validate_addr = eth_validate_addr,
8312 .ndo_set_mac_address = bnx2_change_mac_addr, 8263 .ndo_set_mac_address = bnx2_change_mac_addr,
8313 .ndo_change_mtu = bnx2_change_mtu, 8264 .ndo_change_mtu = bnx2_change_mtu,
8265 .ndo_fix_features = bnx2_fix_features,
8266 .ndo_set_features = bnx2_set_features,
8314 .ndo_tx_timeout = bnx2_tx_timeout, 8267 .ndo_tx_timeout = bnx2_tx_timeout,
8315#ifdef CONFIG_NET_POLL_CONTROLLER 8268#ifdef CONFIG_NET_POLL_CONTROLLER
8316 .ndo_poll_controller = poll_bnx2, 8269 .ndo_poll_controller = poll_bnx2,
8317#endif 8270#endif
8318}; 8271};
8319 8272
8320static inline void vlan_features_add(struct net_device *dev, u32 flags)
8321{
8322 dev->vlan_features |= flags;
8323}
8324
8325static int __devinit 8273static int __devinit
8326bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8274bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8327{ 8275{
@@ -8361,20 +8309,17 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8361 memcpy(dev->dev_addr, bp->mac_addr, 6); 8309 memcpy(dev->dev_addr, bp->mac_addr, 6);
8362 memcpy(dev->perm_addr, bp->mac_addr, 6); 8310 memcpy(dev->perm_addr, bp->mac_addr, 6);
8363 8311
8364 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO | 8312 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8365 NETIF_F_RXHASH; 8313 NETIF_F_TSO | NETIF_F_TSO_ECN |
8366 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG); 8314 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8367 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8315
8368 dev->features |= NETIF_F_IPV6_CSUM; 8316 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8369 vlan_features_add(dev, NETIF_F_IPV6_CSUM); 8317 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8370 } 8318
8371 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8319 dev->vlan_features = dev->hw_features;
8372 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; 8320 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8373 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN); 8321 dev->features |= dev->hw_features;
8374 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8322
8375 dev->features |= NETIF_F_TSO6;
8376 vlan_features_add(dev, NETIF_F_TSO6);
8377 }
8378 if ((rc = register_netdev(dev))) { 8323 if ((rc = register_netdev(dev))) {
8379 dev_err(&pdev->dev, "Cannot register net device\n"); 8324 dev_err(&pdev->dev, "Cannot register net device\n");
8380 goto error; 8325 goto error;
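
Note: the bnx2.c hunks above replace the per-driver ethtool offload hooks (get/set_rx_csum, set_tx_csum, set_sg, set_tso, set_flags) with the netdev feature framework: togglable capabilities are advertised in dev->hw_features at probe time and adjusted through ndo_fix_features/ndo_set_features. A minimal sketch of that pattern follows, using hypothetical foo_* names (struct foo_priv, FOO_FLAG_CAN_KEEP_VLAN and foo_program_rx_csum are invented for illustration; the net_device fields, ops and NETIF_F_* flags are the real interface of this kernel generation, where feature masks are still u32).

#include <linux/netdevice.h>

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	struct foo_priv *fp = netdev_priv(dev);

	/* Force on anything the hardware cannot actually turn off,
	 * mirroring bnx2_fix_features() above. */
	if (!(fp->flags & FOO_FLAG_CAN_KEEP_VLAN))
		features |= NETIF_F_HW_VLAN_RX;

	return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
	/* Reprogram only what changed relative to dev->features. */
	if ((dev->features ^ features) & NETIF_F_RXCSUM)
		foo_program_rx_csum(dev, !!(features & NETIF_F_RXCSUM));

	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_fix_features	= foo_fix_features,
	.ndo_set_features	= foo_set_features,
};

/* Probe-time setup: advertise the togglable offloads in hw_features and
 * enable them by default, as bnx2_init_one() now does. */
static void foo_init_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= dev->hw_features;
}
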
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 68020451dc4f..bf371f6fe154 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6754,8 +6754,6 @@ struct bnx2 {
6754 u32 rx_max_ring_idx; 6754 u32 rx_max_ring_idx;
6755 u32 rx_max_pg_ring_idx; 6755 u32 rx_max_pg_ring_idx;
6756 6756
6757 u32 rx_csum;
6758
6759 /* TX constants */ 6757 /* TX constants */
6760 int tx_ring_size; 6758 int tx_ring_size;
6761 u32 tx_wake_thresh; 6759 u32 tx_wake_thresh;
@@ -6922,6 +6920,7 @@ struct bnx2 {
6922 u8 num_tx_rings; 6920 u8 num_tx_rings;
6923 u8 num_rx_rings; 6921 u8 num_rx_rings;
6924 6922
6923 u32 leds_save;
6925 u32 idle_chk_status_idx; 6924 u32 idle_chk_status_idx;
6926 6925
6927#ifdef BCM_CNIC 6926#ifdef BCM_CNIC
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index ef37b98d6146..775fef031ad8 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1041,12 +1041,23 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
1041 struct cmng_struct_per_port *cmng, 1041 struct cmng_struct_per_port *cmng,
1042 u8 port) 1042 u8 port)
1043{ 1043{
1044 size_t size = sizeof(struct cmng_struct_per_port); 1044 size_t size =
1045 sizeof(struct rate_shaping_vars_per_port) +
1046 sizeof(struct fairness_vars_per_port) +
1047 sizeof(struct safc_struct_per_port) +
1048 sizeof(struct pfc_struct_per_port);
1045 1049
1046 u32 addr = BAR_XSTRORM_INTMEM + 1050 u32 addr = BAR_XSTRORM_INTMEM +
1047 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 1051 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1048 1052
1049 __storm_memset_struct(bp, addr, size, (u32 *)cmng); 1053 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
1054
1055 addr += size + 4 /* SKIP DCB+LLFC */;
1056 size = sizeof(struct cmng_struct_per_port) -
1057 size /* written */ - 4 /*skipped*/;
1058
1059 __storm_memset_struct(bp, addr, size,
1060 (u32 *)(cmng->traffic_type_to_priority_cos));
1050} 1061}
1051 1062
1052/* HW Lock for shared dual port PHYs */ 1063/* HW Lock for shared dual port PHYs */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index 9a24d79c71d9..1214907d00d9 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -571,6 +571,28 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
571{ 571{
572 switch (state) { 572 switch (state) {
573 case BNX2X_DCBX_STATE_NEG_RECEIVED: 573 case BNX2X_DCBX_STATE_NEG_RECEIVED:
574#ifdef BCM_CNIC
575 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
576 struct cnic_ops *c_ops;
577 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
578 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
579 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
580 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
581
582 rcu_read_lock();
583 c_ops = rcu_dereference(bp->cnic_ops);
584 if (c_ops) {
585 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
586 rcu_read_unlock();
587 return;
588 }
589 rcu_read_unlock();
590 }
591
592 /* fall through if no CNIC initialized */
593 case BNX2X_DCBX_STATE_ISCSI_STOPPED:
594#endif
595
574 { 596 {
575 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); 597 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
576#ifdef BCM_DCBNL 598#ifdef BCM_DCBNL
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index 71b8eda43bd0..1e14775a18cb 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -183,9 +183,13 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
183 183
184enum { 184enum {
185 BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, 185 BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
186 BNX2X_DCBX_STATE_TX_PAUSED = 0x2, 186#ifdef BCM_CNIC
187 BNX2X_DCBX_STATE_TX_RELEASED = 0x4 187 BNX2X_DCBX_STATE_ISCSI_STOPPED,
188#endif
189 BNX2X_DCBX_STATE_TX_PAUSED,
190 BNX2X_DCBX_STATE_TX_RELEASED
188}; 191};
192
189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); 193void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
190 194
191/* DCB netlink */ 195/* DCB netlink */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index f5050155c6b5..147999459df5 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -2097,36 +2097,38 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
2097 } 2097 }
2098} 2098}
2099 2099
2100static int bnx2x_phys_id(struct net_device *dev, u32 data) 2100static int bnx2x_set_phys_id(struct net_device *dev,
2101 enum ethtool_phys_id_state state)
2101{ 2102{
2102 struct bnx2x *bp = netdev_priv(dev); 2103 struct bnx2x *bp = netdev_priv(dev);
2103 int i;
2104 2104
2105 if (!netif_running(dev)) 2105 if (!netif_running(dev))
2106 return 0; 2106 return -EAGAIN;
2107 2107
2108 if (!bp->port.pmf) 2108 if (!bp->port.pmf)
2109 return 0; 2109 return -EOPNOTSUPP;
2110 2110
2111 if (data == 0) 2111 switch (state) {
2112 data = 2; 2112 case ETHTOOL_ID_ACTIVE:
2113 return -EINVAL;
2113 2114
2114 for (i = 0; i < (data * 2); i++) { 2115 case ETHTOOL_ID_ON:
2115 if ((i % 2) == 0) 2116 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2116 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2117 LED_MODE_OPER, SPEED_1000);
2117 LED_MODE_OPER, SPEED_1000); 2118 break;
2118 else
2119 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2120 LED_MODE_OFF, 0);
2121 2119
2122 msleep_interruptible(500); 2120 case ETHTOOL_ID_OFF:
2123 if (signal_pending(current)) 2121 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2124 break; 2122 LED_MODE_OFF, 0);
2125 }
2126 2123
2127 if (bp->link_vars.link_up) 2124 break;
2128 bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER, 2125
2129 bp->link_vars.line_speed); 2126 case ETHTOOL_ID_INACTIVE:
2127 if (bp->link_vars.link_up)
2128 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2129 LED_MODE_OPER,
2130 bp->link_vars.line_speed);
2131 }
2130 2132
2131 return 0; 2133 return 0;
2132} 2134}
@@ -2218,7 +2220,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
2218 .self_test = bnx2x_self_test, 2220 .self_test = bnx2x_self_test,
2219 .get_sset_count = bnx2x_get_sset_count, 2221 .get_sset_count = bnx2x_get_sset_count,
2220 .get_strings = bnx2x_get_strings, 2222 .get_strings = bnx2x_get_strings,
2221 .phys_id = bnx2x_phys_id, 2223 .set_phys_id = bnx2x_set_phys_id,
2222 .get_ethtool_stats = bnx2x_get_ethtool_stats, 2224 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2223 .get_rxnfc = bnx2x_get_rxnfc, 2225 .get_rxnfc = bnx2x_get_rxnfc,
2224 .get_rxfh_indir = bnx2x_get_rxfh_indir, 2226 .get_rxfh_indir = bnx2x_get_rxfh_indir,
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index dac1bf9cbbfa..2b5940af5d1b 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -1929,7 +1929,7 @@ struct host_func_stats {
1929 1929
1930#define BCM_5710_FW_MAJOR_VERSION 6 1930#define BCM_5710_FW_MAJOR_VERSION 6
1931#define BCM_5710_FW_MINOR_VERSION 2 1931#define BCM_5710_FW_MINOR_VERSION 2
1932#define BCM_5710_FW_REVISION_VERSION 5 1932#define BCM_5710_FW_REVISION_VERSION 9
1933#define BCM_5710_FW_ENGINEERING_VERSION 0 1933#define BCM_5710_FW_ENGINEERING_VERSION 0
1934#define BCM_5710_FW_COMPILE_FLAGS 1 1934#define BCM_5710_FW_COMPILE_FLAGS 1
1935 1935
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index a97a4a1c344f..a6915aafa695 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -10342,6 +10342,11 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10342 break; 10342 break;
10343 } 10343 }
10344 10344
10345 case DRV_CTL_ISCSI_STOPPED_CMD: {
10346 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
10347 break;
10348 }
10349
10345 default: 10350 default:
10346 BNX2X_ERR("unknown command %x\n", ctl->cmd); 10351 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10347 rc = -EINVAL; 10352 rc = -EINVAL;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 16d6fe954695..b51e021354b5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1407,7 +1407,7 @@ static int bond_compute_features(struct bonding *bond)
1407 int i; 1407 int i;
1408 1408
1409 features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); 1409 features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
1410 features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; 1410 features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_NOCACHE_COPY;
1411 1411
1412 if (!bond->first_slave) 1412 if (!bond->first_slave)
1413 goto done; 1413 goto done;
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 8cca60e43444..cde59b4e5ef8 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2966,31 +2966,36 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
2966 return 0; 2966 return 0;
2967} 2967}
2968 2968
2969static void cnic_ulp_stop(struct cnic_dev *dev) 2969static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
2970{ 2970{
2971 struct cnic_local *cp = dev->cnic_priv; 2971 struct cnic_ulp_ops *ulp_ops;
2972 int if_type;
2973
2974 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2975 2972
2976 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2973 if (if_type == CNIC_ULP_ISCSI)
2977 struct cnic_ulp_ops *ulp_ops; 2974 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2978 2975
2979 mutex_lock(&cnic_lock); 2976 mutex_lock(&cnic_lock);
2980 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 2977 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2981 lockdep_is_held(&cnic_lock)); 2978 lockdep_is_held(&cnic_lock));
2982 if (!ulp_ops) { 2979 if (!ulp_ops) {
2983 mutex_unlock(&cnic_lock);
2984 continue;
2985 }
2986 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2987 mutex_unlock(&cnic_lock); 2980 mutex_unlock(&cnic_lock);
2981 return;
2982 }
2983 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2984 mutex_unlock(&cnic_lock);
2988 2985
2989 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2986 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2990 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 2987 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2991 2988
2992 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2989 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2993 } 2990}
2991
2992static void cnic_ulp_stop(struct cnic_dev *dev)
2993{
2994 struct cnic_local *cp = dev->cnic_priv;
2995 int if_type;
2996
2997 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
2998 cnic_ulp_stop_one(cp, if_type);
2994} 2999}
2995 3000
2996static void cnic_ulp_start(struct cnic_dev *dev) 3001static void cnic_ulp_start(struct cnic_dev *dev)
@@ -3039,6 +3044,12 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3039 3044
3040 cnic_put(dev); 3045 cnic_put(dev);
3041 break; 3046 break;
3047 case CNIC_CTL_STOP_ISCSI_CMD: {
3048 struct cnic_local *cp = dev->cnic_priv;
3049 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3050 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3051 break;
3052 }
3042 case CNIC_CTL_COMPLETION_CMD: { 3053 case CNIC_CTL_COMPLETION_CMD: {
3043 u32 cid = BNX2X_SW_CID(info->data.comp.cid); 3054 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
3044 u32 l5_cid; 3055 u32 l5_cid;
@@ -3562,8 +3573,12 @@ static void cnic_init_csk_state(struct cnic_sock *csk)
3562 3573
3563static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3574static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3564{ 3575{
3576 struct cnic_local *cp = csk->dev->cnic_priv;
3565 int err = 0; 3577 int err = 0;
3566 3578
3579 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3580 return -EOPNOTSUPP;
3581
3567 if (!cnic_in_use(csk)) 3582 if (!cnic_in_use(csk))
3568 return -EINVAL; 3583 return -EINVAL;
3569 3584
@@ -3965,6 +3980,15 @@ static void cnic_delete_task(struct work_struct *work)
3965 cp = container_of(work, struct cnic_local, delete_task.work); 3980 cp = container_of(work, struct cnic_local, delete_task.work);
3966 dev = cp->dev; 3981 dev = cp->dev;
3967 3982
3983 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
3984 struct drv_ctl_info info;
3985
3986 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
3987
3988 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
3989 cp->ethdev->drv_ctl(dev->netdev, &info);
3990 }
3991
3968 for (i = 0; i < cp->max_cid_space; i++) { 3992 for (i = 0; i < cp->max_cid_space; i++) {
3969 struct cnic_context *ctx = &cp->ctx_tbl[i]; 3993 struct cnic_context *ctx = &cp->ctx_tbl[i];
3970 3994
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 4456260c653c..3367a6d3a774 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -226,6 +226,7 @@ struct cnic_local {
226#define CNIC_LCL_FL_KWQ_INIT 0x0 226#define CNIC_LCL_FL_KWQ_INIT 0x0
227#define CNIC_LCL_FL_L2_WAIT 0x1 227#define CNIC_LCL_FL_L2_WAIT 0x1
228#define CNIC_LCL_FL_RINGS_INITED 0x2 228#define CNIC_LCL_FL_RINGS_INITED 0x2
229#define CNIC_LCL_FL_STOP_ISCSI 0x4
229 230
230 struct cnic_dev *dev; 231 struct cnic_dev *dev;
231 232
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index e01b49ee3591..fdd8e46a9050 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.2.13" 15#define CNIC_MODULE_VERSION "2.2.14"
16#define CNIC_MODULE_RELDATE "Jan 31, 2011" 16#define CNIC_MODULE_RELDATE "Mar 30, 2011"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -85,6 +85,7 @@ struct kcqe {
85#define CNIC_CTL_STOP_CMD 1 85#define CNIC_CTL_STOP_CMD 1
86#define CNIC_CTL_START_CMD 2 86#define CNIC_CTL_START_CMD 2
87#define CNIC_CTL_COMPLETION_CMD 3 87#define CNIC_CTL_COMPLETION_CMD 3
88#define CNIC_CTL_STOP_ISCSI_CMD 4
88 89
89#define DRV_CTL_IO_WR_CMD 0x101 90#define DRV_CTL_IO_WR_CMD 0x101
90#define DRV_CTL_IO_RD_CMD 0x102 91#define DRV_CTL_IO_RD_CMD 0x102
@@ -94,6 +95,7 @@ struct kcqe {
94#define DRV_CTL_START_L2_CMD 0x106 95#define DRV_CTL_START_L2_CMD 0x106
95#define DRV_CTL_STOP_L2_CMD 0x107 96#define DRV_CTL_STOP_L2_CMD 0x107
96#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c 97#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
98#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
97 99
98struct cnic_ctl_completion { 100struct cnic_ctl_completion {
99 u32 cid; 101 u32 cid;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 9d267d3a6892..e66aceb57cef 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -491,8 +491,8 @@ e100_open(struct net_device *dev)
491 491
492 /* allocate the irq corresponding to the receiving DMA */ 492 /* allocate the irq corresponding to the receiving DMA */
493 493
494 if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 494 if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname,
495 IRQF_SAMPLE_RANDOM, cardname, (void *)dev)) { 495 (void *)dev)) {
496 goto grace_exit0; 496 goto grace_exit0;
497 } 497 }
498 498
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 910893143295..802c7a7c3b25 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1749,23 +1749,26 @@ static int restart_autoneg(struct net_device *dev)
1749 return 0; 1749 return 0;
1750} 1750}
1751 1751
1752static int cxgb3_phys_id(struct net_device *dev, u32 data) 1752static int set_phys_id(struct net_device *dev,
1753 enum ethtool_phys_id_state state)
1753{ 1754{
1754 struct port_info *pi = netdev_priv(dev); 1755 struct port_info *pi = netdev_priv(dev);
1755 struct adapter *adapter = pi->adapter; 1756 struct adapter *adapter = pi->adapter;
1756 int i;
1757 1757
1758 if (data == 0) 1758 switch (state) {
1759 data = 2; 1759 case ETHTOOL_ID_ACTIVE:
1760 return -EINVAL;
1761
1762 case ETHTOOL_ID_OFF:
1763 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1764 break;
1760 1765
1761 for (i = 0; i < data * 2; i++) { 1766 case ETHTOOL_ID_ON:
1767 case ETHTOOL_ID_INACTIVE:
1762 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 1768 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1763 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1764 if (msleep_interruptible(500))
1765 break;
1766 }
1767 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1768 F_GPIO0_OUT_VAL); 1769 F_GPIO0_OUT_VAL);
1770 }
1771
1769 return 0; 1772 return 0;
1770} 1773}
1771 1774
@@ -2107,7 +2110,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
2107 .set_sg = ethtool_op_set_sg, 2110 .set_sg = ethtool_op_set_sg,
2108 .get_link = ethtool_op_get_link, 2111 .get_link = ethtool_op_get_link,
2109 .get_strings = get_strings, 2112 .get_strings = get_strings,
2110 .phys_id = cxgb3_phys_id, 2113 .set_phys_id = set_phys_id,
2111 .nway_reset = restart_autoneg, 2114 .nway_reset = restart_autoneg,
2112 .get_sset_count = get_sset_count, 2115 .get_sset_count = get_sset_count,
2113 .get_ethtool_stats = get_stats, 2116 .get_ethtool_stats = get_stats,
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 5352c8a23f4d..0af9c9f0ca78 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -1336,15 +1336,20 @@ static int restart_autoneg(struct net_device *dev)
1336 return 0; 1336 return 0;
1337} 1337}
1338 1338
1339static int identify_port(struct net_device *dev, u32 data) 1339static int identify_port(struct net_device *dev,
1340 enum ethtool_phys_id_state state)
1340{ 1341{
1342 unsigned int val;
1341 struct adapter *adap = netdev2adap(dev); 1343 struct adapter *adap = netdev2adap(dev);
1342 1344
1343 if (data == 0) 1345 if (state == ETHTOOL_ID_ACTIVE)
1344 data = 2; /* default to 2 seconds */ 1346 val = 0xffff;
1347 else if (state == ETHTOOL_ID_INACTIVE)
1348 val = 0;
1349 else
1350 return -EINVAL;
1345 1351
1346 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, 1352 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1347 data * 5);
1348} 1353}
1349 1354
1350static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) 1355static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
@@ -2011,7 +2016,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
2011 .set_sg = ethtool_op_set_sg, 2016 .set_sg = ethtool_op_set_sg,
2012 .get_link = ethtool_op_get_link, 2017 .get_link = ethtool_op_get_link,
2013 .get_strings = get_strings, 2018 .get_strings = get_strings,
2014 .phys_id = identify_port, 2019 .set_phys_id = identify_port,
2015 .nway_reset = restart_autoneg, 2020 .nway_reset = restart_autoneg,
2016 .get_sset_count = get_sset_count, 2021 .get_sset_count = get_sset_count,
2017 .get_ethtool_stats = get_stats, 2022 .get_ethtool_stats = get_stats,
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 4661cbbd9bd9..c662679de4fa 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -1352,11 +1352,20 @@ static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
1352/* 1352/*
1353 * Identify the port by blinking the port's LED. 1353 * Identify the port by blinking the port's LED.
1354 */ 1354 */
1355static int cxgb4vf_phys_id(struct net_device *dev, u32 id) 1355static int cxgb4vf_phys_id(struct net_device *dev,
1356 enum ethtool_phys_id_state state)
1356{ 1357{
1358 unsigned int val;
1357 struct port_info *pi = netdev_priv(dev); 1359 struct port_info *pi = netdev_priv(dev);
1358 1360
1359 return t4vf_identify_port(pi->adapter, pi->viid, 5); 1361 if (state == ETHTOOL_ID_ACTIVE)
1362 val = 0xffff;
1363 else if (state == ETHTOOL_ID_INACTIVE)
1364 val = 0;
1365 else
1366 return -EINVAL;
1367
1368 return t4vf_identify_port(pi->adapter, pi->viid, val);
1360} 1369}
1361 1370
1362/* 1371/*
@@ -1588,7 +1597,7 @@ static struct ethtool_ops cxgb4vf_ethtool_ops = {
1588 .set_sg = ethtool_op_set_sg, 1597 .set_sg = ethtool_op_set_sg,
1589 .get_link = ethtool_op_get_link, 1598 .get_link = ethtool_op_get_link,
1590 .get_strings = cxgb4vf_get_strings, 1599 .get_strings = cxgb4vf_get_strings,
1591 .phys_id = cxgb4vf_phys_id, 1600 .set_phys_id = cxgb4vf_phys_id,
1592 .get_sset_count = cxgb4vf_get_sset_count, 1601 .get_sset_count = cxgb4vf_get_sset_count,
1593 .get_ethtool_stats = cxgb4vf_get_ethtool_stats, 1602 .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
1594 .get_regs_len = cxgb4vf_get_regs_len, 1603 .get_regs_len = cxgb4vf_get_regs_len,
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 2e573be16c13..9d4974bba247 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o 4 enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
5 5
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 3a3c3c8a3a9b..38b351c7b979 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "2.1.1.12" 35#define DRV_VERSION "2.1.1.13"
36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
@@ -84,7 +84,6 @@ struct enic {
84 unsigned int flags; 84 unsigned int flags;
85 unsigned int mc_count; 85 unsigned int mc_count;
86 unsigned int uc_count; 86 unsigned int uc_count;
87 int csum_rx_enabled;
88 u32 port_mtu; 87 u32 port_mtu;
89 u32 rx_coalesce_usecs; 88 u32 rx_coalesce_usecs;
90 u32 tx_coalesce_usecs; 89 u32 tx_coalesce_usecs;
@@ -120,4 +119,6 @@ static inline struct device *enic_get_dev(struct enic *enic)
120 return &(enic->pdev->dev); 119 return &(enic->pdev->dev);
121} 120}
122 121
122void enic_reset_addr_lists(struct enic *enic);
123
123#endif /* _ENIC_H_ */ 124#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
index 37ad3a1c82ee..90687b14e60f 100644
--- a/drivers/net/enic/enic_dev.c
+++ b/drivers/net/enic/enic_dev.c
@@ -177,24 +177,24 @@ int enic_vnic_dev_deinit(struct enic *enic)
177 return err; 177 return err;
178} 178}
179 179
180int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp) 180int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
181{ 181{
182 int err; 182 int err;
183 183
184 spin_lock(&enic->devcmd_lock); 184 spin_lock(&enic->devcmd_lock);
185 err = vnic_dev_init_prov(enic->vdev, 185 err = vnic_dev_init_prov2(enic->vdev,
186 (u8 *)vp, vic_provinfo_size(vp)); 186 (u8 *)vp, vic_provinfo_size(vp));
187 spin_unlock(&enic->devcmd_lock); 187 spin_unlock(&enic->devcmd_lock);
188 188
189 return err; 189 return err;
190} 190}
191 191
192int enic_dev_init_done(struct enic *enic, int *done, int *error) 192int enic_dev_deinit_done(struct enic *enic, int *status)
193{ 193{
194 int err; 194 int err;
195 195
196 spin_lock(&enic->devcmd_lock); 196 spin_lock(&enic->devcmd_lock);
197 err = vnic_dev_init_done(enic->vdev, done, error); 197 err = vnic_dev_deinit_done(enic->vdev, status);
198 spin_unlock(&enic->devcmd_lock); 198 spin_unlock(&enic->devcmd_lock);
199 199
200 return err; 200 return err;
@@ -219,3 +219,57 @@ void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
219 enic_del_vlan(enic, vid); 219 enic_del_vlan(enic, vid);
220 spin_unlock(&enic->devcmd_lock); 220 spin_unlock(&enic->devcmd_lock);
221} 221}
222
223int enic_dev_enable2(struct enic *enic, int active)
224{
225 int err;
226
227 spin_lock(&enic->devcmd_lock);
228 err = vnic_dev_enable2(enic->vdev, active);
229 spin_unlock(&enic->devcmd_lock);
230
231 return err;
232}
233
234int enic_dev_enable2_done(struct enic *enic, int *status)
235{
236 int err;
237
238 spin_lock(&enic->devcmd_lock);
239 err = vnic_dev_enable2_done(enic->vdev, status);
240 spin_unlock(&enic->devcmd_lock);
241
242 return err;
243}
244
245int enic_dev_status_to_errno(int devcmd_status)
246{
247 switch (devcmd_status) {
248 case ERR_SUCCESS:
249 return 0;
250 case ERR_EINVAL:
251 return -EINVAL;
252 case ERR_EFAULT:
253 return -EFAULT;
254 case ERR_EPERM:
255 return -EPERM;
256 case ERR_EBUSY:
257 return -EBUSY;
258 case ERR_ECMDUNKNOWN:
259 case ERR_ENOTSUPPORTED:
260 return -EOPNOTSUPP;
261 case ERR_EBADSTATE:
262 return -EINVAL;
263 case ERR_ENOMEM:
264 return -ENOMEM;
265 case ERR_ETIMEDOUT:
266 return -ETIMEDOUT;
267 case ERR_ELINKDOWN:
268 return -ENETDOWN;
269 case ERR_EINPROGRESS:
270 return -EINPROGRESS;
271 case ERR_EMAXRES:
272 default:
273 return (devcmd_status < 0) ? devcmd_status : -1;
274 }
275}
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
index 495f57fcb887..d5f681337626 100644
--- a/drivers/net/enic/enic_dev.h
+++ b/drivers/net/enic/enic_dev.h
@@ -35,7 +35,10 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
35int enic_dev_enable(struct enic *enic); 35int enic_dev_enable(struct enic *enic);
36int enic_dev_disable(struct enic *enic); 36int enic_dev_disable(struct enic *enic);
37int enic_vnic_dev_deinit(struct enic *enic); 37int enic_vnic_dev_deinit(struct enic *enic);
38int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp); 38int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
39int enic_dev_init_done(struct enic *enic, int *done, int *error); 39int enic_dev_deinit_done(struct enic *enic, int *status);
40int enic_dev_enable2(struct enic *enic, int arg);
41int enic_dev_enable2_done(struct enic *enic, int *status);
42int enic_dev_status_to_errno(int devcmd_status);
40 43
41#endif /* _ENIC_DEV_H_ */ 44#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 8b9cad5e9712..b2245511c51a 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -45,6 +45,7 @@
45#include "enic_res.h" 45#include "enic_res.h"
46#include "enic.h" 46#include "enic.h"
47#include "enic_dev.h" 47#include "enic_dev.h"
48#include "enic_pp.h"
48 49
49#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 50#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
50#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 51#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -250,56 +251,6 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
250 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; 251 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
251} 252}
252 253
253static u32 enic_get_rx_csum(struct net_device *netdev)
254{
255 struct enic *enic = netdev_priv(netdev);
256 return enic->csum_rx_enabled;
257}
258
259static int enic_set_rx_csum(struct net_device *netdev, u32 data)
260{
261 struct enic *enic = netdev_priv(netdev);
262
263 if (data && !ENIC_SETTING(enic, RXCSUM))
264 return -EINVAL;
265
266 enic->csum_rx_enabled = !!data;
267
268 return 0;
269}
270
271static int enic_set_tx_csum(struct net_device *netdev, u32 data)
272{
273 struct enic *enic = netdev_priv(netdev);
274
275 if (data && !ENIC_SETTING(enic, TXCSUM))
276 return -EINVAL;
277
278 if (data)
279 netdev->features |= NETIF_F_HW_CSUM;
280 else
281 netdev->features &= ~NETIF_F_HW_CSUM;
282
283 return 0;
284}
285
286static int enic_set_tso(struct net_device *netdev, u32 data)
287{
288 struct enic *enic = netdev_priv(netdev);
289
290 if (data && !ENIC_SETTING(enic, TSO))
291 return -EINVAL;
292
293 if (data)
294 netdev->features |=
295 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
296 else
297 netdev->features &=
298 ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
299
300 return 0;
301}
302
303static u32 enic_get_msglevel(struct net_device *netdev) 254static u32 enic_get_msglevel(struct net_device *netdev)
304{ 255{
305 struct enic *enic = netdev_priv(netdev); 256 struct enic *enic = netdev_priv(netdev);
@@ -387,17 +338,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
387 .get_strings = enic_get_strings, 338 .get_strings = enic_get_strings,
388 .get_sset_count = enic_get_sset_count, 339 .get_sset_count = enic_get_sset_count,
389 .get_ethtool_stats = enic_get_ethtool_stats, 340 .get_ethtool_stats = enic_get_ethtool_stats,
390 .get_rx_csum = enic_get_rx_csum,
391 .set_rx_csum = enic_set_rx_csum,
392 .get_tx_csum = ethtool_op_get_tx_csum,
393 .set_tx_csum = enic_set_tx_csum,
394 .get_sg = ethtool_op_get_sg,
395 .set_sg = ethtool_op_set_sg,
396 .get_tso = ethtool_op_get_tso,
397 .set_tso = enic_set_tso,
398 .get_coalesce = enic_get_coalesce, 341 .get_coalesce = enic_get_coalesce,
399 .set_coalesce = enic_set_coalesce, 342 .set_coalesce = enic_set_coalesce,
400 .get_flags = ethtool_op_get_flags,
401}; 343};
402 344
403static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) 345static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -874,7 +816,7 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
874 return net_stats; 816 return net_stats;
875} 817}
876 818
877static void enic_reset_addr_lists(struct enic *enic) 819void enic_reset_addr_lists(struct enic *enic)
878{ 820{
879 enic->mc_count = 0; 821 enic->mc_count = 0;
880 enic->uc_count = 0; 822 enic->uc_count = 0;
@@ -1112,157 +1054,77 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1112 return -EINVAL; 1054 return -EINVAL;
1113} 1055}
1114 1056
1115static int enic_set_port_profile(struct enic *enic, u8 *mac)
1116{
1117 struct vic_provinfo *vp;
1118 u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
1119 u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
1120 char uuid_str[38];
1121 char client_mac_str[18];
1122 u8 *client_mac;
1123 int err;
1124
1125 err = enic_vnic_dev_deinit(enic);
1126 if (err)
1127 return err;
1128
1129 enic_reset_addr_lists(enic);
1130
1131 switch (enic->pp.request) {
1132
1133 case PORT_REQUEST_ASSOCIATE:
1134
1135 if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
1136 return -EINVAL;
1137
1138 if (!is_valid_ether_addr(mac))
1139 return -EADDRNOTAVAIL;
1140
1141 vp = vic_provinfo_alloc(GFP_KERNEL, oui,
1142 VIC_PROVINFO_GENERIC_TYPE);
1143 if (!vp)
1144 return -ENOMEM;
1145
1146 vic_provinfo_add_tlv(vp,
1147 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
1148 strlen(enic->pp.name) + 1, enic->pp.name);
1149
1150 if (!is_zero_ether_addr(enic->pp.mac_addr))
1151 client_mac = enic->pp.mac_addr;
1152 else
1153 client_mac = mac;
1154
1155 vic_provinfo_add_tlv(vp,
1156 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
1157 ETH_ALEN, client_mac);
1158
1159 sprintf(client_mac_str, "%pM", client_mac);
1160 vic_provinfo_add_tlv(vp,
1161 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
1162 sizeof(client_mac_str), client_mac_str);
1163
1164 if (enic->pp.set & ENIC_SET_INSTANCE) {
1165 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
1166 vic_provinfo_add_tlv(vp,
1167 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
1168 sizeof(uuid_str), uuid_str);
1169 }
1170
1171 if (enic->pp.set & ENIC_SET_HOST) {
1172 sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
1173 vic_provinfo_add_tlv(vp,
1174 VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
1175 sizeof(uuid_str), uuid_str);
1176 }
1177
1178 os_type = htons(os_type);
1179 vic_provinfo_add_tlv(vp,
1180 VIC_GENERIC_PROV_TLV_OS_TYPE,
1181 sizeof(os_type), &os_type);
1182
1183 err = enic_dev_init_prov(enic, vp);
1184 vic_provinfo_free(vp);
1185 if (err)
1186 return err;
1187 break;
1188
1189 case PORT_REQUEST_DISASSOCIATE:
1190 break;
1191
1192 default:
1193 return -EINVAL;
1194 }
1195
1196 /* Set flag to indicate that the port assoc/disassoc
1197 * request has been sent out to fw
1198 */
1199 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1200
1201 return 0;
1202}
1203
1204static int enic_set_vf_port(struct net_device *netdev, int vf, 1057static int enic_set_vf_port(struct net_device *netdev, int vf,
1205 struct nlattr *port[]) 1058 struct nlattr *port[])
1206{ 1059{
1207 struct enic *enic = netdev_priv(netdev); 1060 struct enic *enic = netdev_priv(netdev);
1208 struct enic_port_profile new_pp; 1061 struct enic_port_profile prev_pp;
1209 int err = 0; 1062 int err = 0, restore_pp = 1;
1210 1063
1211 memset(&new_pp, 0, sizeof(new_pp)); 1064 /* don't support VFs, yet */
1065 if (vf != PORT_SELF_VF)
1066 return -EOPNOTSUPP;
1212 1067
1213 if (port[IFLA_PORT_REQUEST]) { 1068 if (!port[IFLA_PORT_REQUEST])
1214 new_pp.set |= ENIC_SET_REQUEST; 1069 return -EOPNOTSUPP;
1215 new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1070
1216 } 1071 memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
1072 memset(&enic->pp, 0, sizeof(enic->pp));
1073
1074 enic->pp.set |= ENIC_SET_REQUEST;
1075 enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1217 1076
1218 if (port[IFLA_PORT_PROFILE]) { 1077 if (port[IFLA_PORT_PROFILE]) {
1219 new_pp.set |= ENIC_SET_NAME; 1078 enic->pp.set |= ENIC_SET_NAME;
1220 memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]), 1079 memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
1221 PORT_PROFILE_MAX); 1080 PORT_PROFILE_MAX);
1222 } 1081 }
1223 1082
1224 if (port[IFLA_PORT_INSTANCE_UUID]) { 1083 if (port[IFLA_PORT_INSTANCE_UUID]) {
1225 new_pp.set |= ENIC_SET_INSTANCE; 1084 enic->pp.set |= ENIC_SET_INSTANCE;
1226 memcpy(new_pp.instance_uuid, 1085 memcpy(enic->pp.instance_uuid,
1227 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); 1086 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
1228 } 1087 }
1229 1088
1230 if (port[IFLA_PORT_HOST_UUID]) { 1089 if (port[IFLA_PORT_HOST_UUID]) {
1231 new_pp.set |= ENIC_SET_HOST; 1090 enic->pp.set |= ENIC_SET_HOST;
1232 memcpy(new_pp.host_uuid, 1091 memcpy(enic->pp.host_uuid,
1233 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); 1092 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
1234 } 1093 }
1235 1094
1236 /* don't support VFs, yet */ 1095 /* Special case handling: mac came from IFLA_VF_MAC */
1237 if (vf != PORT_SELF_VF) 1096 if (!is_zero_ether_addr(prev_pp.vf_mac))
1238 return -EOPNOTSUPP; 1097 memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN);
1239
1240 if (!(new_pp.set & ENIC_SET_REQUEST))
1241 return -EOPNOTSUPP;
1242
1243 if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
1244 /* Special case handling */
1245 if (!is_zero_ether_addr(enic->pp.vf_mac))
1246 memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
1247 1098
1248 if (is_zero_ether_addr(netdev->dev_addr)) 1099 if (is_zero_ether_addr(netdev->dev_addr))
1249 random_ether_addr(netdev->dev_addr); 1100 random_ether_addr(netdev->dev_addr);
1250 }
1251 1101
1252 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile)); 1102 err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
1103 if (err) {
1104 if (restore_pp) {
1105 /* Things are still the way they were: Implicit
1106 * DISASSOCIATE failed
1107 */
1108 memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
1109 } else {
1110 memset(&enic->pp, 0, sizeof(enic->pp));
1111 memset(netdev->dev_addr, 0, ETH_ALEN);
1112 }
1113 } else {
1114 /* Set flag to indicate that the port assoc/disassoc
1115 * request has been sent out to fw
1116 */
1117 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1253 1118
1254 err = enic_set_port_profile(enic, netdev->dev_addr); 1119 /* If DISASSOCIATE, clean up all assigned/saved macaddresses */
1255 if (err) 1120 if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
1256 goto set_port_profile_cleanup; 1121 memset(enic->pp.mac_addr, 0, ETH_ALEN);
1122 memset(netdev->dev_addr, 0, ETH_ALEN);
1123 }
1124 }
1257 1125
1258set_port_profile_cleanup:
1259 memset(enic->pp.vf_mac, 0, ETH_ALEN); 1126 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1260 1127
1261 if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
1262 memset(netdev->dev_addr, 0, ETH_ALEN);
1263 memset(enic->pp.mac_addr, 0, ETH_ALEN);
1264 }
1265
1266 return err; 1128 return err;
1267} 1129}
1268 1130
@@ -1270,34 +1132,15 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1270 struct sk_buff *skb) 1132 struct sk_buff *skb)
1271{ 1133{
1272 struct enic *enic = netdev_priv(netdev); 1134 struct enic *enic = netdev_priv(netdev);
1273 int err, error, done;
1274 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1135 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1136 int err;
1275 1137
1276 if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED)) 1138 if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
1277 return -ENODATA; 1139 return -ENODATA;
1278 1140
1279 err = enic_dev_init_done(enic, &done, &error); 1141 err = enic_process_get_pp_request(enic, enic->pp.request, &response);
1280 if (err) 1142 if (err)
1281 error = err; 1143 return err;
1282
1283 switch (error) {
1284 case ERR_SUCCESS:
1285 if (!done)
1286 response = PORT_PROFILE_RESPONSE_INPROGRESS;
1287 break;
1288 case ERR_EINVAL:
1289 response = PORT_PROFILE_RESPONSE_INVALID;
1290 break;
1291 case ERR_EBADSTATE:
1292 response = PORT_PROFILE_RESPONSE_BADSTATE;
1293 break;
1294 case ERR_ENOMEM:
1295 response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
1296 break;
1297 default:
1298 response = PORT_PROFILE_RESPONSE_ERROR;
1299 break;
1300 }
1301 1144
1302 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request); 1145 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
1303 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); 1146 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
@@ -1407,7 +1250,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1407 skb_put(skb, bytes_written); 1250 skb_put(skb, bytes_written);
1408 skb->protocol = eth_type_trans(skb, netdev); 1251 skb->protocol = eth_type_trans(skb, netdev);
1409 1252
1410 if (enic->csum_rx_enabled && !csum_not_calc) { 1253 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
1411 skb->csum = htons(checksum); 1254 skb->csum = htons(checksum);
1412 skb->ip_summed = CHECKSUM_COMPLETE; 1255 skb->ip_summed = CHECKSUM_COMPLETE;
1413 } 1256 }
@@ -2536,17 +2379,18 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2536 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); 2379 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2537 } 2380 }
2538 if (ENIC_SETTING(enic, TXCSUM)) 2381 if (ENIC_SETTING(enic, TXCSUM))
2539 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2382 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2540 if (ENIC_SETTING(enic, TSO)) 2383 if (ENIC_SETTING(enic, TSO))
2541 netdev->features |= NETIF_F_TSO | 2384 netdev->hw_features |= NETIF_F_TSO |
2542 NETIF_F_TSO6 | NETIF_F_TSO_ECN; 2385 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2543 if (ENIC_SETTING(enic, LRO)) 2386 if (ENIC_SETTING(enic, RXCSUM))
2544 netdev->features |= NETIF_F_GRO; 2387 netdev->hw_features |= NETIF_F_RXCSUM;
2388
2389 netdev->features |= netdev->hw_features;
2390
2545 if (using_dac) 2391 if (using_dac)
2546 netdev->features |= NETIF_F_HIGHDMA; 2392 netdev->features |= NETIF_F_HIGHDMA;
2547 2393
2548 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
2549
2550 err = register_netdev(netdev); 2394 err = register_netdev(netdev);
2551 if (err) { 2395 if (err) {
2552 dev_err(dev, "Cannot register net device, aborting\n"); 2396 dev_err(dev, "Cannot register net device, aborting\n");
diff --git a/drivers/net/enic/enic_pp.c b/drivers/net/enic/enic_pp.c
new file mode 100644
index 000000000000..ffaa75dd1ded
--- /dev/null
+++ b/drivers/net/enic/enic_pp.c
@@ -0,0 +1,264 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/rtnetlink.h>
26#include <net/ip.h>
27
28#include "vnic_vic.h"
29#include "enic_res.h"
30#include "enic.h"
31#include "enic_dev.h"
32
33static int enic_set_port_profile(struct enic *enic)
34{
35 struct net_device *netdev = enic->netdev;
36 struct vic_provinfo *vp;
37 const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
38 const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
39 char uuid_str[38];
40 char client_mac_str[18];
41 u8 *client_mac;
42 int err;
43
44 if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
45 return -EINVAL;
46
47 vp = vic_provinfo_alloc(GFP_KERNEL, oui,
48 VIC_PROVINFO_GENERIC_TYPE);
49 if (!vp)
50 return -ENOMEM;
51
52 VIC_PROVINFO_ADD_TLV(vp,
53 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
54 strlen(enic->pp.name) + 1, enic->pp.name);
55
56 if (!is_zero_ether_addr(enic->pp.mac_addr))
57 client_mac = enic->pp.mac_addr;
58 else
59 client_mac = netdev->dev_addr;
60
61 VIC_PROVINFO_ADD_TLV(vp,
62 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
63 ETH_ALEN, client_mac);
64
65 snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac);
66 VIC_PROVINFO_ADD_TLV(vp,
67 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
68 sizeof(client_mac_str), client_mac_str);
69
70 if (enic->pp.set & ENIC_SET_INSTANCE) {
71 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
72 VIC_PROVINFO_ADD_TLV(vp,
73 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
74 sizeof(uuid_str), uuid_str);
75 }
76
77 if (enic->pp.set & ENIC_SET_HOST) {
78 sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
79 VIC_PROVINFO_ADD_TLV(vp,
80 VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
81 sizeof(uuid_str), uuid_str);
82 }
83
84 VIC_PROVINFO_ADD_TLV(vp,
85 VIC_GENERIC_PROV_TLV_OS_TYPE,
86 sizeof(os_type), &os_type);
87
88 err = enic_dev_status_to_errno(enic_dev_init_prov2(enic, vp));
89
90add_tlv_failure:
91 vic_provinfo_free(vp);
92
93 return err;
94}
95
96static int enic_unset_port_profile(struct enic *enic)
97{
98 int err;
99
100 err = enic_vnic_dev_deinit(enic);
101 if (err)
102 return enic_dev_status_to_errno(err);
103
104 enic_reset_addr_lists(enic);
105
106 return 0;
107}
108
109static int enic_are_pp_different(struct enic_port_profile *pp1,
110 struct enic_port_profile *pp2)
111{
112 return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
113 pp2->instance_uuid, PORT_UUID_MAX) |
114 !!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
115 !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
116}
117
118static int enic_pp_preassociate(struct enic *enic,
119 struct enic_port_profile *prev_pp, int *restore_pp);
120static int enic_pp_disassociate(struct enic *enic,
121 struct enic_port_profile *prev_pp, int *restore_pp);
122static int enic_pp_preassociate_rr(struct enic *enic,
123 struct enic_port_profile *prev_pp, int *restore_pp);
124static int enic_pp_associate(struct enic *enic,
125 struct enic_port_profile *prev_pp, int *restore_pp);
126
127static int (*enic_pp_handlers[])(struct enic *enic,
128 struct enic_port_profile *prev_state, int *restore_pp) = {
129 [PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate,
130 [PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr,
131 [PORT_REQUEST_ASSOCIATE] = enic_pp_associate,
132 [PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate,
133};
134
135static const int enic_pp_handlers_count =
136 sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
137
138static int enic_pp_preassociate(struct enic *enic,
139 struct enic_port_profile *prev_pp, int *restore_pp)
140{
141 return -EOPNOTSUPP;
142}
143
144static int enic_pp_disassociate(struct enic *enic,
145 struct enic_port_profile *prev_pp, int *restore_pp)
146{
147 return enic_unset_port_profile(enic);
148}
149
150static int enic_pp_preassociate_rr(struct enic *enic,
151 struct enic_port_profile *prev_pp, int *restore_pp)
152{
153 int err;
154 int active = 0;
155
156 if (enic->pp.request != PORT_REQUEST_ASSOCIATE) {
157 /* If pre-associate is not part of an associate.
158 We always disassociate first */
159 err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic,
160 prev_pp, restore_pp);
161 if (err)
162 return err;
163
164 *restore_pp = 0;
165 }
166
167 *restore_pp = 0;
168
169 err = enic_set_port_profile(enic);
170 if (err)
171 return err;
172
173 /* If pre-associate is not part of an associate. */
174 if (enic->pp.request != PORT_REQUEST_ASSOCIATE)
175 err = enic_dev_status_to_errno(enic_dev_enable2(enic, active));
176
177 return err;
178}
179
180static int enic_pp_associate(struct enic *enic,
181 struct enic_port_profile *prev_pp, int *restore_pp)
182{
183 int err;
184 int active = 1;
185
186 /* Check if a pre-associate was called before */
187 if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR ||
188 (prev_pp->request == PORT_REQUEST_PREASSOCIATE_RR &&
189 enic_are_pp_different(prev_pp, &enic->pp))) {
190 err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](
191 enic, prev_pp, restore_pp);
192 if (err)
193 return err;
194
195 *restore_pp = 0;
196 }
197
198 err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR](
199 enic, prev_pp, restore_pp);
200 if (err)
201 return err;
202
203 *restore_pp = 0;
204
205 return enic_dev_status_to_errno(enic_dev_enable2(enic, active));
206}
207
208int enic_process_set_pp_request(struct enic *enic,
209 struct enic_port_profile *prev_pp, int *restore_pp)
210{
211 if (enic->pp.request < enic_pp_handlers_count
212 && enic_pp_handlers[enic->pp.request])
213 return enic_pp_handlers[enic->pp.request](enic,
214 prev_pp, restore_pp);
215 else
216 return -EOPNOTSUPP;
217}
218
219int enic_process_get_pp_request(struct enic *enic, int request,
220 u16 *response)
221{
222 int err, status = ERR_SUCCESS;
223
224 switch (request) {
225
226 case PORT_REQUEST_PREASSOCIATE_RR:
227 case PORT_REQUEST_ASSOCIATE:
228 err = enic_dev_enable2_done(enic, &status);
229 break;
230
231 case PORT_REQUEST_DISASSOCIATE:
232 err = enic_dev_deinit_done(enic, &status);
233 break;
234
235 default:
236 return -EINVAL;
237 }
238
239 if (err)
240 status = err;
241
242 switch (status) {
243 case ERR_SUCCESS:
244 *response = PORT_PROFILE_RESPONSE_SUCCESS;
245 break;
246 case ERR_EINVAL:
247 *response = PORT_PROFILE_RESPONSE_INVALID;
248 break;
249 case ERR_EBADSTATE:
250 *response = PORT_PROFILE_RESPONSE_BADSTATE;
251 break;
252 case ERR_ENOMEM:
253 *response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
254 break;
255 case ERR_EINPROGRESS:
256 *response = PORT_PROFILE_RESPONSE_INPROGRESS;
257 break;
258 default:
259 *response = PORT_PROFILE_RESPONSE_ERROR;
260 break;
261 }
262
263 return 0;
264}
diff --git a/drivers/net/enic/enic_pp.h b/drivers/net/enic/enic_pp.h
new file mode 100644
index 000000000000..699e365a944d
--- /dev/null
+++ b/drivers/net/enic/enic_pp.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef _ENIC_PP_H_
20#define _ENIC_PP_H_
21
22int enic_process_set_pp_request(struct enic *enic,
23 struct enic_port_profile *prev_pp, int *restore_pp);
24int enic_process_get_pp_request(struct enic *enic, int request,
25 u16 *response);
26
27#endif /* _ENIC_PP_H_ */
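
Note: these two entry points are driven from enic_set_vf_port()/enic_get_vf_port() in enic_main.c, as the earlier hunks show: the caller snapshots the previous port profile, rebuilds enic->pp from the IFLA_PORT_* attributes, and lets enic_process_set_pp_request() dispatch to the handler table in enic_pp.c. A condensed sketch of that calling convention (not the literal driver code; example_apply_port_request is a hypothetical wrapper, error handling and MAC clean-up trimmed):

#include <linux/string.h>
#include "enic.h"
#include "enic_pp.h"

static int example_apply_port_request(struct enic *enic, u8 request)
{
	struct enic_port_profile prev_pp;
	int restore_pp = 1;
	int err;

	memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
	memset(&enic->pp, 0, sizeof(enic->pp));

	enic->pp.set |= ENIC_SET_REQUEST;
	enic->pp.request = request;
	/* ... copy profile name and instance/host UUIDs from the
	 * IFLA_PORT_* netlink attributes into enic->pp ... */

	err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
	if (err && restore_pp)
		/* Implicit DISASSOCIATE failed: nothing changed, keep the
		 * previous profile. */
		memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
	else if (err)
		/* Old profile already torn down: start from a clean state. */
		memset(&enic->pp, 0, sizeof(enic->pp));
	else
		/* Request handed to firmware; completion is reported later
		 * through enic_process_get_pp_request(). */
		enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;

	return err;
}
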
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index f111a37419ce..6e5c6356e7df 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -98,9 +98,9 @@ int enic_get_vnic_config(struct enic *enic)
98 "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n", 98 "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
99 enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu); 99 enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
100 dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d " 100 dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d "
101 "tso/lro %d/%d intr timer %d usec rss %d\n", 101 "tso %d intr timer %d usec rss %d\n",
102 ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM), 102 ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM),
103 ENIC_SETTING(enic, TSO), ENIC_SETTING(enic, LRO), 103 ENIC_SETTING(enic, TSO),
104 c->intr_timer_usec, ENIC_SETTING(enic, RSS)); 104 c->intr_timer_usec, ENIC_SETTING(enic, RSS));
105 105
106 return 0; 106 return 0;
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index c089b362a36f..68f24ae860ae 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -786,48 +786,6 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
786 return r; 786 return r;
787} 787}
788 788
789int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
790{
791 u64 a0 = 0, a1 = 0;
792 int wait = 1000;
793 int ret;
794
795 *done = 0;
796
797 ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
798 if (ret)
799 return ret;
800
801 *done = (a0 == 0);
802
803 *err = (a0 == 0) ? (int)a1:0;
804
805 return 0;
806}
807
808int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
809{
810 u64 a0, a1 = len;
811 int wait = 1000;
812 dma_addr_t prov_pa;
813 void *prov_buf;
814 int ret;
815
816 prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
817 if (!prov_buf)
818 return -ENOMEM;
819
820 memcpy(prov_buf, buf, len);
821
822 a0 = prov_pa;
823
824 ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
825
826 pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
827
828 return ret;
829}
830
831int vnic_dev_deinit(struct vnic_dev *vdev) 789int vnic_dev_deinit(struct vnic_dev *vdev)
832{ 790{
833 u64 a0 = 0, a1 = 0; 791 u64 a0 = 0, a1 = 0;
@@ -927,4 +885,59 @@ err_out:
927 return NULL; 885 return NULL;
928} 886}
929 887
888int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
889{
890 u64 a0, a1 = len;
891 int wait = 1000;
892 dma_addr_t prov_pa;
893 void *prov_buf;
894 int ret;
895
896 prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
897 if (!prov_buf)
898 return -ENOMEM;
930 899
900 memcpy(prov_buf, buf, len);
901
902 a0 = prov_pa;
903
904 ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
905
906 pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
907
908 return ret;
909}
910
911int vnic_dev_enable2(struct vnic_dev *vdev, int active)
912{
913 u64 a0, a1 = 0;
914 int wait = 1000;
915
916 a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
917
918 return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
919}
920
921static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
922 int *status)
923{
924 u64 a0 = cmd, a1 = 0;
925 int wait = 1000;
926 int ret;
927
928 ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
929 if (!ret)
930 *status = (int)a0;
931
932 return ret;
933}
934
935int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
936{
937 return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
938}
939
940int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
941{
942 return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
943}
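Taken together, the helpers added above support a two-step bring-up: provision the vNIC without enabling it, switch it to the active state, then poll for completion via CMD_STATUS. A rough sketch of that sequence; the retry count and delay are illustrative, not values taken from the driver:

#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_devcmd.h"

static int example_activate_vnic(struct vnic_dev *vdev, u8 *prov, u32 len)
{
	int status, err;
	int retries = 10;				/* illustrative */

	err = vnic_dev_init_prov2(vdev, prov, len);	/* no implicit enable */
	if (err)
		return err;

	err = vnic_dev_enable2(vdev, 1);		/* CMD_ENABLE2_ACTIVE */
	if (err)
		return err;

	while (retries--) {
		err = vnic_dev_enable2_done(vdev, &status);
		if (err)
			return err;
		if (status != ERR_EINPROGRESS)
			return status == ERR_SUCCESS ? 0 : -EIO;
		msleep(100);				/* illustrative back-off */
	}
	return -ETIMEDOUT;
}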
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index e837546213a8..cf482a2c9dd9 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -108,8 +108,6 @@ int vnic_dev_disable(struct vnic_dev *vdev);
108int vnic_dev_open(struct vnic_dev *vdev, int arg); 108int vnic_dev_open(struct vnic_dev *vdev, int arg);
109int vnic_dev_open_done(struct vnic_dev *vdev, int *done); 109int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
110int vnic_dev_init(struct vnic_dev *vdev, int arg); 110int vnic_dev_init(struct vnic_dev *vdev, int arg);
111int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
112int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
113int vnic_dev_deinit(struct vnic_dev *vdev); 111int vnic_dev_deinit(struct vnic_dev *vdev);
114int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); 112int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
115int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); 113int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
@@ -122,5 +120,9 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
122struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, 120struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
123 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, 121 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
124 unsigned int num_bars); 122 unsigned int num_bars);
123int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
124int vnic_dev_enable2(struct vnic_dev *vdev, int active);
125int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
126int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
125 127
126#endif /* _VNIC_DEV_H_ */ 128#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index d833a071bac5..c5569bfb47ac 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -267,17 +267,62 @@ enum vnic_devcmd_cmd {
267 267
268 /* 268 /*
269 * As for BY_BDF except a0 is index of hvnlink subordinate vnic 269 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
270 * or SR-IOV virtual vnic */ 270 * or SR-IOV virtual vnic
271 */
271 CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), 272 CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
272 273
273 /* 274 /*
274 * in: (u64)a0=paddr of buffer to put latest VIC VIF-CONFIG-INFO TLV in 275 * For HPP toggle:
275 * (u32)a1=length of buffer in a0 276 * adapter-info-get
276 out: (u64)a0=paddr of buffer with latest VIC VIF-CONFIG-INFO TLV 277 * in: (u64)a0=physical address of buffer passed in from caller.
277 * (u32)a1=actual length of latest VIC VIF-CONFIG-INFO TLV */ 278 * (u16)a1=size of buffer specified in a0.
279 * out: (u64)a0=physical address of buffer passed in from caller.
280 * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
281 * 0 if no VIF-CONFIG-INFO TLV was ever received. */
278 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), 282 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
283
284 /* init_prov_info2:
285 * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
286 * the vnic until CMD_ENABLE2 is issued.
287 * (u64)a0=paddr of vnic_devcmd_provinfo
288 * (u32)a1=sizeof provision info */
289 CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
290
291 /* enable2:
292 * (u32)a0=0 ==> standby
293 * =CMD_ENABLE2_ACTIVE ==> active
294 */
295 CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
296
297 /*
298 * cmd_status:
299 * Returns the status of the specified command
300 * Input:
301 * a0 = command for which status is being queried.
302 * Possible values are:
303 * CMD_SOFT_RESET
304 * CMD_HANG_RESET
305 * CMD_OPEN
306 * CMD_INIT
307 * CMD_INIT_PROV_INFO
308 * CMD_DEINIT
309 * CMD_INIT_PROV_INFO2
310 * CMD_ENABLE2
311 * Output:
312 * if status == STAT_ERROR
313 * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
314 * not supported
315 * if status == STAT_NONE
316 * a0 = status of the devcmd specified in a0 as follows.
317 * ERR_SUCCESS - command in a0 completed successfully
318 * ERR_EINPROGRESS - command in a0 is still in progress
319 */
320 CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
279}; 321};
280 322
323/* CMD_ENABLE2 flags */
324#define CMD_ENABLE2_ACTIVE 0x1
325
281/* flags for CMD_OPEN */ 326/* flags for CMD_OPEN */
282#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ 327#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
283 328
@@ -315,6 +360,8 @@ enum vnic_devcmd_error {
315 ERR_ETIMEDOUT = 8, 360 ERR_ETIMEDOUT = 8,
316 ERR_ELINKDOWN = 9, 361 ERR_ELINKDOWN = 9,
317 ERR_EMAXRES = 10, 362 ERR_EMAXRES = 10,
363 ERR_ENOTSUPPORTED = 11,
364 ERR_EINPROGRESS = 12,
318}; 365};
319 366
320/* 367/*
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 4725b79de0ef..24ef8cd40545 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -23,7 +23,8 @@
23 23
24#include "vnic_vic.h" 24#include "vnic_vic.h"
25 25
26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type) 26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
27 const u8 type)
27{ 28{
28 struct vic_provinfo *vp; 29 struct vic_provinfo *vp;
29 30
@@ -47,7 +48,7 @@ void vic_provinfo_free(struct vic_provinfo *vp)
47} 48}
48 49
49int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, 50int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
50 void *value) 51 const void *value)
51{ 52{
52 struct vic_provinfo_tlv *tlv; 53 struct vic_provinfo_tlv *tlv;
53 54
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index f700f5d9e81d..9ef81f148351 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -47,6 +47,7 @@ enum vic_generic_prov_os_type {
47 VIC_GENERIC_PROV_OS_TYPE_ESX = 1, 47 VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
48 VIC_GENERIC_PROV_OS_TYPE_LINUX = 2, 48 VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
49 VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3, 49 VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
50 VIC_GENERIC_PROV_OS_TYPE_SOLARIS = 4,
50}; 51};
51 52
52struct vic_provinfo { 53struct vic_provinfo {
@@ -61,14 +62,22 @@ struct vic_provinfo {
61 } tlv[0]; 62 } tlv[0];
62} __packed; 63} __packed;
63 64
65#define VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data) \
66 do { \
67 err = vic_provinfo_add_tlv(vp, tlvtype, tlvlen, data); \
68 if (err) \
69 goto add_tlv_failure; \
70 } while (0)
71
64#define VIC_PROVINFO_MAX_DATA 1385 72#define VIC_PROVINFO_MAX_DATA 1385
65#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ 73#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
66 sizeof(struct vic_provinfo)) 74 sizeof(struct vic_provinfo))
67 75
68struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type); 76struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
77 const u8 type);
69void vic_provinfo_free(struct vic_provinfo *vp); 78void vic_provinfo_free(struct vic_provinfo *vp);
70int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, 79int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
71 void *value); 80 const void *value);
72size_t vic_provinfo_size(struct vic_provinfo *vp); 81size_t vic_provinfo_size(struct vic_provinfo *vp);
73 82
74#endif /* _VNIC_VIC_H_ */ 83#endif /* _VNIC_VIC_H_ */
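Note that the new VIC_PROVINFO_ADD_TLV() macro deliberately relies on a local int err and an add_tlv_failure: label in the calling function. A minimal usage sketch; the TLV type value and the string payload are made up for illustration:

#include <linux/gfp.h>
#include <linux/string.h>
#include "vnic_vic.h"

static struct vic_provinfo *example_build_provinfo(const u8 *oui, u8 type,
						   const char *name)
{
	struct vic_provinfo *vp;
	int err;

	vp = vic_provinfo_alloc(GFP_KERNEL, oui, type);
	if (!vp)
		return NULL;

	/* expands to vic_provinfo_add_tlv() plus a goto on failure */
	VIC_PROVINFO_ADD_TLV(vp, 1 /* hypothetical TLV type */,
			     strlen(name) + 1, name);

	return vp;

add_tlv_failure:
	vic_provinfo_free(vp);
	return NULL;
}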
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 380d0614a89a..c7ce4438e923 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1604,55 +1604,47 @@ static u32 ewrk3_get_link(struct net_device *dev)
1604 return !(cmr & CMR_LINK); 1604 return !(cmr & CMR_LINK);
1605} 1605}
1606 1606
1607static int ewrk3_phys_id(struct net_device *dev, u32 data) 1607static int ewrk3_set_phys_id(struct net_device *dev,
1608 enum ethtool_phys_id_state state)
1608{ 1609{
1609 struct ewrk3_private *lp = netdev_priv(dev); 1610 struct ewrk3_private *lp = netdev_priv(dev);
1610 unsigned long iobase = dev->base_addr; 1611 unsigned long iobase = dev->base_addr;
1611 unsigned long flags;
1612 u8 cr; 1612 u8 cr;
1613 int count;
1614
1615 /* Toggle LED 4x per second */
1616 count = data << 2;
1617 1613
1618 spin_lock_irqsave(&lp->hw_lock, flags); 1614 spin_lock_irq(&lp->hw_lock);
1619
1620 /* Bail if a PHYS_ID is already in progress */
1621 if (lp->led_mask == 0) {
1622 spin_unlock_irqrestore(&lp->hw_lock, flags);
1623 return -EBUSY;
1624 }
1625 1615
1626 /* Prevent ISR from twiddling the LED */ 1616 switch (state) {
1627 lp->led_mask = 0; 1617 case ETHTOOL_ID_ACTIVE:
1618 /* Prevent ISR from twiddling the LED */
1619 lp->led_mask = 0;
1620 spin_unlock_irq(&lp->hw_lock);
1621 return -EINVAL;
1628 1622
1629 while (count--) { 1623 case ETHTOOL_ID_ON:
1630 /* Toggle the LED */
1631 cr = inb(EWRK3_CR); 1624 cr = inb(EWRK3_CR);
1632 outb(cr ^ CR_LED, EWRK3_CR); 1625 outb(cr | CR_LED, EWRK3_CR);
1626 break;
1633 1627
1634 /* Wait a little while */ 1628 case ETHTOOL_ID_OFF:
1635 spin_unlock_irqrestore(&lp->hw_lock, flags); 1629 cr = inb(EWRK3_CR);
1636 msleep(250); 1630 outb(cr & ~CR_LED, EWRK3_CR);
1637 spin_lock_irqsave(&lp->hw_lock, flags); 1631 break;
1638 1632
1639 /* Exit if we got a signal */ 1633 case ETHTOOL_ID_INACTIVE:
1640 if (signal_pending(current)) 1634 lp->led_mask = CR_LED;
1641 break; 1635 cr = inb(EWRK3_CR);
1636 outb(cr & ~CR_LED, EWRK3_CR);
1642 } 1637 }
1638 spin_unlock_irq(&lp->hw_lock);
1643 1639
1644 lp->led_mask = CR_LED; 1640 return 0;
1645 cr = inb(EWRK3_CR);
1646 outb(cr & ~CR_LED, EWRK3_CR);
1647 spin_unlock_irqrestore(&lp->hw_lock, flags);
1648 return signal_pending(current) ? -ERESTARTSYS : 0;
1649} 1641}
1650 1642
1651static const struct ethtool_ops ethtool_ops_203 = { 1643static const struct ethtool_ops ethtool_ops_203 = {
1652 .get_drvinfo = ewrk3_get_drvinfo, 1644 .get_drvinfo = ewrk3_get_drvinfo,
1653 .get_settings = ewrk3_get_settings, 1645 .get_settings = ewrk3_get_settings,
1654 .set_settings = ewrk3_set_settings, 1646 .set_settings = ewrk3_set_settings,
1655 .phys_id = ewrk3_phys_id, 1647 .set_phys_id = ewrk3_set_phys_id,
1656}; 1648};
1657 1649
1658static const struct ethtool_ops ethtool_ops = { 1650static const struct ethtool_ops ethtool_ops = {
@@ -1660,7 +1652,7 @@ static const struct ethtool_ops ethtool_ops = {
1660 .get_settings = ewrk3_get_settings, 1652 .get_settings = ewrk3_get_settings,
1661 .set_settings = ewrk3_set_settings, 1653 .set_settings = ewrk3_set_settings,
1662 .get_link = ewrk3_get_link, 1654 .get_link = ewrk3_get_link,
1663 .phys_id = ewrk3_phys_id, 1655 .set_phys_id = ewrk3_set_phys_id,
1664}; 1656};
1665 1657
1666/* 1658/*
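The LED conversions in this series (ewrk3 here, niu and pcnet32 further down) all follow the same set_phys_id state machine: save or prepare state on ETHTOOL_ID_ACTIVE and return -EINVAL so that, as these conversions rely on, the ethtool core drives the periodic ON/OFF calls; toggle the LED on ETHTOOL_ID_ON/ETHTOOL_ID_OFF; restore it on ETHTOOL_ID_INACTIVE. A skeletal sketch of that shape, with placeholder LED helpers standing in for the driver-specific register accesses:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	u32 saved_led;			/* placeholder for saved LED state */
};

static void foo_led_save(struct foo_priv *fp) { /* read LED register */ }
static void foo_led_set(struct foo_priv *fp, bool on) { /* write LED bit */ }
static void foo_led_restore(struct foo_priv *fp) { /* rewrite saved value */ }

static int foo_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct foo_priv *fp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		foo_led_save(fp);
		return -EINVAL;		/* core performs the ON/OFF cycling */

	case ETHTOOL_ID_ON:
		foo_led_set(fp, true);
		break;

	case ETHTOOL_ID_OFF:
		foo_led_set(fp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		foo_led_restore(fp);
		break;
	}
	return 0;
}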
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 24cb953900dd..a9388944f1d3 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -956,8 +956,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
956 .get_link = ethtool_op_get_link, 956 .get_link = ethtool_op_get_link,
957 .get_msglevel = fs_get_msglevel, 957 .get_msglevel = fs_get_msglevel,
958 .set_msglevel = fs_set_msglevel, 958 .set_msglevel = fs_set_msglevel,
959 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
960 .set_sg = ethtool_op_set_sg,
961 .get_regs = fs_get_regs, 959 .get_regs = fs_get_regs,
962}; 960};
963 961
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index b2fe7edefad9..0438d3551d5c 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -382,23 +382,6 @@ extern const char gfar_driver_version[];
382#define BD_LFLAG(flags) ((flags) << 16) 382#define BD_LFLAG(flags) ((flags) << 16)
383#define BD_LENGTH_MASK 0x0000ffff 383#define BD_LENGTH_MASK 0x0000ffff
384 384
385#define CLASS_CODE_UNRECOG 0x00
386#define CLASS_CODE_DUMMY1 0x01
387#define CLASS_CODE_ETHERTYPE1 0x02
388#define CLASS_CODE_ETHERTYPE2 0x03
389#define CLASS_CODE_USER_PROG1 0x04
390#define CLASS_CODE_USER_PROG2 0x05
391#define CLASS_CODE_USER_PROG3 0x06
392#define CLASS_CODE_USER_PROG4 0x07
393#define CLASS_CODE_TCP_IPV4 0x08
394#define CLASS_CODE_UDP_IPV4 0x09
395#define CLASS_CODE_AH_ESP_IPV4 0x0a
396#define CLASS_CODE_SCTP_IPV4 0x0b
397#define CLASS_CODE_TCP_IPV6 0x0c
398#define CLASS_CODE_UDP_IPV6 0x0d
399#define CLASS_CODE_AH_ESP_IPV6 0x0e
400#define CLASS_CODE_SCTP_IPV6 0x0f
401
402#define FPR_FILER_MASK 0xFFFFFFFF 385#define FPR_FILER_MASK 0xFFFFFFFF
403#define MAX_FILER_IDX 0xFF 386#define MAX_FILER_IDX 0xFF
404 387
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 3bc8e276ba4d..0840590958dd 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -645,42 +645,6 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
645} 645}
646#endif 646#endif
647 647
648static int gfar_ethflow_to_class(int flow_type, u64 *class)
649{
650 switch (flow_type) {
651 case TCP_V4_FLOW:
652 *class = CLASS_CODE_TCP_IPV4;
653 break;
654 case UDP_V4_FLOW:
655 *class = CLASS_CODE_UDP_IPV4;
656 break;
657 case AH_V4_FLOW:
658 case ESP_V4_FLOW:
659 *class = CLASS_CODE_AH_ESP_IPV4;
660 break;
661 case SCTP_V4_FLOW:
662 *class = CLASS_CODE_SCTP_IPV4;
663 break;
664 case TCP_V6_FLOW:
665 *class = CLASS_CODE_TCP_IPV6;
666 break;
667 case UDP_V6_FLOW:
668 *class = CLASS_CODE_UDP_IPV6;
669 break;
670 case AH_V6_FLOW:
671 case ESP_V6_FLOW:
672 *class = CLASS_CODE_AH_ESP_IPV6;
673 break;
674 case SCTP_V6_FLOW:
675 *class = CLASS_CODE_SCTP_IPV6;
676 break;
677 default:
678 return 0;
679 }
680
681 return 1;
682}
683
684static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) 648static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
685{ 649{
686 u32 fcr = 0x0, fpr = FPR_FILER_MASK; 650 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
@@ -778,11 +742,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
778 case UDP_V6_FLOW: 742 case UDP_V6_FLOW:
779 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP; 743 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
780 break; 744 break;
781 case IPV4_FLOW:
782 cmp_rqfpr = RQFPR_IPV4;
783 case IPV6_FLOW:
784 cmp_rqfpr = RQFPR_IPV6;
785 break;
786 default: 745 default:
787 printk(KERN_ERR "Right now this class is not supported\n"); 746 printk(KERN_ERR "Right now this class is not supported\n");
788 return 0; 747 return 0;
@@ -848,18 +807,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
848 807
849static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 808static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
850{ 809{
851 u64 class;
852
853 if (!gfar_ethflow_to_class(cmd->flow_type, &class))
854 return -EINVAL;
855
856 if (class < CLASS_CODE_USER_PROG1 ||
857 class > CLASS_CODE_SCTP_IPV6)
858 return -EINVAL;
859
860 /* write the filer rules here */ 810 /* write the filer rules here */
861 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) 811 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
862 return -1; 812 return -EINVAL;
863 813
864 return 0; 814 return 0;
865} 815}
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 8ff68ae6b520..a7d6cad32953 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -782,7 +782,8 @@ static int ibmlana_open(struct net_device *dev)
782 782
783 /* register resources - only necessary for IRQ */ 783 /* register resources - only necessary for IRQ */
784 784
785 result = request_irq(priv->realirq, irq_handler, IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 785 result = request_irq(priv->realirq, irq_handler, IRQF_SHARED,
786 dev->name, dev);
786 if (result != 0) { 787 if (result != 0) {
787 printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq); 788 printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
788 return result; 789 return result;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 994c80939c7a..be4773f54a24 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2230,17 +2230,9 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
2230 jme_restart_rx_engine(jme); 2230 jme_restart_rx_engine(jme);
2231 } 2231 }
2232 2232
2233 if (new_mtu > 1900) {
2234 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2235 NETIF_F_TSO | NETIF_F_TSO6);
2236 } else {
2237 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2238 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2239 if (test_bit(JME_FLAG_TSO, &jme->flags))
2240 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2241 }
2242
2243 netdev->mtu = new_mtu; 2233 netdev->mtu = new_mtu;
2234 netdev_update_features(netdev);
2235
2244 jme_reset_link(jme); 2236 jme_reset_link(jme);
2245 2237
2246 return 0; 2238 return 0;
@@ -2640,19 +2632,20 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
2640} 2632}
2641 2633
2642static u32 2634static u32
2643jme_get_rx_csum(struct net_device *netdev) 2635jme_fix_features(struct net_device *netdev, u32 features)
2644{ 2636{
2645 struct jme_adapter *jme = netdev_priv(netdev); 2637 if (netdev->mtu > 1900)
2646 return jme->reg_rxmcs & RXMCS_CHECKSUM; 2638 features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
2639 return features;
2647} 2640}
2648 2641
2649static int 2642static int
2650jme_set_rx_csum(struct net_device *netdev, u32 on) 2643jme_set_features(struct net_device *netdev, u32 features)
2651{ 2644{
2652 struct jme_adapter *jme = netdev_priv(netdev); 2645 struct jme_adapter *jme = netdev_priv(netdev);
2653 2646
2654 spin_lock_bh(&jme->rxmcs_lock); 2647 spin_lock_bh(&jme->rxmcs_lock);
2655 if (on) 2648 if (features & NETIF_F_RXCSUM)
2656 jme->reg_rxmcs |= RXMCS_CHECKSUM; 2649 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2657 else 2650 else
2658 jme->reg_rxmcs &= ~RXMCS_CHECKSUM; 2651 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
@@ -2663,42 +2656,6 @@ jme_set_rx_csum(struct net_device *netdev, u32 on)
2663} 2656}
2664 2657
2665static int 2658static int
2666jme_set_tx_csum(struct net_device *netdev, u32 on)
2667{
2668 struct jme_adapter *jme = netdev_priv(netdev);
2669
2670 if (on) {
2671 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2672 if (netdev->mtu <= 1900)
2673 netdev->features |=
2674 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2675 } else {
2676 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2677 netdev->features &=
2678 ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2679 }
2680
2681 return 0;
2682}
2683
2684static int
2685jme_set_tso(struct net_device *netdev, u32 on)
2686{
2687 struct jme_adapter *jme = netdev_priv(netdev);
2688
2689 if (on) {
2690 set_bit(JME_FLAG_TSO, &jme->flags);
2691 if (netdev->mtu <= 1900)
2692 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2693 } else {
2694 clear_bit(JME_FLAG_TSO, &jme->flags);
2695 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2696 }
2697
2698 return 0;
2699}
2700
2701static int
2702jme_nway_reset(struct net_device *netdev) 2659jme_nway_reset(struct net_device *netdev)
2703{ 2660{
2704 struct jme_adapter *jme = netdev_priv(netdev); 2661 struct jme_adapter *jme = netdev_priv(netdev);
@@ -2839,11 +2796,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
2839 .get_link = jme_get_link, 2796 .get_link = jme_get_link,
2840 .get_msglevel = jme_get_msglevel, 2797 .get_msglevel = jme_get_msglevel,
2841 .set_msglevel = jme_set_msglevel, 2798 .set_msglevel = jme_set_msglevel,
2842 .get_rx_csum = jme_get_rx_csum,
2843 .set_rx_csum = jme_set_rx_csum,
2844 .set_tx_csum = jme_set_tx_csum,
2845 .set_tso = jme_set_tso,
2846 .set_sg = ethtool_op_set_sg,
2847 .nway_reset = jme_nway_reset, 2799 .nway_reset = jme_nway_reset,
2848 .get_eeprom_len = jme_get_eeprom_len, 2800 .get_eeprom_len = jme_get_eeprom_len,
2849 .get_eeprom = jme_get_eeprom, 2801 .get_eeprom = jme_get_eeprom,
@@ -2903,6 +2855,8 @@ static const struct net_device_ops jme_netdev_ops = {
2903 .ndo_change_mtu = jme_change_mtu, 2855 .ndo_change_mtu = jme_change_mtu,
2904 .ndo_tx_timeout = jme_tx_timeout, 2856 .ndo_tx_timeout = jme_tx_timeout,
2905 .ndo_vlan_rx_register = jme_vlan_rx_register, 2857 .ndo_vlan_rx_register = jme_vlan_rx_register,
2858 .ndo_fix_features = jme_fix_features,
2859 .ndo_set_features = jme_set_features,
2906}; 2860};
2907 2861
2908static int __devinit 2862static int __devinit
@@ -2957,6 +2911,12 @@ jme_init_one(struct pci_dev *pdev,
2957 netdev->netdev_ops = &jme_netdev_ops; 2911 netdev->netdev_ops = &jme_netdev_ops;
2958 netdev->ethtool_ops = &jme_ethtool_ops; 2912 netdev->ethtool_ops = &jme_ethtool_ops;
2959 netdev->watchdog_timeo = TX_TIMEOUT; 2913 netdev->watchdog_timeo = TX_TIMEOUT;
2914 netdev->hw_features = NETIF_F_IP_CSUM |
2915 NETIF_F_IPV6_CSUM |
2916 NETIF_F_SG |
2917 NETIF_F_TSO |
2918 NETIF_F_TSO6 |
2919 NETIF_F_RXCSUM;
2960 netdev->features = NETIF_F_IP_CSUM | 2920 netdev->features = NETIF_F_IP_CSUM |
2961 NETIF_F_IPV6_CSUM | 2921 NETIF_F_IPV6_CSUM |
2962 NETIF_F_SG | 2922 NETIF_F_SG |
@@ -3040,8 +3000,9 @@ jme_init_one(struct pci_dev *pdev,
3040 jme->reg_txpfc = 0; 3000 jme->reg_txpfc = 0;
3041 jme->reg_pmcs = PMCS_MFEN; 3001 jme->reg_pmcs = PMCS_MFEN;
3042 jme->reg_gpreg1 = GPREG1_DEFAULT; 3002 jme->reg_gpreg1 = GPREG1_DEFAULT;
3043 set_bit(JME_FLAG_TXCSUM, &jme->flags); 3003
3044 set_bit(JME_FLAG_TSO, &jme->flags); 3004 if (jme->reg_rxmcs & RXMCS_CHECKSUM)
3005 netdev->features |= NETIF_F_RXCSUM;
3045 3006
3046 /* 3007 /*
3047 * Get Max Read Req Size from PCI Config Space 3008 * Get Max Read Req Size from PCI Config Space
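jme also illustrates the offload model this series moves drivers to: user-togglable offloads are advertised in hw_features, ndo_fix_features drops the ones the hardware cannot honour in its current state (here anything above a 1900-byte MTU), ndo_set_features applies the result, and paths that change that state simply call netdev_update_features() instead of editing netdev->features by hand. A condensed sketch of that shape; the 1900-byte limit comes from jme, the rest is illustrative:

#include <linux/netdevice.h>

/* Drop offloads the hardware cannot do in the current state. */
static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	if (dev->mtu > 1900)
		features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
	return features;
}

/* State change: let the core re-run fix_features/set_features. */
static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	netdev_update_features(dev);
	return 0;
}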
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 8bf30451e821..e9aaeca96abc 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -468,8 +468,6 @@ struct jme_adapter {
468enum jme_flags_bits { 468enum jme_flags_bits {
469 JME_FLAG_MSI = 1, 469 JME_FLAG_MSI = 1,
470 JME_FLAG_SSET = 2, 470 JME_FLAG_SSET = 2,
471 JME_FLAG_TXCSUM = 3,
472 JME_FLAG_TSO = 4,
473 JME_FLAG_POLL = 5, 471 JME_FLAG_POLL = 5,
474 JME_FLAG_SHUTDOWN = 6, 472 JME_FLAG_SHUTDOWN = 6,
475}; 473};
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7f7d5708a658..2c37a3804303 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -1221,7 +1221,6 @@ struct ksz_port_info {
1221#define LINK_INT_WORKING (1 << 0) 1221#define LINK_INT_WORKING (1 << 0)
1222#define SMALL_PACKET_TX_BUG (1 << 1) 1222#define SMALL_PACKET_TX_BUG (1 << 1)
1223#define HALF_DUPLEX_SIGNAL_BUG (1 << 2) 1223#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
1224#define IPV6_CSUM_GEN_HACK (1 << 3)
1225#define RX_HUGE_FRAME (1 << 4) 1224#define RX_HUGE_FRAME (1 << 4)
1226#define STP_SUPPORT (1 << 8) 1225#define STP_SUPPORT (1 << 8)
1227 1226
@@ -3748,7 +3747,6 @@ static int hw_init(struct ksz_hw *hw)
3748 if (1 == rc) 3747 if (1 == rc)
3749 hw->features |= HALF_DUPLEX_SIGNAL_BUG; 3748 hw->features |= HALF_DUPLEX_SIGNAL_BUG;
3750 } 3749 }
3751 hw->features |= IPV6_CSUM_GEN_HACK;
3752 return rc; 3750 return rc;
3753} 3751}
3754 3752
@@ -4887,8 +4885,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4887 left = hw_alloc_pkt(hw, skb->len, num); 4885 left = hw_alloc_pkt(hw, skb->len, num);
4888 if (left) { 4886 if (left) {
4889 if (left < num || 4887 if (left < num ||
4890 ((hw->features & IPV6_CSUM_GEN_HACK) && 4888 ((CHECKSUM_PARTIAL == skb->ip_summed) &&
4891 (CHECKSUM_PARTIAL == skb->ip_summed) &&
4892 (ETH_P_IPV6 == htons(skb->protocol)))) { 4889 (ETH_P_IPV6 == htons(skb->protocol)))) {
4893 struct sk_buff *org_skb = skb; 4890 struct sk_buff *org_skb = skb;
4894 4891
@@ -6583,57 +6580,33 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
6583} 6580}
6584 6581
6585/** 6582/**
6586 * netdev_get_rx_csum - get receive checksum support 6583 * netdev_set_features - set receive checksum support
6587 * @dev: Network device. 6584 * @dev: Network device.
6588 * 6585 * @features: New device features (offloads).
6589 * This function gets receive checksum support setting.
6590 *
6591 * Return true if receive checksum is enabled; false otherwise.
6592 */
6593static u32 netdev_get_rx_csum(struct net_device *dev)
6594{
6595 struct dev_priv *priv = netdev_priv(dev);
6596 struct dev_info *hw_priv = priv->adapter;
6597 struct ksz_hw *hw = &hw_priv->hw;
6598
6599 return hw->rx_cfg &
6600 (DMA_RX_CSUM_UDP |
6601 DMA_RX_CSUM_TCP |
6602 DMA_RX_CSUM_IP);
6603}
6604
6605/**
6606 * netdev_set_rx_csum - set receive checksum support
6607 * @dev: Network device.
6608 * @data: Zero to disable receive checksum support.
6609 * 6586 *
6610 * This function sets receive checksum support setting. 6587 * This function sets receive checksum support setting.
6611 * 6588 *
6612 * Return 0 if successful; otherwise an error code. 6589 * Return 0 if successful; otherwise an error code.
6613 */ 6590 */
6614static int netdev_set_rx_csum(struct net_device *dev, u32 data) 6591static int netdev_set_features(struct net_device *dev, u32 features)
6615{ 6592{
6616 struct dev_priv *priv = netdev_priv(dev); 6593 struct dev_priv *priv = netdev_priv(dev);
6617 struct dev_info *hw_priv = priv->adapter; 6594 struct dev_info *hw_priv = priv->adapter;
6618 struct ksz_hw *hw = &hw_priv->hw; 6595 struct ksz_hw *hw = &hw_priv->hw;
6619 u32 new_setting = hw->rx_cfg;
6620 6596
6621 if (data)
6622 new_setting |=
6623 (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
6624 DMA_RX_CSUM_IP);
6625 else
6626 new_setting &=
6627 ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
6628 DMA_RX_CSUM_IP);
6629 new_setting &= ~DMA_RX_CSUM_UDP;
6630 mutex_lock(&hw_priv->lock); 6597 mutex_lock(&hw_priv->lock);
6631 if (new_setting != hw->rx_cfg) { 6598
6632 hw->rx_cfg = new_setting; 6599 /* see note in hw_setup() */
6633 if (hw->enabled) 6600 if (features & NETIF_F_RXCSUM)
6634 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); 6601 hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
6635 } 6602 else
6603 hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
6604
6605 if (hw->enabled)
6606 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
6607
6636 mutex_unlock(&hw_priv->lock); 6608 mutex_unlock(&hw_priv->lock);
6609
6637 return 0; 6610 return 0;
6638} 6611}
6639 6612
@@ -6658,12 +6631,6 @@ static struct ethtool_ops netdev_ethtool_ops = {
6658 .get_strings = netdev_get_strings, 6631 .get_strings = netdev_get_strings,
6659 .get_sset_count = netdev_get_sset_count, 6632 .get_sset_count = netdev_get_sset_count,
6660 .get_ethtool_stats = netdev_get_ethtool_stats, 6633 .get_ethtool_stats = netdev_get_ethtool_stats,
6661 .get_rx_csum = netdev_get_rx_csum,
6662 .set_rx_csum = netdev_set_rx_csum,
6663 .get_tx_csum = ethtool_op_get_tx_csum,
6664 .set_tx_csum = ethtool_op_set_tx_csum,
6665 .get_sg = ethtool_op_get_sg,
6666 .set_sg = ethtool_op_set_sg,
6667}; 6634};
6668 6635
6669/* 6636/*
@@ -6828,14 +6795,15 @@ static int __init netdev_init(struct net_device *dev)
6828 /* 500 ms timeout */ 6795 /* 500 ms timeout */
6829 dev->watchdog_timeo = HZ / 2; 6796 dev->watchdog_timeo = HZ / 2;
6830 6797
6831 dev->features |= NETIF_F_IP_CSUM; 6798 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
6832 6799
6833 /* 6800 /*
6834 * Hardware does not really support IPv6 checksum generation, but 6801 * Hardware does not really support IPv6 checksum generation, but
6835 * driver actually runs faster with this on. Refer IPV6_CSUM_GEN_HACK. 6802 * driver actually runs faster with this on.
6836 */ 6803 */
6837 dev->features |= NETIF_F_IPV6_CSUM; 6804 dev->hw_features |= NETIF_F_IPV6_CSUM;
6838 dev->features |= NETIF_F_SG; 6805
6806 dev->features |= dev->hw_features;
6839 6807
6840 sema_init(&priv->proc_sem, 1); 6808 sema_init(&priv->proc_sem, 1);
6841 6809
@@ -6860,6 +6828,7 @@ static const struct net_device_ops netdev_ops = {
6860 .ndo_start_xmit = netdev_tx, 6828 .ndo_start_xmit = netdev_tx,
6861 .ndo_tx_timeout = netdev_tx_timeout, 6829 .ndo_tx_timeout = netdev_tx_timeout,
6862 .ndo_change_mtu = netdev_change_mtu, 6830 .ndo_change_mtu = netdev_change_mtu,
6831 .ndo_set_features = netdev_set_features,
6863 .ndo_set_mac_address = netdev_set_mac_address, 6832 .ndo_set_mac_address = netdev_set_mac_address,
6864 .ndo_validate_addr = eth_validate_addr, 6833 .ndo_validate_addr = eth_validate_addr,
6865 .ndo_do_ioctl = netdev_ioctl, 6834 .ndo_do_ioctl = netdev_ioctl,
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 79ccb54ab00c..2cb4e792f871 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1171,8 +1171,7 @@ static int __init macb_probe(struct platform_device *pdev)
1171 } 1171 }
1172 1172
1173 dev->irq = platform_get_irq(pdev, 0); 1173 dev->irq = platform_get_irq(pdev, 0);
1174 err = request_irq(dev->irq, macb_interrupt, IRQF_SAMPLE_RANDOM, 1174 err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
1175 dev->name, dev);
1176 if (err) { 1175 if (err) {
1177 printk(KERN_ERR 1176 printk(KERN_ERR
1178 "%s: Unable to request IRQ %d (error %d)\n", 1177 "%s: Unable to request IRQ %d (error %d)\n",
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 679dc8519c5b..77220687b92a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1177,7 +1177,7 @@ struct netxen_adapter {
1177 u8 max_sds_rings; 1177 u8 max_sds_rings;
1178 u8 driver_mismatch; 1178 u8 driver_mismatch;
1179 u8 msix_supported; 1179 u8 msix_supported;
1180 u8 rx_csum; 1180 u8 __pad;
1181 u8 pci_using_dac; 1181 u8 pci_using_dac;
1182 u8 portnum; 1182 u8 portnum;
1183 u8 physical_port; 1183 u8 physical_port;
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 3bdcc803ec68..29f90baaa79b 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -676,62 +676,6 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
676 } 676 }
677} 677}
678 678
679static u32 netxen_nic_get_tx_csum(struct net_device *dev)
680{
681 return dev->features & NETIF_F_IP_CSUM;
682}
683
684static u32 netxen_nic_get_rx_csum(struct net_device *dev)
685{
686 struct netxen_adapter *adapter = netdev_priv(dev);
687 return adapter->rx_csum;
688}
689
690static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data)
691{
692 struct netxen_adapter *adapter = netdev_priv(dev);
693
694 if (data) {
695 adapter->rx_csum = data;
696 return 0;
697 }
698
699 if (dev->features & NETIF_F_LRO) {
700 if (netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_DISABLED))
701 return -EIO;
702
703 dev->features &= ~NETIF_F_LRO;
704 netxen_send_lro_cleanup(adapter);
705 netdev_info(dev, "disabling LRO as rx_csum is off\n");
706 }
707 adapter->rx_csum = data;
708 return 0;
709}
710
711static u32 netxen_nic_get_tso(struct net_device *dev)
712{
713 struct netxen_adapter *adapter = netdev_priv(dev);
714
715 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
716 return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
717
718 return (dev->features & NETIF_F_TSO) != 0;
719}
720
721static int netxen_nic_set_tso(struct net_device *dev, u32 data)
722{
723 if (data) {
724 struct netxen_adapter *adapter = netdev_priv(dev);
725
726 dev->features |= NETIF_F_TSO;
727 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
728 dev->features |= NETIF_F_TSO6;
729 } else
730 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
731
732 return 0;
733}
734
735static void 679static void
736netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 680netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
737{ 681{
@@ -866,43 +810,6 @@ static int netxen_get_intr_coalesce(struct net_device *netdev,
866 return 0; 810 return 0;
867} 811}
868 812
869static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
870{
871 struct netxen_adapter *adapter = netdev_priv(netdev);
872 int hw_lro;
873
874 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
875 return -EINVAL;
876
877 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
878 return -EINVAL;
879
880 if (!adapter->rx_csum) {
881 netdev_info(netdev, "rx csum is off, cannot toggle LRO\n");
882 return -EINVAL;
883 }
884
885 if (!!(data & ETH_FLAG_LRO) == !!(netdev->features & NETIF_F_LRO))
886 return 0;
887
888 if (data & ETH_FLAG_LRO) {
889 hw_lro = NETXEN_NIC_LRO_ENABLED;
890 netdev->features |= NETIF_F_LRO;
891 } else {
892 hw_lro = NETXEN_NIC_LRO_DISABLED;
893 netdev->features &= ~NETIF_F_LRO;
894 }
895
896 if (netxen_config_hw_lro(adapter, hw_lro))
897 return -EIO;
898
899 if ((hw_lro == 0) && netxen_send_lro_cleanup(adapter))
900 return -EIO;
901
902
903 return 0;
904}
905
906const struct ethtool_ops netxen_nic_ethtool_ops = { 813const struct ethtool_ops netxen_nic_ethtool_ops = {
907 .get_settings = netxen_nic_get_settings, 814 .get_settings = netxen_nic_get_settings,
908 .set_settings = netxen_nic_set_settings, 815 .set_settings = netxen_nic_set_settings,
@@ -916,21 +823,12 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
916 .set_ringparam = netxen_nic_set_ringparam, 823 .set_ringparam = netxen_nic_set_ringparam,
917 .get_pauseparam = netxen_nic_get_pauseparam, 824 .get_pauseparam = netxen_nic_get_pauseparam,
918 .set_pauseparam = netxen_nic_set_pauseparam, 825 .set_pauseparam = netxen_nic_set_pauseparam,
919 .get_tx_csum = netxen_nic_get_tx_csum,
920 .set_tx_csum = ethtool_op_set_tx_csum,
921 .set_sg = ethtool_op_set_sg,
922 .get_tso = netxen_nic_get_tso,
923 .set_tso = netxen_nic_set_tso,
924 .get_wol = netxen_nic_get_wol, 826 .get_wol = netxen_nic_get_wol,
925 .set_wol = netxen_nic_set_wol, 827 .set_wol = netxen_nic_set_wol,
926 .self_test = netxen_nic_diag_test, 828 .self_test = netxen_nic_diag_test,
927 .get_strings = netxen_nic_get_strings, 829 .get_strings = netxen_nic_get_strings,
928 .get_ethtool_stats = netxen_nic_get_ethtool_stats, 830 .get_ethtool_stats = netxen_nic_get_ethtool_stats,
929 .get_sset_count = netxen_get_sset_count, 831 .get_sset_count = netxen_get_sset_count,
930 .get_rx_csum = netxen_nic_get_rx_csum,
931 .set_rx_csum = netxen_nic_set_rx_csum,
932 .get_coalesce = netxen_get_intr_coalesce, 832 .get_coalesce = netxen_get_intr_coalesce,
933 .set_coalesce = netxen_set_intr_coalesce, 833 .set_coalesce = netxen_set_intr_coalesce,
934 .get_flags = ethtool_op_get_flags,
935 .set_flags = netxen_nic_set_flags,
936}; 834};
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 731077d8d962..7f999671c7b2 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1483,7 +1483,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
1483 if (!skb) 1483 if (!skb)
1484 goto no_skb; 1484 goto no_skb;
1485 1485
1486 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { 1486 if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
1487 && cksum == STATUS_CKSUM_OK)) {
1487 adapter->stats.csummed++; 1488 adapter->stats.csummed++;
1488 skb->ip_summed = CHECKSUM_UNNECESSARY; 1489 skb->ip_summed = CHECKSUM_UNNECESSARY;
1489 } else 1490 } else
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index e8a4b6655999..b644383017f9 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -485,6 +485,37 @@ static void netxen_set_multicast_list(struct net_device *dev)
485 adapter->set_multi(dev); 485 adapter->set_multi(dev);
486} 486}
487 487
488static u32 netxen_fix_features(struct net_device *dev, u32 features)
489{
490 if (!(features & NETIF_F_RXCSUM)) {
491 netdev_info(dev, "disabling LRO as RXCSUM is off\n");
492
493 features &= ~NETIF_F_LRO;
494 }
495
496 return features;
497}
498
499static int netxen_set_features(struct net_device *dev, u32 features)
500{
501 struct netxen_adapter *adapter = netdev_priv(dev);
502 int hw_lro;
503
504 if (!((dev->features ^ features) & NETIF_F_LRO))
505 return 0;
506
507 hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
508 : NETXEN_NIC_LRO_DISABLED;
509
510 if (netxen_config_hw_lro(adapter, hw_lro))
511 return -EIO;
512
513 if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
514 return -EIO;
515
516 return 0;
517}
518
488static const struct net_device_ops netxen_netdev_ops = { 519static const struct net_device_ops netxen_netdev_ops = {
489 .ndo_open = netxen_nic_open, 520 .ndo_open = netxen_nic_open,
490 .ndo_stop = netxen_nic_close, 521 .ndo_stop = netxen_nic_close,
@@ -495,6 +526,8 @@ static const struct net_device_ops netxen_netdev_ops = {
495 .ndo_set_mac_address = netxen_nic_set_mac, 526 .ndo_set_mac_address = netxen_nic_set_mac,
496 .ndo_change_mtu = netxen_nic_change_mtu, 527 .ndo_change_mtu = netxen_nic_change_mtu,
497 .ndo_tx_timeout = netxen_tx_timeout, 528 .ndo_tx_timeout = netxen_tx_timeout,
529 .ndo_fix_features = netxen_fix_features,
530 .ndo_set_features = netxen_set_features,
498#ifdef CONFIG_NET_POLL_CONTROLLER 531#ifdef CONFIG_NET_POLL_CONTROLLER
499 .ndo_poll_controller = netxen_nic_poll_controller, 532 .ndo_poll_controller = netxen_nic_poll_controller,
500#endif 533#endif
@@ -905,7 +938,7 @@ netxen_nic_request_irq(struct netxen_adapter *adapter)
905 struct nx_host_sds_ring *sds_ring; 938 struct nx_host_sds_ring *sds_ring;
906 int err, ring; 939 int err, ring;
907 940
908 unsigned long flags = IRQF_SAMPLE_RANDOM; 941 unsigned long flags = 0;
909 struct net_device *netdev = adapter->netdev; 942 struct net_device *netdev = adapter->netdev;
910 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 943 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
911 944
@@ -1196,7 +1229,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1196 int err = 0; 1229 int err = 0;
1197 struct pci_dev *pdev = adapter->pdev; 1230 struct pci_dev *pdev = adapter->pdev;
1198 1231
1199 adapter->rx_csum = 1;
1200 adapter->mc_enabled = 0; 1232 adapter->mc_enabled = 0;
1201 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1233 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1202 adapter->max_mc_count = 38; 1234 adapter->max_mc_count = 38;
@@ -1210,14 +1242,13 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1210 1242
1211 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 1243 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1212 1244
1213 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 1245 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1214 netdev->features |= (NETIF_F_GRO); 1246 NETIF_F_RXCSUM;
1215 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1216 1247
1217 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 1248 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1218 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1249 netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
1219 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1250
1220 } 1251 netdev->vlan_features |= netdev->hw_features;
1221 1252
1222 if (adapter->pci_using_dac) { 1253 if (adapter->pci_using_dac) {
1223 netdev->features |= NETIF_F_HIGHDMA; 1254 netdev->features |= NETIF_F_HIGHDMA;
@@ -1225,10 +1256,12 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1225 } 1256 }
1226 1257
1227 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) 1258 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
1228 netdev->features |= (NETIF_F_HW_VLAN_TX); 1259 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1229 1260
1230 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) 1261 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
1231 netdev->features |= NETIF_F_LRO; 1262 netdev->hw_features |= NETIF_F_LRO;
1263
1264 netdev->features |= netdev->hw_features;
1232 1265
1233 netdev->irq = adapter->msix_entries[0].vector; 1266 netdev->irq = adapter->msix_entries[0].vector;
1234 1267
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 32678b6c6b39..9e6330bc0531 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6071,8 +6071,7 @@ static int niu_request_irq(struct niu *np)
6071 for (i = 0; i < np->num_ldg; i++) { 6071 for (i = 0; i < np->num_ldg; i++) {
6072 struct niu_ldg *lp = &np->ldg[i]; 6072 struct niu_ldg *lp = &np->ldg[i];
6073 6073
6074 err = request_irq(lp->irq, niu_interrupt, 6074 err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
6075 IRQF_SHARED | IRQF_SAMPLE_RANDOM,
6076 np->irq_name[i], lp); 6075 np->irq_name[i], lp);
6077 if (err) 6076 if (err)
6078 goto out_free_irqs; 6077 goto out_free_irqs;
@@ -7023,6 +7022,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
7023 case UDP_V4_FLOW: 7022 case UDP_V4_FLOW:
7024 *class = CLASS_CODE_UDP_IPV4; 7023 *class = CLASS_CODE_UDP_IPV4;
7025 break; 7024 break;
7025 case AH_ESP_V4_FLOW:
7026 case AH_V4_FLOW: 7026 case AH_V4_FLOW:
7027 case ESP_V4_FLOW: 7027 case ESP_V4_FLOW:
7028 *class = CLASS_CODE_AH_ESP_IPV4; 7028 *class = CLASS_CODE_AH_ESP_IPV4;
@@ -7036,6 +7036,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
7036 case UDP_V6_FLOW: 7036 case UDP_V6_FLOW:
7037 *class = CLASS_CODE_UDP_IPV6; 7037 *class = CLASS_CODE_UDP_IPV6;
7038 break; 7038 break;
7039 case AH_ESP_V6_FLOW:
7039 case AH_V6_FLOW: 7040 case AH_V6_FLOW:
7040 case ESP_V6_FLOW: 7041 case ESP_V6_FLOW:
7041 *class = CLASS_CODE_AH_ESP_IPV6; 7042 *class = CLASS_CODE_AH_ESP_IPV6;
@@ -7889,28 +7890,31 @@ static void niu_force_led(struct niu *np, int on)
7889 nw64_mac(reg, val); 7890 nw64_mac(reg, val);
7890} 7891}
7891 7892
7892static int niu_phys_id(struct net_device *dev, u32 data) 7893static int niu_set_phys_id(struct net_device *dev,
7894 enum ethtool_phys_id_state state)
7895
7893{ 7896{
7894 struct niu *np = netdev_priv(dev); 7897 struct niu *np = netdev_priv(dev);
7895 u64 orig_led_state;
7896 int i;
7897 7898
7898 if (!netif_running(dev)) 7899 if (!netif_running(dev))
7899 return -EAGAIN; 7900 return -EAGAIN;
7900 7901
7901 if (data == 0) 7902 switch (state) {
7902 data = 2; 7903 case ETHTOOL_ID_ACTIVE:
7904 np->orig_led_state = niu_led_state_save(np);
7905 return -EINVAL;
7903 7906
7904 orig_led_state = niu_led_state_save(np); 7907 case ETHTOOL_ID_ON:
7905 for (i = 0; i < (data * 2); i++) { 7908 niu_force_led(np, 1);
7906 int on = ((i % 2) == 0); 7909 break;
7907 7910
7908 niu_force_led(np, on); 7911 case ETHTOOL_ID_OFF:
7912 niu_force_led(np, 0);
7913 break;
7909 7914
7910 if (msleep_interruptible(500)) 7915 case ETHTOOL_ID_INACTIVE:
7911 break; 7916 niu_led_state_restore(np, np->orig_led_state);
7912 } 7917 }
7913 niu_led_state_restore(np, orig_led_state);
7914 7918
7915 return 0; 7919 return 0;
7916} 7920}
@@ -7933,7 +7937,7 @@ static const struct ethtool_ops niu_ethtool_ops = {
7933 .get_strings = niu_get_strings, 7937 .get_strings = niu_get_strings,
7934 .get_sset_count = niu_get_sset_count, 7938 .get_sset_count = niu_get_sset_count,
7935 .get_ethtool_stats = niu_get_ethtool_stats, 7939 .get_ethtool_stats = niu_get_ethtool_stats,
7936 .phys_id = niu_phys_id, 7940 .set_phys_id = niu_set_phys_id,
7937 .get_rxnfc = niu_get_nfc, 7941 .get_rxnfc = niu_get_nfc,
7938 .set_rxnfc = niu_set_nfc, 7942 .set_rxnfc = niu_set_nfc,
7939 .set_flags = niu_set_flags, 7943 .set_flags = niu_set_flags,
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index a41fa8ebe05f..51e177e1860d 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3279,6 +3279,7 @@ struct niu {
3279 unsigned long xpcs_off; 3279 unsigned long xpcs_off;
3280 3280
3281 struct timer_list timer; 3281 struct timer_list timer;
3282 u64 orig_led_state;
3282 const struct niu_phy_ops *phy_ops; 3283 const struct niu_phy_ops *phy_ops;
3283 int phy_addr; 3284 int phy_addr;
3284 3285
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 768037602dff..e89afb929740 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -295,12 +295,14 @@ struct pcnet32_private {
295 struct net_device *next; 295 struct net_device *next;
296 struct mii_if_info mii_if; 296 struct mii_if_info mii_if;
297 struct timer_list watchdog_timer; 297 struct timer_list watchdog_timer;
298 struct timer_list blink_timer;
299 u32 msg_enable; /* debug message level */ 298 u32 msg_enable; /* debug message level */
300 299
301 /* each bit indicates an available PHY */ 300 /* each bit indicates an available PHY */
302 u32 phymask; 301 u32 phymask;
303 unsigned short chip_version; /* which variant this is */ 302 unsigned short chip_version; /* which variant this is */
303
304 /* saved registers during ethtool blink */
305 u16 save_regs[4];
304}; 306};
305 307
306static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); 308static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
@@ -324,8 +326,6 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
324static void pcnet32_ethtool_test(struct net_device *dev, 326static void pcnet32_ethtool_test(struct net_device *dev,
325 struct ethtool_test *eth_test, u64 * data); 327 struct ethtool_test *eth_test, u64 * data);
326static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); 328static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
327static int pcnet32_phys_id(struct net_device *dev, u32 data);
328static void pcnet32_led_blink_callback(struct net_device *dev);
329static int pcnet32_get_regs_len(struct net_device *dev); 329static int pcnet32_get_regs_len(struct net_device *dev);
330static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 330static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
331 void *ptr); 331 void *ptr);
@@ -1022,7 +1022,8 @@ clean_up:
1022 return rc; 1022 return rc;
1023} /* end pcnet32_loopback_test */ 1023} /* end pcnet32_loopback_test */
1024 1024
1025static void pcnet32_led_blink_callback(struct net_device *dev) 1025static int pcnet32_set_phys_id(struct net_device *dev,
1026 enum ethtool_phys_id_state state)
1026{ 1027{
1027 struct pcnet32_private *lp = netdev_priv(dev); 1028 struct pcnet32_private *lp = netdev_priv(dev);
1028 struct pcnet32_access *a = &lp->a; 1029 struct pcnet32_access *a = &lp->a;
@@ -1030,50 +1031,31 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
1030 unsigned long flags; 1031 unsigned long flags;
1031 int i; 1032 int i;
1032 1033
1033 spin_lock_irqsave(&lp->lock, flags); 1034 switch (state) {
1034 for (i = 4; i < 8; i++) 1035 case ETHTOOL_ID_ACTIVE:
1035 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); 1036 /* Save the current value of the bcrs */
1036 spin_unlock_irqrestore(&lp->lock, flags); 1037 spin_lock_irqsave(&lp->lock, flags);
1037 1038 for (i = 4; i < 8; i++)
1038 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); 1039 lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
1039} 1040 spin_unlock_irqrestore(&lp->lock, flags);
1041 return -EINVAL;
1040 1042
1041static int pcnet32_phys_id(struct net_device *dev, u32 data) 1043 case ETHTOOL_ID_ON:
1042{ 1044 case ETHTOOL_ID_OFF:
1043 struct pcnet32_private *lp = netdev_priv(dev); 1045 /* Blink the led */
1044 struct pcnet32_access *a = &lp->a; 1046 spin_lock_irqsave(&lp->lock, flags);
1045 ulong ioaddr = dev->base_addr; 1047 for (i = 4; i < 8; i++)
1046 unsigned long flags; 1048 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
1047 int i, regs[4]; 1049 spin_unlock_irqrestore(&lp->lock, flags);
1050 break;
1048 1051
1049 if (!lp->blink_timer.function) { 1052 case ETHTOOL_ID_INACTIVE:
1050 init_timer(&lp->blink_timer); 1053 /* Restore the original value of the bcrs */
1051 lp->blink_timer.function = (void *)pcnet32_led_blink_callback; 1054 spin_lock_irqsave(&lp->lock, flags);
1052 lp->blink_timer.data = (unsigned long)dev; 1055 for (i = 4; i < 8; i++)
1056 a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
1057 spin_unlock_irqrestore(&lp->lock, flags);
1053 } 1058 }
1054
1055 /* Save the current value of the bcrs */
1056 spin_lock_irqsave(&lp->lock, flags);
1057 for (i = 4; i < 8; i++)
1058 regs[i - 4] = a->read_bcr(ioaddr, i);
1059 spin_unlock_irqrestore(&lp->lock, flags);
1060
1061 mod_timer(&lp->blink_timer, jiffies);
1062 set_current_state(TASK_INTERRUPTIBLE);
1063
1064 /* AV: the limit here makes no sense whatsoever */
1065 if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
1066 data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
1067
1068 msleep_interruptible(data * 1000);
1069 del_timer_sync(&lp->blink_timer);
1070
1071 /* Restore the original value of the bcrs */
1072 spin_lock_irqsave(&lp->lock, flags);
1073 for (i = 4; i < 8; i++)
1074 a->write_bcr(ioaddr, i, regs[i - 4]);
1075 spin_unlock_irqrestore(&lp->lock, flags);
1076
1077 return 0; 1059 return 0;
1078} 1060}
1079 1061
@@ -1450,7 +1432,7 @@ static const struct ethtool_ops pcnet32_ethtool_ops = {
1450 .set_ringparam = pcnet32_set_ringparam, 1432 .set_ringparam = pcnet32_set_ringparam,
1451 .get_strings = pcnet32_get_strings, 1433 .get_strings = pcnet32_get_strings,
1452 .self_test = pcnet32_ethtool_test, 1434 .self_test = pcnet32_ethtool_test,
1453 .phys_id = pcnet32_phys_id, 1435 .set_phys_id = pcnet32_set_phys_id,
1454 .get_regs_len = pcnet32_get_regs_len, 1436 .get_regs_len = pcnet32_get_regs_len,
1455 .get_regs = pcnet32_get_regs, 1437 .get_regs = pcnet32_get_regs,
1456 .get_sset_count = pcnet32_get_sset_count, 1438 .get_sset_count = pcnet32_get_sset_count,
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 348b4f1367c9..f3f737b91248 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3468,7 +3468,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3468{ 3468{
3469 struct net_device *ndev = qdev->ndev; 3469 struct net_device *ndev = qdev->ndev;
3470 int err; 3470 int err;
3471 unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED; 3471 unsigned long irq_flags = IRQF_SHARED;
3472 unsigned long hw_flags; 3472 unsigned long hw_flags;
3473 3473
3474 if (ql_alloc_mem_resources(qdev)) { 3474 if (ql_alloc_mem_resources(qdev)) {
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index dc44564ef6f9..b6e0fc33585f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -29,13 +29,15 @@
29 29
30#include <linux/io.h> 30#include <linux/io.h>
31#include <asm/byteorder.h> 31#include <asm/byteorder.h>
32#include <linux/bitops.h>
33#include <linux/if_vlan.h>
32 34
33#include "qlcnic_hdr.h" 35#include "qlcnic_hdr.h"
34 36
35#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
36#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
37#define _QLCNIC_LINUX_SUBVERSION 15 39#define _QLCNIC_LINUX_SUBVERSION 16
38#define QLCNIC_LINUX_VERSIONID "5.0.15" 40#define QLCNIC_LINUX_VERSIONID "5.0.16"
39#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -93,8 +95,6 @@
93#define TX_IP_PKT 0x04 95#define TX_IP_PKT 0x04
94#define TX_TCP_LSO 0x05 96#define TX_TCP_LSO 0x05
95#define TX_TCP_LSO6 0x06 97#define TX_TCP_LSO6 0x06
96#define TX_IPSEC 0x07
97#define TX_IPSEC_CMD 0x0a
98#define TX_TCPV6_PKT 0x0b 98#define TX_TCPV6_PKT 0x0b
99#define TX_UDPV6_PKT 0x0c 99#define TX_UDPV6_PKT 0x0c
100 100
@@ -200,7 +200,7 @@ struct rcv_desc {
200 __le16 reserved; 200 __le16 reserved;
201 __le32 buffer_length; /* allocated buffer length (usually 2K) */ 201 __le32 buffer_length; /* allocated buffer length (usually 2K) */
202 __le64 addr_buffer; 202 __le64 addr_buffer;
203}; 203} __packed;
204 204
205/* opcode field in status_desc */ 205/* opcode field in status_desc */
206#define QLCNIC_SYN_OFFLOAD 0x03 206#define QLCNIC_SYN_OFFLOAD 0x03
@@ -292,6 +292,7 @@ struct uni_data_desc{
292/* Flash Defines and Structures */ 292/* Flash Defines and Structures */
293#define QLCNIC_FLT_LOCATION 0x3F1000 293#define QLCNIC_FLT_LOCATION 0x3F1000
294#define QLCNIC_FW_IMAGE_REGION 0x74 294#define QLCNIC_FW_IMAGE_REGION 0x74
295#define QLCNIC_BOOTLD_REGION 0X72
295struct qlcnic_flt_header { 296struct qlcnic_flt_header {
296 u16 version; 297 u16 version;
297 u16 len; 298 u16 len;
@@ -306,7 +307,7 @@ struct qlcnic_flt_entry {
306 u8 reserved1; 307 u8 reserved1;
307 u32 size; 308 u32 size;
308 u32 start_addr; 309 u32 start_addr;
309 u32 end_add; 310 u32 end_addr;
310}; 311};
311 312
312/* Magic number to let user know flash is programmed */ 313/* Magic number to let user know flash is programmed */
@@ -365,12 +366,6 @@ struct qlcnic_skb_frag {
365 u64 length; 366 u64 length;
366}; 367};
367 368
368struct qlcnic_recv_crb {
369 u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
370 u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
371 u32 sw_int_mask[NUM_STS_DESC_RINGS];
372};
373
374/* Following defines are for the state of the buffers */ 369/* Following defines are for the state of the buffers */
375#define QLCNIC_BUFFER_FREE 0 370#define QLCNIC_BUFFER_FREE 0
376#define QLCNIC_BUFFER_BUSY 1 371#define QLCNIC_BUFFER_BUSY 1
@@ -387,10 +382,10 @@ struct qlcnic_cmd_buffer {
387 382
388/* In rx_buffer, we do not need multiple fragments as is a single buffer */ 383/* In rx_buffer, we do not need multiple fragments as is a single buffer */
389struct qlcnic_rx_buffer { 384struct qlcnic_rx_buffer {
390 struct list_head list; 385 u16 ref_handle;
391 struct sk_buff *skb; 386 struct sk_buff *skb;
387 struct list_head list;
392 u64 dma; 388 u64 dma;
393 u16 ref_handle;
394}; 389};
395 390
396/* Board types */ 391/* Board types */
@@ -398,6 +393,25 @@ struct qlcnic_rx_buffer {
398#define QLCNIC_XGBE 0x02 393#define QLCNIC_XGBE 0x02
399 394
400/* 395/*
396 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
397 * adjusted based on configured MTU.
398 */
399#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
400#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
401
402#define QLCNIC_INTR_DEFAULT 0x04
403#define QLCNIC_CONFIG_INTR_COALESCE 3
404
405struct qlcnic_nic_intr_coalesce {
406 u8 type;
407 u8 sts_ring_mask;
408 u16 rx_packets;
409 u16 rx_time_us;
410 u16 flag;
411 u32 timer_out;
412};
413
414/*
401 * One hardware_context{} per adapter 415 * One hardware_context{} per adapter
402 * contains interrupt info as well shared hardware info. 416 * contains interrupt info as well shared hardware info.
403 */ 417 */
@@ -415,6 +429,8 @@ struct qlcnic_hardware_context {
415 u8 linkup; 429 u8 linkup;
416 u16 port_type; 430 u16 port_type;
417 u16 board_type; 431 u16 board_type;
432
433 struct qlcnic_nic_intr_coalesce coal;
418}; 434};
419 435
420struct qlcnic_adapter_stats { 436struct qlcnic_adapter_stats {
@@ -442,50 +458,49 @@ struct qlcnic_adapter_stats {
442 * be one Rcv Descriptor for normal packets, one for jumbo and may be others. 458 * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
443 */ 459 */
444struct qlcnic_host_rds_ring { 460struct qlcnic_host_rds_ring {
445 u32 producer; 461 void __iomem *crb_rcv_producer;
462 struct rcv_desc *desc_head;
463 struct qlcnic_rx_buffer *rx_buf_arr;
446 u32 num_desc; 464 u32 num_desc;
465 u32 producer;
447 u32 dma_size; 466 u32 dma_size;
448 u32 skb_size; 467 u32 skb_size;
449 u32 flags; 468 u32 flags;
450 void __iomem *crb_rcv_producer;
451 struct rcv_desc *desc_head;
452 struct qlcnic_rx_buffer *rx_buf_arr;
453 struct list_head free_list; 469 struct list_head free_list;
454 spinlock_t lock; 470 spinlock_t lock;
455 dma_addr_t phys_addr; 471 dma_addr_t phys_addr;
456}; 472} ____cacheline_internodealigned_in_smp;
457 473
458struct qlcnic_host_sds_ring { 474struct qlcnic_host_sds_ring {
459 u32 consumer; 475 u32 consumer;
460 u32 num_desc; 476 u32 num_desc;
461 void __iomem *crb_sts_consumer; 477 void __iomem *crb_sts_consumer;
462 void __iomem *crb_intr_mask;
463 478
464 struct status_desc *desc_head; 479 struct status_desc *desc_head;
465 struct qlcnic_adapter *adapter; 480 struct qlcnic_adapter *adapter;
466 struct napi_struct napi; 481 struct napi_struct napi;
467 struct list_head free_list[NUM_RCV_DESC_RINGS]; 482 struct list_head free_list[NUM_RCV_DESC_RINGS];
468 483
484 void __iomem *crb_intr_mask;
469 int irq; 485 int irq;
470 486
471 dma_addr_t phys_addr; 487 dma_addr_t phys_addr;
472 char name[IFNAMSIZ+4]; 488 char name[IFNAMSIZ+4];
473}; 489} ____cacheline_internodealigned_in_smp;
474 490
475struct qlcnic_host_tx_ring { 491struct qlcnic_host_tx_ring {
476 u32 producer; 492 u32 producer;
477 __le32 *hw_consumer;
478 u32 sw_consumer; 493 u32 sw_consumer;
479 void __iomem *crb_cmd_producer;
480 u32 num_desc; 494 u32 num_desc;
481 495 void __iomem *crb_cmd_producer;
482 struct netdev_queue *txq;
483
484 struct qlcnic_cmd_buffer *cmd_buf_arr;
485 struct cmd_desc_type0 *desc_head; 496 struct cmd_desc_type0 *desc_head;
497 struct qlcnic_cmd_buffer *cmd_buf_arr;
498 __le32 *hw_consumer;
499
486 dma_addr_t phys_addr; 500 dma_addr_t phys_addr;
487 dma_addr_t hw_cons_phys_addr; 501 dma_addr_t hw_cons_phys_addr;
488}; 502 struct netdev_queue *txq;
503} ____cacheline_internodealigned_in_smp;
489 504
490/* 505/*
491 * Receive context. There is one such structure per instance of the 506 * Receive context. There is one such structure per instance of the
@@ -494,12 +509,12 @@ struct qlcnic_host_tx_ring {
494 * present elsewhere. 509 * present elsewhere.
495 */ 510 */
496struct qlcnic_recv_context { 511struct qlcnic_recv_context {
512 struct qlcnic_host_rds_ring *rds_rings;
513 struct qlcnic_host_sds_ring *sds_rings;
497 u32 state; 514 u32 state;
498 u16 context_id; 515 u16 context_id;
499 u16 virt_port; 516 u16 virt_port;
500 517
501 struct qlcnic_host_rds_ring *rds_rings;
502 struct qlcnic_host_sds_ring *sds_rings;
503}; 518};
504 519
505/* HW context creation */ 520/* HW context creation */
@@ -538,9 +553,6 @@ struct qlcnic_recv_context {
538#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008 553#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
539#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009 554#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
540#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a 555#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
541#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
542#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
543#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
544#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012 556#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
545#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013 557#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
546#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014 558#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
@@ -549,17 +561,11 @@ struct qlcnic_recv_context {
549#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017 561#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
550#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018 562#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
551#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019 563#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
552#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
553#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
554#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
555#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
556#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
557#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f 564#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
558 565
559#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 566#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
560#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 567#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
561#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 568#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
562#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023
563#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 569#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
564#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 570#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
565#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 571#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
@@ -597,14 +603,14 @@ struct qlcnic_hostrq_sds_ring {
597 __le32 ring_size; /* Ring entries */ 603 __le32 ring_size; /* Ring entries */
598 __le16 msi_index; 604 __le16 msi_index;
599 __le16 rsvd; /* Padding */ 605 __le16 rsvd; /* Padding */
600}; 606} __packed;
601 607
602struct qlcnic_hostrq_rds_ring { 608struct qlcnic_hostrq_rds_ring {
603 __le64 host_phys_addr; /* Ring base addr */ 609 __le64 host_phys_addr; /* Ring base addr */
604 __le64 buff_size; /* Packet buffer size */ 610 __le64 buff_size; /* Packet buffer size */
605 __le32 ring_size; /* Ring entries */ 611 __le32 ring_size; /* Ring entries */
606 __le32 ring_kind; /* Class of ring */ 612 __le32 ring_kind; /* Class of ring */
607}; 613} __packed;
608 614
609struct qlcnic_hostrq_rx_ctx { 615struct qlcnic_hostrq_rx_ctx {
610 __le64 host_rsp_dma_addr; /* Response dma'd here */ 616 __le64 host_rsp_dma_addr; /* Response dma'd here */
@@ -625,17 +631,17 @@ struct qlcnic_hostrq_rx_ctx {
625 - N hostrq_rds_rings 631 - N hostrq_rds_rings
626 - N hostrq_sds_rings */ 632 - N hostrq_sds_rings */
627 char data[0]; 633 char data[0];
628}; 634} __packed;
629 635
630struct qlcnic_cardrsp_rds_ring{ 636struct qlcnic_cardrsp_rds_ring{
631 __le32 host_producer_crb; /* Crb to use */ 637 __le32 host_producer_crb; /* Crb to use */
632 __le32 rsvd1; /* Padding */ 638 __le32 rsvd1; /* Padding */
633}; 639} __packed;
634 640
635struct qlcnic_cardrsp_sds_ring { 641struct qlcnic_cardrsp_sds_ring {
636 __le32 host_consumer_crb; /* Crb to use */ 642 __le32 host_consumer_crb; /* Crb to use */
637 __le32 interrupt_crb; /* Crb to use */ 643 __le32 interrupt_crb; /* Crb to use */
638}; 644} __packed;
639 645
640struct qlcnic_cardrsp_rx_ctx { 646struct qlcnic_cardrsp_rx_ctx {
641 /* These ring offsets are relative to data[0] below */ 647 /* These ring offsets are relative to data[0] below */
@@ -654,7 +660,7 @@ struct qlcnic_cardrsp_rx_ctx {
654 - N cardrsp_rds_rings 660 - N cardrsp_rds_rings
655 - N cardrs_sds_rings */ 661 - N cardrs_sds_rings */
656 char data[0]; 662 char data[0];
657}; 663} __packed;
658 664
659#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ 665#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
660 (sizeof(HOSTRQ_RX) + \ 666 (sizeof(HOSTRQ_RX) + \
@@ -674,7 +680,7 @@ struct qlcnic_hostrq_cds_ring {
674 __le64 host_phys_addr; /* Ring base addr */ 680 __le64 host_phys_addr; /* Ring base addr */
675 __le32 ring_size; /* Ring entries */ 681 __le32 ring_size; /* Ring entries */
676 __le32 rsvd; /* Padding */ 682 __le32 rsvd; /* Padding */
677}; 683} __packed;
678 684
679struct qlcnic_hostrq_tx_ctx { 685struct qlcnic_hostrq_tx_ctx {
680 __le64 host_rsp_dma_addr; /* Response dma'd here */ 686 __le64 host_rsp_dma_addr; /* Response dma'd here */
@@ -689,12 +695,12 @@ struct qlcnic_hostrq_tx_ctx {
689 __le16 rsvd3; /* Padding */ 695 __le16 rsvd3; /* Padding */
690 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */ 696 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
691 u8 reserved[128]; /* future expansion */ 697 u8 reserved[128]; /* future expansion */
692}; 698} __packed;
693 699
694struct qlcnic_cardrsp_cds_ring { 700struct qlcnic_cardrsp_cds_ring {
695 __le32 host_producer_crb; /* Crb to use */ 701 __le32 host_producer_crb; /* Crb to use */
696 __le32 interrupt_crb; /* Crb to use */ 702 __le32 interrupt_crb; /* Crb to use */
697}; 703} __packed;
698 704
699struct qlcnic_cardrsp_tx_ctx { 705struct qlcnic_cardrsp_tx_ctx {
700 __le32 host_ctx_state; /* Starting state */ 706 __le32 host_ctx_state; /* Starting state */
@@ -703,7 +709,7 @@ struct qlcnic_cardrsp_tx_ctx {
703 u8 virt_port; /* Virtual/Logical id of port */ 709 u8 virt_port; /* Virtual/Logical id of port */
704 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */ 710 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
705 u8 reserved[128]; /* future expansion */ 711 u8 reserved[128]; /* future expansion */
706}; 712} __packed;
707 713
708#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) 714#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
709#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) 715#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
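All of the host-request and card-response blocks above are now marked __packed because the firmware parses them byte-for-byte out of DMA memory; any compiler-inserted padding would shift every following field. A small sketch (struct name invented, size matching the two __le32 members of the response ring shown above) of how the wire size can be pinned down at build time:

struct example_cardrsp_sds_ring {
	__le32 host_consumer_crb;
	__le32 interrupt_crb;
} __packed;

static inline void example_layout_check(void)
{
	/* two 32-bit little-endian words: exactly 8 bytes on the wire */
	BUILD_BUG_ON(sizeof(struct example_cardrsp_sds_ring) != 8);
}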
@@ -737,40 +743,6 @@ struct qlcnic_mac_list_s {
737 uint8_t mac_addr[ETH_ALEN+2]; 743 uint8_t mac_addr[ETH_ALEN+2];
738}; 744};
739 745
740/*
741 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
742 * adjusted based on configured MTU.
743 */
744#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
745#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
746#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
747#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
748
749#define QLCNIC_INTR_DEFAULT 0x04
750
751union qlcnic_nic_intr_coalesce_data {
752 struct {
753 u16 rx_packets;
754 u16 rx_time_us;
755 u16 tx_packets;
756 u16 tx_time_us;
757 } data;
758 u64 word;
759};
760
761struct qlcnic_nic_intr_coalesce {
762 u16 stats_time_us;
763 u16 rate_sample_time;
764 u16 flags;
765 u16 rsvd_1;
766 u32 low_threshold;
767 u32 high_threshold;
768 union qlcnic_nic_intr_coalesce_data normal;
769 union qlcnic_nic_intr_coalesce_data low;
770 union qlcnic_nic_intr_coalesce_data high;
771 union qlcnic_nic_intr_coalesce_data irq;
772};
773
774#define QLCNIC_HOST_REQUEST 0x13 746#define QLCNIC_HOST_REQUEST 0x13
775#define QLCNIC_REQUEST 0x14 747#define QLCNIC_REQUEST 0x14
776 748
@@ -782,50 +754,20 @@ struct qlcnic_nic_intr_coalesce {
782/* 754/*
783 * Driver --> Firmware 755 * Driver --> Firmware
784 */ 756 */
785#define QLCNIC_H2C_OPCODE_START 0 757#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
786#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1 758#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
787#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2 759#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
788#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3 760#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
789#define QLCNIC_H2C_OPCODE_CONFIG_LED 4 761#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
790#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5 762#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
791#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6 763#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
792#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7 764#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
793#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8 765#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
794#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
795#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
796#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
797#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
798#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
799#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
800#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
801#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
802#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
803#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
804#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
805#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
806#define QLCNIC_C2C_OPCODE 22
807#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
808#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
809#define QLCNIC_H2C_OPCODE_LAST 25
810/* 766/*
811 * Firmware --> Driver 767 * Firmware --> Driver
812 */ 768 */
813 769
814#define QLCNIC_C2H_OPCODE_START 128
815#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
816#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
817#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
818#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
819#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
820#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
821#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
822#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
823#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
824#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
825#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
826#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
827#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 770#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
828#define QLCNIC_C2H_OPCODE_LAST 142
829 771
830#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ 772#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
831#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ 773#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
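The opcode table is trimmed to the driver-to-firmware requests that are actually issued, all of which travel in a qlcnic_nic_req with the opcode and port number packed into req_hdr. A hedged sketch of that framing, modelled on qlcnic_config_intr_coalesce later in this patch; the helper name and argument layout are illustrative only:

static int example_send_simple_request(struct qlcnic_adapter *adapter,
				       u64 opcode, u64 arg)
{
	struct qlcnic_nic_req req;

	memset(&req, 0, sizeof(req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
	req.req_hdr = cpu_to_le64(opcode | ((u64)adapter->portnum << 16));
	req.words[0] = cpu_to_le64(arg);	/* meaning depends on the opcode */

	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}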
@@ -894,7 +836,7 @@ struct qlcnic_nic_req {
894 __le64 qhdr; 836 __le64 qhdr;
895 __le64 req_hdr; 837 __le64 req_hdr;
896 __le64 words[6]; 838 __le64 words[6];
897}; 839} __packed;
898 840
899struct qlcnic_mac_req { 841struct qlcnic_mac_req {
900 u8 op; 842 u8 op;
@@ -905,7 +847,7 @@ struct qlcnic_mac_req {
905struct qlcnic_vlan_req { 847struct qlcnic_vlan_req {
906 __le16 vlan_id; 848 __le16 vlan_id;
907 __le16 rsvd[3]; 849 __le16 rsvd[3];
908}; 850} __packed;
909 851
910struct qlcnic_ipaddr { 852struct qlcnic_ipaddr {
911 __be32 ipv4; 853 __be32 ipv4;
@@ -964,14 +906,15 @@ struct qlcnic_filter_hash {
964}; 906};
965 907
966struct qlcnic_adapter { 908struct qlcnic_adapter {
967 struct qlcnic_hardware_context ahw; 909 struct qlcnic_hardware_context *ahw;
968 910 struct qlcnic_recv_context *recv_ctx;
911 struct qlcnic_host_tx_ring *tx_ring;
969 struct net_device *netdev; 912 struct net_device *netdev;
970 struct pci_dev *pdev; 913 struct pci_dev *pdev;
971 struct list_head mac_list;
972 914
973 spinlock_t tx_clean_lock; 915 bool blink_was_down;
974 spinlock_t mac_learn_lock; 916 unsigned long state;
917 u32 flags;
975 918
976 u16 num_txd; 919 u16 num_txd;
977 u16 num_rxd; 920 u16 num_rxd;
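ahw and recv_ctx change from structures embedded in qlcnic_adapter to pointers, so both must now be allocated before anything dereferences adapter->ahw or adapter->recv_ctx. A minimal sketch of that ordering (assumed helper, not the actual probe path):

static int example_alloc_adapter_state(struct qlcnic_adapter *adapter)
{
	adapter->ahw = kzalloc(sizeof(*adapter->ahw), GFP_KERNEL);
	if (!adapter->ahw)
		return -ENOMEM;

	adapter->recv_ctx = kzalloc(sizeof(*adapter->recv_ctx), GFP_KERNEL);
	if (!adapter->recv_ctx) {
		kfree(adapter->ahw);
		adapter->ahw = NULL;
		return -ENOMEM;
	}
	return 0;
}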
@@ -989,7 +932,6 @@ struct qlcnic_adapter {
989 932
990 u8 mc_enabled; 933 u8 mc_enabled;
991 u8 max_mc_count; 934 u8 max_mc_count;
992 u8 rss_supported;
993 u8 fw_wait_cnt; 935 u8 fw_wait_cnt;
994 u8 fw_fail_cnt; 936 u8 fw_fail_cnt;
995 u8 tx_timeo_cnt; 937 u8 tx_timeo_cnt;
@@ -1014,7 +956,6 @@ struct qlcnic_adapter {
1014 956
1015 u32 fw_hal_version; 957 u32 fw_hal_version;
1016 u32 capabilities; 958 u32 capabilities;
1017 u32 flags;
1018 u32 irq; 959 u32 irq;
1019 u32 temp; 960 u32 temp;
1020 961
@@ -1032,16 +973,14 @@ struct qlcnic_adapter {
1032 u8 mac_addr[ETH_ALEN]; 973 u8 mac_addr[ETH_ALEN];
1033 974
1034 u64 dev_rst_time; 975 u64 dev_rst_time;
976 unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
1035 977
1036 struct vlan_group *vlgrp;
1037 struct qlcnic_npar_info *npars; 978 struct qlcnic_npar_info *npars;
1038 struct qlcnic_eswitch *eswitch; 979 struct qlcnic_eswitch *eswitch;
1039 struct qlcnic_nic_template *nic_ops; 980 struct qlcnic_nic_template *nic_ops;
1040 981
1041 struct qlcnic_adapter_stats stats; 982 struct qlcnic_adapter_stats stats;
1042 983 struct list_head mac_list;
1043 struct qlcnic_recv_context recv_ctx;
1044 struct qlcnic_host_tx_ring *tx_ring;
1045 984
1046 void __iomem *tgt_mask_reg; 985 void __iomem *tgt_mask_reg;
1047 void __iomem *tgt_status_reg; 986 void __iomem *tgt_status_reg;
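The vlan_group pointer is replaced with a plain bitmap sized for every possible VLAN id, so tracking VLAN membership becomes a set_bit/clear_bit operation on adapter->vlans. A hedged sketch of the corresponding rx add/kill callbacks (function names illustrative; the real handlers live elsewhere in the driver):

static void example_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	set_bit(vid, adapter->vlans);
}

static void example_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	clear_bit(vid, adapter->vlans);
}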
@@ -1052,11 +991,11 @@ struct qlcnic_adapter {
1052 991
1053 struct delayed_work fw_work; 992 struct delayed_work fw_work;
1054 993
1055 struct qlcnic_nic_intr_coalesce coal;
1056 994
1057 struct qlcnic_filter_hash fhash; 995 struct qlcnic_filter_hash fhash;
1058 996
1059 unsigned long state; 997 spinlock_t tx_clean_lock;
998 spinlock_t mac_learn_lock;
1060 __le32 file_prd_off; /*File fw product offset*/ 999 __le32 file_prd_off; /*File fw product offset*/
1061 u32 fw_version; 1000 u32 fw_version;
1062 const struct firmware *fw; 1001 const struct firmware *fw;
@@ -1078,7 +1017,7 @@ struct qlcnic_info {
1078 __le16 min_tx_bw; 1017 __le16 min_tx_bw;
1079 __le16 max_tx_bw; 1018 __le16 max_tx_bw;
1080 u8 reserved2[104]; 1019 u8 reserved2[104];
1081}; 1020} __packed;
1082 1021
1083struct qlcnic_pci_info { 1022struct qlcnic_pci_info {
1084 __le16 id; /* pci function id */ 1023 __le16 id; /* pci function id */
@@ -1092,7 +1031,7 @@ struct qlcnic_pci_info {
1092 1031
1093 u8 mac[ETH_ALEN]; 1032 u8 mac[ETH_ALEN];
1094 u8 reserved2[106]; 1033 u8 reserved2[106];
1095}; 1034} __packed;
1096 1035
1097struct qlcnic_npar_info { 1036struct qlcnic_npar_info {
1098 u16 pvid; 1037 u16 pvid;
@@ -1209,7 +1148,7 @@ struct __qlcnic_esw_statistics {
1209 __le64 local_frames; 1148 __le64 local_frames;
1210 __le64 numbytes; 1149 __le64 numbytes;
1211 __le64 rsvd[3]; 1150 __le64 rsvd[3];
1212}; 1151} __packed;
1213 1152
1214struct qlcnic_esw_statistics { 1153struct qlcnic_esw_statistics {
1215 struct __qlcnic_esw_statistics rx; 1154 struct __qlcnic_esw_statistics rx;
@@ -1293,7 +1232,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1293 1232
1294int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); 1233int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1295void qlcnic_watchdog_task(struct work_struct *work); 1234void qlcnic_watchdog_task(struct work_struct *work);
1296void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid, 1235void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1297 struct qlcnic_host_rds_ring *rds_ring); 1236 struct qlcnic_host_rds_ring *rds_ring);
1298int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); 1237int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1299void qlcnic_set_multi(struct net_device *netdev); 1238void qlcnic_set_multi(struct net_device *netdev);
@@ -1378,8 +1317,7 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1378 1317
1379static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) 1318static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1380{ 1319{
1381 smp_mb(); 1320 if (likely(tx_ring->producer < tx_ring->sw_consumer))
1382 if (tx_ring->producer < tx_ring->sw_consumer)
1383 return tx_ring->sw_consumer - tx_ring->producer; 1321 return tx_ring->sw_consumer - tx_ring->producer;
1384 else 1322 else
1385 return tx_ring->sw_consumer + tx_ring->num_desc - 1323 return tx_ring->sw_consumer + tx_ring->num_desc -
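qlcnic_tx_avail drops the smp_mb() and annotates the common case with likely(). Worked numbers for the wrap-around arithmetic above (illustrative values only): with num_desc = 1024, producer = 1000 and sw_consumer = 10, the producer is ahead of the consumer, so the else branch computes 10 + 1024 - 1000 = 34 free descriptors; only when the producer index has wrapped behind the consumer does the simple subtraction apply.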
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 27631f23b3fd..050fa5a99ff7 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -67,11 +67,11 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
67int 67int
68qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) 68qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
69{ 69{
70 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 70 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
71 71
72 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { 72 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
73 if (qlcnic_issue_cmd(adapter, 73 if (qlcnic_issue_cmd(adapter,
74 adapter->ahw.pci_func, 74 adapter->ahw->pci_func,
75 adapter->fw_hal_version, 75 adapter->fw_hal_version,
76 recv_ctx->context_id, 76 recv_ctx->context_id,
77 mtu, 77 mtu,
@@ -102,12 +102,12 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
102 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; 102 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
103 u64 phys_addr; 103 u64 phys_addr;
104 104
105 int i, nrds_rings, nsds_rings; 105 u8 i, nrds_rings, nsds_rings;
106 size_t rq_size, rsp_size; 106 size_t rq_size, rsp_size;
107 u32 cap, reg, val, reg2; 107 u32 cap, reg, val, reg2;
108 int err; 108 int err;
109 109
110 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 110 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
111 111
112 nrds_rings = adapter->max_rds_rings; 112 nrds_rings = adapter->max_rds_rings;
113 nsds_rings = adapter->max_sds_rings; 113 nsds_rings = adapter->max_sds_rings;
@@ -119,14 +119,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
119 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, 119 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
120 nsds_rings); 120 nsds_rings);
121 121
122 addr = pci_alloc_consistent(adapter->pdev, 122 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
123 rq_size, &hostrq_phys_addr); 123 &hostrq_phys_addr, GFP_KERNEL);
124 if (addr == NULL) 124 if (addr == NULL)
125 return -ENOMEM; 125 return -ENOMEM;
126 prq = (struct qlcnic_hostrq_rx_ctx *)addr; 126 prq = (struct qlcnic_hostrq_rx_ctx *)addr;
127 127
128 addr = pci_alloc_consistent(adapter->pdev, 128 addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
129 rsp_size, &cardrsp_phys_addr); 129 &cardrsp_phys_addr, GFP_KERNEL);
130 if (addr == NULL) { 130 if (addr == NULL) {
131 err = -ENOMEM; 131 err = -ENOMEM;
132 goto out_free_rq; 132 goto out_free_rq;
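The allocation hunk above shows the conversion used throughout this patch: pci_alloc_consistent/pci_free_consistent pairs become dma_alloc_coherent/dma_free_coherent against &pdev->dev, with an explicit GFP_KERNEL. A minimal self-contained sketch of the idiom (buffer purpose and helper name invented):

static int example_alloc_coherent_buf(struct pci_dev *pdev, size_t size)
{
	dma_addr_t dma;
	void *addr;

	addr = dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/* ... hand (addr, dma) to the firmware or a descriptor ring ... */

	dma_free_coherent(&pdev->dev, size, addr, dma);
	return 0;
}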
@@ -151,7 +151,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
151 151
152 prq->num_rds_rings = cpu_to_le16(nrds_rings); 152 prq->num_rds_rings = cpu_to_le16(nrds_rings);
153 prq->num_sds_rings = cpu_to_le16(nsds_rings); 153 prq->num_sds_rings = cpu_to_le16(nsds_rings);
154 prq->rds_ring_offset = cpu_to_le32(0); 154 prq->rds_ring_offset = 0;
155 155
156 val = le32_to_cpu(prq->rds_ring_offset) + 156 val = le32_to_cpu(prq->rds_ring_offset) +
157 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); 157 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
@@ -187,7 +187,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
187 187
188 phys_addr = hostrq_phys_addr; 188 phys_addr = hostrq_phys_addr;
189 err = qlcnic_issue_cmd(adapter, 189 err = qlcnic_issue_cmd(adapter,
190 adapter->ahw.pci_func, 190 adapter->ahw->pci_func,
191 adapter->fw_hal_version, 191 adapter->fw_hal_version,
192 (u32)(phys_addr >> 32), 192 (u32)(phys_addr >> 32),
193 (u32)(phys_addr & 0xffffffff), 193 (u32)(phys_addr & 0xffffffff),
@@ -207,7 +207,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
207 rds_ring = &recv_ctx->rds_rings[i]; 207 rds_ring = &recv_ctx->rds_rings[i];
208 208
209 reg = le32_to_cpu(prsp_rds[i].host_producer_crb); 209 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
210 rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg; 210 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
211 } 211 }
212 212
213 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) 213 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -219,8 +219,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
219 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); 219 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
220 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); 220 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
221 221
222 sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg; 222 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
223 sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2; 223 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
224 } 224 }
225 225
226 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); 226 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -228,19 +228,20 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
228 recv_ctx->virt_port = prsp->virt_port; 228 recv_ctx->virt_port = prsp->virt_port;
229 229
230out_free_rsp: 230out_free_rsp:
231 pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); 231 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
232 cardrsp_phys_addr);
232out_free_rq: 233out_free_rq:
233 pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); 234 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
234 return err; 235 return err;
235} 236}
236 237
237static void 238static void
238qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) 239qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
239{ 240{
240 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 241 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
241 242
242 if (qlcnic_issue_cmd(adapter, 243 if (qlcnic_issue_cmd(adapter,
243 adapter->ahw.pci_func, 244 adapter->ahw->pci_func,
244 adapter->fw_hal_version, 245 adapter->fw_hal_version,
245 recv_ctx->context_id, 246 recv_ctx->context_id,
246 QLCNIC_DESTROY_CTX_RESET, 247 QLCNIC_DESTROY_CTX_RESET,
@@ -274,14 +275,14 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
274 *(tx_ring->hw_consumer) = 0; 275 *(tx_ring->hw_consumer) = 0;
275 276
276 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 277 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
277 rq_addr = pci_alloc_consistent(adapter->pdev, 278 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
278 rq_size, &rq_phys_addr); 279 &rq_phys_addr, GFP_KERNEL);
279 if (!rq_addr) 280 if (!rq_addr)
280 return -ENOMEM; 281 return -ENOMEM;
281 282
282 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 283 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
283 rsp_addr = pci_alloc_consistent(adapter->pdev, 284 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
284 rsp_size, &rsp_phys_addr); 285 &rsp_phys_addr, GFP_KERNEL);
285 if (!rsp_addr) { 286 if (!rsp_addr) {
286 err = -ENOMEM; 287 err = -ENOMEM;
287 goto out_free_rq; 288 goto out_free_rq;
@@ -313,7 +314,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
313 314
314 phys_addr = rq_phys_addr; 315 phys_addr = rq_phys_addr;
315 err = qlcnic_issue_cmd(adapter, 316 err = qlcnic_issue_cmd(adapter,
316 adapter->ahw.pci_func, 317 adapter->ahw->pci_func,
317 adapter->fw_hal_version, 318 adapter->fw_hal_version,
318 (u32)(phys_addr >> 32), 319 (u32)(phys_addr >> 32),
319 ((u32)phys_addr & 0xffffffff), 320 ((u32)phys_addr & 0xffffffff),
@@ -322,7 +323,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
322 323
323 if (err == QLCNIC_RCODE_SUCCESS) { 324 if (err == QLCNIC_RCODE_SUCCESS) {
324 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 325 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
325 tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp; 326 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
326 327
327 adapter->tx_context_id = 328 adapter->tx_context_id =
328 le16_to_cpu(prsp->context_id); 329 le16_to_cpu(prsp->context_id);
@@ -332,10 +333,11 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
332 err = -EIO; 333 err = -EIO;
333 } 334 }
334 335
335 pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); 336 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
337 rsp_phys_addr);
336 338
337out_free_rq: 339out_free_rq:
338 pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); 340 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
339 341
340 return err; 342 return err;
341} 343}
@@ -344,7 +346,7 @@ static void
344qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) 346qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
345{ 347{
346 if (qlcnic_issue_cmd(adapter, 348 if (qlcnic_issue_cmd(adapter,
347 adapter->ahw.pci_func, 349 adapter->ahw->pci_func,
348 adapter->fw_hal_version, 350 adapter->fw_hal_version,
349 adapter->tx_context_id, 351 adapter->tx_context_id,
350 QLCNIC_DESTROY_CTX_RESET, 352 QLCNIC_DESTROY_CTX_RESET,
@@ -361,7 +363,7 @@ qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
361{ 363{
362 364
363 if (qlcnic_issue_cmd(adapter, 365 if (qlcnic_issue_cmd(adapter,
364 adapter->ahw.pci_func, 366 adapter->ahw->pci_func,
365 adapter->fw_hal_version, 367 adapter->fw_hal_version,
366 reg, 368 reg,
367 0, 369 0,
@@ -378,7 +380,7 @@ int
378qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val) 380qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
379{ 381{
380 return qlcnic_issue_cmd(adapter, 382 return qlcnic_issue_cmd(adapter,
381 adapter->ahw.pci_func, 383 adapter->ahw->pci_func,
382 adapter->fw_hal_version, 384 adapter->fw_hal_version,
383 reg, 385 reg,
384 val, 386 val,
@@ -398,20 +400,19 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
398 400
399 struct pci_dev *pdev = adapter->pdev; 401 struct pci_dev *pdev = adapter->pdev;
400 402
401 recv_ctx = &adapter->recv_ctx; 403 recv_ctx = adapter->recv_ctx;
402 tx_ring = adapter->tx_ring; 404 tx_ring = adapter->tx_ring;
403 405
404 tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32), 406 tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
405 &tx_ring->hw_cons_phys_addr); 407 sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
406 if (tx_ring->hw_consumer == NULL) { 408 if (tx_ring->hw_consumer == NULL) {
407 dev_err(&pdev->dev, "failed to allocate tx consumer\n"); 409 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
408 return -ENOMEM; 410 return -ENOMEM;
409 } 411 }
410 *(tx_ring->hw_consumer) = 0;
411 412
412 /* cmd desc ring */ 413 /* cmd desc ring */
413 addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring), 414 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
414 &tx_ring->phys_addr); 415 &tx_ring->phys_addr, GFP_KERNEL);
415 416
416 if (addr == NULL) { 417 if (addr == NULL) {
417 dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); 418 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
@@ -423,9 +424,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
423 424
424 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 425 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
425 rds_ring = &recv_ctx->rds_rings[ring]; 426 rds_ring = &recv_ctx->rds_rings[ring];
426 addr = pci_alloc_consistent(adapter->pdev, 427 addr = dma_alloc_coherent(&adapter->pdev->dev,
427 RCV_DESC_RINGSIZE(rds_ring), 428 RCV_DESC_RINGSIZE(rds_ring),
428 &rds_ring->phys_addr); 429 &rds_ring->phys_addr, GFP_KERNEL);
429 if (addr == NULL) { 430 if (addr == NULL) {
430 dev_err(&pdev->dev, 431 dev_err(&pdev->dev,
431 "failed to allocate rds ring [%d]\n", ring); 432 "failed to allocate rds ring [%d]\n", ring);
@@ -439,9 +440,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
439 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 440 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
440 sds_ring = &recv_ctx->sds_rings[ring]; 441 sds_ring = &recv_ctx->sds_rings[ring];
441 442
442 addr = pci_alloc_consistent(adapter->pdev, 443 addr = dma_alloc_coherent(&adapter->pdev->dev,
443 STATUS_DESC_RINGSIZE(sds_ring), 444 STATUS_DESC_RINGSIZE(sds_ring),
444 &sds_ring->phys_addr); 445 &sds_ring->phys_addr, GFP_KERNEL);
445 if (addr == NULL) { 446 if (addr == NULL) {
446 dev_err(&pdev->dev, 447 dev_err(&pdev->dev,
447 "failed to allocate sds ring [%d]\n", ring); 448 "failed to allocate sds ring [%d]\n", ring);
@@ -501,11 +502,11 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
501 struct qlcnic_host_tx_ring *tx_ring; 502 struct qlcnic_host_tx_ring *tx_ring;
502 int ring; 503 int ring;
503 504
504 recv_ctx = &adapter->recv_ctx; 505 recv_ctx = adapter->recv_ctx;
505 506
506 tx_ring = adapter->tx_ring; 507 tx_ring = adapter->tx_ring;
507 if (tx_ring->hw_consumer != NULL) { 508 if (tx_ring->hw_consumer != NULL) {
508 pci_free_consistent(adapter->pdev, 509 dma_free_coherent(&adapter->pdev->dev,
509 sizeof(u32), 510 sizeof(u32),
510 tx_ring->hw_consumer, 511 tx_ring->hw_consumer,
511 tx_ring->hw_cons_phys_addr); 512 tx_ring->hw_cons_phys_addr);
@@ -513,7 +514,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
513 } 514 }
514 515
515 if (tx_ring->desc_head != NULL) { 516 if (tx_ring->desc_head != NULL) {
516 pci_free_consistent(adapter->pdev, 517 dma_free_coherent(&adapter->pdev->dev,
517 TX_DESC_RINGSIZE(tx_ring), 518 TX_DESC_RINGSIZE(tx_ring),
518 tx_ring->desc_head, tx_ring->phys_addr); 519 tx_ring->desc_head, tx_ring->phys_addr);
519 tx_ring->desc_head = NULL; 520 tx_ring->desc_head = NULL;
@@ -523,7 +524,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
523 rds_ring = &recv_ctx->rds_rings[ring]; 524 rds_ring = &recv_ctx->rds_rings[ring];
524 525
525 if (rds_ring->desc_head != NULL) { 526 if (rds_ring->desc_head != NULL) {
526 pci_free_consistent(adapter->pdev, 527 dma_free_coherent(&adapter->pdev->dev,
527 RCV_DESC_RINGSIZE(rds_ring), 528 RCV_DESC_RINGSIZE(rds_ring),
528 rds_ring->desc_head, 529 rds_ring->desc_head,
529 rds_ring->phys_addr); 530 rds_ring->phys_addr);
@@ -535,7 +536,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
535 sds_ring = &recv_ctx->sds_rings[ring]; 536 sds_ring = &recv_ctx->sds_rings[ring];
536 537
537 if (sds_ring->desc_head != NULL) { 538 if (sds_ring->desc_head != NULL) {
538 pci_free_consistent(adapter->pdev, 539 dma_free_coherent(&adapter->pdev->dev,
539 STATUS_DESC_RINGSIZE(sds_ring), 540 STATUS_DESC_RINGSIZE(sds_ring),
540 sds_ring->desc_head, 541 sds_ring->desc_head,
541 sds_ring->phys_addr); 542 sds_ring->phys_addr);
@@ -551,9 +552,9 @@ int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
551 int err; 552 int err;
552 u32 arg1; 553 u32 arg1;
553 554
554 arg1 = adapter->ahw.pci_func | BIT_8; 555 arg1 = adapter->ahw->pci_func | BIT_8;
555 err = qlcnic_issue_cmd(adapter, 556 err = qlcnic_issue_cmd(adapter,
556 adapter->ahw.pci_func, 557 adapter->ahw->pci_func,
557 adapter->fw_hal_version, 558 adapter->fw_hal_version,
558 arg1, 559 arg1,
559 0, 560 0,
@@ -582,15 +583,15 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
582 void *nic_info_addr; 583 void *nic_info_addr;
583 size_t nic_size = sizeof(struct qlcnic_info); 584 size_t nic_size = sizeof(struct qlcnic_info);
584 585
585 nic_info_addr = pci_alloc_consistent(adapter->pdev, 586 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
586 nic_size, &nic_dma_t); 587 &nic_dma_t, GFP_KERNEL);
587 if (!nic_info_addr) 588 if (!nic_info_addr)
588 return -ENOMEM; 589 return -ENOMEM;
589 memset(nic_info_addr, 0, nic_size); 590 memset(nic_info_addr, 0, nic_size);
590 591
591 nic_info = (struct qlcnic_info *) nic_info_addr; 592 nic_info = (struct qlcnic_info *) nic_info_addr;
592 err = qlcnic_issue_cmd(adapter, 593 err = qlcnic_issue_cmd(adapter,
593 adapter->ahw.pci_func, 594 adapter->ahw->pci_func,
594 adapter->fw_hal_version, 595 adapter->fw_hal_version,
595 MSD(nic_dma_t), 596 MSD(nic_dma_t),
596 LSD(nic_dma_t), 597 LSD(nic_dma_t),
@@ -623,7 +624,8 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
623 err = -EIO; 624 err = -EIO;
624 } 625 }
625 626
626 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t); 627 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
628 nic_dma_t);
627 return err; 629 return err;
628} 630}
629 631
@@ -639,8 +641,8 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
639 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 641 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
640 return err; 642 return err;
641 643
642 nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size, 644 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
643 &nic_dma_t); 645 &nic_dma_t, GFP_KERNEL);
644 if (!nic_info_addr) 646 if (!nic_info_addr)
645 return -ENOMEM; 647 return -ENOMEM;
646 648
@@ -659,7 +661,7 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
659 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); 661 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
660 662
661 err = qlcnic_issue_cmd(adapter, 663 err = qlcnic_issue_cmd(adapter,
662 adapter->ahw.pci_func, 664 adapter->ahw->pci_func,
663 adapter->fw_hal_version, 665 adapter->fw_hal_version,
664 MSD(nic_dma_t), 666 MSD(nic_dma_t),
665 LSD(nic_dma_t), 667 LSD(nic_dma_t),
@@ -672,7 +674,8 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
672 err = -EIO; 674 err = -EIO;
673 } 675 }
674 676
675 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t); 677 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
678 nic_dma_t);
676 return err; 679 return err;
677} 680}
678 681
@@ -687,15 +690,15 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
687 size_t npar_size = sizeof(struct qlcnic_pci_info); 690 size_t npar_size = sizeof(struct qlcnic_pci_info);
688 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; 691 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
689 692
690 pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size, 693 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
691 &pci_info_dma_t); 694 &pci_info_dma_t, GFP_KERNEL);
692 if (!pci_info_addr) 695 if (!pci_info_addr)
693 return -ENOMEM; 696 return -ENOMEM;
694 memset(pci_info_addr, 0, pci_size); 697 memset(pci_info_addr, 0, pci_size);
695 698
696 npar = (struct qlcnic_pci_info *) pci_info_addr; 699 npar = (struct qlcnic_pci_info *) pci_info_addr;
697 err = qlcnic_issue_cmd(adapter, 700 err = qlcnic_issue_cmd(adapter,
698 adapter->ahw.pci_func, 701 adapter->ahw->pci_func,
699 adapter->fw_hal_version, 702 adapter->fw_hal_version,
700 MSD(pci_info_dma_t), 703 MSD(pci_info_dma_t),
701 LSD(pci_info_dma_t), 704 LSD(pci_info_dma_t),
@@ -721,7 +724,7 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
721 err = -EIO; 724 err = -EIO;
722 } 725 }
723 726
724 pci_free_consistent(adapter->pdev, pci_size, pci_info_addr, 727 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
725 pci_info_dma_t); 728 pci_info_dma_t);
726 return err; 729 return err;
727} 730}
@@ -741,7 +744,7 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
741 arg1 |= pci_func << 8; 744 arg1 |= pci_func << 8;
742 745
743 err = qlcnic_issue_cmd(adapter, 746 err = qlcnic_issue_cmd(adapter,
744 adapter->ahw.pci_func, 747 adapter->ahw->pci_func,
745 adapter->fw_hal_version, 748 adapter->fw_hal_version,
746 arg1, 749 arg1,
747 0, 750 0,
@@ -775,14 +778,14 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
775 return -ENOMEM; 778 return -ENOMEM;
776 779
777 if (adapter->op_mode != QLCNIC_MGMT_FUNC && 780 if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
778 func != adapter->ahw.pci_func) { 781 func != adapter->ahw->pci_func) {
779 dev_err(&adapter->pdev->dev, 782 dev_err(&adapter->pdev->dev,
780 "Not privilege to query stats for func=%d", func); 783 "Not privilege to query stats for func=%d", func);
781 return -EIO; 784 return -EIO;
782 } 785 }
783 786
784 stats_addr = pci_alloc_consistent(adapter->pdev, stats_size, 787 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
785 &stats_dma_t); 788 &stats_dma_t, GFP_KERNEL);
786 if (!stats_addr) { 789 if (!stats_addr) {
787 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n"); 790 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
788 return -ENOMEM; 791 return -ENOMEM;
@@ -793,7 +796,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
793 arg1 |= rx_tx << 15 | stats_size << 16; 796 arg1 |= rx_tx << 15 | stats_size << 16;
794 797
795 err = qlcnic_issue_cmd(adapter, 798 err = qlcnic_issue_cmd(adapter,
796 adapter->ahw.pci_func, 799 adapter->ahw->pci_func,
797 adapter->fw_hal_version, 800 adapter->fw_hal_version,
798 arg1, 801 arg1,
799 MSD(stats_dma_t), 802 MSD(stats_dma_t),
@@ -816,7 +819,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
816 esw_stats->numbytes = le64_to_cpu(stats->numbytes); 819 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
817 } 820 }
818 821
819 pci_free_consistent(adapter->pdev, stats_size, stats_addr, 822 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
820 stats_dma_t); 823 stats_dma_t);
821 return err; 824 return err;
822} 825}
@@ -900,7 +903,7 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
900 arg1 |= BIT_14 | rx_tx << 15; 903 arg1 |= BIT_14 | rx_tx << 15;
901 904
902 return qlcnic_issue_cmd(adapter, 905 return qlcnic_issue_cmd(adapter,
903 adapter->ahw.pci_func, 906 adapter->ahw->pci_func,
904 adapter->fw_hal_version, 907 adapter->fw_hal_version,
905 arg1, 908 arg1,
906 0, 909 0,
@@ -921,7 +924,7 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
921 u8 pci_func; 924 u8 pci_func;
922 pci_func = (*arg1 >> 8); 925 pci_func = (*arg1 >> 8);
923 err = qlcnic_issue_cmd(adapter, 926 err = qlcnic_issue_cmd(adapter,
924 adapter->ahw.pci_func, 927 adapter->ahw->pci_func,
925 adapter->fw_hal_version, 928 adapter->fw_hal_version,
926 *arg1, 929 *arg1,
927 0, 930 0,
@@ -999,7 +1002,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
999 } 1002 }
1000 1003
1001 err = qlcnic_issue_cmd(adapter, 1004 err = qlcnic_issue_cmd(adapter,
1002 adapter->ahw.pci_func, 1005 adapter->ahw->pci_func,
1003 adapter->fw_hal_version, 1006 adapter->fw_hal_version,
1004 arg1, 1007 arg1,
1005 arg2, 1008 arg2,
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 45b2755d6cba..3cd8a169694a 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -150,10 +150,10 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
150{ 150{
151 struct qlcnic_adapter *adapter = netdev_priv(dev); 151 struct qlcnic_adapter *adapter = netdev_priv(dev);
152 int check_sfp_module = 0; 152 int check_sfp_module = 0;
153 u16 pcifn = adapter->ahw.pci_func; 153 u16 pcifn = adapter->ahw->pci_func;
154 154
155 /* read which mode */ 155 /* read which mode */
156 if (adapter->ahw.port_type == QLCNIC_GBE) { 156 if (adapter->ahw->port_type == QLCNIC_GBE) {
157 ecmd->supported = (SUPPORTED_10baseT_Half | 157 ecmd->supported = (SUPPORTED_10baseT_Half |
158 SUPPORTED_10baseT_Full | 158 SUPPORTED_10baseT_Full |
159 SUPPORTED_100baseT_Half | 159 SUPPORTED_100baseT_Half |
@@ -170,7 +170,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
170 ecmd->duplex = adapter->link_duplex; 170 ecmd->duplex = adapter->link_duplex;
171 ecmd->autoneg = adapter->link_autoneg; 171 ecmd->autoneg = adapter->link_autoneg;
172 172
173 } else if (adapter->ahw.port_type == QLCNIC_XGBE) { 173 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
174 u32 val; 174 u32 val;
175 175
176 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); 176 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
@@ -201,7 +201,7 @@ skip:
201 ecmd->phy_address = adapter->physical_port; 201 ecmd->phy_address = adapter->physical_port;
202 ecmd->transceiver = XCVR_EXTERNAL; 202 ecmd->transceiver = XCVR_EXTERNAL;
203 203
204 switch (adapter->ahw.board_type) { 204 switch (adapter->ahw->board_type) {
205 case QLCNIC_BRDTYPE_P3P_REF_QG: 205 case QLCNIC_BRDTYPE_P3P_REF_QG:
206 case QLCNIC_BRDTYPE_P3P_4_GB: 206 case QLCNIC_BRDTYPE_P3P_4_GB:
207 case QLCNIC_BRDTYPE_P3P_4_GB_MM: 207 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
@@ -238,7 +238,7 @@ skip:
238 ecmd->autoneg = AUTONEG_DISABLE; 238 ecmd->autoneg = AUTONEG_DISABLE;
239 break; 239 break;
240 case QLCNIC_BRDTYPE_P3P_10G_TP: 240 case QLCNIC_BRDTYPE_P3P_10G_TP:
241 if (adapter->ahw.port_type == QLCNIC_XGBE) { 241 if (adapter->ahw->port_type == QLCNIC_XGBE) {
242 ecmd->autoneg = AUTONEG_DISABLE; 242 ecmd->autoneg = AUTONEG_DISABLE;
243 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); 243 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
244 ecmd->advertising |= 244 ecmd->advertising |=
@@ -256,7 +256,7 @@ skip:
256 break; 256 break;
257 default: 257 default:
258 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", 258 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
259 adapter->ahw.board_type); 259 adapter->ahw->board_type);
260 return -EIO; 260 return -EIO;
261 } 261 }
262 262
@@ -288,7 +288,7 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
288 __u32 status; 288 __u32 status;
289 289
290 /* read which mode */ 290 /* read which mode */
291 if (adapter->ahw.port_type == QLCNIC_GBE) { 291 if (adapter->ahw->port_type == QLCNIC_GBE) {
292 /* autonegotiation */ 292 /* autonegotiation */
293 if (qlcnic_fw_cmd_set_phy(adapter, 293 if (qlcnic_fw_cmd_set_phy(adapter,
294 QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG, 294 QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
@@ -340,14 +340,14 @@ static void
340qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) 340qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
341{ 341{
342 struct qlcnic_adapter *adapter = netdev_priv(dev); 342 struct qlcnic_adapter *adapter = netdev_priv(dev);
343 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 343 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
344 struct qlcnic_host_sds_ring *sds_ring; 344 struct qlcnic_host_sds_ring *sds_ring;
345 u32 *regs_buff = p; 345 u32 *regs_buff = p;
346 int ring, i = 0, j = 0; 346 int ring, i = 0, j = 0;
347 347
348 memset(p, 0, qlcnic_get_regs_len(dev)); 348 memset(p, 0, qlcnic_get_regs_len(dev));
349 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | 349 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
350 (adapter->ahw.revision_id << 16) | (adapter->pdev)->device; 350 (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
351 351
352 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); 352 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
353 regs_buff[1] = QLCNIC_MGMT_API_VERSION; 353 regs_buff[1] = QLCNIC_MGMT_API_VERSION;
@@ -382,7 +382,7 @@ static u32 qlcnic_test_link(struct net_device *dev)
382 u32 val; 382 u32 val;
383 383
384 val = QLCRD32(adapter, CRB_XG_STATE_P3P); 384 val = QLCRD32(adapter, CRB_XG_STATE_P3P);
385 val = XG_LINK_STATE_P3P(adapter->ahw.pci_func, val); 385 val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
386 return (val == XG_LINK_UP_P3P) ? 0 : 1; 386 return (val == XG_LINK_UP_P3P) ? 0 : 1;
387} 387}
388 388
@@ -482,7 +482,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
482 int port = adapter->physical_port; 482 int port = adapter->physical_port;
483 __u32 val; 483 __u32 val;
484 484
485 if (adapter->ahw.port_type == QLCNIC_GBE) { 485 if (adapter->ahw->port_type == QLCNIC_GBE) {
486 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) 486 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
487 return; 487 return;
488 /* get flow control settings */ 488 /* get flow control settings */
@@ -504,7 +504,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
504 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); 504 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
505 break; 505 break;
506 } 506 }
507 } else if (adapter->ahw.port_type == QLCNIC_XGBE) { 507 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
508 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) 508 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
509 return; 509 return;
510 pause->rx_pause = 1; 510 pause->rx_pause = 1;
@@ -515,7 +515,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
515 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); 515 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
516 } else { 516 } else {
517 dev_err(&netdev->dev, "Unknown board type: %x\n", 517 dev_err(&netdev->dev, "Unknown board type: %x\n",
518 adapter->ahw.port_type); 518 adapter->ahw->port_type);
519 } 519 }
520} 520}
521 521
@@ -528,7 +528,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
528 __u32 val; 528 __u32 val;
529 529
530 /* read mode */ 530 /* read mode */
531 if (adapter->ahw.port_type == QLCNIC_GBE) { 531 if (adapter->ahw->port_type == QLCNIC_GBE) {
532 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) 532 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
533 return -EIO; 533 return -EIO;
534 /* set flow control */ 534 /* set flow control */
@@ -571,7 +571,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
571 break; 571 break;
572 } 572 }
573 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); 573 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
574 } else if (adapter->ahw.port_type == QLCNIC_XGBE) { 574 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
575 if (!pause->rx_pause || pause->autoneg) 575 if (!pause->rx_pause || pause->autoneg)
576 return -EOPNOTSUPP; 576 return -EOPNOTSUPP;
577 577
@@ -593,7 +593,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
593 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); 593 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
594 } else { 594 } else {
595 dev_err(&netdev->dev, "Unknown board type: %x\n", 595 dev_err(&netdev->dev, "Unknown board type: %x\n",
596 adapter->ahw.port_type); 596 adapter->ahw->port_type);
597 } 597 }
598 return 0; 598 return 0;
599} 599}
@@ -639,8 +639,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
639 goto clear_it; 639 goto clear_it;
640 640
641 adapter->diag_cnt = 0; 641 adapter->diag_cnt = 0;
642 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, 642 ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func,
643 adapter->fw_hal_version, adapter->portnum, 643 adapter->fw_hal_version, adapter->ahw->pci_func,
644 0, 0, 0x00000011); 644 0, 0, 0x00000011);
645 if (ret) 645 if (ret)
646 goto done; 646 goto done;
@@ -749,14 +749,14 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
749 return; 749 return;
750 750
751 memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); 751 memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
752 ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func, 752 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
753 QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); 753 QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
754 if (ret) 754 if (ret)
755 return; 755 return;
756 756
757 qlcnic_fill_device_stats(&index, data, &port_stats.rx); 757 qlcnic_fill_device_stats(&index, data, &port_stats.rx);
758 758
759 ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func, 759 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
760 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); 760 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
761 if (ret) 761 if (ret)
762 return; 762 return;
@@ -831,48 +831,51 @@ static int qlcnic_set_tso(struct net_device *dev, u32 data)
831 return 0; 831 return 0;
832} 832}
833 833
834static int qlcnic_blink_led(struct net_device *dev, u32 val) 834static int qlcnic_set_led(struct net_device *dev,
835 enum ethtool_phys_id_state state)
835{ 836{
836 struct qlcnic_adapter *adapter = netdev_priv(dev); 837 struct qlcnic_adapter *adapter = netdev_priv(dev);
837 int max_sds_rings = adapter->max_sds_rings; 838 int max_sds_rings = adapter->max_sds_rings;
838 int dev_down = 0;
839 int ret;
840
841 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
842 dev_down = 1;
843 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
844 return -EIO;
845 839
846 ret = qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST); 840 switch (state) {
847 if (ret) { 841 case ETHTOOL_ID_ACTIVE:
848 clear_bit(__QLCNIC_RESETTING, &adapter->state); 842 adapter->blink_was_down = false;
849 return ret; 843 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
844 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
845 return -EIO;
846
847 if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) {
848 clear_bit(__QLCNIC_RESETTING, &adapter->state);
849 return -EIO;
850 }
851 adapter->blink_was_down = true;
850 } 852 }
851 }
852 853
853 ret = adapter->nic_ops->config_led(adapter, 1, 0xf); 854 if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0)
854 if (ret) { 855 return 0;
856
855 dev_err(&adapter->pdev->dev, 857 dev_err(&adapter->pdev->dev,
856 "Failed to set LED blink state.\n"); 858 "Failed to set LED blink state.\n");
857 goto done; 859 break;
858 }
859 860
860 msleep_interruptible(val * 1000); 861 case ETHTOOL_ID_INACTIVE:
862 if (adapter->nic_ops->config_led(adapter, 0, 0xf) == 0)
863 return 0;
861 864
862 ret = adapter->nic_ops->config_led(adapter, 0, 0xf);
863 if (ret) {
864 dev_err(&adapter->pdev->dev, 865 dev_err(&adapter->pdev->dev,
865 "Failed to reset LED blink state.\n"); 866 "Failed to reset LED blink state.\n");
866 goto done; 867 break;
868
869 default:
870 return -EINVAL;
867 } 871 }
868 872
869done: 873 if (adapter->blink_was_down) {
870 if (dev_down) {
871 qlcnic_diag_free_res(dev, max_sds_rings); 874 qlcnic_diag_free_res(dev, max_sds_rings);
872 clear_bit(__QLCNIC_RESETTING, &adapter->state); 875 clear_bit(__QLCNIC_RESETTING, &adapter->state);
873 } 876 }
874 return ret;
875 877
878 return -EIO;
876} 879}
877 880
878static void 881static void
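qlcnic_blink_led, which looped for a caller-supplied number of seconds, is replaced by qlcnic_set_led wired to .set_phys_id: the ethtool core now owns the timing and the driver only reacts to the ACTIVE/INACTIVE transitions (allocating diagnostic resources first if the interface was down, as above). A skeleton of that callback shape for a hypothetical driver, with invented private struct and helpers:

static int example_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	struct example_priv *priv = netdev_priv(dev);	/* assumed private data */

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return example_led_blink_start(priv);	/* assumed helper */
	case ETHTOOL_ID_INACTIVE:
		return example_led_blink_stop(priv);	/* assumed helper */
	default:
		return -EINVAL;
	}
}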
@@ -936,8 +939,8 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
936 */ 939 */
937 if (ethcoal->rx_coalesce_usecs > 0xffff || 940 if (ethcoal->rx_coalesce_usecs > 0xffff ||
938 ethcoal->rx_max_coalesced_frames > 0xffff || 941 ethcoal->rx_max_coalesced_frames > 0xffff ||
939 ethcoal->tx_coalesce_usecs > 0xffff || 942 ethcoal->tx_coalesce_usecs ||
940 ethcoal->tx_max_coalesced_frames > 0xffff || 943 ethcoal->tx_max_coalesced_frames ||
941 ethcoal->rx_coalesce_usecs_irq || 944 ethcoal->rx_coalesce_usecs_irq ||
942 ethcoal->rx_max_coalesced_frames_irq || 945 ethcoal->rx_max_coalesced_frames_irq ||
943 ethcoal->tx_coalesce_usecs_irq || 946 ethcoal->tx_coalesce_usecs_irq ||
@@ -959,21 +962,17 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
959 962
960 if (!ethcoal->rx_coalesce_usecs || 963 if (!ethcoal->rx_coalesce_usecs ||
961 !ethcoal->rx_max_coalesced_frames) { 964 !ethcoal->rx_max_coalesced_frames) {
962 adapter->coal.flags = QLCNIC_INTR_DEFAULT; 965 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
963 adapter->coal.normal.data.rx_time_us = 966 adapter->ahw->coal.rx_time_us =
964 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; 967 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
965 adapter->coal.normal.data.rx_packets = 968 adapter->ahw->coal.rx_packets =
966 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; 969 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
967 } else { 970 } else {
968 adapter->coal.flags = 0; 971 adapter->ahw->coal.flag = 0;
969 adapter->coal.normal.data.rx_time_us = 972 adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
970 ethcoal->rx_coalesce_usecs; 973 adapter->ahw->coal.rx_packets =
971 adapter->coal.normal.data.rx_packets = 974 ethcoal->rx_max_coalesced_frames;
972 ethcoal->rx_max_coalesced_frames;
973 } 975 }
974 adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
975 adapter->coal.normal.data.tx_packets =
976 ethcoal->tx_max_coalesced_frames;
977 976
978 qlcnic_config_intr_coalesce(adapter); 977 qlcnic_config_intr_coalesce(adapter);
979 978
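With firmware-side TX coalescing gone, only the RX pair survives and any non-zero TX parameter is rejected earlier in this function; for example, something along the lines of "ethtool -C ethX rx-usecs 3 rx-frames 256" remains a valid request, while one that also sets tx-usecs or tx-frames now fails with -EINVAL.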
@@ -988,12 +987,8 @@ static int qlcnic_get_intr_coalesce(struct net_device *netdev,
988 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 987 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
989 return -EINVAL; 988 return -EINVAL;
990 989
991 ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; 990 ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
992 ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; 991 ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
993 ethcoal->rx_max_coalesced_frames =
994 adapter->coal.normal.data.rx_packets;
995 ethcoal->tx_max_coalesced_frames =
996 adapter->coal.normal.data.tx_packets;
997 992
998 return 0; 993 return 0;
999} 994}
@@ -1006,22 +1001,28 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
1006 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) 1001 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
1007 return -EINVAL; 1002 return -EINVAL;
1008 1003
1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) 1004 if (data & ETH_FLAG_LRO) {
1010 return -EINVAL;
1011 1005
1012 if (!adapter->rx_csum) { 1006 if (netdev->features & NETIF_F_LRO)
1013 dev_info(&adapter->pdev->dev, "rx csum is off, " 1007 return 0;
1014 "cannot toggle lro\n");
1015 return -EINVAL;
1016 }
1017 1008
1018 if ((data & ETH_FLAG_LRO) && (netdev->features & NETIF_F_LRO)) 1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
1019 return 0; 1010 return -EINVAL;
1011
1012 if (!adapter->rx_csum) {
1013 dev_info(&adapter->pdev->dev, "rx csum is off, "
1014 "cannot toggle lro\n");
1015 return -EINVAL;
1016 }
1020 1017
1021 if (data & ETH_FLAG_LRO) {
1022 hw_lro = QLCNIC_LRO_ENABLED; 1018 hw_lro = QLCNIC_LRO_ENABLED;
1023 netdev->features |= NETIF_F_LRO; 1019 netdev->features |= NETIF_F_LRO;
1020
1024 } else { 1021 } else {
1022
1023 if (!(netdev->features & NETIF_F_LRO))
1024 return 0;
1025
1025 hw_lro = 0; 1026 hw_lro = 0;
1026 netdev->features &= ~NETIF_F_LRO; 1027 netdev->features &= ~NETIF_F_LRO;
1027 } 1028 }
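
Note the reordering above: the capability and rx-checksum checks now run only when LRO is being enabled, and a request that matches the current state returns early in either direction. The same control flow, stripped down (the two bool inputs stand in for the adapter's capability and rx_csum fields):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int lro_toggle(struct net_device *netdev, u32 data,
		      bool hw_lro_capable, bool rx_csum_on)
{
	if (data & ETH_FLAG_LRO) {
		if (netdev->features & NETIF_F_LRO)
			return 0;			/* already enabled */
		if (!hw_lro_capable || !rx_csum_on)
			return -EINVAL;			/* cannot enable */
		netdev->features |= NETIF_F_LRO;
	} else {
		if (!(netdev->features & NETIF_F_LRO))
			return 0;			/* already disabled */
		netdev->features &= ~NETIF_F_LRO;
	}
	return 0;		/* caller still programs the firmware */
}
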
@@ -1080,7 +1081,7 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1080 .set_coalesce = qlcnic_set_intr_coalesce, 1081 .set_coalesce = qlcnic_set_intr_coalesce,
1081 .get_flags = ethtool_op_get_flags, 1082 .get_flags = ethtool_op_get_flags,
1082 .set_flags = qlcnic_set_flags, 1083 .set_flags = qlcnic_set_flags,
1083 .phys_id = qlcnic_blink_led, 1084 .set_phys_id = qlcnic_set_led,
1084 .set_msglevel = qlcnic_set_msglevel, 1085 .set_msglevel = qlcnic_set_msglevel,
1085 .get_msglevel = qlcnic_get_msglevel, 1086 .get_msglevel = qlcnic_get_msglevel,
1086}; 1087};
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 616940f0a8d0..498cca92126a 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -457,7 +457,7 @@ int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
457 457
458 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 458 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
459 459
460 word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | 460 word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
461 ((u64)adapter->portnum << 16); 461 ((u64)adapter->portnum << 16);
462 req.req_hdr = cpu_to_le64(word); 462 req.req_hdr = cpu_to_le64(word);
463 463
@@ -532,33 +532,31 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
532 } 532 }
533} 533}
534 534
535#define QLCNIC_CONFIG_INTR_COALESCE 3
536
537/* 535/*
538 * Send the interrupt coalescing parameter set by ethtool to the card. 536 * Send the interrupt coalescing parameter set by ethtool to the card.
539 */ 537 */
540int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) 538int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
541{ 539{
542 struct qlcnic_nic_req req; 540 struct qlcnic_nic_req req;
543 u64 word[6]; 541 int rv;
544 int rv, i;
545 542
546 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 543 memset(&req, 0, sizeof(struct qlcnic_nic_req));
547 544
548 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 545 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
549 546
550 word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); 547 req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
551 req.req_hdr = cpu_to_le64(word[0]); 548 ((u64) adapter->portnum << 16));
552
553 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
554 for (i = 0; i < 6; i++)
555 req.words[i] = cpu_to_le64(word[i]);
556 549
550 req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
551 req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
552 ((u64) adapter->ahw->coal.rx_time_us) << 16);
553 req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
554 ((u64) adapter->ahw->coal.type) << 32 |
555 ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
557 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 556 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
558 if (rv != 0) 557 if (rv != 0)
559 dev_err(&adapter->netdev->dev, 558 dev_err(&adapter->netdev->dev,
560 "Could not send interrupt coalescing parameters\n"); 559 "Could not send interrupt coalescing parameters\n");
561
562 return rv; 560 return rv;
563} 561}
564 562
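
With the coalesce state reduced to a handful of fields, the request is now packed explicitly instead of memcpy'ing a six-word structure. The layout implied by the shifts above, as a standalone helper (field widths are read off those shifts; the word-5 fields are omitted here):

#include <linux/types.h>
#include <asm/byteorder.h>

/* words[] corresponds to req.words[] in the request above. */
static void pack_rx_coal(__le64 words[6], u8 flag,
			 u16 rx_packets, u16 rx_time_us)
{
	words[0] = cpu_to_le64((u64)flag << 32);	 /* bits 32..39 */
	words[2] = cpu_to_le64((u64)rx_packets |	 /* bits 0..15  */
			       ((u64)rx_time_us << 16)); /* bits 16..31 */
}
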
@@ -568,6 +566,9 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
568 u64 word; 566 u64 word;
569 int rv; 567 int rv;
570 568
569 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
570 return 0;
571
571 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 572 memset(&req, 0, sizeof(struct qlcnic_nic_req));
572 573
573 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 574 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -713,6 +714,9 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
713 u64 word; 714 u64 word;
714 int rv; 715 int rv;
715 716
717 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
718 return 0;
719
716 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 720 memset(&req, 0, sizeof(struct qlcnic_nic_req));
717 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 721 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
718 722
@@ -780,7 +784,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
780 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; 784 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
781 785
782 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { 786 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
783 *addr = adapter->ahw.pci_base0 + m->start_2M + 787 *addr = adapter->ahw->pci_base0 + m->start_2M +
784 (off - m->start_128M); 788 (off - m->start_128M);
785 return 0; 789 return 0;
786 } 790 }
@@ -788,7 +792,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
788 /* 792 /*
789 * Not in direct map, use crb window 793 * Not in direct map, use crb window
790 */ 794 */
791 *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); 795 *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
792 return 1; 796 return 1;
793} 797}
794 798
@@ -801,7 +805,7 @@ static int
801qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) 805qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
802{ 806{
803 u32 window; 807 u32 window;
804 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; 808 void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
805 809
806 off -= QLCNIC_PCI_CRBSPACE; 810 off -= QLCNIC_PCI_CRBSPACE;
807 811
@@ -838,13 +842,13 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
838 842
839 if (rv > 0) { 843 if (rv > 0) {
840 /* indirect access */ 844 /* indirect access */
841 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 845 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
842 crb_win_lock(adapter); 846 crb_win_lock(adapter);
843 rv = qlcnic_pci_set_crbwindow_2M(adapter, off); 847 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
844 if (!rv) 848 if (!rv)
845 writel(data, addr); 849 writel(data, addr);
846 crb_win_unlock(adapter); 850 crb_win_unlock(adapter);
847 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 851 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
848 return rv; 852 return rv;
849 } 853 }
850 854
@@ -869,12 +873,12 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
869 873
870 if (rv > 0) { 874 if (rv > 0) {
871 /* indirect access */ 875 /* indirect access */
872 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 876 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
873 crb_win_lock(adapter); 877 crb_win_lock(adapter);
874 if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) 878 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
875 data = readl(addr); 879 data = readl(addr);
876 crb_win_unlock(adapter); 880 crb_win_unlock(adapter);
877 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 881 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
878 return data; 882 return data;
879 } 883 }
880 884
@@ -904,9 +908,9 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
904 908
905 window = OCM_WIN_P3P(addr); 909 window = OCM_WIN_P3P(addr);
906 910
907 writel(window, adapter->ahw.ocm_win_crb); 911 writel(window, adapter->ahw->ocm_win_crb);
908 /* read back to flush */ 912 /* read back to flush */
909 readl(adapter->ahw.ocm_win_crb); 913 readl(adapter->ahw->ocm_win_crb);
910 914
911 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); 915 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
912 return 0; 916 return 0;
@@ -920,13 +924,13 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
920 int ret; 924 int ret;
921 u32 start; 925 u32 start;
922 926
923 mutex_lock(&adapter->ahw.mem_lock); 927 mutex_lock(&adapter->ahw->mem_lock);
924 928
925 ret = qlcnic_pci_set_window_2M(adapter, off, &start); 929 ret = qlcnic_pci_set_window_2M(adapter, off, &start);
926 if (ret != 0) 930 if (ret != 0)
927 goto unlock; 931 goto unlock;
928 932
929 addr = adapter->ahw.pci_base0 + start; 933 addr = adapter->ahw->pci_base0 + start;
930 934
931 if (op == 0) /* read */ 935 if (op == 0) /* read */
932 *data = readq(addr); 936 *data = readq(addr);
@@ -934,7 +938,7 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
934 writeq(*data, addr); 938 writeq(*data, addr);
935 939
936unlock: 940unlock:
937 mutex_unlock(&adapter->ahw.mem_lock); 941 mutex_unlock(&adapter->ahw->mem_lock);
938 942
939 return ret; 943 return ret;
940} 944}
@@ -942,23 +946,23 @@ unlock:
942void 946void
943qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) 947qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
944{ 948{
945 void __iomem *addr = adapter->ahw.pci_base0 + 949 void __iomem *addr = adapter->ahw->pci_base0 +
946 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); 950 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
947 951
948 mutex_lock(&adapter->ahw.mem_lock); 952 mutex_lock(&adapter->ahw->mem_lock);
949 *data = readq(addr); 953 *data = readq(addr);
950 mutex_unlock(&adapter->ahw.mem_lock); 954 mutex_unlock(&adapter->ahw->mem_lock);
951} 955}
952 956
953void 957void
954qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) 958qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
955{ 959{
956 void __iomem *addr = adapter->ahw.pci_base0 + 960 void __iomem *addr = adapter->ahw->pci_base0 +
957 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); 961 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
958 962
959 mutex_lock(&adapter->ahw.mem_lock); 963 mutex_lock(&adapter->ahw->mem_lock);
960 writeq(data, addr); 964 writeq(data, addr);
961 mutex_unlock(&adapter->ahw.mem_lock); 965 mutex_unlock(&adapter->ahw->mem_lock);
962} 966}
963 967
964#define MAX_CTL_CHECK 1000 968#define MAX_CTL_CHECK 1000
@@ -997,7 +1001,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
997correct: 1001correct:
998 off8 = off & ~0xf; 1002 off8 = off & ~0xf;
999 1003
1000 mutex_lock(&adapter->ahw.mem_lock); 1004 mutex_lock(&adapter->ahw->mem_lock);
1001 1005
1002 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); 1006 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1003 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 1007 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
@@ -1049,7 +1053,7 @@ correct:
1049 ret = 0; 1053 ret = 0;
1050 1054
1051done: 1055done:
1052 mutex_unlock(&adapter->ahw.mem_lock); 1056 mutex_unlock(&adapter->ahw->mem_lock);
1053 1057
1054 return ret; 1058 return ret;
1055} 1059}
@@ -1091,7 +1095,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1091correct: 1095correct:
1092 off8 = off & ~0xf; 1096 off8 = off & ~0xf;
1093 1097
1094 mutex_lock(&adapter->ahw.mem_lock); 1098 mutex_lock(&adapter->ahw->mem_lock);
1095 1099
1096 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); 1100 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1097 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 1101 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
@@ -1121,7 +1125,7 @@ correct:
1121 ret = 0; 1125 ret = 0;
1122 } 1126 }
1123 1127
1124 mutex_unlock(&adapter->ahw.mem_lock); 1128 mutex_unlock(&adapter->ahw->mem_lock);
1125 1129
1126 return ret; 1130 return ret;
1127} 1131}
@@ -1145,7 +1149,7 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1145 if (qlcnic_rom_fast_read(adapter, offset, &board_type)) 1149 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1146 return -EIO; 1150 return -EIO;
1147 1151
1148 adapter->ahw.board_type = board_type; 1152 adapter->ahw->board_type = board_type;
1149 1153
1150 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { 1154 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1151 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); 1155 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
@@ -1164,20 +1168,20 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1164 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: 1168 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1165 case QLCNIC_BRDTYPE_P3P_10G_XFP: 1169 case QLCNIC_BRDTYPE_P3P_10G_XFP:
1166 case QLCNIC_BRDTYPE_P3P_10000_BASE_T: 1170 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1167 adapter->ahw.port_type = QLCNIC_XGBE; 1171 adapter->ahw->port_type = QLCNIC_XGBE;
1168 break; 1172 break;
1169 case QLCNIC_BRDTYPE_P3P_REF_QG: 1173 case QLCNIC_BRDTYPE_P3P_REF_QG:
1170 case QLCNIC_BRDTYPE_P3P_4_GB: 1174 case QLCNIC_BRDTYPE_P3P_4_GB:
1171 case QLCNIC_BRDTYPE_P3P_4_GB_MM: 1175 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1172 adapter->ahw.port_type = QLCNIC_GBE; 1176 adapter->ahw->port_type = QLCNIC_GBE;
1173 break; 1177 break;
1174 case QLCNIC_BRDTYPE_P3P_10G_TP: 1178 case QLCNIC_BRDTYPE_P3P_10G_TP:
1175 adapter->ahw.port_type = (adapter->portnum < 2) ? 1179 adapter->ahw->port_type = (adapter->portnum < 2) ?
1176 QLCNIC_XGBE : QLCNIC_GBE; 1180 QLCNIC_XGBE : QLCNIC_GBE;
1177 break; 1181 break;
1178 default: 1182 default:
1179 dev_err(&pdev->dev, "unknown board type %x\n", board_type); 1183 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1180 adapter->ahw.port_type = QLCNIC_XGBE; 1184 adapter->ahw->port_type = QLCNIC_XGBE;
1181 break; 1185 break;
1182 } 1186 }
1183 1187
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index a7f1d5b7e811..4ec0eeb6bff9 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -94,7 +94,7 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
94 struct qlcnic_rx_buffer *rx_buf; 94 struct qlcnic_rx_buffer *rx_buf;
95 int i, ring; 95 int i, ring;
96 96
97 recv_ctx = &adapter->recv_ctx; 97 recv_ctx = adapter->recv_ctx;
98 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 98 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
99 rds_ring = &recv_ctx->rds_rings[ring]; 99 rds_ring = &recv_ctx->rds_rings[ring];
100 for (i = 0; i < rds_ring->num_desc; ++i) { 100 for (i = 0; i < rds_ring->num_desc; ++i) {
@@ -119,7 +119,7 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
119 struct qlcnic_rx_buffer *rx_buf; 119 struct qlcnic_rx_buffer *rx_buf;
120 int i, ring; 120 int i, ring;
121 121
122 recv_ctx = &adapter->recv_ctx; 122 recv_ctx = adapter->recv_ctx;
123 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 123 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
124 rds_ring = &recv_ctx->rds_rings[ring]; 124 rds_ring = &recv_ctx->rds_rings[ring];
125 125
@@ -173,7 +173,7 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
173 struct qlcnic_host_tx_ring *tx_ring; 173 struct qlcnic_host_tx_ring *tx_ring;
174 int ring; 174 int ring;
175 175
176 recv_ctx = &adapter->recv_ctx; 176 recv_ctx = adapter->recv_ctx;
177 177
178 if (recv_ctx->rds_rings == NULL) 178 if (recv_ctx->rds_rings == NULL)
179 goto skip_rds; 179 goto skip_rds;
@@ -226,7 +226,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
226 } 226 }
227 tx_ring->cmd_buf_arr = cmd_buf_arr; 227 tx_ring->cmd_buf_arr = cmd_buf_arr;
228 228
229 recv_ctx = &adapter->recv_ctx; 229 recv_ctx = adapter->recv_ctx;
230 230
231 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); 231 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
232 rds_ring = kzalloc(size, GFP_KERNEL); 232 rds_ring = kzalloc(size, GFP_KERNEL);
@@ -864,7 +864,7 @@ nomn:
864 for (i = 0; i < entries; i++) { 864 for (i = 0; i < entries; i++) {
865 865
866 __le32 flags, file_chiprev, offs; 866 __le32 flags, file_chiprev, offs;
867 u8 chiprev = adapter->ahw.revision_id; 867 u8 chiprev = adapter->ahw->revision_id;
868 u32 flagbit; 868 u32 flagbit;
869 869
870 offs = cpu_to_le32(ptab_descr->findex) + 870 offs = cpu_to_le32(ptab_descr->findex) +
@@ -1130,9 +1130,20 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1130 } else { 1130 } else {
1131 u64 data; 1131 u64 data;
1132 u32 hi, lo; 1132 u32 hi, lo;
1133 1133 int ret;
1134 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; 1134 struct qlcnic_flt_entry bootld_entry;
1135 flashaddr = QLCNIC_BOOTLD_START; 1135
1136 ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
1137 &bootld_entry);
1138 if (!ret) {
1139 size = bootld_entry.size / 8;
1140 flashaddr = bootld_entry.start_addr;
1141 } else {
1142 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1143 flashaddr = QLCNIC_BOOTLD_START;
1144 dev_info(&pdev->dev,
1145 "using legacy method to get flash fw region");
1146 }
1136 1147
1137 for (i = 0; i < size; i++) { 1148 for (i = 0; i < size; i++) {
1138 if (qlcnic_rom_fast_read(adapter, 1149 if (qlcnic_rom_fast_read(adapter,
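
The loader change above consults the flash layout table for the bootloader region first and only falls back to the fixed legacy offsets when that lookup fails. The decision in isolation (flt_lookup() and the legacy bounds are parameters here, not driver names):

#include <linux/types.h>

struct flash_region { u32 start_addr; u32 size; };

static void pick_bootld_region(int (*flt_lookup)(struct flash_region *r),
			       u32 legacy_start, u32 legacy_end,
			       u32 *flashaddr, u32 *nr_qwords)
{
	struct flash_region r;

	if (!flt_lookup(&r)) {			/* FLT entry found */
		*flashaddr = r.start_addr;
		*nr_qwords = r.size / 8;	/* ROM is read 8 bytes at a time */
	} else {				/* legacy fixed window */
		*flashaddr = legacy_start;
		*nr_qwords = (legacy_end - legacy_start) / 8;
	}
}
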
@@ -1394,7 +1405,7 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1394 return skb; 1405 return skb;
1395} 1406}
1396 1407
1397static int 1408static inline int
1398qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, 1409qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
1399 u16 *vlan_tag) 1410 u16 *vlan_tag)
1400{ 1411{
@@ -1425,7 +1436,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1425 int ring, u64 sts_data0) 1436 int ring, u64 sts_data0)
1426{ 1437{
1427 struct net_device *netdev = adapter->netdev; 1438 struct net_device *netdev = adapter->netdev;
1428 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1439 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1429 struct qlcnic_rx_buffer *buffer; 1440 struct qlcnic_rx_buffer *buffer;
1430 struct sk_buff *skb; 1441 struct sk_buff *skb;
1431 struct qlcnic_host_rds_ring *rds_ring; 1442 struct qlcnic_host_rds_ring *rds_ring;
@@ -1467,10 +1478,10 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1467 1478
1468 skb->protocol = eth_type_trans(skb, netdev); 1479 skb->protocol = eth_type_trans(skb, netdev);
1469 1480
1470 if ((vid != 0xffff) && adapter->vlgrp) 1481 if (vid != 0xffff)
1471 vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb); 1482 __vlan_hwaccel_put_tag(skb, vid);
1472 else 1483
1473 napi_gro_receive(&sds_ring->napi, skb); 1484 napi_gro_receive(&sds_ring->napi, skb);
1474 1485
1475 adapter->stats.rx_pkts++; 1486 adapter->stats.rx_pkts++;
1476 adapter->stats.rxbytes += length; 1487 adapter->stats.rxbytes += length;
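
With the vlan_group pointer gone, the RX completion path simply attaches the tag to the skb and always hands the frame to GRO; the core strips or forwards the tag from there. The same pattern in isolation, using the two-argument __vlan_hwaccel_put_tag() of this kernel series:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void rx_deliver(struct napi_struct *napi, struct sk_buff *skb, u16 vid)
{
	if (vid != 0xffff)			/* 0xffff means untagged */
		__vlan_hwaccel_put_tag(skb, vid);
	napi_gro_receive(napi, skb);
}
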
@@ -1488,7 +1499,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1488 int ring, u64 sts_data0, u64 sts_data1) 1499 int ring, u64 sts_data0, u64 sts_data1)
1489{ 1500{
1490 struct net_device *netdev = adapter->netdev; 1501 struct net_device *netdev = adapter->netdev;
1491 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1502 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1492 struct qlcnic_rx_buffer *buffer; 1503 struct qlcnic_rx_buffer *buffer;
1493 struct sk_buff *skb; 1504 struct sk_buff *skb;
1494 struct qlcnic_host_rds_ring *rds_ring; 1505 struct qlcnic_host_rds_ring *rds_ring;
@@ -1552,10 +1563,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1552 1563
1553 length = skb->len; 1564 length = skb->len;
1554 1565
1555 if ((vid != 0xffff) && adapter->vlgrp) 1566 if (vid != 0xffff)
1556 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid); 1567 __vlan_hwaccel_put_tag(skb, vid);
1557 else 1568 netif_receive_skb(skb);
1558 netif_receive_skb(skb);
1559 1569
1560 adapter->stats.lro_pkts++; 1570 adapter->stats.lro_pkts++;
1561 adapter->stats.lrobytes += length; 1571 adapter->stats.lrobytes += length;
@@ -1625,7 +1635,7 @@ skip:
1625 1635
1626 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 1636 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1627 struct qlcnic_host_rds_ring *rds_ring = 1637 struct qlcnic_host_rds_ring *rds_ring =
1628 &adapter->recv_ctx.rds_rings[ring]; 1638 &adapter->recv_ctx->rds_rings[ring];
1629 1639
1630 if (!list_empty(&sds_ring->free_list[ring])) { 1640 if (!list_empty(&sds_ring->free_list[ring])) {
1631 list_for_each(cur, &sds_ring->free_list[ring]) { 1641 list_for_each(cur, &sds_ring->free_list[ring]) {
@@ -1651,12 +1661,13 @@ skip:
1651} 1661}
1652 1662
1653void 1663void
1654qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid, 1664qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1655 struct qlcnic_host_rds_ring *rds_ring) 1665 struct qlcnic_host_rds_ring *rds_ring)
1656{ 1666{
1657 struct rcv_desc *pdesc; 1667 struct rcv_desc *pdesc;
1658 struct qlcnic_rx_buffer *buffer; 1668 struct qlcnic_rx_buffer *buffer;
1659 int producer, count = 0; 1669 int count = 0;
1670 u32 producer;
1660 struct list_head *head; 1671 struct list_head *head;
1661 1672
1662 producer = rds_ring->producer; 1673 producer = rds_ring->producer;
@@ -1696,7 +1707,8 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1696{ 1707{
1697 struct rcv_desc *pdesc; 1708 struct rcv_desc *pdesc;
1698 struct qlcnic_rx_buffer *buffer; 1709 struct qlcnic_rx_buffer *buffer;
1699 int producer, count = 0; 1710 int count = 0;
1711 uint32_t producer;
1700 struct list_head *head; 1712 struct list_head *head;
1701 1713
1702 if (!spin_trylock(&rds_ring->lock)) 1714 if (!spin_trylock(&rds_ring->lock))
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index cd88c7e1bfa9..7f9edb2f1474 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/swab.h> 14#include <linux/swab.h>
15#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16#include <linux/if_vlan.h>
17#include <net/ip.h> 16#include <net/ip.h>
18#include <linux/ipv6.h> 17#include <linux/ipv6.h>
19#include <linux/inetdevice.h> 18#include <linux/inetdevice.h>
@@ -98,6 +97,9 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
98static int qlcnicvf_start_firmware(struct qlcnic_adapter *); 97static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
99static void qlcnic_set_netdev_features(struct qlcnic_adapter *, 98static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
100 struct qlcnic_esw_func_cfg *); 99 struct qlcnic_esw_func_cfg *);
100static void qlcnic_vlan_rx_add(struct net_device *, u16);
101static void qlcnic_vlan_rx_del(struct net_device *, u16);
102
101/* PCI Device ID Table */ 103/* PCI Device ID Table */
102#define ENTRY(device) \ 104#define ENTRY(device) \
103 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ 105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -113,7 +115,7 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
113MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); 115MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
114 116
115 117
116void 118inline void
117qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 119qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
118 struct qlcnic_host_tx_ring *tx_ring) 120 struct qlcnic_host_tx_ring *tx_ring)
119{ 121{
@@ -169,7 +171,7 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
169{ 171{
170 int ring; 172 int ring;
171 struct qlcnic_host_sds_ring *sds_ring; 173 struct qlcnic_host_sds_ring *sds_ring;
172 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 174 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
173 175
174 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) 176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
175 return -ENOMEM; 177 return -ENOMEM;
@@ -193,14 +195,14 @@ qlcnic_napi_del(struct qlcnic_adapter *adapter)
193{ 195{
194 int ring; 196 int ring;
195 struct qlcnic_host_sds_ring *sds_ring; 197 struct qlcnic_host_sds_ring *sds_ring;
196 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
197 199
198 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
199 sds_ring = &recv_ctx->sds_rings[ring]; 201 sds_ring = &recv_ctx->sds_rings[ring];
200 netif_napi_del(&sds_ring->napi); 202 netif_napi_del(&sds_ring->napi);
201 } 203 }
202 204
203 qlcnic_free_sds_rings(&adapter->recv_ctx); 205 qlcnic_free_sds_rings(adapter->recv_ctx);
204} 206}
205 207
206static void 208static void
@@ -208,7 +210,7 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
208{ 210{
209 int ring; 211 int ring;
210 struct qlcnic_host_sds_ring *sds_ring; 212 struct qlcnic_host_sds_ring *sds_ring;
211 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 213 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
212 214
213 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 215 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
214 return; 216 return;
@@ -225,7 +227,7 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
225{ 227{
226 int ring; 228 int ring;
227 struct qlcnic_host_sds_ring *sds_ring; 229 struct qlcnic_host_sds_ring *sds_ring;
228 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 230 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
229 231
230 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 232 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
231 return; 233 return;
@@ -317,13 +319,6 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
317 return 0; 319 return 0;
318} 320}
319 321
320static void qlcnic_vlan_rx_register(struct net_device *netdev,
321 struct vlan_group *grp)
322{
323 struct qlcnic_adapter *adapter = netdev_priv(netdev);
324 adapter->vlgrp = grp;
325}
326
327static const struct net_device_ops qlcnic_netdev_ops = { 322static const struct net_device_ops qlcnic_netdev_ops = {
328 .ndo_open = qlcnic_open, 323 .ndo_open = qlcnic_open,
329 .ndo_stop = qlcnic_close, 324 .ndo_stop = qlcnic_close,
@@ -334,7 +329,8 @@ static const struct net_device_ops qlcnic_netdev_ops = {
334 .ndo_set_mac_address = qlcnic_set_mac, 329 .ndo_set_mac_address = qlcnic_set_mac,
335 .ndo_change_mtu = qlcnic_change_mtu, 330 .ndo_change_mtu = qlcnic_change_mtu,
336 .ndo_tx_timeout = qlcnic_tx_timeout, 331 .ndo_tx_timeout = qlcnic_tx_timeout,
337 .ndo_vlan_rx_register = qlcnic_vlan_rx_register, 332 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
333 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
338#ifdef CONFIG_NET_POLL_CONTROLLER 334#ifdef CONFIG_NET_POLL_CONTROLLER
339 .ndo_poll_controller = qlcnic_poll_controller, 335 .ndo_poll_controller = qlcnic_poll_controller,
340#endif 336#endif
@@ -359,7 +355,7 @@ qlcnic_setup_intr(struct qlcnic_adapter *adapter)
359 struct pci_dev *pdev = adapter->pdev; 355 struct pci_dev *pdev = adapter->pdev;
360 int err, num_msix; 356 int err, num_msix;
361 357
362 if (adapter->rss_supported) { 358 if (adapter->msix_supported) {
363 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? 359 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
364 MSIX_ENTRIES_PER_ADAPTER : 2; 360 MSIX_ENTRIES_PER_ADAPTER : 2;
365 } else 361 } else
@@ -369,7 +365,7 @@ qlcnic_setup_intr(struct qlcnic_adapter *adapter)
369 365
370 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); 366 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
371 367
372 legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; 368 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
373 369
374 adapter->int_vec_bit = legacy_intrp->int_vec_bit; 370 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
375 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, 371 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
@@ -391,8 +387,7 @@ qlcnic_setup_intr(struct qlcnic_adapter *adapter)
391 adapter->flags |= QLCNIC_MSIX_ENABLED; 387 adapter->flags |= QLCNIC_MSIX_ENABLED;
392 qlcnic_set_msix_bit(pdev, 1); 388 qlcnic_set_msix_bit(pdev, 1);
393 389
394 if (adapter->rss_supported) 390 adapter->max_sds_rings = num_msix;
395 adapter->max_sds_rings = num_msix;
396 391
397 dev_info(&pdev->dev, "using msi-x interrupts\n"); 392 dev_info(&pdev->dev, "using msi-x interrupts\n");
398 return; 393 return;
@@ -407,7 +402,7 @@ qlcnic_setup_intr(struct qlcnic_adapter *adapter)
407 if (use_msi && !pci_enable_msi(pdev)) { 402 if (use_msi && !pci_enable_msi(pdev)) {
408 adapter->flags |= QLCNIC_MSI_ENABLED; 403 adapter->flags |= QLCNIC_MSI_ENABLED;
409 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, 404 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
410 msi_tgt_status[adapter->ahw.pci_func]); 405 msi_tgt_status[adapter->ahw->pci_func]);
411 dev_info(&pdev->dev, "using msi interrupts\n"); 406 dev_info(&pdev->dev, "using msi interrupts\n");
412 adapter->msix_entries[0].vector = pdev->irq; 407 adapter->msix_entries[0].vector = pdev->irq;
413 return; 408 return;
@@ -429,8 +424,8 @@ qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
429static void 424static void
430qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) 425qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
431{ 426{
432 if (adapter->ahw.pci_base0 != NULL) 427 if (adapter->ahw->pci_base0 != NULL)
433 iounmap(adapter->ahw.pci_base0); 428 iounmap(adapter->ahw->pci_base0);
434} 429}
435 430
436static int 431static int
@@ -464,8 +459,10 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
464 459
465 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 460 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
466 pfn = pci_info[i].id; 461 pfn = pci_info[i].id;
467 if (pfn > QLCNIC_MAX_PCI_FUNC) 462 if (pfn > QLCNIC_MAX_PCI_FUNC) {
468 return QL_STATUS_INVALID_PARAM; 463 ret = QL_STATUS_INVALID_PARAM;
464 goto err_eswitch;
465 }
469 adapter->npars[pfn].active = (u8)pci_info[i].active; 466 adapter->npars[pfn].active = (u8)pci_info[i].active;
470 adapter->npars[pfn].type = (u8)pci_info[i].type; 467 adapter->npars[pfn].type = (u8)pci_info[i].type;
471 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port; 468 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
@@ -498,7 +495,7 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
498 u32 ref_count; 495 u32 ref_count;
499 int i, ret = 1; 496 int i, ret = 1;
500 u32 data = QLCNIC_MGMT_FUNC; 497 u32 data = QLCNIC_MGMT_FUNC;
501 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 498 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
502 499
503 /* If other drivers are not in use set their privilege level */ 500 /* If other drivers are not in use set their privilege level */
504 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); 501 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
@@ -510,16 +507,16 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
510 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 507 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
511 id = i; 508 id = i;
512 if (adapter->npars[i].type != QLCNIC_TYPE_NIC || 509 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
513 id == adapter->ahw.pci_func) 510 id == adapter->ahw->pci_func)
514 continue; 511 continue;
515 data |= (qlcnic_config_npars & 512 data |= (qlcnic_config_npars &
516 QLC_DEV_SET_DRV(0xf, id)); 513 QLC_DEV_SET_DRV(0xf, id));
517 } 514 }
518 } else { 515 } else {
519 data = readl(priv_op); 516 data = readl(priv_op);
520 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) | 517 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
521 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, 518 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
522 adapter->ahw.pci_func)); 519 adapter->ahw->pci_func));
523 } 520 }
524 writel(data, priv_op); 521 writel(data, priv_op);
525 qlcnic_api_unlock(adapter); 522 qlcnic_api_unlock(adapter);
@@ -537,22 +534,23 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
537 u32 op_mode, priv_level; 534 u32 op_mode, priv_level;
538 535
539 /* Determine FW API version */ 536 /* Determine FW API version */
540 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API); 537 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
538 QLCNIC_FW_API);
541 539
542 /* Find PCI function number */ 540 /* Find PCI function number */
543 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); 541 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
544 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE; 542 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
545 msix_base = readl(msix_base_addr); 543 msix_base = readl(msix_base_addr);
546 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; 544 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
547 adapter->ahw.pci_func = func; 545 adapter->ahw->pci_func = func;
548 546
549 /* Determine function privilege level */ 547 /* Determine function privilege level */
550 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 548 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
551 op_mode = readl(priv_op); 549 op_mode = readl(priv_op);
552 if (op_mode == QLC_DEV_DRV_DEFAULT) 550 if (op_mode == QLC_DEV_DRV_DEFAULT)
553 priv_level = QLCNIC_MGMT_FUNC; 551 priv_level = QLCNIC_MGMT_FUNC;
554 else 552 else
555 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 553 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
556 554
557 if (priv_level == QLCNIC_NON_PRIV_FUNC) { 555 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
558 adapter->op_mode = QLCNIC_NON_PRIV_FUNC; 556 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
@@ -591,13 +589,14 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
591 589
592 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 590 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
593 591
594 adapter->ahw.pci_base0 = mem_ptr0; 592 adapter->ahw->pci_base0 = mem_ptr0;
595 adapter->ahw.pci_len0 = pci_len0; 593 adapter->ahw->pci_len0 = pci_len0;
596 594
597 qlcnic_check_vf(adapter); 595 qlcnic_check_vf(adapter);
598 596
599 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, 597 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
600 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func))); 598 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
599 adapter->ahw->pci_func)));
601 600
602 return 0; 601 return 0;
603} 602}
@@ -639,7 +638,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
639 638
640 dev_info(&pdev->dev, "firmware v%d.%d.%d\n", 639 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
641 fw_major, fw_minor, fw_build); 640 fw_major, fw_minor, fw_build);
642 if (adapter->ahw.port_type == QLCNIC_XGBE) { 641 if (adapter->ahw->port_type == QLCNIC_XGBE) {
643 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 642 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
644 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; 643 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
645 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; 644 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
@@ -651,7 +650,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
651 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; 650 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
652 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; 651 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
653 652
654 } else if (adapter->ahw.port_type == QLCNIC_GBE) { 653 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
655 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; 654 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
656 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 655 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
657 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 656 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
@@ -659,7 +658,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
659 } 658 }
660 659
661 adapter->msix_supported = !!use_msi_x; 660 adapter->msix_supported = !!use_msi_x;
662 adapter->rss_supported = !!use_msi_x;
663 661
664 adapter->num_txd = MAX_CMD_DESCRIPTORS; 662 adapter->num_txd = MAX_CMD_DESCRIPTORS;
665 663
@@ -672,7 +670,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
672 int err; 670 int err;
673 struct qlcnic_info nic_info; 671 struct qlcnic_info nic_info;
674 672
675 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func); 673 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
676 if (err) 674 if (err)
677 return err; 675 return err;
678 676
@@ -708,6 +706,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
708} 706}
709 707
710static void 708static void
709qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
710{
711 struct qlcnic_adapter *adapter = netdev_priv(netdev);
712 set_bit(vid, adapter->vlans);
713}
714
715static void
716qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
717{
718 struct qlcnic_adapter *adapter = netdev_priv(netdev);
719
720 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
721 clear_bit(vid, adapter->vlans);
722}
723
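
The new ndo callbacks above record active VIDs in a per-adapter bitmap instead of holding a struct vlan_group. A self-contained version of that bookkeeping (the struct here is a placeholder for the adapter's private data):

#include <linux/bitops.h>
#include <linux/if_vlan.h>

struct vid_state {
	DECLARE_BITMAP(vlans, VLAN_N_VID);	/* one bit per possible VID */
};

static void vid_add(struct vid_state *s, u16 vid)
{
	set_bit(vid, s->vlans);
}

static void vid_del(struct vid_state *s, u16 vid)
{
	clear_bit(vid, s->vlans);	/* caller flushes addresses learned on vid */
}
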
724static void
711qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, 725qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
712 struct qlcnic_esw_func_cfg *esw_cfg) 726 struct qlcnic_esw_func_cfg *esw_cfg)
713{ 727{
@@ -734,7 +748,7 @@ qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
734 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 748 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
735 return 0; 749 return 0;
736 750
737 esw_cfg.pci_func = adapter->ahw.pci_func; 751 esw_cfg.pci_func = adapter->ahw->pci_func;
738 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg)) 752 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
739 return -EIO; 753 return -EIO;
740 qlcnic_set_vlan_config(adapter, &esw_cfg); 754 qlcnic_set_vlan_config(adapter, &esw_cfg);
@@ -753,13 +767,14 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
753 features = (NETIF_F_SG | NETIF_F_IP_CSUM | 767 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
754 NETIF_F_IPV6_CSUM | NETIF_F_GRO); 768 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
755 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | 769 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
756 NETIF_F_IPV6_CSUM); 770 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
757 771
758 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { 772 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
759 features |= (NETIF_F_TSO | NETIF_F_TSO6); 773 features |= (NETIF_F_TSO | NETIF_F_TSO6);
760 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); 774 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
761 } 775 }
762 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 776
777 if (netdev->features & NETIF_F_LRO)
763 features |= NETIF_F_LRO; 778 features |= NETIF_F_LRO;
764 779
765 if (esw_cfg->offload_flags & BIT_0) { 780 if (esw_cfg->offload_flags & BIT_0) {
@@ -791,14 +806,14 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
791 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) 806 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
792 return 0; 807 return 0;
793 808
794 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 809 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
795 op_mode = readl(priv_op); 810 op_mode = readl(priv_op);
796 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 811 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
797 812
798 if (op_mode == QLC_DEV_DRV_DEFAULT) 813 if (op_mode == QLC_DEV_DRV_DEFAULT)
799 priv_level = QLCNIC_MGMT_FUNC; 814 priv_level = QLCNIC_MGMT_FUNC;
800 else 815 else
801 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 816 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
802 817
803 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 818 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
804 if (priv_level == QLCNIC_MGMT_FUNC) { 819 if (priv_level == QLCNIC_MGMT_FUNC) {
@@ -1038,7 +1053,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1038 1053
1039 unsigned long flags = 0; 1054 unsigned long flags = 0;
1040 struct net_device *netdev = adapter->netdev; 1055 struct net_device *netdev = adapter->netdev;
1041 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1056 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1042 1057
1043 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1058 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1044 handler = qlcnic_tmp_intr; 1059 handler = qlcnic_tmp_intr;
@@ -1075,7 +1090,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
1075 int ring; 1090 int ring;
1076 struct qlcnic_host_sds_ring *sds_ring; 1091 struct qlcnic_host_sds_ring *sds_ring;
1077 1092
1078 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1093 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1079 1094
1080 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1095 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1081 sds_ring = &recv_ctx->sds_rings[ring]; 1096 sds_ring = &recv_ctx->sds_rings[ring];
@@ -1083,20 +1098,6 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
1083 } 1098 }
1084} 1099}
1085 1100
1086static void
1087qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1088{
1089 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1090 adapter->coal.normal.data.rx_time_us =
1091 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1092 adapter->coal.normal.data.rx_packets =
1093 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1094 adapter->coal.normal.data.tx_time_us =
1095 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1096 adapter->coal.normal.data.tx_packets =
1097 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1098}
1099
1100static int 1101static int
1101__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) 1102__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1102{ 1103{
@@ -1115,14 +1116,14 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1115 return -EIO; 1116 return -EIO;
1116 1117
1117 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 1118 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1118 rds_ring = &adapter->recv_ctx.rds_rings[ring]; 1119 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1119 qlcnic_post_rx_buffers(adapter, ring, rds_ring); 1120 qlcnic_post_rx_buffers(adapter, rds_ring);
1120 } 1121 }
1121 1122
1122 qlcnic_set_multi(netdev); 1123 qlcnic_set_multi(netdev);
1123 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); 1124 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1124 1125
1125 adapter->ahw.linkup = 0; 1126 adapter->ahw->linkup = 0;
1126 1127
1127 if (adapter->max_sds_rings > 1) 1128 if (adapter->max_sds_rings > 1)
1128 qlcnic_config_rss(adapter, 1); 1129 qlcnic_config_rss(adapter, 1);
@@ -1230,8 +1231,6 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
1230 goto err_out_free_hw; 1231 goto err_out_free_hw;
1231 } 1232 }
1232 1233
1233 qlcnic_init_coalesce_defaults(adapter);
1234
1235 qlcnic_create_sysfs_entries(adapter); 1234 qlcnic_create_sysfs_entries(adapter);
1236 1235
1237 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; 1236 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
@@ -1272,7 +1271,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1272 clear_bit(__QLCNIC_DEV_UP, &adapter->state); 1271 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1273 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1272 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1274 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1273 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1275 sds_ring = &adapter->recv_ctx.sds_rings[ring]; 1274 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1276 qlcnic_disable_int(sds_ring); 1275 qlcnic_disable_int(sds_ring);
1277 } 1276 }
1278 } 1277 }
@@ -1293,6 +1292,44 @@ out:
1293 netif_device_attach(netdev); 1292 netif_device_attach(netdev);
1294} 1293}
1295 1294
1295static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1296{
1297 int err = 0;
1298 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1299 GFP_KERNEL);
1300 if (!adapter->ahw) {
1301 dev_err(&adapter->pdev->dev,
1302 "Failed to allocate recv ctx resources for adapter\n");
1303 err = -ENOMEM;
1304 goto err_out;
1305 }
1306 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1307 GFP_KERNEL);
1308 if (!adapter->recv_ctx) {
1309 dev_err(&adapter->pdev->dev,
1310 "Failed to allocate recv ctx resources for adapter\n");
1311 kfree(adapter->ahw);
1312 adapter->ahw = NULL;
1313 err = -ENOMEM;
1314 goto err_out;
1315 }
1316 /* Initialize interrupt coalesce parameters */
1317 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1318 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1319 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1320err_out:
1321 return err;
1322}
1323
1324static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1325{
1326 kfree(adapter->recv_ctx);
1327 adapter->recv_ctx = NULL;
1328
1329 kfree(adapter->ahw);
1330 adapter->ahw = NULL;
1331}
1332
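
Both contexts are now allocated separately from the netdev private area, and the coalesce defaults are seeded once at allocation time rather than on every attach. The allocation-with-unwind shape, with placeholder struct names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct hw_ctx   { u8 coal_flag; u16 coal_rx_time_us, coal_rx_packets; };
struct recv_ctx { void *rds_rings; };
struct adapter  { struct hw_ctx *ahw; struct recv_ctx *recv_ctx; };

static int alloc_adapter_resources(struct adapter *a)
{
	a->ahw = kzalloc(sizeof(*a->ahw), GFP_KERNEL);
	if (!a->ahw)
		return -ENOMEM;

	a->recv_ctx = kzalloc(sizeof(*a->recv_ctx), GFP_KERNEL);
	if (!a->recv_ctx) {
		kfree(a->ahw);			/* unwind the first allocation */
		a->ahw = NULL;
		return -ENOMEM;
	}

	a->ahw->coal_flag = 1;			/* defaults until ethtool overrides */
	return 0;
}

static void free_adapter_resources(struct adapter *a)
{
	kfree(a->recv_ctx);
	a->recv_ctx = NULL;
	kfree(a->ahw);
	a->ahw = NULL;
}
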
1296int qlcnic_diag_alloc_res(struct net_device *netdev, int test) 1333int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1297{ 1334{
1298 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1335 struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -1325,13 +1362,13 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1325 } 1362 }
1326 1363
1327 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 1364 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1328 rds_ring = &adapter->recv_ctx.rds_rings[ring]; 1365 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1329 qlcnic_post_rx_buffers(adapter, ring, rds_ring); 1366 qlcnic_post_rx_buffers(adapter, rds_ring);
1330 } 1367 }
1331 1368
1332 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1369 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1333 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1370 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1334 sds_ring = &adapter->recv_ctx.sds_rings[ring]; 1371 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1335 qlcnic_enable_int(sds_ring); 1372 qlcnic_enable_int(sds_ring);
1336 } 1373 }
1337 } 1374 }
@@ -1413,7 +1450,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1413 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1450 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1414 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX); 1451 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
1415 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1452 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1416 NETIF_F_IPV6_CSUM); 1453 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
1417 1454
1418 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { 1455 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1419 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6); 1456 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
@@ -1501,23 +1538,26 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1501 adapter = netdev_priv(netdev); 1538 adapter = netdev_priv(netdev);
1502 adapter->netdev = netdev; 1539 adapter->netdev = netdev;
1503 adapter->pdev = pdev; 1540 adapter->pdev = pdev;
1504 adapter->dev_rst_time = jiffies;
1505 1541
1542 if (qlcnic_alloc_adapter_resources(adapter))
1543 goto err_out_free_netdev;
1544
1545 adapter->dev_rst_time = jiffies;
1506 revision_id = pdev->revision; 1546 revision_id = pdev->revision;
1507 adapter->ahw.revision_id = revision_id; 1547 adapter->ahw->revision_id = revision_id;
1508 1548
1509 rwlock_init(&adapter->ahw.crb_lock); 1549 rwlock_init(&adapter->ahw->crb_lock);
1510 mutex_init(&adapter->ahw.mem_lock); 1550 mutex_init(&adapter->ahw->mem_lock);
1511 1551
1512 spin_lock_init(&adapter->tx_clean_lock); 1552 spin_lock_init(&adapter->tx_clean_lock);
1513 INIT_LIST_HEAD(&adapter->mac_list); 1553 INIT_LIST_HEAD(&adapter->mac_list);
1514 1554
1515 err = qlcnic_setup_pci_map(adapter); 1555 err = qlcnic_setup_pci_map(adapter);
1516 if (err) 1556 if (err)
1517 goto err_out_free_netdev; 1557 goto err_out_free_hw;
1518 1558
1519 /* This will be reset for mezz cards */ 1559 /* This will be reset for mezz cards */
1520 adapter->portnum = adapter->ahw.pci_func; 1560 adapter->portnum = adapter->ahw->pci_func;
1521 1561
1522 err = qlcnic_get_board_info(adapter); 1562 err = qlcnic_get_board_info(adapter);
1523 if (err) { 1563 if (err) {
@@ -1545,7 +1585,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1545 1585
1546 pr_info("%s: %s Board Chip rev 0x%x\n", 1586 pr_info("%s: %s Board Chip rev 0x%x\n",
1547 module_name(THIS_MODULE), 1587 module_name(THIS_MODULE),
1548 brd_name, adapter->ahw.revision_id); 1588 brd_name, adapter->ahw->revision_id);
1549 } 1589 }
1550 1590
1551 qlcnic_clear_stats(adapter); 1591 qlcnic_clear_stats(adapter);
@@ -1560,7 +1600,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1560 1600
1561 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 1601 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1562 1602
1563 switch (adapter->ahw.port_type) { 1603 switch (adapter->ahw->port_type) {
1564 case QLCNIC_GBE: 1604 case QLCNIC_GBE:
1565 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", 1605 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1566 adapter->netdev->name); 1606 adapter->netdev->name);
@@ -1585,6 +1625,9 @@ err_out_decr_ref:
1585err_out_iounmap: 1625err_out_iounmap:
1586 qlcnic_cleanup_pci_map(adapter); 1626 qlcnic_cleanup_pci_map(adapter);
1587 1627
1628err_out_free_hw:
1629 qlcnic_free_adapter_resources(adapter);
1630
1588err_out_free_netdev: 1631err_out_free_netdev:
1589 free_netdev(netdev); 1632 free_netdev(netdev);
1590 1633
@@ -1638,6 +1681,7 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1638 pci_disable_device(pdev); 1681 pci_disable_device(pdev);
1639 pci_set_drvdata(pdev, NULL); 1682 pci_set_drvdata(pdev, NULL);
1640 1683
1684 qlcnic_free_adapter_resources(adapter);
1641 free_netdev(netdev); 1685 free_netdev(netdev);
1642} 1686}
1643static int __qlcnic_shutdown(struct pci_dev *pdev) 1687static int __qlcnic_shutdown(struct pci_dev *pdev)
@@ -1819,6 +1863,7 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1819 vlan_req->vlan_id = vlan_id; 1863 vlan_req->vlan_id = vlan_id;
1820 1864
1821 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); 1865 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1866 smp_mb();
1822} 1867}
1823 1868
1824#define QLCNIC_MAC_HASH(MAC)\ 1869#define QLCNIC_MAC_HASH(MAC)\
@@ -1879,58 +1924,122 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
1879 spin_unlock(&adapter->mac_learn_lock); 1924 spin_unlock(&adapter->mac_learn_lock);
1880} 1925}
1881 1926
1882static void 1927static int
1883qlcnic_tso_check(struct net_device *netdev, 1928qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
1884 struct qlcnic_host_tx_ring *tx_ring,
1885 struct cmd_desc_type0 *first_desc, 1929 struct cmd_desc_type0 *first_desc,
1886 struct sk_buff *skb) 1930 struct sk_buff *skb)
1887{ 1931{
1888 u8 opcode = TX_ETHER_PKT; 1932 u8 opcode = 0, hdr_len = 0;
1889 __be16 protocol = skb->protocol; 1933 u16 flags = 0, vlan_tci = 0;
1890 u16 flags = 0; 1934 int copied, offset, copy_len;
1891 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1892 struct cmd_desc_type0 *hwdesc; 1935 struct cmd_desc_type0 *hwdesc;
1893 struct vlan_ethhdr *vh; 1936 struct vlan_ethhdr *vh;
1894 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1937 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1938 u16 protocol = ntohs(skb->protocol);
1895 u32 producer = tx_ring->producer; 1939 u32 producer = tx_ring->producer;
1896 __le16 vlan_oob = first_desc->flags_opcode & 1940
1897 cpu_to_le16(FLAGS_VLAN_OOB); 1941 if (protocol == ETH_P_8021Q) {
1942 vh = (struct vlan_ethhdr *)skb->data;
1943 flags = FLAGS_VLAN_TAGGED;
1944 vlan_tci = vh->h_vlan_TCI;
1945 } else if (vlan_tx_tag_present(skb)) {
1946 flags = FLAGS_VLAN_OOB;
1947 vlan_tci = vlan_tx_tag_get(skb);
1948 }
1949 if (unlikely(adapter->pvid)) {
1950 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1951 return -EIO;
1952 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
1953 goto set_flags;
1954
1955 flags = FLAGS_VLAN_OOB;
1956 vlan_tci = adapter->pvid;
1957 }
1958set_flags:
1959 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
1960 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1898 1961
1899 if (*(skb->data) & BIT_0) { 1962 if (*(skb->data) & BIT_0) {
1900 flags |= BIT_0; 1963 flags |= BIT_0;
1901 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); 1964 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1902 } 1965 }
1903 1966 opcode = TX_ETHER_PKT;
1904 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1967 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1905 skb_shinfo(skb)->gso_size > 0) { 1968 skb_shinfo(skb)->gso_size > 0) {
1906 1969
1907 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1970 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1908 1971
1909 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 1972 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1910 first_desc->total_hdr_length = hdr_len; 1973 first_desc->total_hdr_length = hdr_len;
1911 if (vlan_oob) { 1974
1975 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
1976
1977 /* For LSO, we need to copy the MAC/IP/TCP headers into
1978 * the descriptor ring */
1979 copied = 0;
1980 offset = 2;
1981
1982 if (flags & FLAGS_VLAN_OOB) {
1912 first_desc->total_hdr_length += VLAN_HLEN; 1983 first_desc->total_hdr_length += VLAN_HLEN;
1913 first_desc->tcp_hdr_offset = VLAN_HLEN; 1984 first_desc->tcp_hdr_offset = VLAN_HLEN;
1914 first_desc->ip_hdr_offset = VLAN_HLEN; 1985 first_desc->ip_hdr_offset = VLAN_HLEN;
1915 /* Only in case of TSO on vlan device */ 1986 /* Only in case of TSO on vlan device */
1916 flags |= FLAGS_VLAN_TAGGED; 1987 flags |= FLAGS_VLAN_TAGGED;
1988
1989 /* Create a TSO vlan header template for firmware */
1990
1991 hwdesc = &tx_ring->desc_head[producer];
1992 tx_ring->cmd_buf_arr[producer].skb = NULL;
1993
1994 copy_len = min((int)sizeof(struct cmd_desc_type0) -
1995 offset, hdr_len + VLAN_HLEN);
1996
1997 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
1998 skb_copy_from_linear_data(skb, vh, 12);
1999 vh->h_vlan_proto = htons(ETH_P_8021Q);
2000 vh->h_vlan_TCI = htons(vlan_tci);
2001
2002 skb_copy_from_linear_data_offset(skb, 12,
2003 (char *)vh + 16, copy_len - 16);
2004
2005 copied = copy_len - VLAN_HLEN;
2006 offset = 0;
2007
2008 producer = get_next_index(producer, tx_ring->num_desc);
1917 } 2009 }
1918 2010
1919 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 2011 while (copied < hdr_len) {
1920 TX_TCP_LSO6 : TX_TCP_LSO; 2012
1921 tso = 1; 2013 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2014 offset, (hdr_len - copied));
2015
2016 hwdesc = &tx_ring->desc_head[producer];
2017 tx_ring->cmd_buf_arr[producer].skb = NULL;
2018
2019 skb_copy_from_linear_data_offset(skb, copied,
2020 (char *) hwdesc + offset, copy_len);
2021
2022 copied += copy_len;
2023 offset = 0;
2024
2025 producer = get_next_index(producer, tx_ring->num_desc);
2026 }
2027
2028 tx_ring->producer = producer;
2029 smp_mb();
2030 adapter->stats.lso_frames++;
1922 2031
1923 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2032 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1924 u8 l4proto; 2033 u8 l4proto;
1925 2034
1926 if (protocol == cpu_to_be16(ETH_P_IP)) { 2035 if (protocol == ETH_P_IP) {
1927 l4proto = ip_hdr(skb)->protocol; 2036 l4proto = ip_hdr(skb)->protocol;
1928 2037
1929 if (l4proto == IPPROTO_TCP) 2038 if (l4proto == IPPROTO_TCP)
1930 opcode = TX_TCP_PKT; 2039 opcode = TX_TCP_PKT;
1931 else if (l4proto == IPPROTO_UDP) 2040 else if (l4proto == IPPROTO_UDP)
1932 opcode = TX_UDP_PKT; 2041 opcode = TX_UDP_PKT;
1933 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { 2042 } else if (protocol == ETH_P_IPV6) {
1934 l4proto = ipv6_hdr(skb)->nexthdr; 2043 l4proto = ipv6_hdr(skb)->nexthdr;
1935 2044
1936 if (l4proto == IPPROTO_TCP) 2045 if (l4proto == IPPROTO_TCP)
@@ -1939,63 +2048,11 @@ qlcnic_tso_check(struct net_device *netdev,
1939 opcode = TX_UDPV6_PKT; 2048 opcode = TX_UDPV6_PKT;
1940 } 2049 }
1941 } 2050 }
1942
1943 first_desc->tcp_hdr_offset += skb_transport_offset(skb); 2051 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1944 first_desc->ip_hdr_offset += skb_network_offset(skb); 2052 first_desc->ip_hdr_offset += skb_network_offset(skb);
1945 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); 2053 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1946 2054
1947 if (!tso) 2055 return 0;
1948 return;
1949
1950 /* For LSO, we need to copy the MAC/IP/TCP headers into
1951 * the descriptor ring
1952 */
1953 copied = 0;
1954 offset = 2;
1955
1956 if (vlan_oob) {
1957 /* Create a TSO vlan header template for firmware */
1958
1959 hwdesc = &tx_ring->desc_head[producer];
1960 tx_ring->cmd_buf_arr[producer].skb = NULL;
1961
1962 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1963 hdr_len + VLAN_HLEN);
1964
1965 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1966 skb_copy_from_linear_data(skb, vh, 12);
1967 vh->h_vlan_proto = htons(ETH_P_8021Q);
1968 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1969
1970 skb_copy_from_linear_data_offset(skb, 12,
1971 (char *)vh + 16, copy_len - 16);
1972
1973 copied = copy_len - VLAN_HLEN;
1974 offset = 0;
1975
1976 producer = get_next_index(producer, tx_ring->num_desc);
1977 }
1978
1979 while (copied < hdr_len) {
1980
1981 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1982 (hdr_len - copied));
1983
1984 hwdesc = &tx_ring->desc_head[producer];
1985 tx_ring->cmd_buf_arr[producer].skb = NULL;
1986
1987 skb_copy_from_linear_data_offset(skb, copied,
1988 (char *)hwdesc + offset, copy_len);
1989
1990 copied += copy_len;
1991 offset = 0;
1992
1993 producer = get_next_index(producer, tx_ring->num_desc);
1994 }
1995
1996 tx_ring->producer = producer;
1997 barrier();
1998 adapter->stats.lso_frames++;
1999} 2056}
2000 2057
2001static int 2058static int
@@ -2046,39 +2103,21 @@ out_err:
2046 return -ENOMEM; 2103 return -ENOMEM;
2047} 2104}
2048 2105
2049static int 2106static void
2050qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter, 2107qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2051 struct sk_buff *skb, 2108 struct qlcnic_cmd_buffer *pbuf)
2052 struct cmd_desc_type0 *first_desc)
2053{ 2109{
2054 u8 opcode = 0; 2110 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2055 u16 flags = 0; 2111 int nr_frags = skb_shinfo(skb)->nr_frags;
2056 __be16 protocol = skb->protocol; 2112 int i;
2057 struct vlan_ethhdr *vh;
2058 2113
2059 if (protocol == cpu_to_be16(ETH_P_8021Q)) { 2114 for (i = 0; i < nr_frags; i++) {
2060 vh = (struct vlan_ethhdr *)skb->data; 2115 nf = &pbuf->frag_array[i+1];
2061 protocol = vh->h_vlan_encapsulated_proto; 2116 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2062 flags = FLAGS_VLAN_TAGGED;
2063 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2064 } else if (vlan_tx_tag_present(skb)) {
2065 flags = FLAGS_VLAN_OOB;
2066 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2067 } 2117 }
2068 if (unlikely(adapter->pvid)) {
2069 if (first_desc->vlan_TCI &&
2070 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2071 return -EIO;
2072 if (first_desc->vlan_TCI &&
2073 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2074 goto set_flags;
2075 2118
2076 flags = FLAGS_VLAN_OOB; 2119 nf = &pbuf->frag_array[0];
2077 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid); 2120 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2078 }
2079set_flags:
2080 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2081 return 0;
2082} 2121}
2083 2122
2084static inline void 2123static inline void
@@ -2102,7 +2141,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2102 int i, k; 2141 int i, k;
2103 2142
2104 u32 producer; 2143 u32 producer;
2105 int frag_count, no_of_desc; 2144 int frag_count;
2106 u32 num_txd = tx_ring->num_desc; 2145 u32 num_txd = tx_ring->num_desc;
2107 2146
2108 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 2147 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -2119,12 +2158,8 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2119 2158
2120 frag_count = skb_shinfo(skb)->nr_frags + 1; 2159 frag_count = skb_shinfo(skb)->nr_frags + 1;
2121 2160
2122 /* 4 fragments per cmd des */
2123 no_of_desc = (frag_count + 3) >> 2;
2124
2125 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { 2161 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
2126 netif_stop_queue(netdev); 2162 netif_stop_queue(netdev);
2127 smp_mb();
2128 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) 2163 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2129 netif_start_queue(netdev); 2164 netif_start_queue(netdev);
2130 else { 2165 else {
@@ -2141,9 +2176,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2141 first_desc = hwdesc = &tx_ring->desc_head[producer]; 2176 first_desc = hwdesc = &tx_ring->desc_head[producer];
2142 qlcnic_clear_cmddesc((u64 *)hwdesc); 2177 qlcnic_clear_cmddesc((u64 *)hwdesc);
2143 2178
2144 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2145 goto drop_packet;
2146
2147 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { 2179 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2148 adapter->stats.tx_dma_map_error++; 2180 adapter->stats.tx_dma_map_error++;
2149 goto drop_packet; 2181 goto drop_packet;
@@ -2187,8 +2219,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2187 } 2219 }
2188 2220
2189 tx_ring->producer = get_next_index(producer, num_txd); 2221 tx_ring->producer = get_next_index(producer, num_txd);
2222 smp_mb();
2190 2223
2191 qlcnic_tso_check(netdev, tx_ring, first_desc, skb); 2224 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2225 goto unwind_buff;
2192 2226
2193 if (qlcnic_mac_learn) 2227 if (qlcnic_mac_learn)
2194 qlcnic_send_filter(adapter, tx_ring, first_desc, skb); 2228 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
@@ -2200,6 +2234,8 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2200 2234
2201 return NETDEV_TX_OK; 2235 return NETDEV_TX_OK;
2202 2236
2237unwind_buff:
2238 qlcnic_unmap_buffers(pdev, skb, pbuf);
2203drop_packet: 2239drop_packet:
2204 adapter->stats.txdropped++; 2240 adapter->stats.txdropped++;
2205 dev_kfree_skb_any(skb); 2241 dev_kfree_skb_any(skb);
@@ -2246,16 +2282,16 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2246{ 2282{
2247 struct net_device *netdev = adapter->netdev; 2283 struct net_device *netdev = adapter->netdev;
2248 2284
2249 if (adapter->ahw.linkup && !linkup) { 2285 if (adapter->ahw->linkup && !linkup) {
2250 netdev_info(netdev, "NIC Link is down\n"); 2286 netdev_info(netdev, "NIC Link is down\n");
2251 adapter->ahw.linkup = 0; 2287 adapter->ahw->linkup = 0;
2252 if (netif_running(netdev)) { 2288 if (netif_running(netdev)) {
2253 netif_carrier_off(netdev); 2289 netif_carrier_off(netdev);
2254 netif_stop_queue(netdev); 2290 netif_stop_queue(netdev);
2255 } 2291 }
2256 } else if (!adapter->ahw.linkup && linkup) { 2292 } else if (!adapter->ahw->linkup && linkup) {
2257 netdev_info(netdev, "NIC Link is up\n"); 2293 netdev_info(netdev, "NIC Link is up\n");
2258 adapter->ahw.linkup = 1; 2294 adapter->ahw->linkup = 1;
2259 if (netif_running(netdev)) { 2295 if (netif_running(netdev)) {
2260 netif_carrier_on(netdev); 2296 netif_carrier_on(netdev);
2261 netif_wake_queue(netdev); 2297 netif_wake_queue(netdev);
@@ -2491,7 +2527,7 @@ static void qlcnic_poll_controller(struct net_device *netdev)
2491 int ring; 2527 int ring;
2492 struct qlcnic_host_sds_ring *sds_ring; 2528 struct qlcnic_host_sds_ring *sds_ring;
2493 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2529 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2494 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 2530 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
2495 2531
2496 disable_irq(adapter->irq); 2532 disable_irq(adapter->irq);
2497 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 2533 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -3501,7 +3537,7 @@ validate_esw_config(struct qlcnic_adapter *adapter,
3501 u8 pci_func; 3537 u8 pci_func;
3502 int i; 3538 int i;
3503 3539
3504 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE); 3540 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
3505 3541
3506 for (i = 0; i < count; i++) { 3542 for (i = 0; i < count; i++) {
3507 pci_func = esw_cfg[i].pci_func; 3543 pci_func = esw_cfg[i].pci_func;
@@ -3567,13 +3603,13 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3567 if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) 3603 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3568 return QL_STATUS_INVALID_PARAM; 3604 return QL_STATUS_INVALID_PARAM;
3569 3605
3570 if (adapter->ahw.pci_func != esw_cfg[i].pci_func) 3606 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
3571 continue; 3607 continue;
3572 3608
3573 op_mode = esw_cfg[i].op_mode; 3609 op_mode = esw_cfg[i].op_mode;
3574 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); 3610 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3575 esw_cfg[i].op_mode = op_mode; 3611 esw_cfg[i].op_mode = op_mode;
3576 esw_cfg[i].pci_func = adapter->ahw.pci_func; 3612 esw_cfg[i].pci_func = adapter->ahw->pci_func;
3577 3613
3578 switch (esw_cfg[i].op_mode) { 3614 switch (esw_cfg[i].op_mode) {
3579 case QLCNIC_PORT_DEFAULTS: 3615 case QLCNIC_PORT_DEFAULTS:
@@ -3954,14 +3990,14 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3954 dev_info(dev, "failed to create crb sysfs entry\n"); 3990 dev_info(dev, "failed to create crb sysfs entry\n");
3955 if (device_create_bin_file(dev, &bin_attr_mem)) 3991 if (device_create_bin_file(dev, &bin_attr_mem))
3956 dev_info(dev, "failed to create mem sysfs entry\n"); 3992 dev_info(dev, "failed to create mem sysfs entry\n");
3993 if (device_create_bin_file(dev, &bin_attr_pci_config))
3994 dev_info(dev, "failed to create pci config sysfs entry");
3957 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 3995 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3958 return; 3996 return;
3959 if (device_create_bin_file(dev, &bin_attr_esw_config)) 3997 if (device_create_bin_file(dev, &bin_attr_esw_config))
3960 dev_info(dev, "failed to create esw config sysfs entry"); 3998 dev_info(dev, "failed to create esw config sysfs entry");
3961 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 3999 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3962 return; 4000 return;
3963 if (device_create_bin_file(dev, &bin_attr_pci_config))
3964 dev_info(dev, "failed to create pci config sysfs entry");
3965 if (device_create_bin_file(dev, &bin_attr_npar_config)) 4001 if (device_create_bin_file(dev, &bin_attr_npar_config))
3966 dev_info(dev, "failed to create npar config sysfs entry"); 4002 dev_info(dev, "failed to create npar config sysfs entry");
3967 if (device_create_bin_file(dev, &bin_attr_pm_config)) 4003 if (device_create_bin_file(dev, &bin_attr_pm_config))
@@ -3982,12 +4018,12 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3982 device_remove_file(dev, &dev_attr_diag_mode); 4018 device_remove_file(dev, &dev_attr_diag_mode);
3983 device_remove_bin_file(dev, &bin_attr_crb); 4019 device_remove_bin_file(dev, &bin_attr_crb);
3984 device_remove_bin_file(dev, &bin_attr_mem); 4020 device_remove_bin_file(dev, &bin_attr_mem);
4021 device_remove_bin_file(dev, &bin_attr_pci_config);
3985 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 4022 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3986 return; 4023 return;
3987 device_remove_bin_file(dev, &bin_attr_esw_config); 4024 device_remove_bin_file(dev, &bin_attr_esw_config);
3988 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 4025 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3989 return; 4026 return;
3990 device_remove_bin_file(dev, &bin_attr_pci_config);
3991 device_remove_bin_file(dev, &bin_attr_npar_config); 4027 device_remove_bin_file(dev, &bin_attr_npar_config);
3992 device_remove_bin_file(dev, &bin_attr_pm_config); 4028 device_remove_bin_file(dev, &bin_attr_pm_config);
3993 device_remove_bin_file(dev, &bin_attr_esw_stats); 4029 device_remove_bin_file(dev, &bin_attr_esw_stats);
@@ -4034,14 +4070,10 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4034 4070
4035 qlcnic_config_indev_addr(adapter, netdev, event); 4071 qlcnic_config_indev_addr(adapter, netdev, event);
4036 4072
4037 if (!adapter->vlgrp) 4073 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4038 return; 4074 dev = vlan_find_dev(netdev, vid);
4039
4040 for (vid = 0; vid < VLAN_N_VID; vid++) {
4041 dev = vlan_group_get_device(adapter->vlgrp, vid);
4042 if (!dev) 4075 if (!dev)
4043 continue; 4076 continue;
4044
4045 qlcnic_config_indev_addr(adapter, dev, event); 4077 qlcnic_config_indev_addr(adapter, dev, event);
4046 } 4078 }
4047} 4079}
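
The qlcnic LSO path above copies the packet headers into the command descriptor ring one struct cmd_desc_type0 at a time, with the first chunk starting at byte offset 2 so that a VLAN header template can be prepended for the firmware. A minimal, self-contained sketch of that chunking arithmetic, using plain memcpy in place of skb_copy_from_linear_data_offset() and a hypothetical 64-byte descriptor size (the real value is sizeof(struct cmd_desc_type0) in the driver):

	#include <string.h>

	#define DESC_SIZE 64	/* stand-in for sizeof(struct cmd_desc_type0) */

	/*
	 * Copy hdr_len bytes of 'hdr' into consecutive ring slots, writing the
	 * first slot from byte 'offset' onward (2 in the driver, so the copied
	 * Ethernet header stays 2-byte aligned).  Returns the slots consumed.
	 */
	static int copy_header_to_ring(unsigned char ring[][DESC_SIZE], int nslots,
				       const unsigned char *hdr, int hdr_len,
				       int offset)
	{
		int copied = 0, slot = 0;

		while (copied < hdr_len && slot < nslots) {
			int copy_len = DESC_SIZE - offset;

			if (copy_len > hdr_len - copied)
				copy_len = hdr_len - copied;

			memcpy(&ring[slot][offset], hdr + copied, copy_len);
			copied += copy_len;
			offset = 0;	/* only the first slot is offset */
			slot++;
		}
		return slot;
	}

For a typical 54-byte TCP/IPv4 header this consumes a single slot (62 usable bytes in the first descriptor); the driver advances the producer index once per slot, exactly as the while loop in the hunk does with get_next_index(), and bumps tx_ring->producer when the copy is complete.
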
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 8149cc9de4ca..687754da2a9f 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -412,31 +412,31 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
412 return 0; 412 return 0;
413} 413}
414 414
415static int ql_phys_id(struct net_device *ndev, u32 data) 415static int ql_set_phys_id(struct net_device *ndev,
416 enum ethtool_phys_id_state state)
417
416{ 418{
417 struct ql_adapter *qdev = netdev_priv(ndev); 419 struct ql_adapter *qdev = netdev_priv(ndev);
418 u32 led_reg, i;
419 int status;
420 420
421 /* Save the current LED settings */ 421 switch (state) {
422 status = ql_mb_get_led_cfg(qdev); 422 case ETHTOOL_ID_ACTIVE:
423 if (status) 423 /* Save the current LED settings */
424 return status; 424 if (ql_mb_get_led_cfg(qdev))
425 led_reg = qdev->led_config; 425 return -EIO;
426 426
427 /* Start blinking the led */ 427 /* Start blinking */
428 if (!data || data > 300)
429 data = 300;
430
431 for (i = 0; i < (data * 10); i++)
432 ql_mb_set_led_cfg(qdev, QL_LED_BLINK); 428 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
429 return 0;
433 430
434 /* Restore LED settings */ 431 case ETHTOOL_ID_INACTIVE:
435 status = ql_mb_set_led_cfg(qdev, led_reg); 432 /* Restore LED settings */
436 if (status) 433 if (ql_mb_set_led_cfg(qdev, qdev->led_config))
437 return status; 434 return -EIO;
435 return 0;
438 436
439 return 0; 437 default:
438 return -EINVAL;
439 }
440} 440}
441 441
442static int ql_start_loopback(struct ql_adapter *qdev) 442static int ql_start_loopback(struct ql_adapter *qdev)
@@ -703,7 +703,7 @@ const struct ethtool_ops qlge_ethtool_ops = {
703 .get_msglevel = ql_get_msglevel, 703 .get_msglevel = ql_get_msglevel,
704 .set_msglevel = ql_set_msglevel, 704 .set_msglevel = ql_set_msglevel,
705 .get_link = ethtool_op_get_link, 705 .get_link = ethtool_op_get_link,
706 .phys_id = ql_phys_id, 706 .set_phys_id = ql_set_phys_id,
707 .self_test = ql_self_test, 707 .self_test = ql_self_test,
708 .get_pauseparam = ql_get_pauseparam, 708 .get_pauseparam = ql_get_pauseparam,
709 .set_pauseparam = ql_set_pauseparam, 709 .set_pauseparam = ql_set_pauseparam,
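
The qlge change above, like the s2io and sfc changes further down, moves from the old .phys_id ethtool hook, which had to loop and sleep in the driver for the whole blink duration, to .set_phys_id, where the ethtool core owns the timing and calls the driver back with a small state machine. A minimal sketch of such a callback, assuming a hypothetical my_led_set() helper; per the ethtool_ops documentation, returning a positive value from ETHTOOL_ID_ACTIVE asks the core to call back with ID_ON/ID_OFF that many times per second, while returning 0 (as ql_set_phys_id does) means the hardware blinks on its own:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static void my_led_set(struct net_device *dev, bool on)
	{
		/* Stub for the sketch: a real driver would write an LED register. */
		(void)dev;
		(void)on;
	}

	static int my_set_phys_id(struct net_device *dev,
				  enum ethtool_phys_id_state state)
	{
		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			return 1;		/* blink at 1 Hz, core does the timing */
		case ETHTOOL_ID_ON:
			my_led_set(dev, true);
			break;
		case ETHTOOL_ID_OFF:
			my_led_set(dev, false);
			break;
		case ETHTOOL_ID_INACTIVE:
			my_led_set(dev, false);	/* restore normal LED behaviour */
			break;
		}
		return 0;
	}

The callback is then wired up as .set_phys_id in struct ethtool_ops, replacing the old .phys_id entry, as each of the conversions in this diff does.
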
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 493b0de3848b..058524f3eb49 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -37,6 +37,8 @@
37 37
38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" 38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" 39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
40#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
41#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
40#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" 42#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
41 43
42#ifdef RTL8169_DEBUG 44#ifdef RTL8169_DEBUG
@@ -127,6 +129,9 @@ enum mac_version {
127 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP 129 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
128 RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E 130 RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
129 RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E 131 RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
132 RTL_GIGA_MAC_VER_31 = 0x1f, // 8168DP
133 RTL_GIGA_MAC_VER_32 = 0x20, // 8168E
134 RTL_GIGA_MAC_VER_33 = 0x21, // 8168E
130}; 135};
131 136
132#define _R(NAME,MAC,MASK) \ 137#define _R(NAME,MAC,MASK) \
@@ -166,7 +171,10 @@ static const struct {
166 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E 171 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
167 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E 172 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
168 _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E 173 _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
169 _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E 174 _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880), // PCI-E
175 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_31, 0xff7e1880), // PCI-E
176 _R("RTL8168e/8111e", RTL_GIGA_MAC_VER_32, 0xff7e1880), // PCI-E
177 _R("RTL8168e/8111e", RTL_GIGA_MAC_VER_33, 0xff7e1880) // PCI-E
170}; 178};
171#undef _R 179#undef _R
172 180
@@ -315,7 +323,9 @@ enum rtl8168_registers {
315#define OCPAR_FLAG 0x80000000 323#define OCPAR_FLAG 0x80000000
316#define OCPAR_GPHY_WRITE_CMD 0x8000f060 324#define OCPAR_GPHY_WRITE_CMD 0x8000f060
317#define OCPAR_GPHY_READ_CMD 0x0000f060 325#define OCPAR_GPHY_READ_CMD 0x0000f060
318 RDSAR1 = 0xd0 /* 8168c only. Undocumented on 8168dp */ 326 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
327 MISC = 0xf0, /* 8168e only. */
328 txpla_rst = (1 << 29)
319}; 329};
320 330
321enum rtl_register_content { 331enum rtl_register_content {
@@ -393,6 +403,7 @@ enum rtl_register_content {
393 BWF = (1 << 6), /* Accept Broadcast wakeup frame */ 403 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
394 MWF = (1 << 5), /* Accept Multicast wakeup frame */ 404 MWF = (1 << 5), /* Accept Multicast wakeup frame */
395 UWF = (1 << 4), /* Accept Unicast wakeup frame */ 405 UWF = (1 << 4), /* Accept Unicast wakeup frame */
406 spi_en = (1 << 3),
396 LanWake = (1 << 1), /* LanWake enable/disable */ 407 LanWake = (1 << 1), /* LanWake enable/disable */
397 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ 408 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
398 409
@@ -577,6 +588,8 @@ MODULE_LICENSE("GPL");
577MODULE_VERSION(RTL8169_VERSION); 588MODULE_VERSION(RTL8169_VERSION);
578MODULE_FIRMWARE(FIRMWARE_8168D_1); 589MODULE_FIRMWARE(FIRMWARE_8168D_1);
579MODULE_FIRMWARE(FIRMWARE_8168D_2); 590MODULE_FIRMWARE(FIRMWARE_8168D_2);
591MODULE_FIRMWARE(FIRMWARE_8168E_1);
592MODULE_FIRMWARE(FIRMWARE_8168E_2);
580MODULE_FIRMWARE(FIRMWARE_8105E_1); 593MODULE_FIRMWARE(FIRMWARE_8105E_1);
581 594
582static int rtl8169_open(struct net_device *dev); 595static int rtl8169_open(struct net_device *dev);
@@ -651,12 +664,18 @@ static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
651static void rtl8168_driver_start(struct rtl8169_private *tp) 664static void rtl8168_driver_start(struct rtl8169_private *tp)
652{ 665{
653 int i; 666 int i;
667 u32 reg;
654 668
655 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); 669 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
656 670
671 if (tp->mac_version == RTL_GIGA_MAC_VER_31)
672 reg = 0xb8;
673 else
674 reg = 0x10;
675
657 for (i = 0; i < 10; i++) { 676 for (i = 0; i < 10; i++) {
658 msleep(10); 677 msleep(10);
659 if (ocp_read(tp, 0x0f, 0x0010) & 0x00000800) 678 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
660 break; 679 break;
661 } 680 }
662} 681}
@@ -664,16 +683,36 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
664static void rtl8168_driver_stop(struct rtl8169_private *tp) 683static void rtl8168_driver_stop(struct rtl8169_private *tp)
665{ 684{
666 int i; 685 int i;
686 u32 reg;
667 687
668 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); 688 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
669 689
690 if (tp->mac_version == RTL_GIGA_MAC_VER_31)
691 reg = 0xb8;
692 else
693 reg = 0x10;
694
670 for (i = 0; i < 10; i++) { 695 for (i = 0; i < 10; i++) {
671 msleep(10); 696 msleep(10);
672 if ((ocp_read(tp, 0x0f, 0x0010) & 0x00000800) == 0) 697 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
673 break; 698 break;
674 } 699 }
675} 700}
676 701
702static int r8168dp_check_dash(struct rtl8169_private *tp)
703{
704 u32 reg;
705
706 if (tp->mac_version == RTL_GIGA_MAC_VER_31)
707 reg = 0xb8;
708 else
709 reg = 0x10;
710
711 if (ocp_read(tp, 0xF, reg) & 0x00008000)
712 return 1;
713 else
714 return 0;
715}
677 716
678static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 717static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
679{ 718{
@@ -1247,14 +1286,15 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1247 return ret; 1286 return ret;
1248} 1287}
1249 1288
1250static u32 rtl8169_get_rx_csum(struct net_device *dev) 1289static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
1251{ 1290{
1252 struct rtl8169_private *tp = netdev_priv(dev); 1291 if (dev->mtu > MSSMask)
1292 features &= ~NETIF_F_ALL_TSO;
1253 1293
1254 return tp->cp_cmd & RxChkSum; 1294 return features;
1255} 1295}
1256 1296
1257static int rtl8169_set_rx_csum(struct net_device *dev, u32 data) 1297static int rtl8169_set_features(struct net_device *dev, u32 features)
1258{ 1298{
1259 struct rtl8169_private *tp = netdev_priv(dev); 1299 struct rtl8169_private *tp = netdev_priv(dev);
1260 void __iomem *ioaddr = tp->mmio_addr; 1300 void __iomem *ioaddr = tp->mmio_addr;
@@ -1262,11 +1302,16 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1262 1302
1263 spin_lock_irqsave(&tp->lock, flags); 1303 spin_lock_irqsave(&tp->lock, flags);
1264 1304
1265 if (data) 1305 if (features & NETIF_F_RXCSUM)
1266 tp->cp_cmd |= RxChkSum; 1306 tp->cp_cmd |= RxChkSum;
1267 else 1307 else
1268 tp->cp_cmd &= ~RxChkSum; 1308 tp->cp_cmd &= ~RxChkSum;
1269 1309
1310 if (dev->features & NETIF_F_HW_VLAN_RX)
1311 tp->cp_cmd |= RxVlan;
1312 else
1313 tp->cp_cmd &= ~RxVlan;
1314
1270 RTL_W16(CPlusCmd, tp->cp_cmd); 1315 RTL_W16(CPlusCmd, tp->cp_cmd);
1271 RTL_R16(CPlusCmd); 1316 RTL_R16(CPlusCmd);
1272 1317
@@ -1282,27 +1327,6 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1282 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 1327 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1283} 1328}
1284 1329
1285#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
1286
1287static void rtl8169_vlan_mode(struct net_device *dev)
1288{
1289 struct rtl8169_private *tp = netdev_priv(dev);
1290 void __iomem *ioaddr = tp->mmio_addr;
1291 unsigned long flags;
1292
1293 spin_lock_irqsave(&tp->lock, flags);
1294 if (dev->features & NETIF_F_HW_VLAN_RX)
1295 tp->cp_cmd |= RxVlan;
1296 else
1297 tp->cp_cmd &= ~RxVlan;
1298 RTL_W16(CPlusCmd, tp->cp_cmd);
1299 /* PCI commit */
1300 RTL_R16(CPlusCmd);
1301 spin_unlock_irqrestore(&tp->lock, flags);
1302
1303 dev->vlan_features = dev->features &~ NETIF_F_HW_VLAN_TX_RX;
1304}
1305
1306static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) 1330static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1307{ 1331{
1308 u32 opts2 = le32_to_cpu(desc->opts2); 1332 u32 opts2 = le32_to_cpu(desc->opts2);
@@ -1483,28 +1507,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1483 } 1507 }
1484} 1508}
1485 1509
1486static int rtl8169_set_flags(struct net_device *dev, u32 data)
1487{
1488 struct rtl8169_private *tp = netdev_priv(dev);
1489 unsigned long old_feat = dev->features;
1490 int rc;
1491
1492 if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
1493 !(data & ETH_FLAG_RXVLAN)) {
1494 netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
1495 return -EINVAL;
1496 }
1497
1498 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
1499 if (rc)
1500 return rc;
1501
1502 if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
1503 rtl8169_vlan_mode(dev);
1504
1505 return 0;
1506}
1507
1508static const struct ethtool_ops rtl8169_ethtool_ops = { 1510static const struct ethtool_ops rtl8169_ethtool_ops = {
1509 .get_drvinfo = rtl8169_get_drvinfo, 1511 .get_drvinfo = rtl8169_get_drvinfo,
1510 .get_regs_len = rtl8169_get_regs_len, 1512 .get_regs_len = rtl8169_get_regs_len,
@@ -1513,19 +1515,12 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1513 .set_settings = rtl8169_set_settings, 1515 .set_settings = rtl8169_set_settings,
1514 .get_msglevel = rtl8169_get_msglevel, 1516 .get_msglevel = rtl8169_get_msglevel,
1515 .set_msglevel = rtl8169_set_msglevel, 1517 .set_msglevel = rtl8169_set_msglevel,
1516 .get_rx_csum = rtl8169_get_rx_csum,
1517 .set_rx_csum = rtl8169_set_rx_csum,
1518 .set_tx_csum = ethtool_op_set_tx_csum,
1519 .set_sg = ethtool_op_set_sg,
1520 .set_tso = ethtool_op_set_tso,
1521 .get_regs = rtl8169_get_regs, 1518 .get_regs = rtl8169_get_regs,
1522 .get_wol = rtl8169_get_wol, 1519 .get_wol = rtl8169_get_wol,
1523 .set_wol = rtl8169_set_wol, 1520 .set_wol = rtl8169_set_wol,
1524 .get_strings = rtl8169_get_strings, 1521 .get_strings = rtl8169_get_strings,
1525 .get_sset_count = rtl8169_get_sset_count, 1522 .get_sset_count = rtl8169_get_sset_count,
1526 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1523 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1527 .set_flags = rtl8169_set_flags,
1528 .get_flags = ethtool_op_get_flags,
1529}; 1524};
1530 1525
1531static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1526static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1547,6 +1542,11 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1547 u32 val; 1542 u32 val;
1548 int mac_version; 1543 int mac_version;
1549 } mac_info[] = { 1544 } mac_info[] = {
1545 /* 8168E family. */
1546 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
1547 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
1548 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
1549
1550 /* 8168D family. */ 1550 /* 8168D family. */
1551 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, 1551 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
1552 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, 1552 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
@@ -1555,6 +1555,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1555 /* 8168DP family. */ 1555 /* 8168DP family. */
1556 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, 1556 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
1557 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, 1557 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
1558 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
1558 1559
1559 /* 8168C family. */ 1560 /* 8168C family. */
1560 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, 1561 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
@@ -1574,6 +1575,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1574 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1575 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1575 1576
1576 /* 8101 family. */ 1577 /* 8101 family. */
1578 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
1577 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, 1579 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1578 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, 1580 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
1579 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, 1581 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
@@ -2436,6 +2438,93 @@ static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
2436 rtl_patchphy(tp, 0x0d, 1 << 5); 2438 rtl_patchphy(tp, 0x0d, 1 << 5);
2437} 2439}
2438 2440
2441static void rtl8168e_hw_phy_config(struct rtl8169_private *tp)
2442{
2443 static const struct phy_reg phy_reg_init[] = {
2444 /* Enable Delay cap */
2445 { 0x1f, 0x0005 },
2446 { 0x05, 0x8b80 },
2447 { 0x06, 0xc896 },
2448 { 0x1f, 0x0000 },
2449
2450 /* Channel estimation fine tune */
2451 { 0x1f, 0x0001 },
2452 { 0x0b, 0x6c20 },
2453 { 0x07, 0x2872 },
2454 { 0x1c, 0xefff },
2455 { 0x1f, 0x0003 },
2456 { 0x14, 0x6420 },
2457 { 0x1f, 0x0000 },
2458
2459 /* Update PFM & 10M TX idle timer */
2460 { 0x1f, 0x0007 },
2461 { 0x1e, 0x002f },
2462 { 0x15, 0x1919 },
2463 { 0x1f, 0x0000 },
2464
2465 { 0x1f, 0x0007 },
2466 { 0x1e, 0x00ac },
2467 { 0x18, 0x0006 },
2468 { 0x1f, 0x0000 }
2469 };
2470
2471 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2472
2473 /* DCO enable for 10M IDLE Power */
2474 rtl_writephy(tp, 0x1f, 0x0007);
2475 rtl_writephy(tp, 0x1e, 0x0023);
2476 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
2477 rtl_writephy(tp, 0x1f, 0x0000);
2478
2479 /* For impedance matching */
2480 rtl_writephy(tp, 0x1f, 0x0002);
2481 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
2482 rtl_writephy(tp, 0x1F, 0x0000);
2483
2484 /* PHY auto speed down */
2485 rtl_writephy(tp, 0x1f, 0x0007);
2486 rtl_writephy(tp, 0x1e, 0x002d);
2487 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
2488 rtl_writephy(tp, 0x1f, 0x0000);
2489 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
2490
2491 rtl_writephy(tp, 0x1f, 0x0005);
2492 rtl_writephy(tp, 0x05, 0x8b86);
2493 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
2494 rtl_writephy(tp, 0x1f, 0x0000);
2495
2496 rtl_writephy(tp, 0x1f, 0x0005);
2497 rtl_writephy(tp, 0x05, 0x8b85);
2498 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
2499 rtl_writephy(tp, 0x1f, 0x0007);
2500 rtl_writephy(tp, 0x1e, 0x0020);
2501 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
2502 rtl_writephy(tp, 0x1f, 0x0006);
2503 rtl_writephy(tp, 0x00, 0x5a00);
2504 rtl_writephy(tp, 0x1f, 0x0000);
2505 rtl_writephy(tp, 0x0d, 0x0007);
2506 rtl_writephy(tp, 0x0e, 0x003c);
2507 rtl_writephy(tp, 0x0d, 0x4007);
2508 rtl_writephy(tp, 0x0e, 0x0000);
2509 rtl_writephy(tp, 0x0d, 0x0000);
2510}
2511
2512static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
2513{
2514 if (rtl_apply_firmware(tp, FIRMWARE_8168E_1) < 0)
2515 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2516
2517 rtl8168e_hw_phy_config(tp);
2518}
2519
2520static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
2521{
2522 if (rtl_apply_firmware(tp, FIRMWARE_8168E_2) < 0)
2523 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2524
2525 rtl8168e_hw_phy_config(tp);
2526}
2527
2439static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) 2528static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2440{ 2529{
2441 static const struct phy_reg phy_reg_init[] = { 2530 static const struct phy_reg phy_reg_init[] = {
@@ -2551,6 +2640,12 @@ static void rtl_hw_phy_config(struct net_device *dev)
2551 case RTL_GIGA_MAC_VER_30: 2640 case RTL_GIGA_MAC_VER_30:
2552 rtl8105e_hw_phy_config(tp); 2641 rtl8105e_hw_phy_config(tp);
2553 break; 2642 break;
2643 case RTL_GIGA_MAC_VER_32:
2644 rtl8168e_1_hw_phy_config(tp);
2645 break;
2646 case RTL_GIGA_MAC_VER_33:
2647 rtl8168e_2_hw_phy_config(tp);
2648 break;
2554 2649
2555 default: 2650 default:
2556 break; 2651 break;
@@ -2840,6 +2935,8 @@ static const struct net_device_ops rtl8169_netdev_ops = {
2840 .ndo_tx_timeout = rtl8169_tx_timeout, 2935 .ndo_tx_timeout = rtl8169_tx_timeout,
2841 .ndo_validate_addr = eth_validate_addr, 2936 .ndo_validate_addr = eth_validate_addr,
2842 .ndo_change_mtu = rtl8169_change_mtu, 2937 .ndo_change_mtu = rtl8169_change_mtu,
2938 .ndo_fix_features = rtl8169_fix_features,
2939 .ndo_set_features = rtl8169_set_features,
2843 .ndo_set_mac_address = rtl_set_mac_address, 2940 .ndo_set_mac_address = rtl_set_mac_address,
2844 .ndo_do_ioctl = rtl8169_ioctl, 2941 .ndo_do_ioctl = rtl8169_ioctl,
2845 .ndo_set_multicast_list = rtl_set_rx_mode, 2942 .ndo_set_multicast_list = rtl_set_rx_mode,
@@ -2859,6 +2956,7 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
2859 ops->read = r8168dp_1_mdio_read; 2956 ops->read = r8168dp_1_mdio_read;
2860 break; 2957 break;
2861 case RTL_GIGA_MAC_VER_28: 2958 case RTL_GIGA_MAC_VER_28:
2959 case RTL_GIGA_MAC_VER_31:
2862 ops->write = r8168dp_2_mdio_write; 2960 ops->write = r8168dp_2_mdio_write;
2863 ops->read = r8168dp_2_mdio_read; 2961 ops->read = r8168dp_2_mdio_read;
2864 break; 2962 break;
@@ -2900,15 +2998,59 @@ static void r810x_pll_power_up(struct rtl8169_private *tp)
2900static void r8168_phy_power_up(struct rtl8169_private *tp) 2998static void r8168_phy_power_up(struct rtl8169_private *tp)
2901{ 2999{
2902 rtl_writephy(tp, 0x1f, 0x0000); 3000 rtl_writephy(tp, 0x1f, 0x0000);
2903 rtl_writephy(tp, 0x0e, 0x0000); 3001 switch (tp->mac_version) {
3002 case RTL_GIGA_MAC_VER_11:
3003 case RTL_GIGA_MAC_VER_12:
3004 case RTL_GIGA_MAC_VER_17:
3005 case RTL_GIGA_MAC_VER_18:
3006 case RTL_GIGA_MAC_VER_19:
3007 case RTL_GIGA_MAC_VER_20:
3008 case RTL_GIGA_MAC_VER_21:
3009 case RTL_GIGA_MAC_VER_22:
3010 case RTL_GIGA_MAC_VER_23:
3011 case RTL_GIGA_MAC_VER_24:
3012 case RTL_GIGA_MAC_VER_25:
3013 case RTL_GIGA_MAC_VER_26:
3014 case RTL_GIGA_MAC_VER_27:
3015 case RTL_GIGA_MAC_VER_28:
3016 case RTL_GIGA_MAC_VER_31:
3017 rtl_writephy(tp, 0x0e, 0x0000);
3018 break;
3019 default:
3020 break;
3021 }
2904 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); 3022 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
2905} 3023}
2906 3024
2907static void r8168_phy_power_down(struct rtl8169_private *tp) 3025static void r8168_phy_power_down(struct rtl8169_private *tp)
2908{ 3026{
2909 rtl_writephy(tp, 0x1f, 0x0000); 3027 rtl_writephy(tp, 0x1f, 0x0000);
2910 rtl_writephy(tp, 0x0e, 0x0200); 3028 switch (tp->mac_version) {
2911 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); 3029 case RTL_GIGA_MAC_VER_32:
3030 case RTL_GIGA_MAC_VER_33:
3031 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3032 break;
3033
3034 case RTL_GIGA_MAC_VER_11:
3035 case RTL_GIGA_MAC_VER_12:
3036 case RTL_GIGA_MAC_VER_17:
3037 case RTL_GIGA_MAC_VER_18:
3038 case RTL_GIGA_MAC_VER_19:
3039 case RTL_GIGA_MAC_VER_20:
3040 case RTL_GIGA_MAC_VER_21:
3041 case RTL_GIGA_MAC_VER_22:
3042 case RTL_GIGA_MAC_VER_23:
3043 case RTL_GIGA_MAC_VER_24:
3044 case RTL_GIGA_MAC_VER_25:
3045 case RTL_GIGA_MAC_VER_26:
3046 case RTL_GIGA_MAC_VER_27:
3047 case RTL_GIGA_MAC_VER_28:
3048 case RTL_GIGA_MAC_VER_31:
3049 rtl_writephy(tp, 0x0e, 0x0200);
3050 default:
3051 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3052 break;
3053 }
2912} 3054}
2913 3055
2914static void r8168_pll_power_down(struct rtl8169_private *tp) 3056static void r8168_pll_power_down(struct rtl8169_private *tp)
@@ -2916,8 +3058,9 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2916 void __iomem *ioaddr = tp->mmio_addr; 3058 void __iomem *ioaddr = tp->mmio_addr;
2917 3059
2918 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3060 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2919 (tp->mac_version == RTL_GIGA_MAC_VER_28)) && 3061 (tp->mac_version == RTL_GIGA_MAC_VER_28) ||
2920 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { 3062 (tp->mac_version == RTL_GIGA_MAC_VER_31)) &&
3063 r8168dp_check_dash(tp)) {
2921 return; 3064 return;
2922 } 3065 }
2923 3066
@@ -2927,6 +3070,10 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2927 return; 3070 return;
2928 } 3071 }
2929 3072
3073 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3074 tp->mac_version == RTL_GIGA_MAC_VER_33)
3075 rtl_ephy_write(ioaddr, 0x19, 0xff64);
3076
2930 if (__rtl8169_get_wol(tp) & WAKE_ANY) { 3077 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
2931 rtl_writephy(tp, 0x1f, 0x0000); 3078 rtl_writephy(tp, 0x1f, 0x0000);
2932 rtl_writephy(tp, MII_BMCR, 0x0000); 3079 rtl_writephy(tp, MII_BMCR, 0x0000);
@@ -2943,6 +3090,9 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2943 case RTL_GIGA_MAC_VER_26: 3090 case RTL_GIGA_MAC_VER_26:
2944 case RTL_GIGA_MAC_VER_27: 3091 case RTL_GIGA_MAC_VER_27:
2945 case RTL_GIGA_MAC_VER_28: 3092 case RTL_GIGA_MAC_VER_28:
3093 case RTL_GIGA_MAC_VER_31:
3094 case RTL_GIGA_MAC_VER_32:
3095 case RTL_GIGA_MAC_VER_33:
2946 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 3096 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
2947 break; 3097 break;
2948 } 3098 }
@@ -2953,8 +3103,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
2953 void __iomem *ioaddr = tp->mmio_addr; 3103 void __iomem *ioaddr = tp->mmio_addr;
2954 3104
2955 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3105 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2956 (tp->mac_version == RTL_GIGA_MAC_VER_28)) && 3106 (tp->mac_version == RTL_GIGA_MAC_VER_28) ||
2957 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { 3107 (tp->mac_version == RTL_GIGA_MAC_VER_31)) &&
3108 r8168dp_check_dash(tp)) {
2958 return; 3109 return;
2959 } 3110 }
2960 3111
@@ -2963,6 +3114,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
2963 case RTL_GIGA_MAC_VER_26: 3114 case RTL_GIGA_MAC_VER_26:
2964 case RTL_GIGA_MAC_VER_27: 3115 case RTL_GIGA_MAC_VER_27:
2965 case RTL_GIGA_MAC_VER_28: 3116 case RTL_GIGA_MAC_VER_28:
3117 case RTL_GIGA_MAC_VER_31:
3118 case RTL_GIGA_MAC_VER_32:
3119 case RTL_GIGA_MAC_VER_33:
2966 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 3120 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
2967 break; 3121 break;
2968 } 3122 }
@@ -3017,6 +3171,9 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3017 case RTL_GIGA_MAC_VER_26: 3171 case RTL_GIGA_MAC_VER_26:
3018 case RTL_GIGA_MAC_VER_27: 3172 case RTL_GIGA_MAC_VER_27:
3019 case RTL_GIGA_MAC_VER_28: 3173 case RTL_GIGA_MAC_VER_28:
3174 case RTL_GIGA_MAC_VER_31:
3175 case RTL_GIGA_MAC_VER_32:
3176 case RTL_GIGA_MAC_VER_33:
3020 ops->down = r8168_pll_power_down; 3177 ops->down = r8168_pll_power_down;
3021 ops->up = r8168_pll_power_up; 3178 ops->up = r8168_pll_power_up;
3022 break; 3179 break;
@@ -3226,7 +3383,19 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3226 3383
3227 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 3384 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
3228 3385
3229 dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO; 3386 /* don't enable SG, IP_CSUM and TSO by default - it might not work
3387 * properly for all devices */
3388 dev->features |= NETIF_F_RXCSUM |
3389 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3390
3391 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
3392 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3393 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
3394 NETIF_F_HIGHDMA;
3395
3396 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3397 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
3398 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
3230 3399
3231 tp->intr_mask = 0xffff; 3400 tp->intr_mask = 0xffff;
3232 tp->hw_start = cfg->hw_start; 3401 tp->hw_start = cfg->hw_start;
@@ -3249,7 +3418,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3249 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); 3418 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
3250 3419
3251 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3420 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
3252 (tp->mac_version == RTL_GIGA_MAC_VER_28)) { 3421 (tp->mac_version == RTL_GIGA_MAC_VER_28) ||
3422 (tp->mac_version == RTL_GIGA_MAC_VER_31)) {
3253 rtl8168_driver_start(tp); 3423 rtl8168_driver_start(tp);
3254 } 3424 }
3255 3425
@@ -3282,7 +3452,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3282 struct rtl8169_private *tp = netdev_priv(dev); 3452 struct rtl8169_private *tp = netdev_priv(dev);
3283 3453
3284 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3454 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
3285 (tp->mac_version == RTL_GIGA_MAC_VER_28)) { 3455 (tp->mac_version == RTL_GIGA_MAC_VER_28) ||
3456 (tp->mac_version == RTL_GIGA_MAC_VER_31)) {
3286 rtl8168_driver_stop(tp); 3457 rtl8168_driver_stop(tp);
3287 } 3458 }
3288 3459
@@ -3344,7 +3515,7 @@ static int rtl8169_open(struct net_device *dev)
3344 3515
3345 rtl8169_init_phy(dev, tp); 3516 rtl8169_init_phy(dev, tp);
3346 3517
3347 rtl8169_vlan_mode(dev); 3518 rtl8169_set_features(dev, dev->features);
3348 3519
3349 rtl_pll_power_up(tp); 3520 rtl_pll_power_up(tp);
3350 3521
@@ -3382,7 +3553,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3382 rtl8169_irq_mask_and_ack(ioaddr); 3553 rtl8169_irq_mask_and_ack(ioaddr);
3383 3554
3384 if (tp->mac_version == RTL_GIGA_MAC_VER_27 || 3555 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3385 tp->mac_version == RTL_GIGA_MAC_VER_28) { 3556 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3557 tp->mac_version == RTL_GIGA_MAC_VER_31) {
3386 while (RTL_R8(TxPoll) & NPQ) 3558 while (RTL_R8(TxPoll) & NPQ)
3387 udelay(20); 3559 udelay(20);
3388 3560
@@ -3779,6 +3951,17 @@ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
3779 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 3951 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3780} 3952}
3781 3953
3954static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
3955{
3956 rtl_csi_access_enable_1(ioaddr);
3957
3958 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3959
3960 RTL_W8(MaxTxPacketSize, TxPacketMax);
3961
3962 rtl_disable_clock_request(pdev);
3963}
3964
3782static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) 3965static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
3783{ 3966{
3784 static const struct ephy_info e_info_8168d_4[] = { 3967 static const struct ephy_info e_info_8168d_4[] = {
@@ -3805,6 +3988,41 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
3805 rtl_enable_clock_request(pdev); 3988 rtl_enable_clock_request(pdev);
3806} 3989}
3807 3990
3991static void rtl_hw_start_8168e(void __iomem *ioaddr, struct pci_dev *pdev)
3992{
3993 static const struct ephy_info e_info_8168e[] = {
3994 { 0x00, 0x0200, 0x0100 },
3995 { 0x00, 0x0000, 0x0004 },
3996 { 0x06, 0x0002, 0x0001 },
3997 { 0x06, 0x0000, 0x0030 },
3998 { 0x07, 0x0000, 0x2000 },
3999 { 0x00, 0x0000, 0x0020 },
4000 { 0x03, 0x5800, 0x2000 },
4001 { 0x03, 0x0000, 0x0001 },
4002 { 0x01, 0x0800, 0x1000 },
4003 { 0x07, 0x0000, 0x4000 },
4004 { 0x1e, 0x0000, 0x2000 },
4005 { 0x19, 0xffff, 0xfe6c },
4006 { 0x0a, 0x0000, 0x0040 }
4007 };
4008
4009 rtl_csi_access_enable_2(ioaddr);
4010
4011 rtl_ephy_init(ioaddr, e_info_8168e, ARRAY_SIZE(e_info_8168e));
4012
4013 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4014
4015 RTL_W8(MaxTxPacketSize, TxPacketMax);
4016
4017 rtl_disable_clock_request(pdev);
4018
4019 /* Reset tx FIFO pointer */
4020 RTL_W32(MISC, RTL_R32(MISC) | txpla_rst);
4021 RTL_W32(MISC, RTL_R32(MISC) & ~txpla_rst);
4022
4023 RTL_W8(Config5, RTL_R8(Config5) & ~spi_en);
4024}
4025
3808static void rtl_hw_start_8168(struct net_device *dev) 4026static void rtl_hw_start_8168(struct net_device *dev)
3809{ 4027{
3810 struct rtl8169_private *tp = netdev_priv(dev); 4028 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3842,55 +4060,63 @@ static void rtl_hw_start_8168(struct net_device *dev)
3842 switch (tp->mac_version) { 4060 switch (tp->mac_version) {
3843 case RTL_GIGA_MAC_VER_11: 4061 case RTL_GIGA_MAC_VER_11:
3844 rtl_hw_start_8168bb(ioaddr, pdev); 4062 rtl_hw_start_8168bb(ioaddr, pdev);
3845 break; 4063 break;
3846 4064
3847 case RTL_GIGA_MAC_VER_12: 4065 case RTL_GIGA_MAC_VER_12:
3848 case RTL_GIGA_MAC_VER_17: 4066 case RTL_GIGA_MAC_VER_17:
3849 rtl_hw_start_8168bef(ioaddr, pdev); 4067 rtl_hw_start_8168bef(ioaddr, pdev);
3850 break; 4068 break;
3851 4069
3852 case RTL_GIGA_MAC_VER_18: 4070 case RTL_GIGA_MAC_VER_18:
3853 rtl_hw_start_8168cp_1(ioaddr, pdev); 4071 rtl_hw_start_8168cp_1(ioaddr, pdev);
3854 break; 4072 break;
3855 4073
3856 case RTL_GIGA_MAC_VER_19: 4074 case RTL_GIGA_MAC_VER_19:
3857 rtl_hw_start_8168c_1(ioaddr, pdev); 4075 rtl_hw_start_8168c_1(ioaddr, pdev);
3858 break; 4076 break;
3859 4077
3860 case RTL_GIGA_MAC_VER_20: 4078 case RTL_GIGA_MAC_VER_20:
3861 rtl_hw_start_8168c_2(ioaddr, pdev); 4079 rtl_hw_start_8168c_2(ioaddr, pdev);
3862 break; 4080 break;
3863 4081
3864 case RTL_GIGA_MAC_VER_21: 4082 case RTL_GIGA_MAC_VER_21:
3865 rtl_hw_start_8168c_3(ioaddr, pdev); 4083 rtl_hw_start_8168c_3(ioaddr, pdev);
3866 break; 4084 break;
3867 4085
3868 case RTL_GIGA_MAC_VER_22: 4086 case RTL_GIGA_MAC_VER_22:
3869 rtl_hw_start_8168c_4(ioaddr, pdev); 4087 rtl_hw_start_8168c_4(ioaddr, pdev);
3870 break; 4088 break;
3871 4089
3872 case RTL_GIGA_MAC_VER_23: 4090 case RTL_GIGA_MAC_VER_23:
3873 rtl_hw_start_8168cp_2(ioaddr, pdev); 4091 rtl_hw_start_8168cp_2(ioaddr, pdev);
3874 break; 4092 break;
3875 4093
3876 case RTL_GIGA_MAC_VER_24: 4094 case RTL_GIGA_MAC_VER_24:
3877 rtl_hw_start_8168cp_3(ioaddr, pdev); 4095 rtl_hw_start_8168cp_3(ioaddr, pdev);
3878 break; 4096 break;
3879 4097
3880 case RTL_GIGA_MAC_VER_25: 4098 case RTL_GIGA_MAC_VER_25:
3881 case RTL_GIGA_MAC_VER_26: 4099 case RTL_GIGA_MAC_VER_26:
3882 case RTL_GIGA_MAC_VER_27: 4100 case RTL_GIGA_MAC_VER_27:
3883 rtl_hw_start_8168d(ioaddr, pdev); 4101 rtl_hw_start_8168d(ioaddr, pdev);
3884 break; 4102 break;
3885 4103
3886 case RTL_GIGA_MAC_VER_28: 4104 case RTL_GIGA_MAC_VER_28:
3887 rtl_hw_start_8168d_4(ioaddr, pdev); 4105 rtl_hw_start_8168d_4(ioaddr, pdev);
3888 break; 4106 break;
4107 case RTL_GIGA_MAC_VER_31:
4108 rtl_hw_start_8168dp(ioaddr, pdev);
4109 break;
4110
4111 case RTL_GIGA_MAC_VER_32:
4112 case RTL_GIGA_MAC_VER_33:
4113 rtl_hw_start_8168e(ioaddr, pdev);
4114 break;
3889 4115
3890 default: 4116 default:
3891 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", 4117 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
3892 dev->name, tp->mac_version); 4118 dev->name, tp->mac_version);
3893 break; 4119 break;
3894 } 4120 }
3895 4121
3896 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 4122 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -4062,6 +4288,8 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
4062 return -EINVAL; 4288 return -EINVAL;
4063 4289
4064 dev->mtu = new_mtu; 4290 dev->mtu = new_mtu;
4291 netdev_update_features(dev);
4292
4065 return 0; 4293 return 0;
4066} 4294}
4067 4295
@@ -4386,12 +4614,11 @@ err_out:
4386 4614
4387static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) 4615static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
4388{ 4616{
4389 if (dev->features & NETIF_F_TSO) { 4617 u32 mss = skb_shinfo(skb)->gso_size;
4390 u32 mss = skb_shinfo(skb)->gso_size; 4618
4619 if (mss)
4620 return LargeSend | ((mss & MSSMask) << MSSShift);
4391 4621
4392 if (mss)
4393 return LargeSend | ((mss & MSSMask) << MSSShift);
4394 }
4395 if (skb->ip_summed == CHECKSUM_PARTIAL) { 4622 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4396 const struct iphdr *ip = ip_hdr(skb); 4623 const struct iphdr *ip = ip_hdr(skb);
4397 4624
@@ -4755,6 +4982,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4755 case RTL_GIGA_MAC_VER_24: 4982 case RTL_GIGA_MAC_VER_24:
4756 case RTL_GIGA_MAC_VER_27: 4983 case RTL_GIGA_MAC_VER_27:
4757 case RTL_GIGA_MAC_VER_28: 4984 case RTL_GIGA_MAC_VER_28:
4985 case RTL_GIGA_MAC_VER_31:
4758 /* Experimental science. Pktgen proof. */ 4986 /* Experimental science. Pktgen proof. */
4759 case RTL_GIGA_MAC_VER_12: 4987 case RTL_GIGA_MAC_VER_12:
4760 case RTL_GIGA_MAC_VER_25: 4988 case RTL_GIGA_MAC_VER_25:
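
The r8169 conversion above is an instance of the feature-flag framework: togglable offloads are advertised in dev->hw_features at probe time, invalid combinations are vetoed in ndo_fix_features(), and the hardware is programmed in ndo_set_features(); ethtool -K and netdev_update_features() (called here from rtl8169_change_mtu) go through these hooks instead of the removed get/set_rx_csum, set_tso and set_flags ethtool callbacks. A minimal sketch with a hypothetical write_rxcsum_bit() helper and a hypothetical MTU limit, not the driver's actual constants; the TSO veto mirrors rtl8169_fix_features():

	#include <linux/netdevice.h>

	#define MY_TSO_MTU_LIMIT 2048	/* hypothetical hardware limit */

	static void write_rxcsum_bit(struct net_device *dev, bool enable)
	{
		/* Stub for the sketch: a real driver would touch its RX config
		 * register here (CPlusCmd in the r8169 case). */
		(void)dev;
		(void)enable;
	}

	static u32 my_fix_features(struct net_device *dev, u32 features)
	{
		/* Hardware cannot segment frames once the MTU exceeds its limit,
		 * so drop TSO before the core commits the new feature set. */
		if (dev->mtu > MY_TSO_MTU_LIMIT)
			features &= ~NETIF_F_ALL_TSO;
		return features;
	}

	static int my_set_features(struct net_device *dev, u32 features)
	{
		write_rxcsum_bit(dev, !!(features & NETIF_F_RXCSUM));
		return 0;
	}

At probe time the togglable set goes into dev->hw_features and the default-on subset into dev->features, as the rtl8169_init_one hunk does, and the two callbacks are hooked up as .ndo_fix_features and .ndo_set_features in net_device_ops.
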
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 337bdcd5abc9..ca8e75e9a7ee 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5484,83 +5484,79 @@ static void s2io_ethtool_gregs(struct net_device *dev,
5484 } 5484 }
5485} 5485}
5486 5486
5487/** 5487/*
5488 * s2io_phy_id - timer function that alternates adapter LED. 5488 * s2io_set_led - control NIC led
5489 * @data : address of the private member of the device structure, which
5490 * is a pointer to the s2io_nic structure, provided as an u32.
5491 * Description: This is actually the timer function that alternates the
5492 * adapter LED bit of the adapter control bit to set/reset every time on
5493 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5494 * once every second.
5495 */ 5489 */
5496static void s2io_phy_id(unsigned long data) 5490static void s2io_set_led(struct s2io_nic *sp, bool on)
5497{ 5491{
5498 struct s2io_nic *sp = (struct s2io_nic *)data;
5499 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5492 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5500 u64 val64 = 0; 5493 u16 subid = sp->pdev->subsystem_device;
5501 u16 subid; 5494 u64 val64;
5502 5495
5503 subid = sp->pdev->subsystem_device;
5504 if ((sp->device_type == XFRAME_II_DEVICE) || 5496 if ((sp->device_type == XFRAME_II_DEVICE) ||
5505 ((subid & 0xFF) >= 0x07)) { 5497 ((subid & 0xFF) >= 0x07)) {
5506 val64 = readq(&bar0->gpio_control); 5498 val64 = readq(&bar0->gpio_control);
5507 val64 ^= GPIO_CTRL_GPIO_0; 5499 if (on)
5500 val64 |= GPIO_CTRL_GPIO_0;
5501 else
5502 val64 &= ~GPIO_CTRL_GPIO_0;
5503
5508 writeq(val64, &bar0->gpio_control); 5504 writeq(val64, &bar0->gpio_control);
5509 } else { 5505 } else {
5510 val64 = readq(&bar0->adapter_control); 5506 val64 = readq(&bar0->adapter_control);
5511 val64 ^= ADAPTER_LED_ON; 5507 if (on)
5508 val64 |= ADAPTER_LED_ON;
5509 else
5510 val64 &= ~ADAPTER_LED_ON;
5511
5512 writeq(val64, &bar0->adapter_control); 5512 writeq(val64, &bar0->adapter_control);
5513 } 5513 }
5514 5514
5515 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5516} 5515}
5517 5516
5518/** 5517/**
5519 * s2io_ethtool_idnic - To physically identify the nic on the system. 5518 * s2io_ethtool_set_led - To physically identify the nic on the system.
5520 * @sp : private member of the device structure, which is a pointer to the 5519 * @dev : network device
5521 * s2io_nic structure. 5520 * @state: led setting
5522 * @id : pointer to the structure with identification parameters given by 5521 *
5523 * ethtool.
5524 * Description: Used to physically identify the NIC on the system. 5522 * Description: Used to physically identify the NIC on the system.
5525 * The Link LED will blink for a time specified by the user for 5523 * The Link LED will blink for a time specified by the user for
5526 * identification. 5524 * identification.
5527 * NOTE: The Link has to be Up to be able to blink the LED. Hence 5525 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5528 * identification is possible only if it's link is up. 5526 * identification is possible only if it's link is up.
5529 * Return value:
5530 * int , returns 0 on success
5531 */ 5527 */
5532 5528
5533static int s2io_ethtool_idnic(struct net_device *dev, u32 data) 5529static int s2io_ethtool_set_led(struct net_device *dev,
5530 enum ethtool_phys_id_state state)
5534{ 5531{
5535 u64 val64 = 0, last_gpio_ctrl_val;
5536 struct s2io_nic *sp = netdev_priv(dev); 5532 struct s2io_nic *sp = netdev_priv(dev);
5537 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5533 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5538 u16 subid; 5534 u16 subid = sp->pdev->subsystem_device;
5539 5535
5540 subid = sp->pdev->subsystem_device;
5541 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5542 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) { 5536 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5543 val64 = readq(&bar0->adapter_control); 5537 u64 val64 = readq(&bar0->adapter_control);
5544 if (!(val64 & ADAPTER_CNTL_EN)) { 5538 if (!(val64 & ADAPTER_CNTL_EN)) {
5545 pr_err("Adapter Link down, cannot blink LED\n"); 5539 pr_err("Adapter Link down, cannot blink LED\n");
5546 return -EFAULT; 5540 return -EAGAIN;
5547 } 5541 }
5548 } 5542 }
5549 if (sp->id_timer.function == NULL) {
5550 init_timer(&sp->id_timer);
5551 sp->id_timer.function = s2io_phy_id;
5552 sp->id_timer.data = (unsigned long)sp;
5553 }
5554 mod_timer(&sp->id_timer, jiffies);
5555 if (data)
5556 msleep_interruptible(data * HZ);
5557 else
5558 msleep_interruptible(MAX_FLICKER_TIME);
5559 del_timer_sync(&sp->id_timer);
5560 5543
5561 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) { 5544 switch (state) {
5562 writeq(last_gpio_ctrl_val, &bar0->gpio_control); 5545 case ETHTOOL_ID_ACTIVE:
5563 last_gpio_ctrl_val = readq(&bar0->gpio_control); 5546 sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5547 return -EINVAL;
5548
5549 case ETHTOOL_ID_ON:
5550 s2io_set_led(sp, true);
5551 break;
5552
5553 case ETHTOOL_ID_OFF:
5554 s2io_set_led(sp, false);
5555 break;
5556
5557 case ETHTOOL_ID_INACTIVE:
5558 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5559 writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5564 } 5560 }
5565 5561
5566 return 0; 5562 return 0;
@@ -6776,7 +6772,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
6776 .set_ufo = ethtool_op_set_ufo, 6772 .set_ufo = ethtool_op_set_ufo,
6777 .self_test = s2io_ethtool_test, 6773 .self_test = s2io_ethtool_test,
6778 .get_strings = s2io_ethtool_get_strings, 6774 .get_strings = s2io_ethtool_get_strings,
6779 .phys_id = s2io_ethtool_idnic, 6775 .set_phys_id = s2io_ethtool_set_led,
6780 .get_ethtool_stats = s2io_get_ethtool_stats, 6776 .get_ethtool_stats = s2io_get_ethtool_stats,
6781 .get_sset_count = s2io_get_sset_count, 6777 .get_sset_count = s2io_get_sset_count,
6782}; 6778};
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 2d144979f6f8..628fd278866a 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -893,9 +893,6 @@ struct s2io_nic {
893 u16 all_multi_pos; 893 u16 all_multi_pos;
894 u16 promisc_flg; 894 u16 promisc_flg;
895 895
896 /* Id timer, used to blink NIC to physically identify NIC. */
897 struct timer_list id_timer;
898
899 /* Restart timer, used to restart NIC if the device is stuck and 896 /* Restart timer, used to restart NIC if the device is stuck and
900 * a schedule task that will set the correct Link state once the 897 * a schedule task that will set the correct Link state once the
901 * NIC's PHY has stabilized after a state change. 898 * NIC's PHY has stabilized after a state change.
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679e4c4d..db72a6e054e1 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1874,6 +1874,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1874 /* Otherwise efx_start_port() will do this */ 1874 /* Otherwise efx_start_port() will do this */
1875} 1875}
1876 1876
1877static int efx_set_features(struct net_device *net_dev, u32 data)
1878{
1879 struct efx_nic *efx = netdev_priv(net_dev);
1880
1881 /* If disabling RX n-tuple filtering, clear existing filters */
1882 if (net_dev->features & ~data & NETIF_F_NTUPLE)
1883 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
1884
1885 return 0;
1886}
1887
1877static const struct net_device_ops efx_netdev_ops = { 1888static const struct net_device_ops efx_netdev_ops = {
1878 .ndo_open = efx_net_open, 1889 .ndo_open = efx_net_open,
1879 .ndo_stop = efx_net_stop, 1890 .ndo_stop = efx_net_stop,
@@ -1885,6 +1896,7 @@ static const struct net_device_ops efx_netdev_ops = {
1885 .ndo_change_mtu = efx_change_mtu, 1896 .ndo_change_mtu = efx_change_mtu,
1886 .ndo_set_mac_address = efx_set_mac_address, 1897 .ndo_set_mac_address = efx_set_mac_address,
1887 .ndo_set_multicast_list = efx_set_multicast_list, 1898 .ndo_set_multicast_list = efx_set_multicast_list,
1899 .ndo_set_features = efx_set_features,
1888#ifdef CONFIG_NET_POLL_CONTROLLER 1900#ifdef CONFIG_NET_POLL_CONTROLLER
1889 .ndo_poll_controller = efx_netpoll, 1901 .ndo_poll_controller = efx_netpoll,
1890#endif 1902#endif
@@ -2269,7 +2281,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2269 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2281 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2270 2282
2271 efx->net_dev = net_dev; 2283 efx->net_dev = net_dev;
2272 efx->rx_checksum_enabled = true;
2273 spin_lock_init(&efx->stats_lock); 2284 spin_lock_init(&efx->stats_lock);
2274 mutex_init(&efx->mac_lock); 2285 mutex_init(&efx->mac_lock);
2275 efx->mac_op = type->default_mac_ops; 2286 efx->mac_op = type->default_mac_ops;
@@ -2452,12 +2463,15 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2452 return -ENOMEM; 2463 return -ENOMEM;
2453 net_dev->features |= (type->offload_features | NETIF_F_SG | 2464 net_dev->features |= (type->offload_features | NETIF_F_SG |
2454 NETIF_F_HIGHDMA | NETIF_F_TSO | 2465 NETIF_F_HIGHDMA | NETIF_F_TSO |
2455 NETIF_F_GRO); 2466 NETIF_F_RXCSUM);
2456 if (type->offload_features & NETIF_F_V6_CSUM) 2467 if (type->offload_features & NETIF_F_V6_CSUM)
2457 net_dev->features |= NETIF_F_TSO6; 2468 net_dev->features |= NETIF_F_TSO6;
2458 /* Mask for features that also apply to VLAN devices */ 2469 /* Mask for features that also apply to VLAN devices */
2459 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2470 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2460 NETIF_F_HIGHDMA | NETIF_F_TSO); 2471 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
2472 NETIF_F_RXCSUM);
2473 /* All offloads can be toggled */
2474 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
2461 efx = netdev_priv(net_dev); 2475 efx = netdev_priv(net_dev);
2462 pci_set_drvdata(pci_dev, efx); 2476 pci_set_drvdata(pci_dev, efx);
2463 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2477 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
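
efx_set_features() above uses a common idiom for acting only when an offload goes from on to off: net_dev->features still holds the old flags at this point and the 'data' argument holds the requested new ones, so old & ~new & FLAG is non-zero exactly when FLAG is being cleared; for sfc that means dropping the manually inserted n-tuple RX filters the moment NETIF_F_NTUPLE is switched off. The same test, spelled out as a small helper (names here are illustrative, not from the driver):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * True when 'flag' is set in the old feature mask but cleared in the
	 * requested one, i.e. the user is turning that offload off.
	 */
	static bool feature_being_cleared(uint32_t old_features, uint32_t requested,
					  uint32_t flag)
	{
		return (old_features & ~requested & flag) != 0;
	}

In the hunk above, old_features corresponds to net_dev->features (not yet updated by the core), requested corresponds to data, and flag is NETIF_F_NTUPLE.
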
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 807178ef65ad..644f7c1d6e7b 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -178,19 +178,27 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
178 */ 178 */
179 179
180/* Identify device by flashing LEDs */ 180/* Identify device by flashing LEDs */
181static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count) 181static int efx_ethtool_phys_id(struct net_device *net_dev,
182 enum ethtool_phys_id_state state)
182{ 183{
183 struct efx_nic *efx = netdev_priv(net_dev); 184 struct efx_nic *efx = netdev_priv(net_dev);
185 enum efx_led_mode mode;
184 186
185 do { 187 switch (state) {
186 efx->type->set_id_led(efx, EFX_LED_ON); 188 case ETHTOOL_ID_ON:
187 schedule_timeout_interruptible(HZ / 2); 189 mode = EFX_LED_ON;
188 190 break;
189 efx->type->set_id_led(efx, EFX_LED_OFF); 191 case ETHTOOL_ID_OFF:
190 schedule_timeout_interruptible(HZ / 2); 192 mode = EFX_LED_OFF;
191 } while (!signal_pending(current) && --count != 0); 193 break;
194 case ETHTOOL_ID_INACTIVE:
195 mode = EFX_LED_DEFAULT;
196 break;
197 default:
198 return -EINVAL;
199 }
192 200
193 efx->type->set_id_led(efx, EFX_LED_DEFAULT); 201 efx->type->set_id_led(efx, mode);
194 return 0; 202 return 0;
195} 203}
196 204
@@ -518,72 +526,6 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
518 } 526 }
519} 527}
520 528
521static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
522{
523 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
524 u32 features;
525
526 features = NETIF_F_TSO;
527 if (efx->type->offload_features & NETIF_F_V6_CSUM)
528 features |= NETIF_F_TSO6;
529
530 if (enable)
531 net_dev->features |= features;
532 else
533 net_dev->features &= ~features;
534
535 return 0;
536}
537
538static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
539{
540 struct efx_nic *efx = netdev_priv(net_dev);
541 u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
542
543 if (enable)
544 net_dev->features |= features;
545 else
546 net_dev->features &= ~features;
547
548 return 0;
549}
550
551static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
552{
553 struct efx_nic *efx = netdev_priv(net_dev);
554
555 /* No way to stop the hardware doing the checks; we just
556 * ignore the result.
557 */
558 efx->rx_checksum_enabled = !!enable;
559
560 return 0;
561}
562
563static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
564{
565 struct efx_nic *efx = netdev_priv(net_dev);
566
567 return efx->rx_checksum_enabled;
568}
569
570static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
571{
572 struct efx_nic *efx = netdev_priv(net_dev);
573 u32 supported = (efx->type->offload_features &
574 (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
575 int rc;
576
577 rc = ethtool_op_set_flags(net_dev, data, supported);
578 if (rc)
579 return rc;
580
581 if (!(data & ETH_FLAG_NTUPLE))
582 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
583
584 return 0;
585}
586
587static void efx_ethtool_self_test(struct net_device *net_dev, 529static void efx_ethtool_self_test(struct net_device *net_dev,
588 struct ethtool_test *test, u64 *data) 530 struct ethtool_test *test, u64 *data)
589{ 531{
@@ -1070,22 +1012,10 @@ const struct ethtool_ops efx_ethtool_ops = {
1070 .set_ringparam = efx_ethtool_set_ringparam, 1012 .set_ringparam = efx_ethtool_set_ringparam,
1071 .get_pauseparam = efx_ethtool_get_pauseparam, 1013 .get_pauseparam = efx_ethtool_get_pauseparam,
1072 .set_pauseparam = efx_ethtool_set_pauseparam, 1014 .set_pauseparam = efx_ethtool_set_pauseparam,
1073 .get_rx_csum = efx_ethtool_get_rx_csum,
1074 .set_rx_csum = efx_ethtool_set_rx_csum,
1075 .get_tx_csum = ethtool_op_get_tx_csum,
1076 /* Need to enable/disable IPv6 too */
1077 .set_tx_csum = efx_ethtool_set_tx_csum,
1078 .get_sg = ethtool_op_get_sg,
1079 .set_sg = ethtool_op_set_sg,
1080 .get_tso = ethtool_op_get_tso,
1081 /* Need to enable/disable TSO-IPv6 too */
1082 .set_tso = efx_ethtool_set_tso,
1083 .get_flags = ethtool_op_get_flags,
1084 .set_flags = efx_ethtool_set_flags,
1085 .get_sset_count = efx_ethtool_get_sset_count, 1015 .get_sset_count = efx_ethtool_get_sset_count,
1086 .self_test = efx_ethtool_self_test, 1016 .self_test = efx_ethtool_self_test,
1087 .get_strings = efx_ethtool_get_strings, 1017 .get_strings = efx_ethtool_get_strings,
1088 .phys_id = efx_ethtool_phys_id, 1018 .set_phys_id = efx_ethtool_phys_id,
1089 .get_ethtool_stats = efx_ethtool_get_stats, 1019 .get_ethtool_stats = efx_ethtool_get_stats,
1090 .get_wol = efx_ethtool_get_wol, 1020 .get_wol = efx_ethtool_get_wol,
1091 .set_wol = efx_ethtool_set_wol, 1021 .set_wol = efx_ethtool_set_wol,
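
The phys_id conversion above moves the blink loop out of the driver: the ethtool core now calls set_phys_id() with a state argument, and, as used in these hunks, returning -EINVAL from ETHTOOL_ID_ACTIVE asks the core to drive the periodic ON/OFF callbacks itself. A sketch of the callback shape, with foo_led()/foo_led_restore() as placeholder helpers rather than code from this patch:

#include <linux/netdevice.h>
#include <linux/ethtool.h>

static void foo_led(struct net_device *dev, bool on);		/* placeholder */
static void foo_led_restore(struct net_device *dev);		/* placeholder */

static int foo_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return -EINVAL;		/* let the core time the blinking */
	case ETHTOOL_ID_ON:
		foo_led(dev, true);	/* force the identify LED on */
		break;
	case ETHTOOL_ID_OFF:
		foo_led(dev, false);	/* force it off */
		break;
	case ETHTOOL_ID_INACTIVE:
		foo_led_restore(dev);	/* back to normal LED behaviour */
		break;
	}
	return 0;
}

The callback is wired up as .set_phys_id in the driver's ethtool_ops, replacing the old .phys_id entry, and is exercised by "ethtool -p ethX".
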
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 9ffa9a6b55a0..92a9067e8e77 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -681,7 +681,6 @@ struct efx_filter_state;
681 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock 681 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
682 * @port_initialized: Port initialized? 682 * @port_initialized: Port initialized?
683 * @net_dev: Operating system network device. Consider holding the rtnl lock 683 * @net_dev: Operating system network device. Consider holding the rtnl lock
684 * @rx_checksum_enabled: RX checksumming enabled
685 * @stats_buffer: DMA buffer for statistics 684 * @stats_buffer: DMA buffer for statistics
686 * @mac_op: MAC interface 685 * @mac_op: MAC interface
687 * @phy_type: PHY type 686 * @phy_type: PHY type
@@ -771,7 +770,6 @@ struct efx_nic {
771 770
772 bool port_initialized; 771 bool port_initialized;
773 struct net_device *net_dev; 772 struct net_device *net_dev;
774 bool rx_checksum_enabled;
775 773
776 struct efx_buffer stats_buffer; 774 struct efx_buffer stats_buffer;
777 775
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e8396614daf3..2594f39c3ba4 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -850,7 +850,6 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
850 unsigned expected_ptr; 850 unsigned expected_ptr;
851 bool rx_ev_pkt_ok, discard = false, checksummed; 851 bool rx_ev_pkt_ok, discard = false, checksummed;
852 struct efx_rx_queue *rx_queue; 852 struct efx_rx_queue *rx_queue;
853 struct efx_nic *efx = channel->efx;
854 853
855 /* Basic packet information */ 854 /* Basic packet information */
856 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 855 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -873,9 +872,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
873 * UDP/IP, then we can rely on the hardware checksum. 872 * UDP/IP, then we can rely on the hardware checksum.
874 */ 873 */
875 checksummed = 874 checksummed =
876 likely(efx->rx_checksum_enabled) && 875 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
877 (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 876 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
878 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
879 } else { 877 } else {
880 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); 878 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
881 checksummed = false; 879 checksummed = false;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index c0fdb59030fb..b7dc891b4461 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -605,6 +605,9 @@ void __efx_rx_packet(struct efx_channel *channel,
605 skb_record_rx_queue(skb, channel->channel); 605 skb_record_rx_queue(skb, channel->channel);
606 } 606 }
607 607
608 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
609 checksummed = false;
610
608 if (likely(checksummed || rx_buf->is_page)) { 611 if (likely(checksummed || rx_buf->is_page)) {
609 efx_rx_packet_gro(channel, rx_buf, eh, checksummed); 612 efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
610 return; 613 return;
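
With the ethtool rx-csum knob gone, the receive path consults dev->features directly, exactly as the rx.c hunk does before handing the buffer to GRO. The consumer side of NETIF_F_RXCSUM, compressed into a hypothetical completion helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_rx_deliver(struct net_device *dev, struct sk_buff *skb,
			   bool hw_csum_ok)
{
	/* Hardware always computes the checksum; software only decides
	 * whether to trust the result. */
	if ((dev->features & NETIF_F_RXCSUM) && hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;	/* stack re-verifies */

	netif_receive_skb(skb);
}
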
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 35b28f42d208..310dcbce2519 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -537,46 +537,6 @@ static int skge_nway_reset(struct net_device *dev)
537 return 0; 537 return 0;
538} 538}
539 539
540static int skge_set_sg(struct net_device *dev, u32 data)
541{
542 struct skge_port *skge = netdev_priv(dev);
543 struct skge_hw *hw = skge->hw;
544
545 if (hw->chip_id == CHIP_ID_GENESIS && data)
546 return -EOPNOTSUPP;
547 return ethtool_op_set_sg(dev, data);
548}
549
550static int skge_set_tx_csum(struct net_device *dev, u32 data)
551{
552 struct skge_port *skge = netdev_priv(dev);
553 struct skge_hw *hw = skge->hw;
554
555 if (hw->chip_id == CHIP_ID_GENESIS && data)
556 return -EOPNOTSUPP;
557
558 return ethtool_op_set_tx_csum(dev, data);
559}
560
561static u32 skge_get_rx_csum(struct net_device *dev)
562{
563 struct skge_port *skge = netdev_priv(dev);
564
565 return skge->rx_csum;
566}
567
568/* Only Yukon supports checksum offload. */
569static int skge_set_rx_csum(struct net_device *dev, u32 data)
570{
571 struct skge_port *skge = netdev_priv(dev);
572
573 if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
574 return -EOPNOTSUPP;
575
576 skge->rx_csum = data;
577 return 0;
578}
579
580static void skge_get_pauseparam(struct net_device *dev, 540static void skge_get_pauseparam(struct net_device *dev,
581 struct ethtool_pauseparam *ecmd) 541 struct ethtool_pauseparam *ecmd)
582{ 542{
@@ -786,28 +746,27 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
786} 746}
787 747
788/* blink LED's for finding board */ 748/* blink LED's for finding board */
789static int skge_phys_id(struct net_device *dev, u32 data) 749static int skge_set_phys_id(struct net_device *dev,
750 enum ethtool_phys_id_state state)
790{ 751{
791 struct skge_port *skge = netdev_priv(dev); 752 struct skge_port *skge = netdev_priv(dev);
792 unsigned long ms;
793 enum led_mode mode = LED_MODE_TST;
794 753
795 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 754 switch (state) {
796 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000; 755 case ETHTOOL_ID_ACTIVE:
797 else 756 return -EINVAL;
798 ms = data * 1000;
799 757
800 while (ms > 0) { 758 case ETHTOOL_ID_ON:
801 skge_led(skge, mode); 759 skge_led(skge, LED_MODE_TST);
802 mode ^= LED_MODE_TST; 760 break;
803 761
804 if (msleep_interruptible(BLINK_MS)) 762 case ETHTOOL_ID_OFF:
805 break; 763 skge_led(skge, LED_MODE_OFF);
806 ms -= BLINK_MS; 764 break;
807 }
808 765
809 /* back to regular LED state */ 766 case ETHTOOL_ID_INACTIVE:
810 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); 767 /* back to regular LED state */
768 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
769 }
811 770
812 return 0; 771 return 0;
813} 772}
@@ -925,12 +884,8 @@ static const struct ethtool_ops skge_ethtool_ops = {
925 .set_pauseparam = skge_set_pauseparam, 884 .set_pauseparam = skge_set_pauseparam,
926 .get_coalesce = skge_get_coalesce, 885 .get_coalesce = skge_get_coalesce,
927 .set_coalesce = skge_set_coalesce, 886 .set_coalesce = skge_set_coalesce,
928 .set_sg = skge_set_sg,
929 .set_tx_csum = skge_set_tx_csum,
930 .get_rx_csum = skge_get_rx_csum,
931 .set_rx_csum = skge_set_rx_csum,
932 .get_strings = skge_get_strings, 887 .get_strings = skge_get_strings,
933 .phys_id = skge_phys_id, 888 .set_phys_id = skge_set_phys_id,
934 .get_sset_count = skge_get_sset_count, 889 .get_sset_count = skge_get_sset_count,
935 .get_ethtool_stats = skge_get_ethtool_stats, 890 .get_ethtool_stats = skge_get_ethtool_stats,
936}; 891};
@@ -3085,7 +3040,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3085 } 3040 }
3086 3041
3087 skb_put(skb, len); 3042 skb_put(skb, len);
3088 if (skge->rx_csum) { 3043
3044 if (dev->features & NETIF_F_RXCSUM) {
3089 skb->csum = csum; 3045 skb->csum = csum;
3090 skb->ip_summed = CHECKSUM_COMPLETE; 3046 skb->ip_summed = CHECKSUM_COMPLETE;
3091 } 3047 }
@@ -3847,10 +3803,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3847 setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); 3803 setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
3848 3804
3849 if (hw->chip_id != CHIP_ID_GENESIS) { 3805 if (hw->chip_id != CHIP_ID_GENESIS) {
3850 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3806 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3851 skge->rx_csum = 1; 3807 NETIF_F_RXCSUM;
3808 dev->features |= dev->hw_features;
3852 } 3809 }
3853 dev->features |= NETIF_F_GRO;
3854 3810
3855 /* read the mac address */ 3811 /* read the mac address */
3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3812 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 51c0214ac25c..598bf7a1a55e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2460,7 +2460,6 @@ struct skge_port {
2460 struct timer_list link_timer; 2460 struct timer_list link_timer;
2461 enum pause_control flow_control; 2461 enum pause_control flow_control;
2462 enum pause_status flow_status; 2462 enum pause_status flow_status;
2463 u8 rx_csum;
2464 u8 blink_on; 2463 u8 blink_on;
2465 u8 wol; 2464 u8 wol;
2466 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ 2465 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ff8d262dc276..336c762515b2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3826,23 +3826,24 @@ static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
3826} 3826}
3827 3827
3828/* blink LED's for finding board */ 3828/* blink LED's for finding board */
3829static int sky2_phys_id(struct net_device *dev, u32 data) 3829static int sky2_set_phys_id(struct net_device *dev,
3830 enum ethtool_phys_id_state state)
3830{ 3831{
3831 struct sky2_port *sky2 = netdev_priv(dev); 3832 struct sky2_port *sky2 = netdev_priv(dev);
3832 unsigned int i;
3833
3834 if (data == 0)
3835 data = UINT_MAX;
3836 3833
3837 for (i = 0; i < data; i++) { 3834 switch (state) {
3835 case ETHTOOL_ID_ACTIVE:
3836 return -EINVAL;
3837 case ETHTOOL_ID_INACTIVE:
3838 sky2_led(sky2, MO_LED_NORM);
3839 break;
3840 case ETHTOOL_ID_ON:
3838 sky2_led(sky2, MO_LED_ON); 3841 sky2_led(sky2, MO_LED_ON);
3839 if (msleep_interruptible(500)) 3842 break;
3840 break; 3843 case ETHTOOL_ID_OFF:
3841 sky2_led(sky2, MO_LED_OFF); 3844 sky2_led(sky2, MO_LED_OFF);
3842 if (msleep_interruptible(500)) 3845 break;
3843 break;
3844 } 3846 }
3845 sky2_led(sky2, MO_LED_NORM);
3846 3847
3847 return 0; 3848 return 0;
3848} 3849}
@@ -4269,7 +4270,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4269 .set_ringparam = sky2_set_ringparam, 4270 .set_ringparam = sky2_set_ringparam,
4270 .get_pauseparam = sky2_get_pauseparam, 4271 .get_pauseparam = sky2_get_pauseparam,
4271 .set_pauseparam = sky2_set_pauseparam, 4272 .set_pauseparam = sky2_set_pauseparam,
4272 .phys_id = sky2_phys_id, 4273 .set_phys_id = sky2_set_phys_id,
4273 .get_sset_count = sky2_get_sset_count, 4274 .get_sset_count = sky2_get_sset_count,
4274 .get_ethtool_stats = sky2_get_ethtool_stats, 4275 .get_ethtool_stats = sky2_get_ethtool_stats,
4275 .set_flags = sky2_set_flags, 4276 .set_flags = sky2_set_flags,
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 4b42ecc63dcf..b8faab7780da 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -29,6 +29,8 @@
29 * 29 *
30 */ 30 */
31 31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
32#include <linux/crc32.h> 34#include <linux/crc32.h>
33#include <linux/delay.h> 35#include <linux/delay.h>
34#include <linux/errno.h> 36#include <linux/errno.h>
@@ -248,8 +250,8 @@ static int smsc911x_mac_complete(struct smsc911x_data *pdata)
248 if (!(val & MAC_CSR_CMD_CSR_BUSY_)) 250 if (!(val & MAC_CSR_CMD_CSR_BUSY_))
249 return 0; 251 return 0;
250 } 252 }
251 SMSC_WARNING(HW, "Timed out waiting for MAC not BUSY. " 253 SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. "
252 "MAC_CSR_CMD: 0x%08X", val); 254 "MAC_CSR_CMD: 0x%08X", val);
253 return -EIO; 255 return -EIO;
254} 256}
255 257
@@ -262,7 +264,7 @@ static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
262 264
263 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); 265 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
264 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { 266 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
265 SMSC_WARNING(HW, "MAC busy at entry"); 267 SMSC_WARN(pdata, hw, "MAC busy at entry");
266 return 0xFFFFFFFF; 268 return 0xFFFFFFFF;
267 } 269 }
268 270
@@ -277,7 +279,7 @@ static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
277 if (likely(smsc911x_mac_complete(pdata) == 0)) 279 if (likely(smsc911x_mac_complete(pdata) == 0))
278 return smsc911x_reg_read(pdata, MAC_CSR_DATA); 280 return smsc911x_reg_read(pdata, MAC_CSR_DATA);
279 281
280 SMSC_WARNING(HW, "MAC busy after read"); 282 SMSC_WARN(pdata, hw, "MAC busy after read");
281 return 0xFFFFFFFF; 283 return 0xFFFFFFFF;
282} 284}
283 285
@@ -291,8 +293,8 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
291 293
292 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); 294 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
293 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { 295 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
294 SMSC_WARNING(HW, 296 SMSC_WARN(pdata, hw,
295 "smsc911x_mac_write failed, MAC busy at entry"); 297 "smsc911x_mac_write failed, MAC busy at entry");
296 return; 298 return;
297 } 299 }
298 300
@@ -310,8 +312,7 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
310 if (likely(smsc911x_mac_complete(pdata) == 0)) 312 if (likely(smsc911x_mac_complete(pdata) == 0))
311 return; 313 return;
312 314
313 SMSC_WARNING(HW, 315 SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write");
314 "smsc911x_mac_write failed, MAC busy after write");
315} 316}
316 317
317/* Get a phy register */ 318/* Get a phy register */
@@ -326,8 +327,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
326 327
327 /* Confirm MII not busy */ 328 /* Confirm MII not busy */
328 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { 329 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
329 SMSC_WARNING(HW, 330 SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???");
330 "MII is busy in smsc911x_mii_read???");
331 reg = -EIO; 331 reg = -EIO;
332 goto out; 332 goto out;
333 } 333 }
@@ -343,7 +343,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
343 goto out; 343 goto out;
344 } 344 }
345 345
346 SMSC_WARNING(HW, "Timed out waiting for MII read to finish"); 346 SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish");
347 reg = -EIO; 347 reg = -EIO;
348 348
349out: 349out:
@@ -364,8 +364,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
364 364
365 /* Confirm MII not busy */ 365 /* Confirm MII not busy */
366 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { 366 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
367 SMSC_WARNING(HW, 367 SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???");
368 "MII is busy in smsc911x_mii_write???");
369 reg = -EIO; 368 reg = -EIO;
370 goto out; 369 goto out;
371 } 370 }
@@ -385,7 +384,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
385 goto out; 384 goto out;
386 } 385 }
387 386
388 SMSC_WARNING(HW, "Timed out waiting for MII write to finish"); 387 SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish");
389 reg = -EIO; 388 reg = -EIO;
390 389
391out: 390out:
@@ -426,18 +425,20 @@ static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
426 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); 425 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
427 426
428 if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { 427 if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) {
429 SMSC_TRACE(HW, "Forcing internal PHY"); 428 SMSC_TRACE(pdata, hw, "Forcing internal PHY");
430 pdata->using_extphy = 0; 429 pdata->using_extphy = 0;
431 } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { 430 } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) {
432 SMSC_TRACE(HW, "Forcing external PHY"); 431 SMSC_TRACE(pdata, hw, "Forcing external PHY");
433 smsc911x_phy_enable_external(pdata); 432 smsc911x_phy_enable_external(pdata);
434 pdata->using_extphy = 1; 433 pdata->using_extphy = 1;
435 } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { 434 } else if (hwcfg & HW_CFG_EXT_PHY_DET_) {
436 SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET set, using external PHY"); 435 SMSC_TRACE(pdata, hw,
436 "HW_CFG EXT_PHY_DET set, using external PHY");
437 smsc911x_phy_enable_external(pdata); 437 smsc911x_phy_enable_external(pdata);
438 pdata->using_extphy = 1; 438 pdata->using_extphy = 1;
439 } else { 439 } else {
440 SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET clear, using internal PHY"); 440 SMSC_TRACE(pdata, hw,
441 "HW_CFG EXT_PHY_DET clear, using internal PHY");
441 pdata->using_extphy = 0; 442 pdata->using_extphy = 0;
442 } 443 }
443} 444}
@@ -509,13 +510,13 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
509 } while ((i--) && (!status)); 510 } while ((i--) && (!status));
510 511
511 if (!status) { 512 if (!status) {
512 SMSC_WARNING(HW, "Failed to transmit " 513 SMSC_WARN(pdata, hw,
513 "during loopback test"); 514 "Failed to transmit during loopback test");
514 continue; 515 continue;
515 } 516 }
516 if (status & TX_STS_ES_) { 517 if (status & TX_STS_ES_) {
517 SMSC_WARNING(HW, "Transmit encountered " 518 SMSC_WARN(pdata, hw,
518 "errors during loopback test"); 519 "Transmit encountered errors during loopback test");
519 continue; 520 continue;
520 } 521 }
521 522
@@ -527,13 +528,13 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
527 } while ((i--) && (!status)); 528 } while ((i--) && (!status));
528 529
529 if (!status) { 530 if (!status) {
530 SMSC_WARNING(HW, 531 SMSC_WARN(pdata, hw,
531 "Failed to receive during loopback test"); 532 "Failed to receive during loopback test");
532 continue; 533 continue;
533 } 534 }
534 if (status & RX_STS_ES_) { 535 if (status & RX_STS_ES_) {
535 SMSC_WARNING(HW, "Receive encountered " 536 SMSC_WARN(pdata, hw,
536 "errors during loopback test"); 537 "Receive encountered errors during loopback test");
537 continue; 538 continue;
538 } 539 }
539 540
@@ -546,9 +547,9 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
546 smsc911x_rx_readfifo(pdata, (unsigned int *)bufp, rdsz); 547 smsc911x_rx_readfifo(pdata, (unsigned int *)bufp, rdsz);
547 548
548 if (pktlength != (MIN_PACKET_SIZE + 4)) { 549 if (pktlength != (MIN_PACKET_SIZE + 4)) {
549 SMSC_WARNING(HW, "Unexpected packet size " 550 SMSC_WARN(pdata, hw, "Unexpected packet size "
550 "during loop back test, size=%d, will retry", 551 "during loop back test, size=%d, will retry",
551 pktlength); 552 pktlength);
552 } else { 553 } else {
553 unsigned int j; 554 unsigned int j;
554 int mismatch = 0; 555 int mismatch = 0;
@@ -560,12 +561,12 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
560 } 561 }
561 } 562 }
562 if (!mismatch) { 563 if (!mismatch) {
563 SMSC_TRACE(HW, "Successfully verified " 564 SMSC_TRACE(pdata, hw, "Successfully verified "
564 "loopback packet"); 565 "loopback packet");
565 return 0; 566 return 0;
566 } else { 567 } else {
567 SMSC_WARNING(HW, "Data mismatch " 568 SMSC_WARN(pdata, hw, "Data mismatch "
568 "during loop back test, will retry"); 569 "during loop back test, will retry");
569 } 570 }
570 } 571 }
571 } 572 }
@@ -582,7 +583,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
582 BUG_ON(!phy_dev); 583 BUG_ON(!phy_dev);
583 BUG_ON(!phy_dev->bus); 584 BUG_ON(!phy_dev->bus);
584 585
585 SMSC_TRACE(HW, "Performing PHY BCR Reset"); 586 SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
586 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); 587 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
587 do { 588 do {
588 msleep(1); 589 msleep(1);
@@ -591,7 +592,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
591 } while ((i--) && (temp & BMCR_RESET)); 592 } while ((i--) && (temp & BMCR_RESET));
592 593
593 if (temp & BMCR_RESET) { 594 if (temp & BMCR_RESET) {
594 SMSC_WARNING(HW, "PHY reset failed to complete."); 595 SMSC_WARN(pdata, hw, "PHY reset failed to complete");
595 return -EIO; 596 return -EIO;
596 } 597 }
597 /* Extra delay required because the phy may not be completed with 598 /* Extra delay required because the phy may not be completed with
@@ -695,11 +696,11 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
695 else 696 else
696 afc &= ~0xF; 697 afc &= ~0xF;
697 698
698 SMSC_TRACE(HW, "rx pause %s, tx pause %s", 699 SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s",
699 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), 700 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
700 (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); 701 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
701 } else { 702 } else {
702 SMSC_TRACE(HW, "half duplex"); 703 SMSC_TRACE(pdata, hw, "half duplex");
703 flow = 0; 704 flow = 0;
704 afc |= 0xF; 705 afc |= 0xF;
705 } 706 }
@@ -722,17 +723,17 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
722 723
723 if (phy_dev->duplex != pdata->last_duplex) { 724 if (phy_dev->duplex != pdata->last_duplex) {
724 unsigned int mac_cr; 725 unsigned int mac_cr;
725 SMSC_TRACE(HW, "duplex state has changed"); 726 SMSC_TRACE(pdata, hw, "duplex state has changed");
726 727
727 spin_lock_irqsave(&pdata->mac_lock, flags); 728 spin_lock_irqsave(&pdata->mac_lock, flags);
728 mac_cr = smsc911x_mac_read(pdata, MAC_CR); 729 mac_cr = smsc911x_mac_read(pdata, MAC_CR);
729 if (phy_dev->duplex) { 730 if (phy_dev->duplex) {
730 SMSC_TRACE(HW, 731 SMSC_TRACE(pdata, hw,
731 "configuring for full duplex mode"); 732 "configuring for full duplex mode");
732 mac_cr |= MAC_CR_FDPX_; 733 mac_cr |= MAC_CR_FDPX_;
733 } else { 734 } else {
734 SMSC_TRACE(HW, 735 SMSC_TRACE(pdata, hw,
735 "configuring for half duplex mode"); 736 "configuring for half duplex mode");
736 mac_cr &= ~MAC_CR_FDPX_; 737 mac_cr &= ~MAC_CR_FDPX_;
737 } 738 }
738 smsc911x_mac_write(pdata, MAC_CR, mac_cr); 739 smsc911x_mac_write(pdata, MAC_CR, mac_cr);
@@ -744,9 +745,9 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
744 745
745 carrier = netif_carrier_ok(dev); 746 carrier = netif_carrier_ok(dev);
746 if (carrier != pdata->last_carrier) { 747 if (carrier != pdata->last_carrier) {
747 SMSC_TRACE(HW, "carrier state has changed"); 748 SMSC_TRACE(pdata, hw, "carrier state has changed");
748 if (carrier) { 749 if (carrier) {
749 SMSC_TRACE(HW, "configuring for carrier OK"); 750 SMSC_TRACE(pdata, hw, "configuring for carrier OK");
750 if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && 751 if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
751 (!pdata->using_extphy)) { 752 (!pdata->using_extphy)) {
752 /* Restore original GPIO configuration */ 753 /* Restore original GPIO configuration */
@@ -755,7 +756,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
755 pdata->gpio_setting); 756 pdata->gpio_setting);
756 } 757 }
757 } else { 758 } else {
758 SMSC_TRACE(HW, "configuring for no carrier"); 759 SMSC_TRACE(pdata, hw, "configuring for no carrier");
759 /* Check global setting that LED1 760 /* Check global setting that LED1
760 * usage is 10/100 indicator */ 761 * usage is 10/100 indicator */
761 pdata->gpio_setting = smsc911x_reg_read(pdata, 762 pdata->gpio_setting = smsc911x_reg_read(pdata,
@@ -787,25 +788,25 @@ static int smsc911x_mii_probe(struct net_device *dev)
787 /* find the first phy */ 788 /* find the first phy */
788 phydev = phy_find_first(pdata->mii_bus); 789 phydev = phy_find_first(pdata->mii_bus);
789 if (!phydev) { 790 if (!phydev) {
790 pr_err("%s: no PHY found\n", dev->name); 791 netdev_err(dev, "no PHY found\n");
791 return -ENODEV; 792 return -ENODEV;
792 } 793 }
793 794
794 SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X", 795 SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X",
795 phydev->addr, phydev->phy_id); 796 phydev->addr, phydev->phy_id);
796 797
797 ret = phy_connect_direct(dev, phydev, 798 ret = phy_connect_direct(dev, phydev,
798 &smsc911x_phy_adjust_link, 0, 799 &smsc911x_phy_adjust_link, 0,
799 pdata->config.phy_interface); 800 pdata->config.phy_interface);
800 801
801 if (ret) { 802 if (ret) {
802 pr_err("%s: Could not attach to PHY\n", dev->name); 803 netdev_err(dev, "Could not attach to PHY\n");
803 return ret; 804 return ret;
804 } 805 }
805 806
806 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 807 netdev_info(dev,
807 dev->name, phydev->drv->name, 808 "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
808 dev_name(&phydev->dev), phydev->irq); 809 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
809 810
810 /* mask with MAC supported features */ 811 /* mask with MAC supported features */
811 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | 812 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -818,13 +819,13 @@ static int smsc911x_mii_probe(struct net_device *dev)
818 819
819#ifdef USE_PHY_WORK_AROUND 820#ifdef USE_PHY_WORK_AROUND
820 if (smsc911x_phy_loopbacktest(dev) < 0) { 821 if (smsc911x_phy_loopbacktest(dev) < 0) {
821 SMSC_WARNING(HW, "Failed Loop Back Test"); 822 SMSC_WARN(pdata, hw, "Failed Loop Back Test");
822 return -ENODEV; 823 return -ENODEV;
823 } 824 }
824 SMSC_TRACE(HW, "Passed Loop Back Test"); 825 SMSC_TRACE(pdata, hw, "Passed Loop Back Test");
825#endif /* USE_PHY_WORK_AROUND */ 826#endif /* USE_PHY_WORK_AROUND */
826 827
827 SMSC_TRACE(HW, "phy initialised successfully"); 828 SMSC_TRACE(pdata, hw, "phy initialised successfully");
828 return 0; 829 return 0;
829} 830}
830 831
@@ -860,8 +861,8 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
860 smsc911x_phy_initialise_external(pdata); 861 smsc911x_phy_initialise_external(pdata);
861 break; 862 break;
862 default: 863 default:
863 SMSC_TRACE(HW, "External PHY is not supported, " 864 SMSC_TRACE(pdata, hw, "External PHY is not supported, "
864 "using internal PHY"); 865 "using internal PHY");
865 pdata->using_extphy = 0; 866 pdata->using_extphy = 0;
866 break; 867 break;
867 } 868 }
@@ -872,12 +873,12 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
872 } 873 }
873 874
874 if (mdiobus_register(pdata->mii_bus)) { 875 if (mdiobus_register(pdata->mii_bus)) {
875 SMSC_WARNING(PROBE, "Error registering mii bus"); 876 SMSC_WARN(pdata, probe, "Error registering mii bus");
876 goto err_out_free_bus_2; 877 goto err_out_free_bus_2;
877 } 878 }
878 879
879 if (smsc911x_mii_probe(dev) < 0) { 880 if (smsc911x_mii_probe(dev) < 0) {
880 SMSC_WARNING(PROBE, "Error registering mii bus"); 881 SMSC_WARN(pdata, probe, "Error registering mii bus");
881 goto err_out_unregister_bus_3; 882 goto err_out_unregister_bus_3;
882 } 883 }
883 884
@@ -913,8 +914,7 @@ static void smsc911x_tx_update_txcounters(struct net_device *dev)
913 * does not reference a hardware defined reserved bit 914 * does not reference a hardware defined reserved bit
914 * but rather a driver defined one. 915 * but rather a driver defined one.
915 */ 916 */
916 SMSC_WARNING(HW, 917 SMSC_WARN(pdata, hw, "Packet tag reserved bit is high");
917 "Packet tag reserved bit is high");
918 } else { 918 } else {
919 if (unlikely(tx_stat & TX_STS_ES_)) { 919 if (unlikely(tx_stat & TX_STS_ES_)) {
920 dev->stats.tx_errors++; 920 dev->stats.tx_errors++;
@@ -977,8 +977,8 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
977 } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout); 977 } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout);
978 978
979 if (unlikely(timeout == 0)) 979 if (unlikely(timeout == 0))
980 SMSC_WARNING(HW, "Timed out waiting for " 980 SMSC_WARN(pdata, hw, "Timed out waiting for "
981 "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val); 981 "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
982 } else { 982 } else {
983 unsigned int temp; 983 unsigned int temp;
984 while (pktwords--) 984 while (pktwords--)
@@ -1021,8 +1021,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1021 smsc911x_rx_counterrors(dev, rxstat); 1021 smsc911x_rx_counterrors(dev, rxstat);
1022 1022
1023 if (unlikely(rxstat & RX_STS_ES_)) { 1023 if (unlikely(rxstat & RX_STS_ES_)) {
1024 SMSC_WARNING(RX_ERR, 1024 SMSC_WARN(pdata, rx_err,
1025 "Discarding packet with error bit set"); 1025 "Discarding packet with error bit set");
1026 /* Packet has an error, discard it and continue with 1026 /* Packet has an error, discard it and continue with
1027 * the next */ 1027 * the next */
1028 smsc911x_rx_fastforward(pdata, pktwords); 1028 smsc911x_rx_fastforward(pdata, pktwords);
@@ -1032,8 +1032,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1032 1032
1033 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); 1033 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
1034 if (unlikely(!skb)) { 1034 if (unlikely(!skb)) {
1035 SMSC_WARNING(RX_ERR, 1035 SMSC_WARN(pdata, rx_err,
1036 "Unable to allocate skb for rx packet"); 1036 "Unable to allocate skb for rx packet");
1037 /* Drop the packet and stop this polling iteration */ 1037 /* Drop the packet and stop this polling iteration */
1038 smsc911x_rx_fastforward(pdata, pktwords); 1038 smsc911x_rx_fastforward(pdata, pktwords);
1039 dev->stats.rx_dropped++; 1039 dev->stats.rx_dropped++;
@@ -1083,8 +1083,8 @@ static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata)
1083 smsc911x_mac_write(pdata, MAC_CR, mac_cr); 1083 smsc911x_mac_write(pdata, MAC_CR, mac_cr);
1084 smsc911x_mac_write(pdata, HASHH, pdata->hashhi); 1084 smsc911x_mac_write(pdata, HASHH, pdata->hashhi);
1085 smsc911x_mac_write(pdata, HASHL, pdata->hashlo); 1085 smsc911x_mac_write(pdata, HASHL, pdata->hashlo);
1086 SMSC_TRACE(HW, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", 1086 SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X",
1087 mac_cr, pdata->hashhi, pdata->hashlo); 1087 mac_cr, pdata->hashhi, pdata->hashlo);
1088} 1088}
1089 1089
1090static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) 1090static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
@@ -1102,7 +1102,7 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
1102 1102
1103 /* Check Rx has stopped */ 1103 /* Check Rx has stopped */
1104 if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_) 1104 if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_)
1105 SMSC_WARNING(DRV, "Rx not stopped"); 1105 SMSC_WARN(pdata, drv, "Rx not stopped");
1106 1106
1107 /* Perform the update - safe to do now Rx has stopped */ 1107 /* Perform the update - safe to do now Rx has stopped */
1108 smsc911x_rx_multicast_update(pdata); 1108 smsc911x_rx_multicast_update(pdata);
@@ -1131,7 +1131,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
1131 } while ((--timeout) && (temp & HW_CFG_SRST_)); 1131 } while ((--timeout) && (temp & HW_CFG_SRST_));
1132 1132
1133 if (unlikely(temp & HW_CFG_SRST_)) { 1133 if (unlikely(temp & HW_CFG_SRST_)) {
1134 SMSC_WARNING(DRV, "Failed to complete reset"); 1134 SMSC_WARN(pdata, drv, "Failed to complete reset");
1135 return -EIO; 1135 return -EIO;
1136 } 1136 }
1137 return 0; 1137 return 0;
@@ -1160,18 +1160,18 @@ static int smsc911x_open(struct net_device *dev)
1160 1160
1161 /* if the phy is not yet registered, retry later*/ 1161 /* if the phy is not yet registered, retry later*/
1162 if (!pdata->phy_dev) { 1162 if (!pdata->phy_dev) {
1163 SMSC_WARNING(HW, "phy_dev is NULL"); 1163 SMSC_WARN(pdata, hw, "phy_dev is NULL");
1164 return -EAGAIN; 1164 return -EAGAIN;
1165 } 1165 }
1166 1166
1167 if (!is_valid_ether_addr(dev->dev_addr)) { 1167 if (!is_valid_ether_addr(dev->dev_addr)) {
1168 SMSC_WARNING(HW, "dev_addr is not a valid MAC address"); 1168 SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address");
1169 return -EADDRNOTAVAIL; 1169 return -EADDRNOTAVAIL;
1170 } 1170 }
1171 1171
1172 /* Reset the LAN911x */ 1172 /* Reset the LAN911x */
1173 if (smsc911x_soft_reset(pdata)) { 1173 if (smsc911x_soft_reset(pdata)) {
1174 SMSC_WARNING(HW, "soft reset failed"); 1174 SMSC_WARN(pdata, hw, "soft reset failed");
1175 return -EIO; 1175 return -EIO;
1176 } 1176 }
1177 1177
@@ -1191,8 +1191,8 @@ static int smsc911x_open(struct net_device *dev)
1191 } 1191 }
1192 1192
1193 if (unlikely(timeout == 0)) 1193 if (unlikely(timeout == 0))
1194 SMSC_WARNING(IFUP, 1194 SMSC_WARN(pdata, ifup,
1195 "Timed out waiting for EEPROM busy bit to clear"); 1195 "Timed out waiting for EEPROM busy bit to clear");
1196 1196
1197 smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000); 1197 smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);
1198 1198
@@ -1210,22 +1210,22 @@ static int smsc911x_open(struct net_device *dev)
1210 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); 1210 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
1211 1211
1212 if (pdata->config.irq_polarity) { 1212 if (pdata->config.irq_polarity) {
1213 SMSC_TRACE(IFUP, "irq polarity: active high"); 1213 SMSC_TRACE(pdata, ifup, "irq polarity: active high");
1214 intcfg |= INT_CFG_IRQ_POL_; 1214 intcfg |= INT_CFG_IRQ_POL_;
1215 } else { 1215 } else {
1216 SMSC_TRACE(IFUP, "irq polarity: active low"); 1216 SMSC_TRACE(pdata, ifup, "irq polarity: active low");
1217 } 1217 }
1218 1218
1219 if (pdata->config.irq_type) { 1219 if (pdata->config.irq_type) {
1220 SMSC_TRACE(IFUP, "irq type: push-pull"); 1220 SMSC_TRACE(pdata, ifup, "irq type: push-pull");
1221 intcfg |= INT_CFG_IRQ_TYPE_; 1221 intcfg |= INT_CFG_IRQ_TYPE_;
1222 } else { 1222 } else {
1223 SMSC_TRACE(IFUP, "irq type: open drain"); 1223 SMSC_TRACE(pdata, ifup, "irq type: open drain");
1224 } 1224 }
1225 1225
1226 smsc911x_reg_write(pdata, INT_CFG, intcfg); 1226 smsc911x_reg_write(pdata, INT_CFG, intcfg);
1227 1227
1228 SMSC_TRACE(IFUP, "Testing irq handler using IRQ %d", dev->irq); 1228 SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq);
1229 pdata->software_irq_signal = 0; 1229 pdata->software_irq_signal = 0;
1230 smp_wmb(); 1230 smp_wmb();
1231 1231
@@ -1241,14 +1241,15 @@ static int smsc911x_open(struct net_device *dev)
1241 } 1241 }
1242 1242
1243 if (!pdata->software_irq_signal) { 1243 if (!pdata->software_irq_signal) {
1244 dev_warn(&dev->dev, "ISR failed signaling test (IRQ %d)\n", 1244 netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
1245 dev->irq); 1245 dev->irq);
1246 return -ENODEV; 1246 return -ENODEV;
1247 } 1247 }
1248 SMSC_TRACE(IFUP, "IRQ handler passed test using IRQ %d", dev->irq); 1248 SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
1249 dev->irq);
1249 1250
1250 dev_info(&dev->dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", 1251 netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n",
1251 (unsigned long)pdata->ioaddr, dev->irq); 1252 (unsigned long)pdata->ioaddr, dev->irq);
1252 1253
1253 /* Reset the last known duplex and carrier */ 1254 /* Reset the last known duplex and carrier */
1254 pdata->last_duplex = -1; 1255 pdata->last_duplex = -1;
@@ -1313,7 +1314,7 @@ static int smsc911x_stop(struct net_device *dev)
1313 if (pdata->phy_dev) 1314 if (pdata->phy_dev)
1314 phy_stop(pdata->phy_dev); 1315 phy_stop(pdata->phy_dev);
1315 1316
1316 SMSC_TRACE(IFDOWN, "Interface stopped"); 1317 SMSC_TRACE(pdata, ifdown, "Interface stopped");
1317 return 0; 1318 return 0;
1318} 1319}
1319 1320
@@ -1331,8 +1332,8 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1331 freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_; 1332 freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_;
1332 1333
1333 if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD)) 1334 if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD))
1334 SMSC_WARNING(TX_ERR, 1335 SMSC_WARN(pdata, tx_err,
1335 "Tx data fifo low, space available: %d", freespace); 1336 "Tx data fifo low, space available: %d", freespace);
1336 1337
1337 /* Word alignment adjustment */ 1338 /* Word alignment adjustment */
1338 tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16; 1339 tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16;
@@ -1432,7 +1433,7 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1432 * receiving data */ 1433 * receiving data */
1433 if (!pdata->multicast_update_pending) { 1434 if (!pdata->multicast_update_pending) {
1434 unsigned int temp; 1435 unsigned int temp;
1435 SMSC_TRACE(HW, "scheduling mcast update"); 1436 SMSC_TRACE(pdata, hw, "scheduling mcast update");
1436 pdata->multicast_update_pending = 1; 1437 pdata->multicast_update_pending = 1;
1437 1438
1438 /* Request the hardware to stop, then perform the 1439 /* Request the hardware to stop, then perform the
@@ -1474,7 +1475,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1474 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { 1475 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
1475 /* Called when there is a multicast update scheduled and 1476 /* Called when there is a multicast update scheduled and
1476 * it is now safe to complete the update */ 1477 * it is now safe to complete the update */
1477 SMSC_TRACE(INTR, "RX Stop interrupt"); 1478 SMSC_TRACE(pdata, intr, "RX Stop interrupt");
1478 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); 1479 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1479 if (pdata->multicast_update_pending) 1480 if (pdata->multicast_update_pending)
1480 smsc911x_rx_multicast_update_workaround(pdata); 1481 smsc911x_rx_multicast_update_workaround(pdata);
@@ -1491,7 +1492,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1491 } 1492 }
1492 1493
1493 if (unlikely(intsts & inten & INT_STS_RXE_)) { 1494 if (unlikely(intsts & inten & INT_STS_RXE_)) {
1494 SMSC_TRACE(INTR, "RX Error interrupt"); 1495 SMSC_TRACE(pdata, intr, "RX Error interrupt");
1495 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); 1496 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
1496 serviced = IRQ_HANDLED; 1497 serviced = IRQ_HANDLED;
1497 } 1498 }
@@ -1505,8 +1506,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1505 /* Schedule a NAPI poll */ 1506 /* Schedule a NAPI poll */
1506 __napi_schedule(&pdata->napi); 1507 __napi_schedule(&pdata->napi);
1507 } else { 1508 } else {
1508 SMSC_WARNING(RX_ERR, 1509 SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
1509 "napi_schedule_prep failed");
1510 } 1510 }
1511 serviced = IRQ_HANDLED; 1511 serviced = IRQ_HANDLED;
1512 } 1512 }
@@ -1543,7 +1543,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
1543 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 1543 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
1544 spin_unlock_irq(&pdata->mac_lock); 1544 spin_unlock_irq(&pdata->mac_lock);
1545 1545
1546 dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr); 1546 netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
1547 1547
1548 return 0; 1548 return 0;
1549} 1549}
@@ -1649,9 +1649,9 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
1649 int timeout = 100; 1649 int timeout = 100;
1650 u32 e2cmd; 1650 u32 e2cmd;
1651 1651
1652 SMSC_TRACE(DRV, "op 0x%08x", op); 1652 SMSC_TRACE(pdata, drv, "op 0x%08x", op);
1653 if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) { 1653 if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
1654 SMSC_WARNING(DRV, "Busy at start"); 1654 SMSC_WARN(pdata, drv, "Busy at start");
1655 return -EBUSY; 1655 return -EBUSY;
1656 } 1656 }
1657 1657
@@ -1664,12 +1664,12 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
1664 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); 1664 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
1665 1665
1666 if (!timeout) { 1666 if (!timeout) {
1667 SMSC_TRACE(DRV, "TIMED OUT"); 1667 SMSC_TRACE(pdata, drv, "TIMED OUT");
1668 return -EAGAIN; 1668 return -EAGAIN;
1669 } 1669 }
1670 1670
1671 if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { 1671 if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
1672 SMSC_TRACE(DRV, "Error occurred during eeprom operation"); 1672 SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation");
1673 return -EINVAL; 1673 return -EINVAL;
1674 } 1674 }
1675 1675
@@ -1682,7 +1682,7 @@ static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata,
1682 u32 op = E2P_CMD_EPC_CMD_READ_ | address; 1682 u32 op = E2P_CMD_EPC_CMD_READ_ | address;
1683 int ret; 1683 int ret;
1684 1684
1685 SMSC_TRACE(DRV, "address 0x%x", address); 1685 SMSC_TRACE(pdata, drv, "address 0x%x", address);
1686 ret = smsc911x_eeprom_send_cmd(pdata, op); 1686 ret = smsc911x_eeprom_send_cmd(pdata, op);
1687 1687
1688 if (!ret) 1688 if (!ret)
@@ -1698,7 +1698,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
1698 u32 temp; 1698 u32 temp;
1699 int ret; 1699 int ret;
1700 1700
1701 SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data); 1701 SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data);
1702 ret = smsc911x_eeprom_send_cmd(pdata, op); 1702 ret = smsc911x_eeprom_send_cmd(pdata, op);
1703 1703
1704 if (!ret) { 1704 if (!ret) {
@@ -1811,26 +1811,26 @@ static int __devinit smsc911x_init(struct net_device *dev)
1811 struct smsc911x_data *pdata = netdev_priv(dev); 1811 struct smsc911x_data *pdata = netdev_priv(dev);
1812 unsigned int byte_test; 1812 unsigned int byte_test;
1813 1813
1814 SMSC_TRACE(PROBE, "Driver Parameters:"); 1814 SMSC_TRACE(pdata, probe, "Driver Parameters:");
1815 SMSC_TRACE(PROBE, "LAN base: 0x%08lX", 1815 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
1816 (unsigned long)pdata->ioaddr); 1816 (unsigned long)pdata->ioaddr);
1817 SMSC_TRACE(PROBE, "IRQ: %d", dev->irq); 1817 SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq);
1818 SMSC_TRACE(PROBE, "PHY will be autodetected."); 1818 SMSC_TRACE(pdata, probe, "PHY will be autodetected.");
1819 1819
1820 spin_lock_init(&pdata->dev_lock); 1820 spin_lock_init(&pdata->dev_lock);
1821 spin_lock_init(&pdata->mac_lock); 1821 spin_lock_init(&pdata->mac_lock);
1822 1822
1823 if (pdata->ioaddr == 0) { 1823 if (pdata->ioaddr == 0) {
1824 SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000"); 1824 SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
1825 return -ENODEV; 1825 return -ENODEV;
1826 } 1826 }
1827 1827
1828 /* Check byte ordering */ 1828 /* Check byte ordering */
1829 byte_test = smsc911x_reg_read(pdata, BYTE_TEST); 1829 byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
1830 SMSC_TRACE(PROBE, "BYTE_TEST: 0x%08X", byte_test); 1830 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
1831 if (byte_test == 0x43218765) { 1831 if (byte_test == 0x43218765) {
1832 SMSC_TRACE(PROBE, "BYTE_TEST looks swapped, " 1832 SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, "
1833 "applying WORD_SWAP"); 1833 "applying WORD_SWAP");
1834 smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff); 1834 smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff);
1835 1835
1836 /* 1 dummy read of BYTE_TEST is needed after a write to 1836 /* 1 dummy read of BYTE_TEST is needed after a write to
@@ -1841,12 +1841,13 @@ static int __devinit smsc911x_init(struct net_device *dev)
1841 } 1841 }
1842 1842
1843 if (byte_test != 0x87654321) { 1843 if (byte_test != 0x87654321) {
1844 SMSC_WARNING(DRV, "BYTE_TEST: 0x%08X", byte_test); 1844 SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test);
1845 if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) { 1845 if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) {
1846 SMSC_WARNING(PROBE, 1846 SMSC_WARN(pdata, probe,
1847 "top 16 bits equal to bottom 16 bits"); 1847 "top 16 bits equal to bottom 16 bits");
1848 SMSC_TRACE(PROBE, "This may mean the chip is set " 1848 SMSC_TRACE(pdata, probe,
1849 "for 32 bit while the bus is reading 16 bit"); 1849 "This may mean the chip is set "
1850 "for 32 bit while the bus is reading 16 bit");
1850 } 1851 }
1851 return -ENODEV; 1852 return -ENODEV;
1852 } 1853 }
@@ -1881,17 +1882,18 @@ static int __devinit smsc911x_init(struct net_device *dev)
1881 break; 1882 break;
1882 1883
1883 default: 1884 default:
1884 SMSC_WARNING(PROBE, "LAN911x not identified, idrev: 0x%08X", 1885 SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X",
1885 pdata->idrev); 1886 pdata->idrev);
1886 return -ENODEV; 1887 return -ENODEV;
1887 } 1888 }
1888 1889
1889 SMSC_TRACE(PROBE, "LAN911x identified, idrev: 0x%08X, generation: %d", 1890 SMSC_TRACE(pdata, probe,
1890 pdata->idrev, pdata->generation); 1891 "LAN911x identified, idrev: 0x%08X, generation: %d",
1892 pdata->idrev, pdata->generation);
1891 1893
1892 if (pdata->generation == 0) 1894 if (pdata->generation == 0)
1893 SMSC_WARNING(PROBE, 1895 SMSC_WARN(pdata, probe,
1894 "This driver is not intended for this chip revision"); 1896 "This driver is not intended for this chip revision");
1895 1897
1896 /* workaround for platforms without an eeprom, where the mac address 1898 /* workaround for platforms without an eeprom, where the mac address
1897 * is stored elsewhere and set by the bootloader. This saves the 1899 * is stored elsewhere and set by the bootloader. This saves the
@@ -1931,7 +1933,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
1931 BUG_ON(!pdata->ioaddr); 1933 BUG_ON(!pdata->ioaddr);
1932 BUG_ON(!pdata->phy_dev); 1934 BUG_ON(!pdata->phy_dev);
1933 1935
1934 SMSC_TRACE(IFDOWN, "Stopping driver."); 1936 SMSC_TRACE(pdata, ifdown, "Stopping driver");
1935 1937
1936 phy_disconnect(pdata->phy_dev); 1938 phy_disconnect(pdata->phy_dev);
1937 pdata->phy_dev = NULL; 1939 pdata->phy_dev = NULL;
@@ -1965,11 +1967,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1965 int res_size, irq_flags; 1967 int res_size, irq_flags;
1966 int retval; 1968 int retval;
1967 1969
1968 pr_info("%s: Driver version %s.\n", SMSC_CHIPNAME, SMSC_DRV_VERSION); 1970 pr_info("Driver version %s\n", SMSC_DRV_VERSION);
1969 1971
1970 /* platform data specifies irq & dynamic bus configuration */ 1972 /* platform data specifies irq & dynamic bus configuration */
1971 if (!pdev->dev.platform_data) { 1973 if (!pdev->dev.platform_data) {
1972 pr_warning("%s: platform_data not provided\n", SMSC_CHIPNAME); 1974 pr_warn("platform_data not provided\n");
1973 retval = -ENODEV; 1975 retval = -ENODEV;
1974 goto out_0; 1976 goto out_0;
1975 } 1977 }
@@ -1979,8 +1981,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1979 if (!res) 1981 if (!res)
1980 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1982 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1981 if (!res) { 1983 if (!res) {
1982 pr_warning("%s: Could not allocate resource.\n", 1984 pr_warn("Could not allocate resource\n");
1983 SMSC_CHIPNAME);
1984 retval = -ENODEV; 1985 retval = -ENODEV;
1985 goto out_0; 1986 goto out_0;
1986 } 1987 }
@@ -1988,8 +1989,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1988 1989
1989 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1990 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1990 if (!irq_res) { 1991 if (!irq_res) {
1991 pr_warning("%s: Could not allocate irq resource.\n", 1992 pr_warn("Could not allocate irq resource\n");
1992 SMSC_CHIPNAME);
1993 retval = -ENODEV; 1993 retval = -ENODEV;
1994 goto out_0; 1994 goto out_0;
1995 } 1995 }
@@ -2001,7 +2001,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2001 2001
2002 dev = alloc_etherdev(sizeof(struct smsc911x_data)); 2002 dev = alloc_etherdev(sizeof(struct smsc911x_data));
2003 if (!dev) { 2003 if (!dev) {
2004 pr_warning("%s: Could not allocate device.\n", SMSC_CHIPNAME); 2004 pr_warn("Could not allocate device\n");
2005 retval = -ENOMEM; 2005 retval = -ENOMEM;
2006 goto out_release_io_1; 2006 goto out_release_io_1;
2007 } 2007 }
@@ -2021,8 +2021,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2021 pdata->msg_enable = ((1 << debug) - 1); 2021 pdata->msg_enable = ((1 << debug) - 1);
2022 2022
2023 if (pdata->ioaddr == NULL) { 2023 if (pdata->ioaddr == NULL) {
2024 SMSC_WARNING(PROBE, 2024 SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
2025 "Error smsc911x base address invalid");
2026 retval = -ENOMEM; 2025 retval = -ENOMEM;
2027 goto out_free_netdev_2; 2026 goto out_free_netdev_2;
2028 } 2027 }
@@ -2047,8 +2046,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2047 retval = request_irq(dev->irq, smsc911x_irqhandler, 2046 retval = request_irq(dev->irq, smsc911x_irqhandler,
2048 irq_flags | IRQF_SHARED, dev->name, dev); 2047 irq_flags | IRQF_SHARED, dev->name, dev);
2049 if (retval) { 2048 if (retval) {
2050 SMSC_WARNING(PROBE, 2049 SMSC_WARN(pdata, probe,
2051 "Unable to claim requested irq: %d", dev->irq); 2050 "Unable to claim requested irq: %d", dev->irq);
2052 goto out_unmap_io_3; 2051 goto out_unmap_io_3;
2053 } 2052 }
2054 2053
@@ -2056,17 +2055,16 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2056 2055
2057 retval = register_netdev(dev); 2056 retval = register_netdev(dev);
2058 if (retval) { 2057 if (retval) {
2059 SMSC_WARNING(PROBE, 2058 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
2060 "Error %i registering device", retval);
2061 goto out_unset_drvdata_4; 2059 goto out_unset_drvdata_4;
2062 } else { 2060 } else {
2063 SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name); 2061 SMSC_TRACE(pdata, probe,
2062 "Network interface: \"%s\"", dev->name);
2064 } 2063 }
2065 2064
2066 retval = smsc911x_mii_init(pdev, dev); 2065 retval = smsc911x_mii_init(pdev, dev);
2067 if (retval) { 2066 if (retval) {
2068 SMSC_WARNING(PROBE, 2067 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
2069 "Error %i initialising mii", retval);
2070 goto out_unregister_netdev_5; 2068 goto out_unregister_netdev_5;
2071 } 2069 }
2072 2070
@@ -2075,10 +2073,12 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2075 /* Check if mac address has been specified when bringing interface up */ 2073 /* Check if mac address has been specified when bringing interface up */
2076 if (is_valid_ether_addr(dev->dev_addr)) { 2074 if (is_valid_ether_addr(dev->dev_addr)) {
2077 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 2075 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
2078 SMSC_TRACE(PROBE, "MAC Address is specified by configuration"); 2076 SMSC_TRACE(pdata, probe,
2077 "MAC Address is specified by configuration");
2079 } else if (is_valid_ether_addr(pdata->config.mac)) { 2078 } else if (is_valid_ether_addr(pdata->config.mac)) {
2080 memcpy(dev->dev_addr, pdata->config.mac, 6); 2079 memcpy(dev->dev_addr, pdata->config.mac, 6);
2081 SMSC_TRACE(PROBE, "MAC Address specified by platform data"); 2080 SMSC_TRACE(pdata, probe,
2081 "MAC Address specified by platform data");
2082 } else { 2082 } else {
2083 /* Try reading mac address from device. if EEPROM is present 2083 /* Try reading mac address from device. if EEPROM is present
2084 * it will already have been set */ 2084 * it will already have been set */
@@ -2086,20 +2086,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2086 2086
2087 if (is_valid_ether_addr(dev->dev_addr)) { 2087 if (is_valid_ether_addr(dev->dev_addr)) {
2088 /* eeprom values are valid so use them */ 2088 /* eeprom values are valid so use them */
2089 SMSC_TRACE(PROBE, 2089 SMSC_TRACE(pdata, probe,
2090 "Mac Address is read from LAN911x EEPROM"); 2090 "Mac Address is read from LAN911x EEPROM");
2091 } else { 2091 } else {
2092 /* eeprom values are invalid, generate random MAC */ 2092 /* eeprom values are invalid, generate random MAC */
2093 random_ether_addr(dev->dev_addr); 2093 random_ether_addr(dev->dev_addr);
2094 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 2094 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
2095 SMSC_TRACE(PROBE, 2095 SMSC_TRACE(pdata, probe,
2096 "MAC Address is set to random_ether_addr"); 2096 "MAC Address is set to random_ether_addr");
2097 } 2097 }
2098 } 2098 }
2099 2099
2100 spin_unlock_irq(&pdata->mac_lock); 2100 spin_unlock_irq(&pdata->mac_lock);
2101 2101
2102 dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr); 2102 netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
2103 2103
2104 return 0; 2104 return 0;
2105 2105
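
Beyond the SMSC_WARN/SMSC_TRACE rework, the smsc911x changes above standardise on the generic logging helpers: pr_fmt() prefixes every pr_*() call in the file with the module name, and per-interface messages use netdev_err()/netdev_info(), which include the device identity automatically. A small illustration (the function is hypothetical):

/* Must appear before the first include that pulls in printk. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int foo_report(struct net_device *dev)
{
	pr_info("Driver version 1.0\n");	/* prefixed with the module name */

	if (!is_valid_ether_addr(dev->dev_addr)) {
		netdev_err(dev, "invalid MAC address\n");	/* prefixed with the device */
		return -EADDRNOTAVAIL;
	}

	netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
	return 0;
}
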
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 50f712e99e96..8d67aacf8867 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -33,25 +33,21 @@
33 * can be successfully looped back */ 33 * can be successfully looped back */
34#define USE_PHY_WORK_AROUND 34#define USE_PHY_WORK_AROUND
35 35
36#define DPRINTK(nlevel, klevel, fmt, args...) \
37 ((void)((NETIF_MSG_##nlevel & pdata->msg_enable) && \
38 printk(KERN_##klevel "%s: %s: " fmt "\n", \
39 pdata->dev->name, __func__, ## args)))
40
41#if USE_DEBUG >= 1 36#if USE_DEBUG >= 1
42#define SMSC_WARNING(nlevel, fmt, args...) \ 37#define SMSC_WARN(pdata, nlevel, fmt, args...) \
43 DPRINTK(nlevel, WARNING, fmt, ## args) 38 netif_warn(pdata, nlevel, (pdata)->dev, \
39 "%s: " fmt "\n", __func__, ##args)
44#else 40#else
45#define SMSC_WARNING(nlevel, fmt, args...) \ 41#define SMSC_WARN(pdata, nlevel, fmt, args...) \
46 ({ do {} while (0); 0; }) 42 no_printk(fmt "\n", ##args)
47#endif 43#endif
48 44
49#if USE_DEBUG >= 2 45#if USE_DEBUG >= 2
50#define SMSC_TRACE(nlevel, fmt, args...) \ 46#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
51 DPRINTK(nlevel, INFO, fmt, ## args) 47 netif_info(pdata, nlevel, pdata->dev, fmt "\n", ##args)
52#else 48#else
53#define SMSC_TRACE(nlevel, fmt, args...) \ 49#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
54 ({ do {} while (0); 0; }) 50 no_printk(fmt "\n", ##args)
55#endif 51#endif
56 52
57#ifdef CONFIG_DEBUG_SPINLOCK 53#ifdef CONFIG_DEBUG_SPINLOCK
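
The new macro definitions above delegate to netif_warn()/netif_info(), so a message is emitted only when its NETIF_MSG_* class bit is set in the private msg_enable word (smsc911x seeds it from the debug module parameter as (1 << debug) - 1). Roughly how the filtering behaves, assuming a private structure that carries dev and msg_enable:

#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *dev;
	u32 msg_enable;			/* NETIF_MSG_* bitmask */
};

static void foo_report_link(struct foo_priv *priv, bool up)
{
	/* Printed only if NETIF_MSG_LINK is set in priv->msg_enable. */
	netif_info(priv, link, priv->dev, "link %s\n", up ? "up" : "down");

	if (!up)
		/* Printed only if NETIF_MSG_IFDOWN is set. */
		netif_warn(priv, ifdown, priv->dev, "carrier lost\n");
}
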
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index fd719edc7f7c..156a805c6c23 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -197,13 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
197 } 197 }
198} 198}
199 199
200static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
201{
202 struct stmmac_priv *priv = netdev_priv(dev);
203
204 return priv->rx_coe;
205}
206
207static void 200static void
208stmmac_get_pauseparam(struct net_device *netdev, 201stmmac_get_pauseparam(struct net_device *netdev,
209 struct ethtool_pauseparam *pause) 202 struct ethtool_pauseparam *pause)
@@ -358,11 +351,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
358 .get_regs = stmmac_ethtool_gregs, 351 .get_regs = stmmac_ethtool_gregs,
359 .get_regs_len = stmmac_ethtool_get_regs_len, 352 .get_regs_len = stmmac_ethtool_get_regs_len,
360 .get_link = ethtool_op_get_link, 353 .get_link = ethtool_op_get_link,
361 .get_rx_csum = stmmac_ethtool_get_rx_csum,
362 .get_tx_csum = ethtool_op_get_tx_csum,
363 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
364 .get_sg = ethtool_op_get_sg,
365 .set_sg = ethtool_op_set_sg,
366 .get_pauseparam = stmmac_get_pauseparam, 354 .get_pauseparam = stmmac_get_pauseparam,
367 .set_pauseparam = stmmac_set_pauseparam, 355 .set_pauseparam = stmmac_set_pauseparam,
368 .get_ethtool_stats = stmmac_get_ethtool_stats, 356 .get_ethtool_stats = stmmac_get_ethtool_stats,
@@ -370,8 +358,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
370 .get_wol = stmmac_get_wol, 358 .get_wol = stmmac_get_wol,
371 .set_wol = stmmac_set_wol, 359 .set_wol = stmmac_set_wol,
372 .get_sset_count = stmmac_get_sset_count, 360 .get_sset_count = stmmac_get_sset_count,
373 .get_tso = ethtool_op_get_tso,
374 .set_tso = ethtool_op_set_tso,
375}; 361};
376 362
377void stmmac_set_ethtool_ops(struct net_device *netdev) 363void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index cc973fc38405..ba9daeccb8af 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -139,7 +139,6 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
139 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 139 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
140 140
141static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 141static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
142static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
143 142
144/** 143/**
145 * stmmac_verify_args - verify the driver parameters. 144 * stmmac_verify_args - verify the driver parameters.
@@ -831,6 +830,7 @@ static int stmmac_open(struct net_device *dev)
831 pr_info("stmmac: Rx Checksum Offload Engine supported\n"); 830 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
832 if (priv->plat->tx_coe) 831 if (priv->plat->tx_coe)
833 pr_info("\tTX Checksum insertion supported\n"); 832 pr_info("\tTX Checksum insertion supported\n");
833 netdev_update_features(dev);
834 834
835 /* Initialise the MMC (if present) to disable all interrupts. */ 835 /* Initialise the MMC (if present) to disable all interrupts. */
836 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 836 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
@@ -934,46 +934,6 @@ static int stmmac_release(struct net_device *dev)
934 return 0; 934 return 0;
935} 935}
936 936
937/*
938 * To perform emulated hardware segmentation on skb.
939 */
940static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
941{
942 struct sk_buff *segs, *curr_skb;
943 int gso_segs = skb_shinfo(skb)->gso_segs;
944
945 /* Estimate the number of fragments in the worst case */
946 if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
947 netif_stop_queue(priv->dev);
948 TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
949 __func__);
950 if (stmmac_tx_avail(priv) < gso_segs)
951 return NETDEV_TX_BUSY;
952
953 netif_wake_queue(priv->dev);
954 }
955 TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
956 skb, skb->len);
957
958 segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
959 if (IS_ERR(segs))
960 goto sw_tso_end;
961
962 do {
963 curr_skb = segs;
964 segs = segs->next;
965 TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
966 "*next %p\n", curr_skb->len, curr_skb, segs);
967 curr_skb->next = NULL;
968 stmmac_xmit(curr_skb, priv->dev);
969 } while (segs);
970
971sw_tso_end:
972 dev_kfree_skb(skb);
973
974 return NETDEV_TX_OK;
975}
976
977static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb, 937static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
978 struct net_device *dev, 938 struct net_device *dev,
979 int csum_insertion) 939 int csum_insertion)
@@ -1051,16 +1011,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1051 !skb_is_gso(skb) ? "isn't" : "is"); 1011 !skb_is_gso(skb) ? "isn't" : "is");
1052#endif 1012#endif
1053 1013
1054 if (unlikely(skb_is_gso(skb))) 1014 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1055 return stmmac_sw_tso(priv, skb);
1056
1057 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1058 if (unlikely((!priv->plat->tx_coe) ||
1059 (priv->no_csum_insertion)))
1060 skb_checksum_help(skb);
1061 else
1062 csum_insertion = 1;
1063 }
1064 1015
1065 desc = priv->dma_tx + entry; 1016 desc = priv->dma_tx + entry;
1066 first = desc; 1017 first = desc;
@@ -1380,18 +1331,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1380 return -EINVAL; 1331 return -EINVAL;
1381 } 1332 }
1382 1333
1334 dev->mtu = new_mtu;
1335 netdev_update_features(dev);
1336
1337 return 0;
1338}
1339
1340static u32 stmmac_fix_features(struct net_device *dev, u32 features)
1341{
1342 struct stmmac_priv *priv = netdev_priv(dev);
1343
1344 if (!priv->rx_coe)
1345 features &= ~NETIF_F_RXCSUM;
1346 if (!priv->plat->tx_coe)
1347 features &= ~NETIF_F_ALL_CSUM;
1348
1383 /* Some GMAC devices have a bugged Jumbo frame support that 1349 /* Some GMAC devices have a bugged Jumbo frame support that
1384 * needs to have the Tx COE disabled for oversized frames 1350 * needs to have the Tx COE disabled for oversized frames
1385 * (due to limited buffer sizes). In this case we disable 1351 * (due to limited buffer sizes). In this case we disable
1386 * the TX csum insertionin the TDES and not use SF. */ 1352 * the TX csum insertionin the TDES and not use SF. */
1387 if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN)) 1353 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
1388 priv->no_csum_insertion = 1; 1354 features &= ~NETIF_F_ALL_CSUM;
1389 else
1390 priv->no_csum_insertion = 0;
1391 1355
1392 dev->mtu = new_mtu; 1356 return features;
1393
1394 return 0;
1395} 1357}
1396 1358
1397static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 1359static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
@@ -1471,6 +1433,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
1471 .ndo_start_xmit = stmmac_xmit, 1433 .ndo_start_xmit = stmmac_xmit,
1472 .ndo_stop = stmmac_release, 1434 .ndo_stop = stmmac_release,
1473 .ndo_change_mtu = stmmac_change_mtu, 1435 .ndo_change_mtu = stmmac_change_mtu,
1436 .ndo_fix_features = stmmac_fix_features,
1474 .ndo_set_multicast_list = stmmac_multicast_list, 1437 .ndo_set_multicast_list = stmmac_multicast_list,
1475 .ndo_tx_timeout = stmmac_tx_timeout, 1438 .ndo_tx_timeout = stmmac_tx_timeout,
1476 .ndo_do_ioctl = stmmac_ioctl, 1439 .ndo_do_ioctl = stmmac_ioctl,
@@ -1501,8 +1464,8 @@ static int stmmac_probe(struct net_device *dev)
1501 dev->netdev_ops = &stmmac_netdev_ops; 1464 dev->netdev_ops = &stmmac_netdev_ops;
1502 stmmac_set_ethtool_ops(dev); 1465 stmmac_set_ethtool_ops(dev);
1503 1466
1504 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | 1467 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1505 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1468 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1506 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1469 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1507#ifdef STMMAC_VLAN_TAG_USED 1470#ifdef STMMAC_VLAN_TAG_USED
1508 /* Both mac100 and gmac support receive VLAN tag detection */ 1471 /* Both mac100 and gmac support receive VLAN tag detection */
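The stmmac changes above replace per-packet checksum decisions and the driver's software TSO fallback with the hw_features/ndo_fix_features mechanism: offloads are advertised once in hw_features, and the core calls back into the driver so that bits the hardware cannot honour (no Rx/Tx checksum engine, or the bugged-jumbo quirk at large MTUs) are masked off centrally. A minimal sketch of that callback, using an illustrative private structure rather than the real stmmac one:

	#include <linux/netdevice.h>

	struct bar_priv {
		bool rx_coe;	/* Rx checksum engine present */
		bool tx_coe;	/* Tx checksum insertion present */
	};

	static u32 bar_fix_features(struct net_device *dev, u32 features)
	{
		struct bar_priv *priv = netdev_priv(dev);

		/* Called by the core before a features change is committed;
		 * clear anything the hardware cannot do in its current state. */
		if (!priv->rx_coe)
			features &= ~NETIF_F_RXCSUM;
		if (!priv->tx_coe || dev->mtu > ETH_DATA_LEN)
			features &= ~NETIF_F_ALL_CSUM;

		return features;
	}

Anything that changes the inputs to this check then calls netdev_update_features() so the mask is recomputed, which is exactly what the reworked stmmac_change_mtu() and stmmac_open() above do.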
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index d3be735c4719..81b6eb8ed4d7 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -3146,7 +3146,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3146 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3146 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3147 3147
3148 /* GEM can do it all... */ 3148 /* GEM can do it all... */
3149 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3149 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3150 dev->features |= dev->hw_features | NETIF_F_RXCSUM | NETIF_F_LLTX;
3150 if (pci_using_dac) 3151 if (pci_using_dac)
3151 dev->features |= NETIF_F_HIGHDMA; 3152 dev->features |= NETIF_F_HIGHDMA;
3152 3153
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index eb4f59fb01e9..80e907df36b4 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2788,7 +2788,8 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2788 dev->ethtool_ops = &hme_ethtool_ops; 2788 dev->ethtool_ops = &hme_ethtool_ops;
2789 2789
2790 /* Happy Meal can do it all... */ 2790 /* Happy Meal can do it all... */
2791 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2791 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2792 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2792 2793
2793 dev->irq = op->archdata.irqs[0]; 2794 dev->irq = op->archdata.irqs[0];
2794 2795
@@ -3113,7 +3114,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3113 dev->dma = 0; 3114 dev->dma = 0;
3114 3115
3115 /* Happy Meal can do it all... */ 3116 /* Happy Meal can do it all... */
3116 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 3117 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3118 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
3117 3119
3118#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 3120#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3119 /* Hook up PCI register/descriptor accessors. */ 3121 /* Hook up PCI register/descriptor accessors. */
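In the sungem and sunhme hunks above, NETIF_F_RXCSUM is added to dev->features but deliberately left out of dev->hw_features, so receive checksumming is reported to ethtool as permanently enabled rather than user-toggleable; only scatter-gather and hardware checksum insertion remain switchable. Reduced to its essentials (pci_using_dac as in the sungem probe path):

	/* Toggleable offloads are listed in hw_features ... */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	/* ... fixed ones are added to features only. */
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;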
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index b8c5f35577e4..9d7defc2628d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -97,14 +97,12 @@
97 * them in the NIC onboard memory. 97 * them in the NIC onboard memory.
98 */ 98 */
99#define TG3_RX_STD_RING_SIZE(tp) \ 99#define TG3_RX_STD_RING_SIZE(tp) \
100 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \ 100 ((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \ 101 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
102 RX_STD_MAX_SIZE_5717 : 512)
103#define TG3_DEF_RX_RING_PENDING 200 102#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JMB_RING_SIZE(tp) \ 103#define TG3_RX_JMB_RING_SIZE(tp) \
105 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \ 104 ((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \ 105 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
107 1024 : 256)
108#define TG3_DEF_RX_JUMBO_RING_PENDING 100 106#define TG3_DEF_RX_JUMBO_RING_PENDING 100
109#define TG3_RSS_INDIR_TBL_SIZE 128 107#define TG3_RSS_INDIR_TBL_SIZE 128
110 108
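The ring-size macros above are the first instance of a pattern this patch applies throughout tg3.c: repeated GET_ASIC_REV() comparisons are folded into capability bits (TG3_FLG3_5717_PLUS, TG3_FLG3_57765_PLUS, TG3_FLG3_LRG_PROD_RING_CAP) that tg3_get_invariants() derives once at probe time, so supporting the new 5720 ASIC means extending one classification rather than dozens of scattered checks. Condensed from the later hunks:

	/* In tg3_get_invariants(): classify the ASIC once ... */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
		tp->tg3_flags3 |= TG3_FLG3_LRG_PROD_RING_CAP;

	/* ... then test the derived capability wherever it matters. */
	if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
		val |= RCVDBDI_MODE_LRG_RING_SZ;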
@@ -266,6 +264,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, 264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, 265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, 266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
269 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 268 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
270 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 269 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
271 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 270 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1044,8 +1043,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1044 u32 reg; 1043 u32 reg;
1045 struct phy_device *phydev; 1044 struct phy_device *phydev;
1046 1045
1047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 1046 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
1048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
1049 u32 is_serdes; 1047 u32 is_serdes;
1050 1048
1051 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1; 1049 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
@@ -1623,8 +1621,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1623 u32 reg; 1621 u32 reg;
1624 1622
1625 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 1623 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1626 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 1624 ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
1627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1628 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 1625 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1629 return; 1626 return;
1630 1627
@@ -2047,8 +2044,7 @@ static int tg3_phy_reset(struct tg3 *tp)
2047 } 2044 }
2048 } 2045 }
2049 2046
2050 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 2047 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
2051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
2052 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2048 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2053 return 0; 2049 return 0;
2054 2050
@@ -2130,7 +2126,8 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2130 2126
2131 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2127 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 2128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) && 2129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2134 tp->pdev_peer != tp->pdev) { 2131 tp->pdev_peer != tp->pdev) {
2135 struct net_device *dev_peer; 2132 struct net_device *dev_peer;
2136 2133
@@ -4394,6 +4391,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4394 4391
4395static int tg3_setup_phy(struct tg3 *tp, int force_reset) 4392static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4396{ 4393{
4394 u32 val;
4397 int err; 4395 int err;
4398 4396
4399 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 4397 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
@@ -4404,7 +4402,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4404 err = tg3_setup_copper_phy(tp, force_reset); 4402 err = tg3_setup_copper_phy(tp, force_reset);
4405 4403
4406 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { 4404 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4407 u32 val, scale; 4405 u32 scale;
4408 4406
4409 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 4407 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4410 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 4408 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
@@ -4419,17 +4417,20 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4419 tw32(GRC_MISC_CFG, val); 4417 tw32(GRC_MISC_CFG, val);
4420 } 4418 }
4421 4419
4420 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4421 (6 << TX_LENGTHS_IPG_SHIFT);
4422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4423 val |= tr32(MAC_TX_LENGTHS) &
4424 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4425 TX_LENGTHS_CNT_DWN_VAL_MSK);
4426
4422 if (tp->link_config.active_speed == SPEED_1000 && 4427 if (tp->link_config.active_speed == SPEED_1000 &&
4423 tp->link_config.active_duplex == DUPLEX_HALF) 4428 tp->link_config.active_duplex == DUPLEX_HALF)
4424 tw32(MAC_TX_LENGTHS, 4429 tw32(MAC_TX_LENGTHS, val |
4425 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 4430 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4426 (6 << TX_LENGTHS_IPG_SHIFT) |
4427 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4428 else 4431 else
4429 tw32(MAC_TX_LENGTHS, 4432 tw32(MAC_TX_LENGTHS, val |
4430 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 4433 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4431 (6 << TX_LENGTHS_IPG_SHIFT) |
4432 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4433 4434
4434 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 4435 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4435 if (netif_carrier_ok(tp->dev)) { 4436 if (netif_carrier_ok(tp->dev)) {
@@ -4441,7 +4442,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4441 } 4442 }
4442 4443
4443 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { 4444 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4444 u32 val = tr32(PCIE_PWR_MGMT_THRESH); 4445 val = tr32(PCIE_PWR_MGMT_THRESH);
4445 if (!netif_carrier_ok(tp->dev)) 4446 if (!netif_carrier_ok(tp->dev))
4446 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 4447 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4447 tp->pwrmgmt_thresh; 4448 tp->pwrmgmt_thresh;
@@ -4815,7 +4816,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4815 skb = copy_skb; 4816 skb = copy_skb;
4816 } 4817 }
4817 4818
4818 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && 4819 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4819 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 4820 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4820 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 4821 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4821 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 4822 >> RXD_TCPCSUM_SHIFT) == 0xffff))
@@ -6126,6 +6127,16 @@ dma_error:
6126 return NETDEV_TX_OK; 6127 return NETDEV_TX_OK;
6127} 6128}
6128 6129
6130static u32 tg3_fix_features(struct net_device *dev, u32 features)
6131{
6132 struct tg3 *tp = netdev_priv(dev);
6133
6134 if (dev->mtu > ETH_DATA_LEN && (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6135 features &= ~NETIF_F_ALL_TSO;
6136
6137 return features;
6138}
6139
6129static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 6140static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6130 int new_mtu) 6141 int new_mtu)
6131{ 6142{
@@ -6133,14 +6144,16 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6133 6144
6134 if (new_mtu > ETH_DATA_LEN) { 6145 if (new_mtu > ETH_DATA_LEN) {
6135 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 6146 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6147 netdev_update_features(dev);
6136 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 6148 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6137 ethtool_op_set_tso(dev, 0);
6138 } else { 6149 } else {
6139 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 6150 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6140 } 6151 }
6141 } else { 6152 } else {
6142 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 6153 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6143 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 6154 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6155 netdev_update_features(dev);
6156 }
6144 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; 6157 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6145 } 6158 }
6146} 6159}
@@ -7097,7 +7110,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7097 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 7110 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7098 /* Force PCIe 1.0a mode */ 7111 /* Force PCIe 1.0a mode */
7099 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 7112 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7100 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 7113 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
7101 tr32(TG3_PCIE_PHY_TSTCTL) == 7114 tr32(TG3_PCIE_PHY_TSTCTL) ==
7102 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 7115 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7103 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 7116 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
@@ -7250,12 +7263,17 @@ static int tg3_chip_reset(struct tg3 *tp)
7250 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 7263 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7251 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 7264 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7252 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 7265 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7253 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 7266 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
7254 val = tr32(0x7c00); 7267 val = tr32(0x7c00);
7255 7268
7256 tw32(0x7c00, val | (1 << 25)); 7269 tw32(0x7c00, val | (1 << 25));
7257 } 7270 }
7258 7271
7272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7273 val = tr32(TG3_CPMU_CLCK_ORIDE);
7274 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7275 }
7276
7259 /* Reprobe ASF enable state. */ 7277 /* Reprobe ASF enable state. */
7260 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; 7278 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7261 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; 7279 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
@@ -7673,8 +7691,7 @@ static void tg3_rings_reset(struct tg3 *tp)
7673 /* Disable all transmit rings but the first. */ 7691 /* Disable all transmit rings but the first. */
7674 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7692 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7675 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 7693 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7676 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7694 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7678 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 7695 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7679 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7696 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7680 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 7697 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
@@ -7688,8 +7705,7 @@ static void tg3_rings_reset(struct tg3 *tp)
7688 7705
7689 7706
7690 /* Disable all receive return rings but the first. */ 7707 /* Disable all receive return rings but the first. */
7691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7708 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7692 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7693 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 7709 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7694 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7710 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7695 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 7711 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
@@ -7960,7 +7976,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7960 if (err) 7976 if (err)
7961 return err; 7977 return err;
7962 7978
7963 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 7979 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
7964 val = tr32(TG3PCI_DMA_RW_CTRL) & 7980 val = tr32(TG3PCI_DMA_RW_CTRL) &
7965 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 7981 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7966 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) 7982 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
@@ -8091,8 +8107,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8091 ((u64) tpr->rx_std_mapping >> 32)); 8107 ((u64) tpr->rx_std_mapping >> 32));
8092 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 8108 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8093 ((u64) tpr->rx_std_mapping & 0xffffffff)); 8109 ((u64) tpr->rx_std_mapping & 0xffffffff));
8094 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 8110 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
8095 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
8096 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 8111 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8097 NIC_SRAM_RX_BUFFER_DESC); 8112 NIC_SRAM_RX_BUFFER_DESC);
8098 8113
@@ -8115,9 +8130,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8115 ((u64) tpr->rx_jmb_mapping >> 32)); 8130 ((u64) tpr->rx_jmb_mapping >> 32));
8116 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 8131 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8117 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 8132 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8133 val = TG3_RX_JMB_RING_SIZE(tp) <<
8134 BDINFO_FLAGS_MAXLEN_SHIFT;
8118 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 8135 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8119 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 8136 val | BDINFO_FLAGS_USE_EXT_RECV);
8120 BDINFO_FLAGS_USE_EXT_RECV);
8121 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) || 8137 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 8138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8123 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 8139 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
@@ -8127,17 +8143,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8127 BDINFO_FLAGS_DISABLED); 8143 BDINFO_FLAGS_DISABLED);
8128 } 8144 }
8129 8145
8130 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 8146 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 8147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8132 val = RX_STD_MAX_SIZE_5705; 8148 val = TG3_RX_STD_MAX_SIZE_5700;
8133 else 8149 else
8134 val = RX_STD_MAX_SIZE_5717; 8150 val = TG3_RX_STD_MAX_SIZE_5717;
8135 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 8151 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8136 val |= (TG3_RX_STD_DMA_SZ << 2); 8152 val |= (TG3_RX_STD_DMA_SZ << 2);
8137 } else 8153 } else
8138 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 8154 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8139 } else 8155 } else
8140 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; 8156 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8141 8157
8142 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 8158 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8143 8159
@@ -8148,7 +8164,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8148 tp->rx_jumbo_pending : 0; 8164 tp->rx_jumbo_pending : 0;
8149 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 8165 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8150 8166
8151 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 8167 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8152 tw32(STD_REPLENISH_LWM, 32); 8168 tw32(STD_REPLENISH_LWM, 32);
8153 tw32(JMB_REPLENISH_LWM, 16); 8169 tw32(JMB_REPLENISH_LWM, 16);
8154 } 8170 }
@@ -8165,10 +8181,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8165 /* The slot time is changed by tg3_setup_phy if we 8181 /* The slot time is changed by tg3_setup_phy if we
8166 * run at gigabit with half duplex. 8182 * run at gigabit with half duplex.
8167 */ 8183 */
8168 tw32(MAC_TX_LENGTHS, 8184 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8169 (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 8185 (6 << TX_LENGTHS_IPG_SHIFT) |
8170 (6 << TX_LENGTHS_IPG_SHIFT) | 8186 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8171 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 8187
8188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8189 val |= tr32(MAC_TX_LENGTHS) &
8190 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8191 TX_LENGTHS_CNT_DWN_VAL_MSK);
8192
8193 tw32(MAC_TX_LENGTHS, val);
8172 8194
8173 /* Receive rules. */ 8195 /* Receive rules. */
8174 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 8196 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
@@ -8215,13 +8237,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 8237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8216 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 8238 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8217 8239
8240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8241 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8242
8218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 8243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 8244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 8245 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 8246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8222 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8247 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
8223 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8248 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8249 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8225 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 8251 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8226 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 8252 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8227 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 8253 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -8233,7 +8259,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8233 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8259 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8234 } 8260 }
8235 8261
8236 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8263 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8237 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 8264 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8238 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | 8265 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8239 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 8266 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
@@ -8421,8 +8448,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8421 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 8448 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8422 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 8449 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8423 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 8450 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8451 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
8425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8426 val |= RCVDBDI_MODE_LRG_RING_SZ; 8452 val |= RCVDBDI_MODE_LRG_RING_SZ;
8427 tw32(RCVDBDI_MODE, val); 8453 tw32(RCVDBDI_MODE, val);
8428 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 8454 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
@@ -8447,9 +8473,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8447 } 8473 }
8448 8474
8449 tp->tx_mode = TX_MODE_ENABLE; 8475 tp->tx_mode = TX_MODE_ENABLE;
8476
8450 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 8477 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 8478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8452 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 8479 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8480
8481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8482 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8483 tp->tx_mode &= ~val;
8484 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8485 }
8486
8453 tw32_f(MAC_TX_MODE, tp->tx_mode); 8487 tw32_f(MAC_TX_MODE, tp->tx_mode);
8454 udelay(100); 8488 udelay(100);
8455 8489
@@ -8839,12 +8873,12 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
8839 fn = tg3_msi; 8873 fn = tg3_msi;
8840 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) 8874 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8841 fn = tg3_msi_1shot; 8875 fn = tg3_msi_1shot;
8842 flags = IRQF_SAMPLE_RANDOM; 8876 flags = 0;
8843 } else { 8877 } else {
8844 fn = tg3_interrupt; 8878 fn = tg3_interrupt;
8845 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 8879 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8846 fn = tg3_interrupt_tagged; 8880 fn = tg3_interrupt_tagged;
8847 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; 8881 flags = IRQF_SHARED;
8848 } 8882 }
8849 8883
8850 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 8884 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
@@ -8868,7 +8902,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8868 * Turn off MSI one shot mode. Otherwise this test has no 8902 * Turn off MSI one shot mode. Otherwise this test has no
8869 * observable way to know whether the interrupt was delivered. 8903 * observable way to know whether the interrupt was delivered.
8870 */ 8904 */
8871 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 8905 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
8872 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8906 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8873 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 8907 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8874 tw32(MSGINT_MODE, val); 8908 tw32(MSGINT_MODE, val);
@@ -8911,7 +8945,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8911 8945
8912 if (intr_ok) { 8946 if (intr_ok) {
8913 /* Reenable MSI one shot mode. */ 8947 /* Reenable MSI one shot mode. */
8914 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 8948 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
8915 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8949 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8916 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 8950 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8917 tw32(MSGINT_MODE, val); 8951 tw32(MSGINT_MODE, val);
@@ -9058,7 +9092,9 @@ static bool tg3_enable_msix(struct tg3 *tp)
9058 9092
9059 if (tp->irq_cnt > 1) { 9093 if (tp->irq_cnt > 1) {
9060 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 9094 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 9095
9096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9062 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; 9098 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9063 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); 9099 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9064 } 9100 }
@@ -9214,7 +9250,7 @@ static int tg3_open(struct net_device *dev)
9214 goto err_out2; 9250 goto err_out2;
9215 } 9251 }
9216 9252
9217 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 9253 if (!(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9218 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 9254 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9219 u32 val = tr32(PCIE_TRANSACTION_CFG); 9255 u32 val = tr32(PCIE_TRANSACTION_CFG);
9220 9256
@@ -9997,33 +10033,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
9997 tp->msg_enable = value; 10033 tp->msg_enable = value;
9998} 10034}
9999 10035
10000static int tg3_set_tso(struct net_device *dev, u32 value)
10001{
10002 struct tg3 *tp = netdev_priv(dev);
10003
10004 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
10005 if (value)
10006 return -EINVAL;
10007 return 0;
10008 }
10009 if ((dev->features & NETIF_F_IPV6_CSUM) &&
10010 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
10011 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
10012 if (value) {
10013 dev->features |= NETIF_F_TSO6;
10014 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
10015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10016 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
10017 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
10018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
10019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10020 dev->features |= NETIF_F_TSO_ECN;
10021 } else
10022 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
10023 }
10024 return ethtool_op_set_tso(dev, value);
10025}
10026
10027static int tg3_nway_reset(struct net_device *dev) 10036static int tg3_nway_reset(struct net_device *dev)
10028{ 10037{
10029 struct tg3 *tp = netdev_priv(dev); 10038 struct tg3 *tp = netdev_priv(dev);
@@ -10246,50 +10255,6 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
10246 return err; 10255 return err;
10247} 10256}
10248 10257
10249static u32 tg3_get_rx_csum(struct net_device *dev)
10250{
10251 struct tg3 *tp = netdev_priv(dev);
10252 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10253}
10254
10255static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10256{
10257 struct tg3 *tp = netdev_priv(dev);
10258
10259 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10260 if (data != 0)
10261 return -EINVAL;
10262 return 0;
10263 }
10264
10265 spin_lock_bh(&tp->lock);
10266 if (data)
10267 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10268 else
10269 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10270 spin_unlock_bh(&tp->lock);
10271
10272 return 0;
10273}
10274
10275static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10276{
10277 struct tg3 *tp = netdev_priv(dev);
10278
10279 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10280 if (data != 0)
10281 return -EINVAL;
10282 return 0;
10283 }
10284
10285 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10286 ethtool_op_set_tx_ipv6_csum(dev, data);
10287 else
10288 ethtool_op_set_tx_csum(dev, data);
10289
10290 return 0;
10291}
10292
10293static int tg3_get_sset_count(struct net_device *dev, int sset) 10258static int tg3_get_sset_count(struct net_device *dev, int sset)
10294{ 10259{
10295 switch (sset) { 10260 switch (sset) {
@@ -10317,35 +10282,38 @@ static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10317 } 10282 }
10318} 10283}
10319 10284
10320static int tg3_phys_id(struct net_device *dev, u32 data) 10285static int tg3_set_phys_id(struct net_device *dev,
10286 enum ethtool_phys_id_state state)
10321{ 10287{
10322 struct tg3 *tp = netdev_priv(dev); 10288 struct tg3 *tp = netdev_priv(dev);
10323 int i;
10324 10289
10325 if (!netif_running(tp->dev)) 10290 if (!netif_running(tp->dev))
10326 return -EAGAIN; 10291 return -EAGAIN;
10327 10292
10328 if (data == 0) 10293 switch (state) {
10329 data = UINT_MAX / 2; 10294 case ETHTOOL_ID_ACTIVE:
10295 return -EINVAL;
10330 10296
10331 for (i = 0; i < (data * 2); i++) { 10297 case ETHTOOL_ID_ON:
10332 if ((i % 2) == 0) 10298 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10333 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 10299 LED_CTRL_1000MBPS_ON |
10334 LED_CTRL_1000MBPS_ON | 10300 LED_CTRL_100MBPS_ON |
10335 LED_CTRL_100MBPS_ON | 10301 LED_CTRL_10MBPS_ON |
10336 LED_CTRL_10MBPS_ON | 10302 LED_CTRL_TRAFFIC_OVERRIDE |
10337 LED_CTRL_TRAFFIC_OVERRIDE | 10303 LED_CTRL_TRAFFIC_BLINK |
10338 LED_CTRL_TRAFFIC_BLINK | 10304 LED_CTRL_TRAFFIC_LED);
10339 LED_CTRL_TRAFFIC_LED); 10305 break;
10340 10306
10341 else 10307 case ETHTOOL_ID_OFF:
10342 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 10308 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10343 LED_CTRL_TRAFFIC_OVERRIDE); 10309 LED_CTRL_TRAFFIC_OVERRIDE);
10310 break;
10344 10311
10345 if (msleep_interruptible(500)) 10312 case ETHTOOL_ID_INACTIVE:
10346 break; 10313 tw32(MAC_LED_CTRL, tp->led_ctrl);
10314 break;
10347 } 10315 }
10348 tw32(MAC_LED_CTRL, tp->led_ctrl); 10316
10349 return 0; 10317 return 0;
10350} 10318}
10351 10319
@@ -10850,8 +10818,7 @@ static int tg3_test_memory(struct tg3 *tp)
10850 int err = 0; 10818 int err = 0;
10851 int i; 10819 int i;
10852 10820
10853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 10821 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
10854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10855 mem_tbl = mem_tbl_5717; 10822 mem_tbl = mem_tbl_5717;
10856 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 10823 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10857 mem_tbl = mem_tbl_57765; 10824 mem_tbl = mem_tbl_57765;
@@ -11364,14 +11331,9 @@ static const struct ethtool_ops tg3_ethtool_ops = {
11364 .set_ringparam = tg3_set_ringparam, 11331 .set_ringparam = tg3_set_ringparam,
11365 .get_pauseparam = tg3_get_pauseparam, 11332 .get_pauseparam = tg3_get_pauseparam,
11366 .set_pauseparam = tg3_set_pauseparam, 11333 .set_pauseparam = tg3_set_pauseparam,
11367 .get_rx_csum = tg3_get_rx_csum,
11368 .set_rx_csum = tg3_set_rx_csum,
11369 .set_tx_csum = tg3_set_tx_csum,
11370 .set_sg = ethtool_op_set_sg,
11371 .set_tso = tg3_set_tso,
11372 .self_test = tg3_self_test, 11334 .self_test = tg3_self_test,
11373 .get_strings = tg3_get_strings, 11335 .get_strings = tg3_get_strings,
11374 .phys_id = tg3_phys_id, 11336 .set_phys_id = tg3_set_phys_id,
11375 .get_ethtool_stats = tg3_get_ethtool_stats, 11337 .get_ethtool_stats = tg3_get_ethtool_stats,
11376 .get_coalesce = tg3_get_coalesce, 11338 .get_coalesce = tg3_get_coalesce,
11377 .set_coalesce = tg3_set_coalesce, 11339 .set_coalesce = tg3_set_coalesce,
@@ -11843,6 +11805,8 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11843 11805
11844 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11806 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11845 case FLASH_5717VENDOR_ATMEL_MDB021D: 11807 case FLASH_5717VENDOR_ATMEL_MDB021D:
11808 /* Detect size with tg3_nvram_get_size() */
11809 break;
11846 case FLASH_5717VENDOR_ATMEL_ADB021B: 11810 case FLASH_5717VENDOR_ATMEL_ADB021B:
11847 case FLASH_5717VENDOR_ATMEL_ADB021D: 11811 case FLASH_5717VENDOR_ATMEL_ADB021D:
11848 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 11812 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
@@ -11868,8 +11832,10 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11868 11832
11869 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11833 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11870 case FLASH_5717VENDOR_ST_M_M25PE20: 11834 case FLASH_5717VENDOR_ST_M_M25PE20:
11871 case FLASH_5717VENDOR_ST_A_M25PE20:
11872 case FLASH_5717VENDOR_ST_M_M45PE20: 11835 case FLASH_5717VENDOR_ST_M_M45PE20:
11836 /* Detect size with tg3_nvram_get_size() */
11837 break;
11838 case FLASH_5717VENDOR_ST_A_M25PE20:
11873 case FLASH_5717VENDOR_ST_A_M45PE20: 11839 case FLASH_5717VENDOR_ST_A_M45PE20:
11874 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 11840 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11875 break; 11841 break;
@@ -11888,6 +11854,118 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11888 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 11854 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11889} 11855}
11890 11856
11857static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
11858{
11859 u32 nvcfg1, nvmpinstrp;
11860
11861 nvcfg1 = tr32(NVRAM_CFG1);
11862 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
11863
11864 switch (nvmpinstrp) {
11865 case FLASH_5720_EEPROM_HD:
11866 case FLASH_5720_EEPROM_LD:
11867 tp->nvram_jedecnum = JEDEC_ATMEL;
11868 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11869
11870 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11871 tw32(NVRAM_CFG1, nvcfg1);
11872 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
11873 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11874 else
11875 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
11876 return;
11877 case FLASH_5720VENDOR_M_ATMEL_DB011D:
11878 case FLASH_5720VENDOR_A_ATMEL_DB011B:
11879 case FLASH_5720VENDOR_A_ATMEL_DB011D:
11880 case FLASH_5720VENDOR_M_ATMEL_DB021D:
11881 case FLASH_5720VENDOR_A_ATMEL_DB021B:
11882 case FLASH_5720VENDOR_A_ATMEL_DB021D:
11883 case FLASH_5720VENDOR_M_ATMEL_DB041D:
11884 case FLASH_5720VENDOR_A_ATMEL_DB041B:
11885 case FLASH_5720VENDOR_A_ATMEL_DB041D:
11886 case FLASH_5720VENDOR_M_ATMEL_DB081D:
11887 case FLASH_5720VENDOR_A_ATMEL_DB081D:
11888 case FLASH_5720VENDOR_ATMEL_45USPT:
11889 tp->nvram_jedecnum = JEDEC_ATMEL;
11890 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11891 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11892
11893 switch (nvmpinstrp) {
11894 case FLASH_5720VENDOR_M_ATMEL_DB021D:
11895 case FLASH_5720VENDOR_A_ATMEL_DB021B:
11896 case FLASH_5720VENDOR_A_ATMEL_DB021D:
11897 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11898 break;
11899 case FLASH_5720VENDOR_M_ATMEL_DB041D:
11900 case FLASH_5720VENDOR_A_ATMEL_DB041B:
11901 case FLASH_5720VENDOR_A_ATMEL_DB041D:
11902 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11903 break;
11904 case FLASH_5720VENDOR_M_ATMEL_DB081D:
11905 case FLASH_5720VENDOR_A_ATMEL_DB081D:
11906 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11907 break;
11908 default:
11909 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11910 break;
11911 }
11912 break;
11913 case FLASH_5720VENDOR_M_ST_M25PE10:
11914 case FLASH_5720VENDOR_M_ST_M45PE10:
11915 case FLASH_5720VENDOR_A_ST_M25PE10:
11916 case FLASH_5720VENDOR_A_ST_M45PE10:
11917 case FLASH_5720VENDOR_M_ST_M25PE20:
11918 case FLASH_5720VENDOR_M_ST_M45PE20:
11919 case FLASH_5720VENDOR_A_ST_M25PE20:
11920 case FLASH_5720VENDOR_A_ST_M45PE20:
11921 case FLASH_5720VENDOR_M_ST_M25PE40:
11922 case FLASH_5720VENDOR_M_ST_M45PE40:
11923 case FLASH_5720VENDOR_A_ST_M25PE40:
11924 case FLASH_5720VENDOR_A_ST_M45PE40:
11925 case FLASH_5720VENDOR_M_ST_M25PE80:
11926 case FLASH_5720VENDOR_M_ST_M45PE80:
11927 case FLASH_5720VENDOR_A_ST_M25PE80:
11928 case FLASH_5720VENDOR_A_ST_M45PE80:
11929 case FLASH_5720VENDOR_ST_25USPT:
11930 case FLASH_5720VENDOR_ST_45USPT:
11931 tp->nvram_jedecnum = JEDEC_ST;
11932 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11933 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11934
11935 switch (nvmpinstrp) {
11936 case FLASH_5720VENDOR_M_ST_M25PE20:
11937 case FLASH_5720VENDOR_M_ST_M45PE20:
11938 case FLASH_5720VENDOR_A_ST_M25PE20:
11939 case FLASH_5720VENDOR_A_ST_M45PE20:
11940 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11941 break;
11942 case FLASH_5720VENDOR_M_ST_M25PE40:
11943 case FLASH_5720VENDOR_M_ST_M45PE40:
11944 case FLASH_5720VENDOR_A_ST_M25PE40:
11945 case FLASH_5720VENDOR_A_ST_M45PE40:
11946 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11947 break;
11948 case FLASH_5720VENDOR_M_ST_M25PE80:
11949 case FLASH_5720VENDOR_M_ST_M45PE80:
11950 case FLASH_5720VENDOR_A_ST_M25PE80:
11951 case FLASH_5720VENDOR_A_ST_M45PE80:
11952 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11953 break;
11954 default:
11955 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11956 break;
11957 }
11958 break;
11959 default:
11960 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11961 return;
11962 }
11963
11964 tg3_nvram_get_pagesize(tp, nvcfg1);
11965 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11966 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11967}
11968
11891/* Chips other than 5700/5701 use the NVRAM for fetching info. */ 11969/* Chips other than 5700/5701 use the NVRAM for fetching info. */
11892static void __devinit tg3_nvram_init(struct tg3 *tp) 11970static void __devinit tg3_nvram_init(struct tg3 *tp)
11893{ 11971{
@@ -11935,6 +12013,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11935 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 12013 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 12014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11937 tg3_get_5717_nvram_info(tp); 12015 tg3_get_5717_nvram_info(tp);
12016 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12017 tg3_get_5720_nvram_info(tp);
11938 else 12018 else
11939 tg3_get_nvram_info(tp); 12019 tg3_get_nvram_info(tp);
11940 12020
@@ -12472,7 +12552,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12472 if (cfg2 & (1 << 18)) 12552 if (cfg2 & (1 << 18))
12473 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 12553 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12474 12554
12475 if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) || 12555 if (((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) ||
12476 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12556 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12477 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) && 12557 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12478 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12558 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
@@ -12480,7 +12560,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12480 12560
12481 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 12561 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12482 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 12562 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12483 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 12563 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
12484 u32 cfg3; 12564 u32 cfg3;
12485 12565
12486 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 12566 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
@@ -13118,21 +13198,15 @@ done:
13118 13198
13119static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 13199static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13120 13200
13121static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
13122{
13123 dev->vlan_features |= flags;
13124}
13125
13126static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 13201static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13127{ 13202{
13128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13203 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
13129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 13204 return TG3_RX_RET_MAX_SIZE_5717;
13130 return 4096;
13131 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 13205 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
13132 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 13206 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13133 return 1024; 13207 return TG3_RX_RET_MAX_SIZE_5700;
13134 else 13208 else
13135 return 512; 13209 return TG3_RX_RET_MAX_SIZE_5705;
13136} 13210}
13137 13211
13138static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { 13212static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
@@ -13177,7 +13251,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13177 13251
13178 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 13252 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13179 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 13253 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13180 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719) 13254 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13255 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13181 pci_read_config_dword(tp->pdev, 13256 pci_read_config_dword(tp->pdev,
13182 TG3PCI_GEN2_PRODID_ASICREV, 13257 TG3PCI_GEN2_PRODID_ASICREV,
13183 &prod_id_asic_rev); 13258 &prod_id_asic_rev);
@@ -13332,14 +13407,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13332 13407
13333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 13408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 13409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 13410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13336 tp->pdev_peer = tg3_find_peer(tp); 13412 tp->pdev_peer = tg3_find_peer(tp);
13337 13413
13338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 13415 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13341 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS; 13417 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13342 13418
13419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13420 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13421 tp->tg3_flags3 |= TG3_FLG3_57765_PLUS;
13422
13343 /* Intentionally exclude ASIC_REV_5906 */ 13423 /* Intentionally exclude ASIC_REV_5906 */
13344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
@@ -13347,7 +13427,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13428 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13350 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) 13430 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13351 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; 13431 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13352 13432
13353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 13433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
@@ -13364,22 +13444,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13364 /* 5700 B0 chips do not support checksumming correctly due 13444 /* 5700 B0 chips do not support checksumming correctly due
13365 * to hardware bugs. 13445 * to hardware bugs.
13366 */ 13446 */
13367 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) 13447 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13368 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; 13448 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13369 else {
13370 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13371 13449
13372 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13373 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 13450 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13374 features |= NETIF_F_IPV6_CSUM; 13451 features |= NETIF_F_IPV6_CSUM;
13375 tp->dev->features |= features; 13452 tp->dev->features |= features;
13376 vlan_features_add(tp->dev, features); 13453 tp->dev->hw_features |= features;
13454 tp->dev->vlan_features |= features;
13377 } 13455 }
13378 13456
13379 /* Determine TSO capabilities */ 13457 /* Determine TSO capabilities */
13380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 13458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13381 ; /* Do nothing. HW bug. */ 13459 ; /* Do nothing. HW bug. */
13382 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13460 else if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
13383 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13461 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13384 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13462 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13415,7 +13493,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13415 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 13493 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13416 } 13494 }
13417 13495
13418 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 13496 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
13419 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 13497 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13420 tp->irq_max = TG3_IRQ_MAX_VECS; 13498 tp->irq_max = TG3_IRQ_MAX_VECS;
13421 } 13499 }
@@ -13430,7 +13508,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13430 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13508 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13431 } 13509 }
13432 13510
13433 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 13511 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13512 tp->tg3_flags3 |= TG3_FLG3_LRG_PROD_RING_CAP;
13513
13514 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
13434 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) 13515 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13435 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13516 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13436 13517
@@ -13449,7 +13530,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13449 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13530 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13450 13531
13451 tp->pcie_readrq = 4096; 13532 tp->pcie_readrq = 4096;
13452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 13533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13453 tp->pcie_readrq = 2048; 13535 tp->pcie_readrq = 2048;
13454 13536
13455 pcie_set_readrq(tp->pdev, tp->pcie_readrq); 13537 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
@@ -13636,7 +13718,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13718 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13719 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13639 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) 13721 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13640 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 13722 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13641 13723
13642 /* Set up tp->grc_local_ctrl before calling tg_power_up(). 13724 /* Set up tp->grc_local_ctrl before calling tg_power_up().
@@ -13715,7 +13797,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13715 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 13797 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13716 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 13798 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13717 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && 13799 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13718 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 13800 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
13719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13801 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13721 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13803 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -13756,7 +13838,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13756 13838
13757 /* Initialize data/descriptor byte/word swapping. */ 13839 /* Initialize data/descriptor byte/word swapping. */
13758 val = tr32(GRC_MODE); 13840 val = tr32(GRC_MODE);
13759 val &= GRC_MODE_HOST_STACKUP; 13841 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13842 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
13843 GRC_MODE_WORD_SWAP_B2HRX_DATA |
13844 GRC_MODE_B2HRX_ENABLE |
13845 GRC_MODE_HTX2B_ENABLE |
13846 GRC_MODE_HOST_STACKUP);
13847 else
13848 val &= GRC_MODE_HOST_STACKUP;
13849
13760 tw32(GRC_MODE, val | tp->grc_mode); 13850 tw32(GRC_MODE, val | tp->grc_mode);
13761 13851
13762 tg3_switch_clocks(tp); 13852 tg3_switch_clocks(tp);
@@ -13961,8 +14051,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
13961 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 14051 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13962 else 14052 else
13963 tg3_nvram_unlock(tp); 14053 tg3_nvram_unlock(tp);
13964 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 14054 } else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13966 if (PCI_FUNC(tp->pdev->devfn) & 1) 14055 if (PCI_FUNC(tp->pdev->devfn) & 1)
13967 mac_offset = 0xcc; 14056 mac_offset = 0xcc;
13968 if (PCI_FUNC(tp->pdev->devfn) > 1) 14057 if (PCI_FUNC(tp->pdev->devfn) > 1)
@@ -14051,7 +14140,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14051#endif 14140#endif
14052#endif 14141#endif
14053 14142
14054 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 14143 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14055 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 14144 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14056 goto out; 14145 goto out;
14057 } 14146 }
@@ -14268,7 +14357,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14268 14357
14269 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 14358 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14270 14359
14271 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 14360 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
14272 goto out; 14361 goto out;
14273 14362
14274 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 14363 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
@@ -14443,7 +14532,7 @@ out_nofree:
14443 14532
14444static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14533static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14445{ 14534{
14446 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 14535 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14447 tp->bufmgr_config.mbuf_read_dma_low_water = 14536 tp->bufmgr_config.mbuf_read_dma_low_water =
14448 DEFAULT_MB_RDMA_LOW_WATER_5705; 14537 DEFAULT_MB_RDMA_LOW_WATER_5705;
14449 tp->bufmgr_config.mbuf_mac_rx_low_water = 14538 tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14521,6 +14610,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
14521 case TG3_PHY_ID_BCM5718S: return "5718S"; 14610 case TG3_PHY_ID_BCM5718S: return "5718S";
14522 case TG3_PHY_ID_BCM57765: return "57765"; 14611 case TG3_PHY_ID_BCM57765: return "57765";
14523 case TG3_PHY_ID_BCM5719C: return "5719C"; 14612 case TG3_PHY_ID_BCM5719C: return "5719C";
14613 case TG3_PHY_ID_BCM5720C: return "5720C";
14524 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 14614 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14525 case 0: return "serdes"; 14615 case 0: return "serdes";
14526 default: return "unknown"; 14616 default: return "unknown";
@@ -14633,6 +14723,7 @@ static const struct net_device_ops tg3_netdev_ops = {
14633 .ndo_do_ioctl = tg3_ioctl, 14723 .ndo_do_ioctl = tg3_ioctl,
14634 .ndo_tx_timeout = tg3_tx_timeout, 14724 .ndo_tx_timeout = tg3_tx_timeout,
14635 .ndo_change_mtu = tg3_change_mtu, 14725 .ndo_change_mtu = tg3_change_mtu,
14726 .ndo_fix_features = tg3_fix_features,
14636#ifdef CONFIG_NET_POLL_CONTROLLER 14727#ifdef CONFIG_NET_POLL_CONTROLLER
14637 .ndo_poll_controller = tg3_poll_controller, 14728 .ndo_poll_controller = tg3_poll_controller,
14638#endif 14729#endif
@@ -14663,6 +14754,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14663 u32 sndmbx, rcvmbx, intmbx; 14754 u32 sndmbx, rcvmbx, intmbx;
14664 char str[40]; 14755 char str[40];
14665 u64 dma_mask, persist_dma_mask; 14756 u64 dma_mask, persist_dma_mask;
14757 u32 hw_features = 0;
14666 14758
14667 printk_once(KERN_INFO "%s\n", version); 14759 printk_once(KERN_INFO "%s\n", version);
14668 14760
@@ -14759,8 +14851,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14759 } 14851 }
14760 14852
14761 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 14853 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14762 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 14854 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
14763 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14764 dev->netdev_ops = &tg3_netdev_ops; 14855 dev->netdev_ops = &tg3_netdev_ops;
14765 else 14856 else
14766 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14857 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14824,27 +14915,25 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14824 * is off by default, but can be enabled using ethtool. 14915 * is off by default, but can be enabled using ethtool.
14825 */ 14916 */
14826 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && 14917 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14827 (dev->features & NETIF_F_IP_CSUM)) { 14918 (dev->features & NETIF_F_IP_CSUM))
14828 dev->features |= NETIF_F_TSO; 14919 hw_features |= NETIF_F_TSO;
14829 vlan_features_add(dev, NETIF_F_TSO);
14830 }
14831 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || 14920 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14832 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { 14921 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14833 if (dev->features & NETIF_F_IPV6_CSUM) { 14922 if (dev->features & NETIF_F_IPV6_CSUM)
14834 dev->features |= NETIF_F_TSO6; 14923 hw_features |= NETIF_F_TSO6;
14835 vlan_features_add(dev, NETIF_F_TSO6);
14836 }
14837 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || 14924 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14838 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14839 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14926 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14840 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14927 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { 14929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14843 dev->features |= NETIF_F_TSO_ECN; 14930 hw_features |= NETIF_F_TSO_ECN;
14844 vlan_features_add(dev, NETIF_F_TSO_ECN);
14845 }
14846 } 14931 }
14847 14932
14933 dev->hw_features |= hw_features;
14934 dev->features |= hw_features;
14935 dev->vlan_features |= hw_features;
14936
14848 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14937 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14849 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 14938 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14850 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 14939 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -14973,7 +15062,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14973 } 15062 }
14974 15063
14975 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", 15064 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14976 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, 15065 (dev->features & NETIF_F_RXCSUM) != 0,
14977 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, 15066 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14978 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, 15067 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
14979 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, 15068 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
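The tg3 hunks above follow the hw_features model used throughout this series: user-toggleable offloads are collected in dev->hw_features, the default-on subset is copied into dev->features and dev->vlan_features, and ndo_fix_features masks out bits the hardware cannot honour in the current configuration. A minimal sketch of that pattern for a hypothetical "foo" driver (the names and the jumbo-frame constraint are illustrative only, not taken from tg3):

#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Hypothetical driver hooks; feature masks are u32 in this kernel series. */
static u32 foo_fix_features(struct net_device *dev, u32 features)
{
        /* Example constraint: drop TSO once jumbo frames are in use. */
        if (dev->mtu > ETH_DATA_LEN)
                features &= ~NETIF_F_ALL_TSO;
        return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
        /* Program checksum/TSO enables in the hardware from 'features'. */
        return 0;
}

static void foo_init_features(struct net_device *dev)
{
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                           NETIF_F_TSO | NETIF_F_RXCSUM;
        dev->features |= dev->hw_features;      /* enabled by default */
        dev->vlan_features |= dev->hw_features;
}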
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 5e96706ad108..829a84ad80f2 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -25,9 +25,13 @@
25 25
26#define TG3_RX_INTERNAL_RING_SZ_5906 32 26#define TG3_RX_INTERNAL_RING_SZ_5906 32
27 27
28#define RX_STD_MAX_SIZE_5705 512 28#define TG3_RX_STD_MAX_SIZE_5700 512
29#define RX_STD_MAX_SIZE_5717 2048 29#define TG3_RX_STD_MAX_SIZE_5717 2048
30#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ 30#define TG3_RX_JMB_MAX_SIZE_5700 256
31#define TG3_RX_JMB_MAX_SIZE_5717 1024
32#define TG3_RX_RET_MAX_SIZE_5700 1024
33#define TG3_RX_RET_MAX_SIZE_5705 512
34#define TG3_RX_RET_MAX_SIZE_5717 4096
31 35
32/* First 256 bytes are a mirror of PCI config space. */ 36/* First 256 bytes are a mirror of PCI config space. */
33#define TG3PCI_VENDOR 0x00000000 37#define TG3PCI_VENDOR 0x00000000
@@ -54,6 +58,7 @@
54#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 58#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
55#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 59#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
56#define TG3PCI_DEVICE_TIGON3_5719 0x1657 60#define TG3PCI_DEVICE_TIGON3_5719 0x1657
61#define TG3PCI_DEVICE_TIGON3_5720 0x165f
57/* 0x04 --> 0x2c unused */ 62/* 0x04 --> 0x2c unused */
58#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM 63#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
59#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 64#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -163,6 +168,7 @@
163#define ASIC_REV_5717 0x5717 168#define ASIC_REV_5717 0x5717
164#define ASIC_REV_57765 0x57785 169#define ASIC_REV_57765 0x57785
165#define ASIC_REV_5719 0x5719 170#define ASIC_REV_5719 0x5719
171#define ASIC_REV_5720 0x5720
166#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 172#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
167#define CHIPREV_5700_AX 0x70 173#define CHIPREV_5700_AX 0x70
168#define CHIPREV_5700_BX 0x71 174#define CHIPREV_5700_BX 0x71
@@ -473,6 +479,8 @@
473#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 479#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
474#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 480#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
475#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 481#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100
482#define TX_MODE_JMB_FRM_LEN 0x00400000
483#define TX_MODE_CNT_DN_MODE 0x00800000
476#define MAC_TX_STATUS 0x00000460 484#define MAC_TX_STATUS 0x00000460
477#define TX_STATUS_XOFFED 0x00000001 485#define TX_STATUS_XOFFED 0x00000001
478#define TX_STATUS_SENT_XOFF 0x00000002 486#define TX_STATUS_SENT_XOFF 0x00000002
@@ -487,6 +495,8 @@
487#define TX_LENGTHS_IPG_SHIFT 8 495#define TX_LENGTHS_IPG_SHIFT 8
488#define TX_LENGTHS_IPG_CRS_MASK 0x00003000 496#define TX_LENGTHS_IPG_CRS_MASK 0x00003000
489#define TX_LENGTHS_IPG_CRS_SHIFT 12 497#define TX_LENGTHS_IPG_CRS_SHIFT 12
498#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000
499#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000
490#define MAC_RX_MODE 0x00000468 500#define MAC_RX_MODE 0x00000468
491#define RX_MODE_RESET 0x00000001 501#define RX_MODE_RESET 0x00000001
492#define RX_MODE_ENABLE 0x00000002 502#define RX_MODE_ENABLE 0x00000002
@@ -1079,6 +1089,9 @@
1079#define CPMU_HST_ACC_MACCLK_6_25 0x00130000 1089#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
1080/* 0x3620 --> 0x3630 unused */ 1090/* 0x3620 --> 0x3630 unused */
1081 1091
1092#define TG3_CPMU_CLCK_ORIDE 0x00003624
1093#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
1094
1082#define TG3_CPMU_CLCK_STAT 0x00003630 1095#define TG3_CPMU_CLCK_STAT 0x00003630
1083#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 1096#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
1084#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 1097#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -1321,6 +1334,7 @@
1321#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 1334#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
1322#define RDMAC_MODE_IPV4_LSO_EN 0x08000000 1335#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
1323#define RDMAC_MODE_IPV6_LSO_EN 0x10000000 1336#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
1337#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000
1324#define RDMAC_STATUS 0x00004804 1338#define RDMAC_STATUS 0x00004804
1325#define RDMAC_STATUS_TGTABORT 0x00000004 1339#define RDMAC_STATUS_TGTABORT 0x00000004
1326#define RDMAC_STATUS_MSTABORT 0x00000008 1340#define RDMAC_STATUS_MSTABORT 0x00000008
@@ -1613,6 +1627,8 @@
1613#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004 1627#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004
1614#define GRC_MODE_BSWAP_DATA 0x00000010 1628#define GRC_MODE_BSWAP_DATA 0x00000010
1615#define GRC_MODE_WSWAP_DATA 0x00000020 1629#define GRC_MODE_WSWAP_DATA 0x00000020
1630#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040
1631#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080
1616#define GRC_MODE_SPLITHDR 0x00000100 1632#define GRC_MODE_SPLITHDR 0x00000100
1617#define GRC_MODE_NOFRM_CRACKING 0x00000200 1633#define GRC_MODE_NOFRM_CRACKING 0x00000200
1618#define GRC_MODE_INCL_CRC 0x00000400 1634#define GRC_MODE_INCL_CRC 0x00000400
@@ -1620,8 +1636,10 @@
1620#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000 1636#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000
1621#define GRC_MODE_NOIRQ_ON_RCV 0x00004000 1637#define GRC_MODE_NOIRQ_ON_RCV 0x00004000
1622#define GRC_MODE_FORCE_PCI32BIT 0x00008000 1638#define GRC_MODE_FORCE_PCI32BIT 0x00008000
1639#define GRC_MODE_B2HRX_ENABLE 0x00008000
1623#define GRC_MODE_HOST_STACKUP 0x00010000 1640#define GRC_MODE_HOST_STACKUP 0x00010000
1624#define GRC_MODE_HOST_SENDBDS 0x00020000 1641#define GRC_MODE_HOST_SENDBDS 0x00020000
1642#define GRC_MODE_HTX2B_ENABLE 0x00040000
1625#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 1643#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
1626#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 1644#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
1627#define GRC_MODE_PCIE_TL_SEL 0x00000000 1645#define GRC_MODE_PCIE_TL_SEL 0x00000000
@@ -1818,6 +1836,38 @@
1818#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000 1836#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000
1819#define FLASH_5717VENDOR_ST_25USPT 0x03400002 1837#define FLASH_5717VENDOR_ST_25USPT 0x03400002
1820#define FLASH_5717VENDOR_ST_45USPT 0x03400001 1838#define FLASH_5717VENDOR_ST_45USPT 0x03400001
1839#define FLASH_5720_EEPROM_HD 0x00000001
1840#define FLASH_5720_EEPROM_LD 0x00000003
1841#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
1842#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
1843#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
1844#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
1845#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000
1846#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002
1847#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001
1848#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003
1849#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000
1850#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002
1851#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001
1852#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003
1853#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
1854#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
1855#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
1856#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
1857#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
1858#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
1859#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
1860#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000
1861#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002
1862#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001
1863#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003
1864#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000
1865#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002
1866#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001
1867#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003
1868#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000
1869#define FLASH_5720VENDOR_ST_25USPT 0x03c00002
1870#define FLASH_5720VENDOR_ST_45USPT 0x03c00001
1821#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 1871#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
1822#define FLASH_5752PAGE_SIZE_256 0x00000000 1872#define FLASH_5752PAGE_SIZE_256 0x00000000
1823#define FLASH_5752PAGE_SIZE_512 0x10000000 1873#define FLASH_5752PAGE_SIZE_512 0x10000000
@@ -2833,7 +2883,6 @@ struct tg3 {
2833 u32 tg3_flags; 2883 u32 tg3_flags;
2834#define TG3_FLAG_TAGGED_STATUS 0x00000001 2884#define TG3_FLAG_TAGGED_STATUS 0x00000001
2835#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 2885#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
2836#define TG3_FLAG_RX_CHECKSUMS 0x00000004
2837#define TG3_FLAG_USE_LINKCHG_REG 0x00000008 2886#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
2838#define TG3_FLAG_ENABLE_ASF 0x00000020 2887#define TG3_FLAG_ENABLE_ASF 0x00000020
2839#define TG3_FLAG_ASPM_WORKAROUND 0x00000040 2888#define TG3_FLAG_ASPM_WORKAROUND 0x00000040
@@ -2859,7 +2908,6 @@ struct tg3 {
2859#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2908#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2860#define TG3_FLAG_CPMU_PRESENT 0x04000000 2909#define TG3_FLAG_CPMU_PRESENT 0x04000000
2861#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 2910#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
2862#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2863#define TG3_FLAG_JUMBO_CAPABLE 0x20000000 2911#define TG3_FLAG_JUMBO_CAPABLE 0x20000000
2864#define TG3_FLAG_CHIP_RESETTING 0x40000000 2912#define TG3_FLAG_CHIP_RESETTING 0x40000000
2865#define TG3_FLAG_INIT_COMPLETE 0x80000000 2913#define TG3_FLAG_INIT_COMPLETE 0x80000000
@@ -2897,6 +2945,7 @@ struct tg3 {
2897#define TG3_FLG3_5701_DMA_BUG 0x00000008 2945#define TG3_FLG3_5701_DMA_BUG 0x00000008
2898#define TG3_FLG3_USE_PHYLIB 0x00000010 2946#define TG3_FLG3_USE_PHYLIB 0x00000010
2899#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2947#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2948#define TG3_FLG3_LRG_PROD_RING_CAP 0x00000080
2900#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100 2949#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
2901#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2950#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2902#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400 2951#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
@@ -2910,8 +2959,9 @@ struct tg3 {
2910#define TG3_FLG3_SHORT_DMA_BUG 0x00200000 2959#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2911#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 2960#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2912#define TG3_FLG3_L1PLLPD_EN 0x00800000 2961#define TG3_FLG3_L1PLLPD_EN 0x00800000
2913#define TG3_FLG3_5717_PLUS 0x01000000 2962#define TG3_FLG3_57765_PLUS 0x01000000
2914#define TG3_FLG3_APE_HAS_NCSI 0x02000000 2963#define TG3_FLG3_APE_HAS_NCSI 0x02000000
2964#define TG3_FLG3_5717_PLUS 0x04000000
2915 2965
2916 struct timer_list timer; 2966 struct timer_list timer;
2917 u16 timer_counter; 2967 u16 timer_counter;
@@ -2983,6 +3033,7 @@ struct tg3 {
2983#define TG3_PHY_ID_BCM5718S 0xbc050ff0 3033#define TG3_PHY_ID_BCM5718S 0xbc050ff0
2984#define TG3_PHY_ID_BCM57765 0x5c0d8a40 3034#define TG3_PHY_ID_BCM57765 0x5c0d8a40
2985#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 3035#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
3036#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
2986#define TG3_PHY_ID_BCM5906 0xdc00ac40 3037#define TG3_PHY_ID_BCM5906 0xdc00ac40
2987#define TG3_PHY_ID_BCM8002 0x60010140 3038#define TG3_PHY_ID_BCM8002 0x60010140
2988#define TG3_PHY_ID_INVALID 0xffffffff 3039#define TG3_PHY_ID_INVALID 0xffffffff
@@ -3049,6 +3100,7 @@ struct tg3 {
3049 3100
3050 int nvram_lock_cnt; 3101 int nvram_lock_cnt;
3051 u32 nvram_size; 3102 u32 nvram_size;
3103#define TG3_NVRAM_SIZE_2KB 0x00000800
3052#define TG3_NVRAM_SIZE_64KB 0x00010000 3104#define TG3_NVRAM_SIZE_64KB 0x00010000
3053#define TG3_NVRAM_SIZE_128KB 0x00020000 3105#define TG3_NVRAM_SIZE_128KB 0x00020000
3054#define TG3_NVRAM_SIZE_256KB 0x00040000 3106#define TG3_NVRAM_SIZE_256KB 0x00040000
@@ -3064,6 +3116,9 @@ struct tg3 {
3064#define JEDEC_SAIFUN 0x4f 3116#define JEDEC_SAIFUN 0x4f
3065#define JEDEC_SST 0xbf 3117#define JEDEC_SST 0xbf
3066 3118
3119#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
3120#define ATMEL_AT24C02_PAGE_SIZE (8)
3121
3067#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB 3122#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB
3068#define ATMEL_AT24C64_PAGE_SIZE (32) 3123#define ATMEL_AT24C64_PAGE_SIZE (32)
3069 3124
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 6f92e48f02d3..537fbc0a4401 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -410,7 +410,6 @@ static const struct ethtool_ops uec_ethtool_ops = {
410 .set_ringparam = uec_set_ringparam, 410 .set_ringparam = uec_set_ringparam,
411 .get_pauseparam = uec_get_pauseparam, 411 .get_pauseparam = uec_get_pauseparam,
412 .set_pauseparam = uec_set_pauseparam, 412 .set_pauseparam = uec_set_pauseparam,
413 .set_sg = ethtool_op_set_sg,
414 .get_sset_count = uec_get_sset_count, 413 .get_sset_count = uec_get_sset_count,
415 .get_strings = uec_get_strings, 414 .get_strings = uec_get_strings,
416 .get_ethtool_stats = uec_get_ethtool_stats, 415 .get_ethtool_stats = uec_get_ethtool_stats,
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3ec22c307797..9d4f9117260f 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -258,7 +258,7 @@ config USB_NET_NET1080
258 optionally with LEDs that indicate traffic 258 optionally with LEDs that indicate traffic
259 259
260config USB_NET_PLUSB 260config USB_NET_PLUSB
261 tristate "Prolific PL-2301/2302 based cables" 261 tristate "Prolific PL-2301/2302/25A1 based cables"
262 # if the handshake/init/reset problems, from original 'plusb', 262 # if the handshake/init/reset problems, from original 'plusb',
263 # are ever resolved ... then remove "experimental" 263 # are ever resolved ... then remove "experimental"
264 depends on USB_USBNET && EXPERIMENTAL 264 depends on USB_USBNET && EXPERIMENTAL
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 823c53751307..217aec8a768f 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -45,6 +45,14 @@
45 * seems to get wedged under load. Prolific docs are weak, and 45 * seems to get wedged under load. Prolific docs are weak, and
46 * don't identify differences between PL2301 and PL2302, much less 46 * don't identify differences between PL2301 and PL2302, much less
47 * anything to explain the different PL2302 versions observed. 47 * anything to explain the different PL2302 versions observed.
48 *
49 * NOTE: pl2501 has several modes, including pl2301 and pl2302
50 * compatibility. Some docs suggest the difference between 2301
51 * and 2302 is only to make MS-Windows use a different driver...
52 *
53 * pl25a1 glue based on patch from Tony Gibbs. Prolific "docs" on
54 * this chip are as usual incomplete about what control messages
55 * are supported.
48 */ 56 */
49 57
50/* 58/*
@@ -86,16 +94,20 @@ pl_set_QuickLink_features(struct usbnet *dev, int val)
86 94
87static int pl_reset(struct usbnet *dev) 95static int pl_reset(struct usbnet *dev)
88{ 96{
97 int status;
98
89 /* some units seem to need this reset, others reject it utterly. 99 /* some units seem to need this reset, others reject it utterly.
90 * FIXME be more like "naplink" or windows drivers. 100 * FIXME be more like "naplink" or windows drivers.
91 */ 101 */
92 (void) pl_set_QuickLink_features(dev, 102 status = pl_set_QuickLink_features(dev,
93 PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E); 103 PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E);
104 if (status != 0 && netif_msg_probe(dev))
105 netif_dbg(dev, link, dev->net, "pl_reset --> %d\n", status);
94 return 0; 106 return 0;
95} 107}
96 108
97static const struct driver_info prolific_info = { 109static const struct driver_info prolific_info = {
98 .description = "Prolific PL-2301/PL-2302", 110 .description = "Prolific PL-2301/PL-2302/PL-25A1",
99 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT, 111 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
100 /* some PL-2302 versions seem to fail usb_set_interface() */ 112 /* some PL-2302 versions seem to fail usb_set_interface() */
101 .reset = pl_reset, 113 .reset = pl_reset,
@@ -111,6 +123,7 @@ static const struct driver_info prolific_info = {
111 123
112static const struct usb_device_id products [] = { 124static const struct usb_device_id products [] = {
113 125
126/* full speed cables */
114{ 127{
115 USB_DEVICE(0x067b, 0x0000), // PL-2301 128 USB_DEVICE(0x067b, 0x0000), // PL-2301
116 .driver_info = (unsigned long) &prolific_info, 129 .driver_info = (unsigned long) &prolific_info,
@@ -119,6 +132,15 @@ static const struct usb_device_id products [] = {
119 .driver_info = (unsigned long) &prolific_info, 132 .driver_info = (unsigned long) &prolific_info,
120}, 133},
121 134
135/* high speed cables */
136{
137 USB_DEVICE(0x067b, 0x25a1), /* PL-25A1, no eeprom */
138 .driver_info = (unsigned long) &prolific_info,
139}, {
140 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
141 .driver_info = (unsigned long) &prolific_info,
142},
143
122 { }, // END 144 { }, // END
123}; 145};
124MODULE_DEVICE_TABLE(usb, products); 146MODULE_DEVICE_TABLE(usb, products);
@@ -134,16 +156,16 @@ static struct usb_driver plusb_driver = {
134 156
135static int __init plusb_init(void) 157static int __init plusb_init(void)
136{ 158{
137 return usb_register(&plusb_driver); 159 return usb_register(&plusb_driver);
138} 160}
139module_init(plusb_init); 161module_init(plusb_init);
140 162
141static void __exit plusb_exit(void) 163static void __exit plusb_exit(void)
142{ 164{
143 usb_deregister(&plusb_driver); 165 usb_deregister(&plusb_driver);
144} 166}
145module_exit(plusb_exit); 167module_exit(plusb_exit);
146 168
147MODULE_AUTHOR("David Brownell"); 169MODULE_AUTHOR("David Brownell");
148MODULE_DESCRIPTION("Prolific PL-2301/2302 USB Host to Host Link Driver"); 170MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
149MODULE_LICENSE("GPL"); 171MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 753ee6eb7edd..860a20c938b4 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -65,7 +65,6 @@ struct smsc75xx_priv {
65 struct usbnet *dev; 65 struct usbnet *dev;
66 u32 rfe_ctl; 66 u32 rfe_ctl;
67 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; 67 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
68 bool use_rx_csum;
69 struct mutex dataport_mutex; 68 struct mutex dataport_mutex;
70 spinlock_t rfe_ctl_lock; 69 spinlock_t rfe_ctl_lock;
71 struct work_struct set_multicast; 70 struct work_struct set_multicast;
@@ -548,28 +547,6 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
548 "unexpected interrupt, intdata=0x%08X", intdata); 547 "unexpected interrupt, intdata=0x%08X", intdata);
549} 548}
550 549
551/* Enable or disable Rx checksum offload engine */
552static int smsc75xx_set_rx_csum_offload(struct usbnet *dev)
553{
554 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
555 unsigned long flags;
556 int ret;
557
558 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
559
560 if (pdata->use_rx_csum)
561 pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
562 else
563 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
564
565 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
566
567 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
568 check_warn_return(ret, "Error writing RFE_CTL");
569
570 return 0;
571}
572
573static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) 550static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
574{ 551{
575 return MAX_EEPROM_SIZE; 552 return MAX_EEPROM_SIZE;
@@ -599,34 +576,6 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
599 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); 576 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
600} 577}
601 578
602static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev)
603{
604 struct usbnet *dev = netdev_priv(netdev);
605 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
606
607 return pdata->use_rx_csum;
608}
609
610static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
611{
612 struct usbnet *dev = netdev_priv(netdev);
613 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
614
615 pdata->use_rx_csum = !!val;
616
617 return smsc75xx_set_rx_csum_offload(dev);
618}
619
620static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data)
621{
622 if (data)
623 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
624 else
625 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
626
627 return 0;
628}
629
630static const struct ethtool_ops smsc75xx_ethtool_ops = { 579static const struct ethtool_ops smsc75xx_ethtool_ops = {
631 .get_link = usbnet_get_link, 580 .get_link = usbnet_get_link,
632 .nway_reset = usbnet_nway_reset, 581 .nway_reset = usbnet_nway_reset,
@@ -638,12 +587,6 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
638 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, 587 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
639 .get_eeprom = smsc75xx_ethtool_get_eeprom, 588 .get_eeprom = smsc75xx_ethtool_get_eeprom,
640 .set_eeprom = smsc75xx_ethtool_set_eeprom, 589 .set_eeprom = smsc75xx_ethtool_set_eeprom,
641 .get_tx_csum = ethtool_op_get_tx_csum,
642 .set_tx_csum = ethtool_op_set_tx_hw_csum,
643 .get_rx_csum = smsc75xx_ethtool_get_rx_csum,
644 .set_rx_csum = smsc75xx_ethtool_set_rx_csum,
645 .get_tso = ethtool_op_get_tso,
646 .set_tso = smsc75xx_ethtool_set_tso,
647}; 590};
648 591
649static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 592static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -782,6 +725,30 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
782 return usbnet_change_mtu(netdev, new_mtu); 725 return usbnet_change_mtu(netdev, new_mtu);
783} 726}
784 727
728/* Enable or disable Rx checksum offload engine */
729static int smsc75xx_set_features(struct net_device *netdev, u32 features)
730{
731 struct usbnet *dev = netdev_priv(netdev);
732 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
733 unsigned long flags;
734 int ret;
735
736 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
737
738 if (features & NETIF_F_RXCSUM)
739 pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
740 else
741 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
742
743 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
744 /* it's racing here! */
745
746 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
747 check_warn_return(ret, "Error writing RFE_CTL");
748
749 return 0;
750}
751
785static int smsc75xx_reset(struct usbnet *dev) 752static int smsc75xx_reset(struct usbnet *dev)
786{ 753{
787 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 754 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -960,11 +927,7 @@ static int smsc75xx_reset(struct usbnet *dev)
960 netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl); 927 netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);
961 928
962 /* Enable or disable checksum offload engines */ 929 /* Enable or disable checksum offload engines */
963 ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE); 930 smsc75xx_set_features(dev->net, dev->net->features);
964 ret = smsc75xx_set_rx_csum_offload(dev);
965 check_warn_return(ret, "Failed to set rx csum offload: %d", ret);
966
967 smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE);
968 931
969 smsc75xx_set_multicast(dev->net); 932 smsc75xx_set_multicast(dev->net);
970 933
@@ -1037,6 +1000,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
1037 .ndo_validate_addr = eth_validate_addr, 1000 .ndo_validate_addr = eth_validate_addr,
1038 .ndo_do_ioctl = smsc75xx_ioctl, 1001 .ndo_do_ioctl = smsc75xx_ioctl,
1039 .ndo_set_multicast_list = smsc75xx_set_multicast, 1002 .ndo_set_multicast_list = smsc75xx_set_multicast,
1003 .ndo_set_features = smsc75xx_set_features,
1040}; 1004};
1041 1005
1042static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) 1006static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -1065,10 +1029,17 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1065 1029
1066 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); 1030 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
1067 1031
1068 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; 1032 if (DEFAULT_TX_CSUM_ENABLE) {
1033 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1034 if (DEFAULT_TSO_ENABLE)
1035 dev->net->features |= NETIF_F_SG |
1036 NETIF_F_TSO | NETIF_F_TSO6;
1037 }
1038 if (DEFAULT_RX_CSUM_ENABLE)
1039 dev->net->features |= NETIF_F_RXCSUM;
1069 1040
1070 /* We have to advertise SG otherwise TSO cannot be enabled */ 1041 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1071 dev->net->features |= NETIF_F_SG; 1042 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
1072 1043
1073 /* Init all registers */ 1044 /* Init all registers */
1074 ret = smsc75xx_reset(dev); 1045 ret = smsc75xx_reset(dev);
@@ -1091,10 +1062,11 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1091 } 1062 }
1092} 1063}
1093 1064
1094static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a, 1065static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
1095 u32 rx_cmd_b) 1066 u32 rx_cmd_a, u32 rx_cmd_b)
1096{ 1067{
1097 if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { 1068 if (!(dev->net->features & NETIF_F_RXCSUM) ||
1069 unlikely(rx_cmd_a & RX_CMD_A_LCSM)) {
1098 skb->ip_summed = CHECKSUM_NONE; 1070 skb->ip_summed = CHECKSUM_NONE;
1099 } else { 1071 } else {
1100 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); 1072 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT));
@@ -1104,8 +1076,6 @@ static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a,
1104 1076
1105static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1077static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1106{ 1078{
1107 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1108
1109 while (skb->len > 0) { 1079 while (skb->len > 0) {
1110 u32 rx_cmd_a, rx_cmd_b, align_count, size; 1080 u32 rx_cmd_a, rx_cmd_b, align_count, size;
1111 struct sk_buff *ax_skb; 1081 struct sk_buff *ax_skb;
@@ -1145,11 +1115,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1145 1115
1146 /* last frame in this batch */ 1116 /* last frame in this batch */
1147 if (skb->len == size) { 1117 if (skb->len == size) {
1148 if (pdata->use_rx_csum) 1118 smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a,
1149 smsc75xx_rx_csum_offload(skb, rx_cmd_a, 1119 rx_cmd_b);
1150 rx_cmd_b);
1151 else
1152 skb->ip_summed = CHECKSUM_NONE;
1153 1120
1154 skb_trim(skb, skb->len - 4); /* remove fcs */ 1121 skb_trim(skb, skb->len - 4); /* remove fcs */
1155 skb->truesize = size + sizeof(struct sk_buff); 1122 skb->truesize = size + sizeof(struct sk_buff);
@@ -1167,11 +1134,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1167 ax_skb->data = packet; 1134 ax_skb->data = packet;
1168 skb_set_tail_pointer(ax_skb, size); 1135 skb_set_tail_pointer(ax_skb, size);
1169 1136
1170 if (pdata->use_rx_csum) 1137 smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a,
1171 smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a, 1138 rx_cmd_b);
1172 rx_cmd_b);
1173 else
1174 ax_skb->ip_summed = CHECKSUM_NONE;
1175 1139
1176 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ 1140 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
1177 ax_skb->truesize = size + sizeof(struct sk_buff); 1141 ax_skb->truesize = size + sizeof(struct sk_buff);
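With the smsc75xx conversion above, the cached use_rx_csum flag disappears: the receive path checks dev->net->features & NETIF_F_RXCSUM directly, and "ethtool -K ethX rx on|off" reaches the driver through .ndo_set_features. A simplified illustration of the core-side dispatch (a sketch of the idea only, not the actual netdev core code):

static void example_update_features(struct net_device *dev, u32 wanted)
{
        /* Only bits advertised in hw_features may be toggled by the user. */
        u32 features = (dev->features & ~dev->hw_features) |
                       (wanted & dev->hw_features);

        if (dev->netdev_ops->ndo_fix_features)
                features = dev->netdev_ops->ndo_fix_features(dev, features);

        if (features == dev->features)
                return;

        if (dev->netdev_ops->ndo_set_features)
                dev->netdev_ops->ndo_set_features(dev, features);

        dev->features = features;
}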
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 47a6c870b51f..24f4b3739dd2 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -52,8 +52,6 @@ struct smsc95xx_priv {
52 u32 hash_hi; 52 u32 hash_hi;
53 u32 hash_lo; 53 u32 hash_lo;
54 spinlock_t mac_cr_lock; 54 spinlock_t mac_cr_lock;
55 bool use_tx_csum;
56 bool use_rx_csum;
57}; 55};
58 56
59struct usb_context { 57struct usb_context {
@@ -517,22 +515,24 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
517} 515}
518 516
519/* Enable or disable Tx & Rx checksum offload engines */ 517/* Enable or disable Tx & Rx checksum offload engines */
520static int smsc95xx_set_csums(struct usbnet *dev) 518static int smsc95xx_set_features(struct net_device *netdev, u32 features)
521{ 519{
522 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 520 struct usbnet *dev = netdev_priv(netdev);
523 u32 read_buf; 521 u32 read_buf;
524 int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); 522 int ret;
523
524 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
525 if (ret < 0) { 525 if (ret < 0) {
526 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret); 526 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
527 return ret; 527 return ret;
528 } 528 }
529 529
530 if (pdata->use_tx_csum) 530 if (features & NETIF_F_HW_CSUM)
531 read_buf |= Tx_COE_EN_; 531 read_buf |= Tx_COE_EN_;
532 else 532 else
533 read_buf &= ~Tx_COE_EN_; 533 read_buf &= ~Tx_COE_EN_;
534 534
535 if (pdata->use_rx_csum) 535 if (features & NETIF_F_RXCSUM)
536 read_buf |= Rx_COE_EN_; 536 read_buf |= Rx_COE_EN_;
537 else 537 else
538 read_buf &= ~Rx_COE_EN_; 538 read_buf &= ~Rx_COE_EN_;
@@ -576,43 +576,6 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
576 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data); 576 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
577} 577}
578 578
579static u32 smsc95xx_ethtool_get_rx_csum(struct net_device *netdev)
580{
581 struct usbnet *dev = netdev_priv(netdev);
582 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
583
584 return pdata->use_rx_csum;
585}
586
587static int smsc95xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
588{
589 struct usbnet *dev = netdev_priv(netdev);
590 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
591
592 pdata->use_rx_csum = !!val;
593
594 return smsc95xx_set_csums(dev);
595}
596
597static u32 smsc95xx_ethtool_get_tx_csum(struct net_device *netdev)
598{
599 struct usbnet *dev = netdev_priv(netdev);
600 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
601
602 return pdata->use_tx_csum;
603}
604
605static int smsc95xx_ethtool_set_tx_csum(struct net_device *netdev, u32 val)
606{
607 struct usbnet *dev = netdev_priv(netdev);
608 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
609
610 pdata->use_tx_csum = !!val;
611
612 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
613 return smsc95xx_set_csums(dev);
614}
615
616static const struct ethtool_ops smsc95xx_ethtool_ops = { 579static const struct ethtool_ops smsc95xx_ethtool_ops = {
617 .get_link = usbnet_get_link, 580 .get_link = usbnet_get_link,
618 .nway_reset = usbnet_nway_reset, 581 .nway_reset = usbnet_nway_reset,
@@ -624,10 +587,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
624 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len, 587 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
625 .get_eeprom = smsc95xx_ethtool_get_eeprom, 588 .get_eeprom = smsc95xx_ethtool_get_eeprom,
626 .set_eeprom = smsc95xx_ethtool_set_eeprom, 589 .set_eeprom = smsc95xx_ethtool_set_eeprom,
627 .get_tx_csum = smsc95xx_ethtool_get_tx_csum,
628 .set_tx_csum = smsc95xx_ethtool_set_tx_csum,
629 .get_rx_csum = smsc95xx_ethtool_get_rx_csum,
630 .set_rx_csum = smsc95xx_ethtool_set_rx_csum,
631}; 590};
632 591
633static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 592static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -755,7 +714,6 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
755static int smsc95xx_reset(struct usbnet *dev) 714static int smsc95xx_reset(struct usbnet *dev)
756{ 715{
757 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 716 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
758 struct net_device *netdev = dev->net;
759 u32 read_buf, write_buf, burst_cap; 717 u32 read_buf, write_buf, burst_cap;
760 int ret = 0, timeout; 718 int ret = 0, timeout;
761 719
@@ -975,12 +933,7 @@ static int smsc95xx_reset(struct usbnet *dev)
975 } 933 }
976 934
977 /* Enable or disable checksum offload engines */ 935 /* Enable or disable checksum offload engines */
978 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum); 936 smsc95xx_set_features(dev->net, dev->net->features);
979 ret = smsc95xx_set_csums(dev);
980 if (ret < 0) {
981 netdev_warn(dev->net, "Failed to set csum offload: %d\n", ret);
982 return ret;
983 }
984 937
985 smsc95xx_set_multicast(dev->net); 938 smsc95xx_set_multicast(dev->net);
986 939
@@ -1019,6 +972,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
1019 .ndo_validate_addr = eth_validate_addr, 972 .ndo_validate_addr = eth_validate_addr,
1020 .ndo_do_ioctl = smsc95xx_ioctl, 973 .ndo_do_ioctl = smsc95xx_ioctl,
1021 .ndo_set_multicast_list = smsc95xx_set_multicast, 974 .ndo_set_multicast_list = smsc95xx_set_multicast,
975 .ndo_set_features = smsc95xx_set_features,
1022}; 976};
1023 977
1024static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) 978static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -1045,8 +999,12 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1045 999
1046 spin_lock_init(&pdata->mac_cr_lock); 1000 spin_lock_init(&pdata->mac_cr_lock);
1047 1001
1048 pdata->use_tx_csum = DEFAULT_TX_CSUM_ENABLE; 1002 if (DEFAULT_TX_CSUM_ENABLE)
1049 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; 1003 dev->net->features |= NETIF_F_HW_CSUM;
1004 if (DEFAULT_RX_CSUM_ENABLE)
1005 dev->net->features |= NETIF_F_RXCSUM;
1006
1007 dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
1050 1008
1051 smsc95xx_init_mac_address(dev); 1009 smsc95xx_init_mac_address(dev);
1052 1010
@@ -1056,7 +1014,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1056 dev->net->netdev_ops = &smsc95xx_netdev_ops; 1014 dev->net->netdev_ops = &smsc95xx_netdev_ops;
1057 dev->net->ethtool_ops = &smsc95xx_ethtool_ops; 1015 dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
1058 dev->net->flags |= IFF_MULTICAST; 1016 dev->net->flags |= IFF_MULTICAST;
1059 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD; 1017 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
1060 return 0; 1018 return 0;
1061} 1019}
1062 1020
@@ -1080,8 +1038,6 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1080 1038
1081static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1039static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1082{ 1040{
1083 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1084
1085 while (skb->len > 0) { 1041 while (skb->len > 0) {
1086 u32 header, align_count; 1042 u32 header, align_count;
1087 struct sk_buff *ax_skb; 1043 struct sk_buff *ax_skb;
@@ -1123,7 +1079,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1123 1079
1124 /* last frame in this batch */ 1080 /* last frame in this batch */
1125 if (skb->len == size) { 1081 if (skb->len == size) {
1126 if (pdata->use_rx_csum) 1082 if (dev->net->features & NETIF_F_RXCSUM)
1127 smsc95xx_rx_csum_offload(skb); 1083 smsc95xx_rx_csum_offload(skb);
1128 skb_trim(skb, skb->len - 4); /* remove fcs */ 1084 skb_trim(skb, skb->len - 4); /* remove fcs */
1129 skb->truesize = size + sizeof(struct sk_buff); 1085 skb->truesize = size + sizeof(struct sk_buff);
@@ -1141,7 +1097,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1141 ax_skb->data = packet; 1097 ax_skb->data = packet;
1142 skb_set_tail_pointer(ax_skb, size); 1098 skb_set_tail_pointer(ax_skb, size);
1143 1099
1144 if (pdata->use_rx_csum) 1100 if (dev->net->features & NETIF_F_RXCSUM)
1145 smsc95xx_rx_csum_offload(ax_skb); 1101 smsc95xx_rx_csum_offload(ax_skb);
1146 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ 1102 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
1147 ax_skb->truesize = size + sizeof(struct sk_buff); 1103 ax_skb->truesize = size + sizeof(struct sk_buff);
@@ -1174,8 +1130,7 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
1174static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, 1130static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
1175 struct sk_buff *skb, gfp_t flags) 1131 struct sk_buff *skb, gfp_t flags)
1176{ 1132{
1177 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 1133 bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
1178 bool csum = pdata->use_tx_csum && (skb->ip_summed == CHECKSUM_PARTIAL);
1179 int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD; 1134 int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
1180 u32 tx_cmd_a, tx_cmd_b; 1135 u32 tx_cmd_a, tx_cmd_b;
1181 1136
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2de9b90c5f8f..654228849951 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -36,7 +36,6 @@ struct veth_net_stats {
36struct veth_priv { 36struct veth_priv {
37 struct net_device *peer; 37 struct net_device *peer;
38 struct veth_net_stats __percpu *stats; 38 struct veth_net_stats __percpu *stats;
39 unsigned ip_summed;
40}; 39};
41 40
42/* 41/*
@@ -99,47 +98,10 @@ static void veth_get_ethtool_stats(struct net_device *dev,
99 data[0] = priv->peer->ifindex; 98 data[0] = priv->peer->ifindex;
100} 99}
101 100
102static u32 veth_get_rx_csum(struct net_device *dev)
103{
104 struct veth_priv *priv;
105
106 priv = netdev_priv(dev);
107 return priv->ip_summed == CHECKSUM_UNNECESSARY;
108}
109
110static int veth_set_rx_csum(struct net_device *dev, u32 data)
111{
112 struct veth_priv *priv;
113
114 priv = netdev_priv(dev);
115 priv->ip_summed = data ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
116 return 0;
117}
118
119static u32 veth_get_tx_csum(struct net_device *dev)
120{
121 return (dev->features & NETIF_F_NO_CSUM) != 0;
122}
123
124static int veth_set_tx_csum(struct net_device *dev, u32 data)
125{
126 if (data)
127 dev->features |= NETIF_F_NO_CSUM;
128 else
129 dev->features &= ~NETIF_F_NO_CSUM;
130 return 0;
131}
132
133static const struct ethtool_ops veth_ethtool_ops = { 101static const struct ethtool_ops veth_ethtool_ops = {
134 .get_settings = veth_get_settings, 102 .get_settings = veth_get_settings,
135 .get_drvinfo = veth_get_drvinfo, 103 .get_drvinfo = veth_get_drvinfo,
136 .get_link = ethtool_op_get_link, 104 .get_link = ethtool_op_get_link,
137 .get_rx_csum = veth_get_rx_csum,
138 .set_rx_csum = veth_set_rx_csum,
139 .get_tx_csum = veth_get_tx_csum,
140 .set_tx_csum = veth_set_tx_csum,
141 .get_sg = ethtool_op_get_sg,
142 .set_sg = ethtool_op_set_sg,
143 .get_strings = veth_get_strings, 105 .get_strings = veth_get_strings,
144 .get_sset_count = veth_get_sset_count, 106 .get_sset_count = veth_get_sset_count,
145 .get_ethtool_stats = veth_get_ethtool_stats, 107 .get_ethtool_stats = veth_get_ethtool_stats,
@@ -168,8 +130,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
168 130
169 /* don't change ip_summed == CHECKSUM_PARTIAL, as that 131 /* don't change ip_summed == CHECKSUM_PARTIAL, as that
170 will cause bad checksum on forwarded packets */ 132 will cause bad checksum on forwarded packets */
171 if (skb->ip_summed == CHECKSUM_NONE) 133 if (skb->ip_summed == CHECKSUM_NONE &&
172 skb->ip_summed = rcv_priv->ip_summed; 134 rcv->features & NETIF_F_RXCSUM)
135 skb->ip_summed = CHECKSUM_UNNECESSARY;
173 136
174 length = skb->len; 137 length = skb->len;
175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) 138 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
@@ -304,6 +267,8 @@ static void veth_setup(struct net_device *dev)
304 dev->ethtool_ops = &veth_ethtool_ops; 267 dev->ethtool_ops = &veth_ethtool_ops;
305 dev->features |= NETIF_F_LLTX; 268 dev->features |= NETIF_F_LLTX;
306 dev->destructor = veth_dev_free; 269 dev->destructor = veth_dev_free;
270
271 dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
307} 272}
308 273
309/* 274/*
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index eb5d75df5d5d..0422a79acfd7 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1703,7 +1703,7 @@ static void rhine_tx(struct net_device *dev)
1703static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size) 1703static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1704{ 1704{
1705 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; 1705 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1706 return ntohs(*(u16 *)trailer); 1706 return be16_to_cpup((__be16 *)trailer);
1707} 1707}
1708 1708
1709/* Process up to limit frames from receive ring */ 1709/* Process up to limit frames from receive ring */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 4fe051753842..baf04b0a6575 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2600,8 +2600,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2600 /* 2600 /*
2601 * Handle hardware checksum 2601 * Handle hardware checksum
2602 */ 2602 */
2603 if ((dev->features & NETIF_F_IP_CSUM) && 2603 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2604 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2605 const struct iphdr *ip = ip_hdr(skb); 2604 const struct iphdr *ip = ip_hdr(skb);
2606 if (ip->protocol == IPPROTO_TCP) 2605 if (ip->protocol == IPPROTO_TCP)
2607 td_ptr->tdesc1.TCR |= TCR0_TCPCK; 2606 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
@@ -2841,6 +2840,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2841 dev->ethtool_ops = &velocity_ethtool_ops; 2840 dev->ethtool_ops = &velocity_ethtool_ops;
2842 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2841 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2843 2842
2843 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
2844 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2844 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2845 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; 2845 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2846 2846
@@ -3457,13 +3457,10 @@ static const struct ethtool_ops velocity_ethtool_ops = {
3457 .get_settings = velocity_get_settings, 3457 .get_settings = velocity_get_settings,
3458 .set_settings = velocity_set_settings, 3458 .set_settings = velocity_set_settings,
3459 .get_drvinfo = velocity_get_drvinfo, 3459 .get_drvinfo = velocity_get_drvinfo,
3460 .set_tx_csum = ethtool_op_set_tx_csum,
3461 .get_tx_csum = ethtool_op_get_tx_csum,
3462 .get_wol = velocity_ethtool_get_wol, 3460 .get_wol = velocity_ethtool_get_wol,
3463 .set_wol = velocity_ethtool_set_wol, 3461 .set_wol = velocity_ethtool_set_wol,
3464 .get_msglevel = velocity_get_msglevel, 3462 .get_msglevel = velocity_get_msglevel,
3465 .set_msglevel = velocity_set_msglevel, 3463 .set_msglevel = velocity_set_msglevel,
3466 .set_sg = ethtool_op_set_sg,
3467 .get_link = velocity_get_link, 3464 .get_link = velocity_get_link,
3468 .get_coalesce = velocity_get_coalesce, 3465 .get_coalesce = velocity_get_coalesce,
3469 .set_coalesce = velocity_set_coalesce, 3466 .set_coalesce = velocity_set_coalesce,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 82dba5aaf423..0cb0b0632672 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -710,17 +710,6 @@ static int virtnet_close(struct net_device *dev)
710 return 0; 710 return 0;
711} 711}
712 712
713static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
714{
715 struct virtnet_info *vi = netdev_priv(dev);
716 struct virtio_device *vdev = vi->vdev;
717
718 if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
719 return -ENOSYS;
720
721 return ethtool_op_set_tx_hw_csum(dev, data);
722}
723
724static void virtnet_set_rx_mode(struct net_device *dev) 713static void virtnet_set_rx_mode(struct net_device *dev)
725{ 714{
726 struct virtnet_info *vi = netdev_priv(dev); 715 struct virtnet_info *vi = netdev_priv(dev);
@@ -822,10 +811,6 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
822} 811}
823 812
824static const struct ethtool_ops virtnet_ethtool_ops = { 813static const struct ethtool_ops virtnet_ethtool_ops = {
825 .set_tx_csum = virtnet_set_tx_csum,
826 .set_sg = ethtool_op_set_sg,
827 .set_tso = ethtool_op_set_tso,
828 .set_ufo = ethtool_op_set_ufo,
829 .get_link = ethtool_op_get_link, 814 .get_link = ethtool_op_get_link,
830}; 815};
831 816
@@ -912,22 +897,29 @@ static int virtnet_probe(struct virtio_device *vdev)
912 SET_NETDEV_DEV(dev, &vdev->dev); 897 SET_NETDEV_DEV(dev, &vdev->dev);
913 898
914 /* Do we support "hardware" checksums? */ 899 /* Do we support "hardware" checksums? */
915 if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 900 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
916 /* This opens up the world of extra features. */ 901 /* This opens up the world of extra features. */
917 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 902 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
918 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 903 if (csum)
919 dev->features |= NETIF_F_TSO | NETIF_F_UFO 904 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
905
906 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
907 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
920 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 908 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
921 } 909 }
922 /* Individual feature bits: what can host handle? */ 910 /* Individual feature bits: what can host handle? */
923 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 911 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
924 dev->features |= NETIF_F_TSO; 912 dev->hw_features |= NETIF_F_TSO;
925 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 913 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
926 dev->features |= NETIF_F_TSO6; 914 dev->hw_features |= NETIF_F_TSO6;
927 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 915 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
928 dev->features |= NETIF_F_TSO_ECN; 916 dev->hw_features |= NETIF_F_TSO_ECN;
929 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) 917 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
930 dev->features |= NETIF_F_UFO; 918 dev->hw_features |= NETIF_F_UFO;
919
920 if (gso)
921 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
922 /* (!csum && gso) case will be fixed by register_netdev() */
931 } 923 }
932 924
933 /* Configuration may specify what MAC to use. Otherwise random. */ 925 /* Configuration may specify what MAC to use. Otherwise random. */
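The comment "(!csum && gso) case will be fixed by register_netdev()" in the virtio_net hunk relies on the core's feature fix-up, which refuses TSO/GSO when checksum offload or scatter/gather is missing; bits left only in hw_features stay available for later enabling through ethtool. A rough, simplified sketch of that dependency check (not the actual netdev_fix_features() body):

static u32 example_fixup(u32 features)
{
        /* TSO needs checksum offload ... */
        if (!(features & NETIF_F_ALL_CSUM))
                features &= ~NETIF_F_ALL_TSO;
        /* ... and scatter/gather. */
        if (!(features & NETIF_F_SG))
                features &= ~NETIF_F_ALL_TSO;
        return features;
}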
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index c5eb034107fd..43c458323f83 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -134,22 +134,29 @@ static void vxge_ethtool_gregs(struct net_device *dev,
 /**
  * vxge_ethtool_idnic - To physically identify the nic on the system.
  * @dev : device pointer.
- * @id : pointer to the structure with identification parameters given by
- *       ethtool.
+ * @state : requested LED state
  *
  * Used to physically identify the NIC on the system.
- * The Link LED will blink for a time specified by the user.
- * Return value:
  * 0 on success
  */
-static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
+static int vxge_ethtool_idnic(struct net_device *dev,
+			      enum ethtool_phys_id_state state)
 {
 	struct vxgedev *vdev = netdev_priv(dev);
 	struct __vxge_hw_device *hldev = vdev->devh;
 
-	vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
-	msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
-	vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
+		break;
+
+	default:
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1183,7 +1190,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = vxge_ethtool_op_set_tso,
 	.get_strings = vxge_ethtool_get_strings,
-	.phys_id = vxge_ethtool_idnic,
+	.set_phys_id = vxge_ethtool_idnic,
 	.get_sset_count = vxge_ethtool_get_sset_count,
 	.get_ethtool_stats = vxge_get_ethtool_stats,
 	.set_flags = vxge_set_flags,
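[Editor's note: with the .set_phys_id conversion above, the ethtool core rather than the driver owns the identify-blink timing: the callback is invoked with ETHTOOL_ID_ACTIVE when identification starts and ETHTOOL_ID_INACTIVE when it stops, so the old msleep()-based .phys_id loop disappears. The sketch below shows the callback shape for a hypothetical driver; foo_set_phys_id(), foo_priv and foo_led_override() are invented names, not part of this patch.]

	static int foo_set_phys_id(struct net_device *dev,
				   enum ethtool_phys_id_state state)
	{
		struct foo_priv *priv = netdev_priv(dev);

		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			foo_led_override(priv, true);	/* start identifying */
			return 0;
		case ETHTOOL_ID_INACTIVE:
			foo_led_override(priv, false);	/* restore normal LED */
			return 0;
		default:
			return -EINVAL;
		}
	}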
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 22b8c3505991..1ce729d6af75 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -26,7 +26,7 @@ struct backend_info {
 	struct xenvif *vif;
 	enum xenbus_state frontend_state;
 	struct xenbus_watch hotplug_status_watch;
-	int have_hotplug_status_watch:1;
+	u8 have_hotplug_status_watch:1;
 };
 
 static int connect_rings(struct backend_info *);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5c8d9c385be0..db9a763aaa7f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1140,6 +1140,42 @@ static void xennet_uninit(struct net_device *dev)
 	gnttab_free_grant_references(np->gref_rx_head);
 }
 
+static u32 xennet_fix_features(struct net_device *dev, u32 features)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	int val;
+
+	if (features & NETIF_F_SG) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+				 "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_SG;
+	}
+
+	if (features & NETIF_F_TSO) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-gso-tcpv4", "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_TSO;
+	}
+
+	return features;
+}
+
+static int xennet_set_features(struct net_device *dev, u32 features)
+{
+	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
+		netdev_info(dev, "Reducing MTU because no SG offload");
+		dev->mtu = ETH_DATA_LEN;
+	}
+
+	return 0;
+}
+
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open = xennet_open,
 	.ndo_uninit = xennet_uninit,
@@ -1148,6 +1184,8 @@ static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_change_mtu = xennet_change_mtu,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_fix_features = xennet_fix_features,
+	.ndo_set_features = xennet_set_features,
 };
 
 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
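[Editor's note: the two hooks wired up above are driven by the feature-negotiation core whenever netdev_update_features() runs, e.g. from the xennet_connect() change later in this patch. The sketch below is a simplified, approximate view of that flow for orientation only; the real logic lives in net/core/dev.c and example_update_features() is an invented name.]

	static void example_update_features(struct net_device *dev)
	{
		/* Start from what the user asked for, limited to togglable bits. */
		u32 features = dev->features & dev->hw_features;

		/* Let the driver mask out what the backend/hardware cannot do. */
		if (dev->netdev_ops->ndo_fix_features)
			features = dev->netdev_ops->ndo_fix_features(dev, features);

		if (features != dev->features) {
			/* Apply the new set, then record it as current. */
			if (dev->netdev_ops->ndo_set_features)
				dev->netdev_ops->ndo_set_features(dev, features);
			dev->features = features;
		}
	}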
@@ -1209,7 +1247,17 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
 	netdev->netdev_ops = &xennet_netdev_ops;
 
 	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
-	netdev->features = NETIF_F_IP_CSUM;
+	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+			   NETIF_F_GSO_ROBUST;
+	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+	/*
+	 * Assume that all hw features are available for now. This set
+	 * will be adjusted by the call to netdev_update_features() in
+	 * xennet_connect() which is the earliest point where we can
+	 * negotiate with the backend regarding supported features.
+	 */
+	netdev->features |= netdev->hw_features;
 
 	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
 	SET_NETDEV_DEV(netdev, &dev->dev);
@@ -1416,8 +1464,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 		goto fail;
 
 	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
-					IRQF_SAMPLE_RANDOM, netdev->name,
-					netdev);
+					0, netdev->name, netdev);
 	if (err < 0)
 		goto fail;
 	netdev->irq = err;
@@ -1510,54 +1557,6 @@ again:
 	return err;
 }
 
-static int xennet_set_sg(struct net_device *dev, u32 data)
-{
-	if (data) {
-		struct netfront_info *np = netdev_priv(dev);
-		int val;
-
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
-				 "%d", &val) < 0)
-			val = 0;
-		if (!val)
-			return -ENOSYS;
-	} else if (dev->mtu > ETH_DATA_LEN)
-		dev->mtu = ETH_DATA_LEN;
-
-	return ethtool_op_set_sg(dev, data);
-}
-
-static int xennet_set_tso(struct net_device *dev, u32 data)
-{
-	if (data) {
-		struct netfront_info *np = netdev_priv(dev);
-		int val;
-
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-gso-tcpv4", "%d", &val) < 0)
-			val = 0;
-		if (!val)
-			return -ENOSYS;
-	}
-
-	return ethtool_op_set_tso(dev, data);
-}
-
-static void xennet_set_features(struct net_device *dev)
-{
-	/* Turn off all GSO bits except ROBUST. */
-	dev->features &= ~NETIF_F_GSO_MASK;
-	dev->features |= NETIF_F_GSO_ROBUST;
-	xennet_set_sg(dev, 0);
-
-	/* We need checksum offload to enable scatter/gather and TSO. */
-	if (!(dev->features & NETIF_F_IP_CSUM))
-		return;
-
-	if (!xennet_set_sg(dev, 1))
-		xennet_set_tso(dev, 1);
-}
-
 static int xennet_connect(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
@@ -1582,7 +1581,7 @@ static int xennet_connect(struct net_device *dev)
 	if (err)
 		return err;
 
-	xennet_set_features(dev);
+	netdev_update_features(dev);
 
 	spin_lock_bh(&np->rx_lock);
 	spin_lock_irq(&np->tx_lock);
@@ -1710,9 +1709,6 @@ static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
 
 static const struct ethtool_ops xennet_ethtool_ops =
 {
-	.set_tx_csum = ethtool_op_set_tx_csum,
-	.set_sg = xennet_set_sg,
-	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
 
 	.get_sset_count = xennet_get_sset_count,