Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/a2065.c | 10
-rw-r--r--  drivers/net/ariadne.c | 10
-rw-r--r--  drivers/net/atlx/atl2.c | 22
-rw-r--r--  drivers/net/bfin_mac.c | 13
-rw-r--r--  drivers/net/bnx2.c | 2
-rw-r--r--  drivers/net/bonding/bond_alb.h | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 58
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/caif/Makefile | 4
-rw-r--r--  drivers/net/can/c_can/c_can.c | 22
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 9
-rw-r--r--  drivers/net/can/janz-ican3.c | 3
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 14
-rw-r--r--  drivers/net/davinci_cpdma.c | 11
-rw-r--r--  drivers/net/davinci_cpdma.h | 1
-rw-r--r--  drivers/net/davinci_emac.c | 5
-rw-r--r--  drivers/net/dm9000.c | 8
-rw-r--r--  drivers/net/ftmac100.c | 2
-rw-r--r--  drivers/net/gianfar.c | 16
-rw-r--r--  drivers/net/gianfar.h | 1
-rw-r--r--  drivers/net/irda/via-ircc.c | 94
-rw-r--r--  drivers/net/jme.c | 30
-rw-r--r--  drivers/net/ks8842.c | 3
-rw-r--r--  drivers/net/ksz884x.c | 2
-rw-r--r--  drivers/net/macvlan.c | 18
-rw-r--r--  drivers/net/mlx4/alloc.c | 13
-rw-r--r--  drivers/net/mlx4/cq.c | 2
-rw-r--r--  drivers/net/mlx4/en_cq.c | 38
-rw-r--r--  drivers/net/mlx4/en_ethtool.c | 66
-rw-r--r--  drivers/net/mlx4/en_main.c | 22
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 202
-rw-r--r--  drivers/net/mlx4/en_port.c | 13
-rw-r--r--  drivers/net/mlx4/en_port.h | 19
-rw-r--r--  drivers/net/mlx4/en_rx.c | 11
-rw-r--r--  drivers/net/mlx4/en_tx.c | 72
-rw-r--r--  drivers/net/mlx4/eq.c | 111
-rw-r--r--  drivers/net/mlx4/fw.c | 25
-rw-r--r--  drivers/net/mlx4/fw.h | 3
-rw-r--r--  drivers/net/mlx4/main.c | 122
-rw-r--r--  drivers/net/mlx4/mcg.c | 647
-rw-r--r--  drivers/net/mlx4/mlx4.h | 50
-rw-r--r--  drivers/net/mlx4/mlx4_en.h | 27
-rw-r--r--  drivers/net/mlx4/pd.c | 102
-rw-r--r--  drivers/net/mlx4/port.c | 165
-rw-r--r--  drivers/net/mlx4/profile.c | 4
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 38
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 2
-rw-r--r--  drivers/net/niu.c | 2
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 6
-rw-r--r--  drivers/net/phy/phy_device.c | 8
-rw-r--r--  drivers/net/ppp_deflate.c | 2
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 2
-rw-r--r--  drivers/net/r8169.c | 4
-rw-r--r--  drivers/net/rionet.c | 6
-rw-r--r--  drivers/net/s2io.c | 2
-rw-r--r--  drivers/net/sfc/efx.c | 18
-rw-r--r--  drivers/net/sfc/workarounds.h | 2
-rw-r--r--  drivers/net/skfp/Makefile | 2
-rw-r--r--  drivers/net/starfire.c | 6
-rw-r--r--  drivers/net/tg3.c | 6
-rw-r--r--  drivers/net/usb/Kconfig | 15
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/cdc_eem.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 23
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 2
-rw-r--r--  drivers/net/usb/cdc_subset.c | 8
-rw-r--r--  drivers/net/usb/gl620a.c | 2
-rw-r--r--  drivers/net/usb/lg-vl600.c | 346
-rw-r--r--  drivers/net/usb/net1080.c | 2
-rw-r--r--  drivers/net/usb/plusb.c | 2
-rw-r--r--  drivers/net/usb/rndis_host.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 17
-rw-r--r--  drivers/net/usb/usbnet.c | 13
-rw-r--r--  drivers/net/usb/zaurus.c | 8
-rw-r--r--  drivers/net/veth.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 4
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c | 4
-rw-r--r--  drivers/net/wan/lmc/Makefile | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 4
-rw-r--r--  drivers/net/wireless/ath/carl9170/carl9170.h | 1
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 1
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 7
-rw-r--r--  drivers/net/wireless/hostap/hostap_config.h | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-core.c | 10
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl3945-base.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 7
-rw-r--r--  drivers/net/wireless/orinoco/cfg.c | 3
-rw-r--r--  drivers/net/wireless/orinoco/main.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54spi.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 31
-rw-r--r--  drivers/net/wireless/wl1251/sdio.c | 2
-rw-r--r--  drivers/net/wireless/wl1251/spi.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/Makefile | 4
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1
98 files changed, 2272 insertions(+), 476 deletions(-)
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index f142cc21e45..deaa8bc16cf 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -711,14 +711,14 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
711 return -EBUSY; 711 return -EBUSY;
712 r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); 712 r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
713 if (!r2) { 713 if (!r2) {
714 release_resource(r1); 714 release_mem_region(base_addr, sizeof(struct lance_regs));
715 return -EBUSY; 715 return -EBUSY;
716 } 716 }
717 717
718 dev = alloc_etherdev(sizeof(struct lance_private)); 718 dev = alloc_etherdev(sizeof(struct lance_private));
719 if (dev == NULL) { 719 if (dev == NULL) {
720 release_resource(r1); 720 release_mem_region(base_addr, sizeof(struct lance_regs));
721 release_resource(r2); 721 release_mem_region(mem_start, A2065_RAM_SIZE);
722 return -ENOMEM; 722 return -ENOMEM;
723 } 723 }
724 724
@@ -764,8 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
764 764
765 err = register_netdev(dev); 765 err = register_netdev(dev);
766 if (err) { 766 if (err) {
767 release_resource(r1); 767 release_mem_region(base_addr, sizeof(struct lance_regs));
768 release_resource(r2); 768 release_mem_region(mem_start, A2065_RAM_SIZE);
769 free_netdev(dev); 769 free_netdev(dev);
770 return err; 770 return err;
771 } 771 }
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 7ca0eded256..b7f45cd756a 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -182,14 +182,14 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
182 return -EBUSY; 182 return -EBUSY;
183 r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); 183 r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
184 if (!r2) { 184 if (!r2) {
185 release_resource(r1); 185 release_mem_region(base_addr, sizeof(struct Am79C960));
186 return -EBUSY; 186 return -EBUSY;
187 } 187 }
188 188
189 dev = alloc_etherdev(sizeof(struct ariadne_private)); 189 dev = alloc_etherdev(sizeof(struct ariadne_private));
190 if (dev == NULL) { 190 if (dev == NULL) {
191 release_resource(r1); 191 release_mem_region(base_addr, sizeof(struct Am79C960));
192 release_resource(r2); 192 release_mem_region(mem_start, ARIADNE_RAM_SIZE);
193 return -ENOMEM; 193 return -ENOMEM;
194 } 194 }
195 195
@@ -213,8 +213,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
213 213
214 err = register_netdev(dev); 214 err = register_netdev(dev);
215 if (err) { 215 if (err) {
216 release_resource(r1); 216 release_mem_region(base_addr, sizeof(struct Am79C960));
217 release_resource(r2); 217 release_mem_region(mem_start, ARIADNE_RAM_SIZE);
218 free_netdev(dev); 218 free_netdev(dev);
219 return err; 219 return err;
220 } 220 }
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index e637e9f28fd..937ef1afa5d 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1996,13 +1996,15 @@ static int atl2_set_eeprom(struct net_device *netdev,
1996 if (!eeprom_buff) 1996 if (!eeprom_buff)
1997 return -ENOMEM; 1997 return -ENOMEM;
1998 1998
1999 ptr = (u32 *)eeprom_buff; 1999 ptr = eeprom_buff;
2000 2000
2001 if (eeprom->offset & 3) { 2001 if (eeprom->offset & 3) {
2002 /* need read/modify/write of first changed EEPROM word */ 2002 /* need read/modify/write of first changed EEPROM word */
2003 /* only the second byte of the word is being modified */ 2003 /* only the second byte of the word is being modified */
2004 if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) 2004 if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
2005 return -EIO; 2005 ret_val = -EIO;
2006 goto out;
2007 }
2006 ptr++; 2008 ptr++;
2007 } 2009 }
2008 if (((eeprom->offset + eeprom->len) & 3)) { 2010 if (((eeprom->offset + eeprom->len) & 3)) {
@@ -2011,18 +2013,22 @@ static int atl2_set_eeprom(struct net_device *netdev,
2011 * only the first byte of the word is being modified 2013 * only the first byte of the word is being modified
2012 */ 2014 */
2013 if (!atl2_read_eeprom(hw, last_dword * 4, 2015 if (!atl2_read_eeprom(hw, last_dword * 4,
2014 &(eeprom_buff[last_dword - first_dword]))) 2016 &(eeprom_buff[last_dword - first_dword]))) {
2015 return -EIO; 2017 ret_val = -EIO;
2018 goto out;
2019 }
2016 } 2020 }
2017 2021
2018 /* Device's eeprom is always little-endian, word addressable */ 2022 /* Device's eeprom is always little-endian, word addressable */
2019 memcpy(ptr, bytes, eeprom->len); 2023 memcpy(ptr, bytes, eeprom->len);
2020 2024
2021 for (i = 0; i < last_dword - first_dword + 1; i++) { 2025 for (i = 0; i < last_dword - first_dword + 1; i++) {
2022 if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) 2026 if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) {
2023 return -EIO; 2027 ret_val = -EIO;
2028 goto out;
2029 }
2024 } 2030 }
2025 2031 out:
2026 kfree(eeprom_buff); 2032 kfree(eeprom_buff);
2027 return ret_val; 2033 return ret_val;
2028} 2034}
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 22abfb39d81..68d45ba2d9b 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1237,8 +1237,17 @@ static int bfin_mac_enable(struct phy_device *phydev)
1237 1237
1238 if (phydev->interface == PHY_INTERFACE_MODE_RMII) { 1238 if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
1239 opmode |= RMII; /* For Now only 100MBit are supported */ 1239 opmode |= RMII; /* For Now only 100MBit are supported */
1240#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 1240#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
1241 opmode |= TE; 1241 if (__SILICON_REVISION__ < 3) {
1242 /*
1243 * This isn't publicly documented (fun times!), but in
1244 * silicon <=0.2, the RX and TX pins are clocked together.
1245 * So in order to recv, we must enable the transmit side
1246 * as well. This will cause a spurious TX interrupt too,
1247 * but we can easily consume that.
1248 */
1249 opmode |= TE;
1250 }
1242#endif 1251#endif
1243 } 1252 }
1244 1253
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d1865cc9731..8e6d618b530 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -8317,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8317#endif 8317#endif
8318}; 8318};
8319 8319
8320static void inline vlan_features_add(struct net_device *dev, u32 flags) 8320static inline void vlan_features_add(struct net_device *dev, u32 flags)
8321{ 8321{
8322 dev->vlan_features |= flags; 8322 dev->vlan_features |= flags;
8323} 8323}
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 118c28aa471..4b3e3587840 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -74,7 +74,7 @@ struct tlb_client_info {
74 * packets to a Client that the Hash function 74 * packets to a Client that the Hash function
75 * gave this entry index. 75 * gave this entry index.
76 */ 76 */
77 u32 tx_bytes; /* Each Client acumulates the BytesTx that 77 u32 tx_bytes; /* Each Client accumulates the BytesTx that
78 * were tranmitted to it, and after each 78 * were tranmitted to it, and after each
79 * CallBack the LoadHistory is devided 79 * CallBack the LoadHistory is devided
80 * by the balance interval 80 * by the balance interval
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1a6e9eb7af4..16d6fe95469 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1482,21 +1482,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1482{ 1482{
1483 struct sk_buff *skb = *pskb; 1483 struct sk_buff *skb = *pskb;
1484 struct slave *slave; 1484 struct slave *slave;
1485 struct net_device *bond_dev;
1486 struct bonding *bond; 1485 struct bonding *bond;
1487 1486
1488 slave = bond_slave_get_rcu(skb->dev);
1489 bond_dev = ACCESS_ONCE(slave->dev->master);
1490 if (unlikely(!bond_dev))
1491 return RX_HANDLER_PASS;
1492
1493 skb = skb_share_check(skb, GFP_ATOMIC); 1487 skb = skb_share_check(skb, GFP_ATOMIC);
1494 if (unlikely(!skb)) 1488 if (unlikely(!skb))
1495 return RX_HANDLER_CONSUMED; 1489 return RX_HANDLER_CONSUMED;
1496 1490
1497 *pskb = skb; 1491 *pskb = skb;
1498 1492
1499 bond = netdev_priv(bond_dev); 1493 slave = bond_slave_get_rcu(skb->dev);
1494 bond = slave->bond;
1500 1495
1501 if (bond->params.arp_interval) 1496 if (bond->params.arp_interval)
1502 slave->dev->last_rx = jiffies; 1497 slave->dev->last_rx = jiffies;
@@ -1505,10 +1500,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1505 return RX_HANDLER_EXACT; 1500 return RX_HANDLER_EXACT;
1506 } 1501 }
1507 1502
1508 skb->dev = bond_dev; 1503 skb->dev = bond->dev;
1509 1504
1510 if (bond->params.mode == BOND_MODE_ALB && 1505 if (bond->params.mode == BOND_MODE_ALB &&
1511 bond_dev->priv_flags & IFF_BRIDGE_PORT && 1506 bond->dev->priv_flags & IFF_BRIDGE_PORT &&
1512 skb->pkt_type == PACKET_HOST) { 1507 skb->pkt_type == PACKET_HOST) {
1513 1508
1514 if (unlikely(skb_cow_head(skb, 1509 if (unlikely(skb_cow_head(skb,
@@ -1516,7 +1511,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1516 kfree_skb(skb); 1511 kfree_skb(skb);
1517 return RX_HANDLER_CONSUMED; 1512 return RX_HANDLER_CONSUMED;
1518 } 1513 }
1519 memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN); 1514 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
1520 } 1515 }
1521 1516
1522 return RX_HANDLER_ANOTHER; 1517 return RX_HANDLER_ANOTHER;
@@ -1698,20 +1693,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1698 pr_debug("Error %d calling netdev_set_bond_master\n", res); 1693 pr_debug("Error %d calling netdev_set_bond_master\n", res);
1699 goto err_restore_mac; 1694 goto err_restore_mac;
1700 } 1695 }
1701 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1702 new_slave);
1703 if (res) {
1704 pr_debug("Error %d calling netdev_rx_handler_register\n", res);
1705 goto err_unset_master;
1706 }
1707 1696
1708 /* open the slave since the application closed it */ 1697 /* open the slave since the application closed it */
1709 res = dev_open(slave_dev); 1698 res = dev_open(slave_dev);
1710 if (res) { 1699 if (res) {
1711 pr_debug("Opening slave %s failed\n", slave_dev->name); 1700 pr_debug("Opening slave %s failed\n", slave_dev->name);
1712 goto err_unreg_rxhandler; 1701 goto err_unset_master;
1713 } 1702 }
1714 1703
1704 new_slave->bond = bond;
1715 new_slave->dev = slave_dev; 1705 new_slave->dev = slave_dev;
1716 slave_dev->priv_flags |= IFF_BONDING; 1706 slave_dev->priv_flags |= IFF_BONDING;
1717 1707
@@ -1907,6 +1897,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1907 if (res) 1897 if (res)
1908 goto err_close; 1898 goto err_close;
1909 1899
1900 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1901 new_slave);
1902 if (res) {
1903 pr_debug("Error %d calling netdev_rx_handler_register\n", res);
1904 goto err_dest_symlinks;
1905 }
1906
1910 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1907 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
1911 bond_dev->name, slave_dev->name, 1908 bond_dev->name, slave_dev->name,
1912 bond_is_active_slave(new_slave) ? "n active" : " backup", 1909 bond_is_active_slave(new_slave) ? "n active" : " backup",
@@ -1916,13 +1913,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1916 return 0; 1913 return 0;
1917 1914
1918/* Undo stages on error */ 1915/* Undo stages on error */
1916err_dest_symlinks:
1917 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1918
1919err_close: 1919err_close:
1920 dev_close(slave_dev); 1920 dev_close(slave_dev);
1921 1921
1922err_unreg_rxhandler:
1923 netdev_rx_handler_unregister(slave_dev);
1924 synchronize_net();
1925
1926err_unset_master: 1922err_unset_master:
1927 netdev_set_bond_master(slave_dev, NULL); 1923 netdev_set_bond_master(slave_dev, NULL);
1928 1924
@@ -1988,6 +1984,14 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1988 return -EINVAL; 1984 return -EINVAL;
1989 } 1985 }
1990 1986
1987 /* unregister rx_handler early so bond_handle_frame wouldn't be called
1988 * for this slave anymore.
1989 */
1990 netdev_rx_handler_unregister(slave_dev);
1991 write_unlock_bh(&bond->lock);
1992 synchronize_net();
1993 write_lock_bh(&bond->lock);
1994
1991 if (!bond->params.fail_over_mac) { 1995 if (!bond->params.fail_over_mac) {
1992 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && 1996 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
1993 bond->slave_cnt > 1) 1997 bond->slave_cnt > 1)
@@ -2104,8 +2108,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2104 netif_addr_unlock_bh(bond_dev); 2108 netif_addr_unlock_bh(bond_dev);
2105 } 2109 }
2106 2110
2107 netdev_rx_handler_unregister(slave_dev);
2108 synchronize_net();
2109 netdev_set_bond_master(slave_dev, NULL); 2111 netdev_set_bond_master(slave_dev, NULL);
2110 2112
2111 slave_disable_netpoll(slave); 2113 slave_disable_netpoll(slave);
@@ -2130,7 +2132,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2130} 2132}
2131 2133
2132/* 2134/*
2133* First release a slave and than destroy the bond if no more slaves are left. 2135* First release a slave and then destroy the bond if no more slaves are left.
2134* Must be under rtnl_lock when this function is called. 2136* Must be under rtnl_lock when this function is called.
2135*/ 2137*/
2136static int bond_release_and_destroy(struct net_device *bond_dev, 2138static int bond_release_and_destroy(struct net_device *bond_dev,
@@ -2186,6 +2188,12 @@ static int bond_release_all(struct net_device *bond_dev)
2186 */ 2188 */
2187 write_unlock_bh(&bond->lock); 2189 write_unlock_bh(&bond->lock);
2188 2190
2191 /* unregister rx_handler early so bond_handle_frame wouldn't
2192 * be called for this slave anymore.
2193 */
2194 netdev_rx_handler_unregister(slave_dev);
2195 synchronize_net();
2196
2189 if (bond_is_lb(bond)) { 2197 if (bond_is_lb(bond)) {
2190 /* must be called only after the slave 2198 /* must be called only after the slave
2191 * has been detached from the list 2199 * has been detached from the list
@@ -2217,8 +2225,6 @@ static int bond_release_all(struct net_device *bond_dev)
2217 netif_addr_unlock_bh(bond_dev); 2225 netif_addr_unlock_bh(bond_dev);
2218 } 2226 }
2219 2227
2220 netdev_rx_handler_unregister(slave_dev);
2221 synchronize_net();
2222 netdev_set_bond_master(slave_dev, NULL); 2228 netdev_set_bond_master(slave_dev, NULL);
2223 2229
2224 slave_disable_netpoll(slave); 2230 slave_disable_netpoll(slave);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 6b26962fd0e..90736cb4d97 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -187,6 +187,7 @@ struct slave {
187 struct net_device *dev; /* first - useful for panic debug */ 187 struct net_device *dev; /* first - useful for panic debug */
188 struct slave *next; 188 struct slave *next;
189 struct slave *prev; 189 struct slave *prev;
190 struct bonding *bond; /* our master */
190 int delay; 191 int delay;
191 unsigned long jiffies; 192 unsigned long jiffies;
192 unsigned long last_arp_rx; 193 unsigned long last_arp_rx;
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index b38d987da67..9560b9d624b 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,6 +1,4 @@
1ifeq ($(CONFIG_CAIF_DEBUG),y) 1ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
2EXTRA_CFLAGS += -DDEBUG
3endif
4 2
5# Serial interface 3# Serial interface
6obj-$(CONFIG_CAIF_TTY) += caif_serial.o 4obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 14050786218..31552959aed 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -588,14 +588,9 @@ static void c_can_chip_config(struct net_device *dev)
588{ 588{
589 struct c_can_priv *priv = netdev_priv(dev); 589 struct c_can_priv *priv = netdev_priv(dev);
590 590
591 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 591 /* enable automatic retransmission */
592 /* disable automatic retransmission */ 592 priv->write_reg(priv, &priv->regs->control,
593 priv->write_reg(priv, &priv->regs->control, 593 CONTROL_ENABLE_AR);
594 CONTROL_DISABLE_AR);
595 else
596 /* enable automatic retransmission */
597 priv->write_reg(priv, &priv->regs->control,
598 CONTROL_ENABLE_AR);
599 594
600 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & 595 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
601 CAN_CTRLMODE_LOOPBACK)) { 596 CAN_CTRLMODE_LOOPBACK)) {
@@ -633,9 +628,6 @@ static void c_can_start(struct net_device *dev)
633{ 628{
634 struct c_can_priv *priv = netdev_priv(dev); 629 struct c_can_priv *priv = netdev_priv(dev);
635 630
636 /* enable status change, error and module interrupts */
637 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
638
639 /* basic c_can configuration */ 631 /* basic c_can configuration */
640 c_can_chip_config(dev); 632 c_can_chip_config(dev);
641 633
@@ -643,6 +635,9 @@ static void c_can_start(struct net_device *dev)
643 635
644 /* reset tx helper pointers */ 636 /* reset tx helper pointers */
645 priv->tx_next = priv->tx_echo = 0; 637 priv->tx_next = priv->tx_echo = 0;
638
639 /* enable status change, error and module interrupts */
640 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
646} 641}
647 642
648static void c_can_stop(struct net_device *dev) 643static void c_can_stop(struct net_device *dev)
@@ -704,7 +699,6 @@ static void c_can_do_tx(struct net_device *dev)
704 699
705 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 700 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
706 msg_obj_no = get_tx_echo_msg_obj(priv); 701 msg_obj_no = get_tx_echo_msg_obj(priv);
707 c_can_inval_msg_object(dev, 0, msg_obj_no);
708 val = c_can_read_reg32(priv, &priv->regs->txrqst1); 702 val = c_can_read_reg32(priv, &priv->regs->txrqst1);
709 if (!(val & (1 << msg_obj_no))) { 703 if (!(val & (1 << msg_obj_no))) {
710 can_get_echo_skb(dev, 704 can_get_echo_skb(dev,
@@ -713,6 +707,7 @@ static void c_can_do_tx(struct net_device *dev)
713 &priv->regs->ifregs[0].msg_cntrl) 707 &priv->regs->ifregs[0].msg_cntrl)
714 & IF_MCONT_DLC_MASK; 708 & IF_MCONT_DLC_MASK;
715 stats->tx_packets++; 709 stats->tx_packets++;
710 c_can_inval_msg_object(dev, 0, msg_obj_no);
716 } 711 }
717 } 712 }
718 713
@@ -1112,8 +1107,7 @@ struct net_device *alloc_c_can_dev(void)
1112 priv->can.bittiming_const = &c_can_bittiming_const; 1107 priv->can.bittiming_const = &c_can_bittiming_const;
1113 priv->can.do_set_mode = c_can_set_mode; 1108 priv->can.do_set_mode = c_can_set_mode;
1114 priv->can.do_get_berr_counter = c_can_get_berr_counter; 1109 priv->can.do_get_berr_counter = c_can_get_berr_counter;
1115 priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT | 1110 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1116 CAN_CTRLMODE_LOOPBACK |
1117 CAN_CTRLMODE_LISTENONLY | 1111 CAN_CTRLMODE_LISTENONLY |
1118 CAN_CTRLMODE_BERR_REPORTING; 1112 CAN_CTRLMODE_BERR_REPORTING;
1119 1113
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e629b961ae2..cc90824f2c9 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -73,7 +73,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
73 void __iomem *addr; 73 void __iomem *addr;
74 struct net_device *dev; 74 struct net_device *dev;
75 struct c_can_priv *priv; 75 struct c_can_priv *priv;
76 struct resource *mem, *irq; 76 struct resource *mem;
77 int irq;
77#ifdef CONFIG_HAVE_CLK 78#ifdef CONFIG_HAVE_CLK
78 struct clk *clk; 79 struct clk *clk;
79 80
@@ -88,8 +89,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
88 89
89 /* get the platform data */ 90 /* get the platform data */
90 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 91 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
91 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 92 irq = platform_get_irq(pdev, 0);
92 if (!mem || (irq <= 0)) { 93 if (!mem || irq <= 0) {
93 ret = -ENODEV; 94 ret = -ENODEV;
94 goto exit_free_clk; 95 goto exit_free_clk;
95 } 96 }
@@ -117,7 +118,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
117 118
118 priv = netdev_priv(dev); 119 priv = netdev_priv(dev);
119 120
120 dev->irq = irq->start; 121 dev->irq = irq;
121 priv->regs = addr; 122 priv->regs = addr;
122#ifdef CONFIG_HAVE_CLK 123#ifdef CONFIG_HAVE_CLK
123 priv->can.clock.freq = clk_get_rate(clk); 124 priv->can.clock.freq = clk_get_rate(clk);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 366f5cc050a..102b16c6cc9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -15,6 +15,7 @@
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/mfd/core.h>
18 19
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/can.h> 21#include <linux/can.h>
@@ -1643,7 +1644,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
1643 struct device *dev; 1644 struct device *dev;
1644 int ret; 1645 int ret;
1645 1646
1646 pdata = pdev->dev.platform_data; 1647 pdata = mfd_get_data(pdev);
1647 if (!pdata) 1648 if (!pdata)
1648 return -ENXIO; 1649 return -ENXIO;
1649 1650
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 4d538a4e9d5..91089314329 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1983,14 +1983,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1983{ 1983{
1984 struct port_info *pi = netdev_priv(dev); 1984 struct port_info *pi = netdev_priv(dev);
1985 struct adapter *adapter = pi->adapter; 1985 struct adapter *adapter = pi->adapter;
1986 struct qset_params *qsp = &adapter->params.sge.qset[0]; 1986 struct qset_params *qsp;
1987 struct sge_qset *qs = &adapter->sge.qs[0]; 1987 struct sge_qset *qs;
1988 int i;
1988 1989
1989 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) 1990 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1990 return -EINVAL; 1991 return -EINVAL;
1991 1992
1992 qsp->coalesce_usecs = c->rx_coalesce_usecs; 1993 for (i = 0; i < pi->nqsets; i++) {
1993 t3_update_qset_coalesce(qs, qsp); 1994 qsp = &adapter->params.sge.qset[i];
1995 qs = &adapter->sge.qs[i];
1996 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1997 t3_update_qset_coalesce(qs, qsp);
1998 }
1999
1994 return 0; 2000 return 0;
1995} 2001}
1996 2002
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
index e92b2b6cd8c..ae47f23ba93 100644
--- a/drivers/net/davinci_cpdma.c
+++ b/drivers/net/davinci_cpdma.c
@@ -76,6 +76,7 @@ struct cpdma_desc {
76 76
77struct cpdma_desc_pool { 77struct cpdma_desc_pool {
78 u32 phys; 78 u32 phys;
79 u32 hw_addr;
79 void __iomem *iomap; /* ioremap map */ 80 void __iomem *iomap; /* ioremap map */
80 void *cpumap; /* dma_alloc map */ 81 void *cpumap; /* dma_alloc map */
81 int desc_size, mem_size; 82 int desc_size, mem_size;
@@ -137,7 +138,8 @@ struct cpdma_chan {
137 * abstract out these details 138 * abstract out these details
138 */ 139 */
139static struct cpdma_desc_pool * 140static struct cpdma_desc_pool *
140cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align) 141cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
142 int size, int align)
141{ 143{
142 int bitmap_size; 144 int bitmap_size;
143 struct cpdma_desc_pool *pool; 145 struct cpdma_desc_pool *pool;
@@ -161,10 +163,12 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
161 if (phys) { 163 if (phys) {
162 pool->phys = phys; 164 pool->phys = phys;
163 pool->iomap = ioremap(phys, size); 165 pool->iomap = ioremap(phys, size);
166 pool->hw_addr = hw_addr;
164 } else { 167 } else {
165 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, 168 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
166 GFP_KERNEL); 169 GFP_KERNEL);
167 pool->iomap = (void __force __iomem *)pool->cpumap; 170 pool->iomap = (void __force __iomem *)pool->cpumap;
171 pool->hw_addr = pool->phys;
168 } 172 }
169 173
170 if (pool->iomap) 174 if (pool->iomap)
@@ -201,14 +205,14 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
201{ 205{
202 if (!desc) 206 if (!desc)
203 return 0; 207 return 0;
204 return pool->phys + (__force dma_addr_t)desc - 208 return pool->hw_addr + (__force dma_addr_t)desc -
205 (__force dma_addr_t)pool->iomap; 209 (__force dma_addr_t)pool->iomap;
206} 210}
207 211
208static inline struct cpdma_desc __iomem * 212static inline struct cpdma_desc __iomem *
209desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) 213desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
210{ 214{
211 return dma ? pool->iomap + dma - pool->phys : NULL; 215 return dma ? pool->iomap + dma - pool->hw_addr : NULL;
212} 216}
213 217
214static struct cpdma_desc __iomem * 218static struct cpdma_desc __iomem *
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
260 264
261 ctlr->pool = cpdma_desc_pool_create(ctlr->dev, 265 ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
262 ctlr->params.desc_mem_phys, 266 ctlr->params.desc_mem_phys,
267 ctlr->params.desc_hw_addr,
263 ctlr->params.desc_mem_size, 268 ctlr->params.desc_mem_size,
264 ctlr->params.desc_align); 269 ctlr->params.desc_align);
265 if (!ctlr->pool) { 270 if (!ctlr->pool) {
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
index 868e50ebde4..afa19a0c0d8 100644
--- a/drivers/net/davinci_cpdma.h
+++ b/drivers/net/davinci_cpdma.h
@@ -33,6 +33,7 @@ struct cpdma_params {
33 bool has_soft_reset; 33 bool has_soft_reset;
34 int min_packet_size; 34 int min_packet_size;
35 u32 desc_mem_phys; 35 u32 desc_mem_phys;
36 u32 desc_hw_addr;
36 int desc_mem_size; 37 int desc_mem_size;
37 int desc_align; 38 int desc_align;
38 39
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 082d6ea6992..baca6bfcb08 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1854,10 +1854,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1854 dma_params.rxcp = priv->emac_base + 0x660; 1854 dma_params.rxcp = priv->emac_base + 0x660;
1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; 1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; 1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
1857 dma_params.desc_mem_phys = hw_ram_addr; 1857 dma_params.desc_hw_addr = hw_ram_addr;
1858 dma_params.desc_mem_size = pdata->ctrl_ram_size; 1858 dma_params.desc_mem_size = pdata->ctrl_ram_size;
1859 dma_params.desc_align = 16; 1859 dma_params.desc_align = 16;
1860 1860
1861 dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
1862 (u32 __force)res->start + pdata->ctrl_ram_offset;
1863
1861 priv->dma = cpdma_ctlr_create(&dma_params); 1864 priv->dma = cpdma_ctlr_create(&dma_params);
1862 if (!priv->dma) { 1865 if (!priv->dma) {
1863 dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n"); 1866 dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 31770811360..b7af5bab993 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -621,9 +621,9 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
621 /* change in wol state, update IRQ state */ 621 /* change in wol state, update IRQ state */
622 622
623 if (!dm->wake_state) 623 if (!dm->wake_state)
624 set_irq_wake(dm->irq_wake, 1); 624 irq_set_irq_wake(dm->irq_wake, 1);
625 else if (dm->wake_state & !opts) 625 else if (dm->wake_state & !opts)
626 set_irq_wake(dm->irq_wake, 0); 626 irq_set_irq_wake(dm->irq_wake, 0);
627 } 627 }
628 628
629 dm->wake_state = opts; 629 dm->wake_state = opts;
@@ -1424,13 +1424,13 @@ dm9000_probe(struct platform_device *pdev)
1424 } else { 1424 } else {
1425 1425
1426 /* test to see if irq is really wakeup capable */ 1426 /* test to see if irq is really wakeup capable */
1427 ret = set_irq_wake(db->irq_wake, 1); 1427 ret = irq_set_irq_wake(db->irq_wake, 1);
1428 if (ret) { 1428 if (ret) {
1429 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", 1429 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1430 db->irq_wake, ret); 1430 db->irq_wake, ret);
1431 ret = 0; 1431 ret = 0;
1432 } else { 1432 } else {
1433 set_irq_wake(db->irq_wake, 0); 1433 irq_set_irq_wake(db->irq_wake, 0);
1434 db->wake_supported = 1; 1434 db->wake_supported = 1;
1435 } 1435 }
1436 } 1436 }
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
index 1d6f4b8d393..a31661948c4 100644
--- a/drivers/net/ftmac100.c
+++ b/drivers/net/ftmac100.c
@@ -1102,7 +1102,7 @@ static int ftmac100_probe(struct platform_device *pdev)
1102 goto err_req_mem; 1102 goto err_req_mem;
1103 } 1103 }
1104 1104
1105 priv->base = ioremap(res->start, res->end - res->start); 1105 priv->base = ioremap(res->start, resource_size(res));
1106 if (!priv->base) { 1106 if (!priv->base) {
1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1108 err = -EIO; 1108 err = -EIO;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ccb231c4d93..2a0ad9a501b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -949,6 +949,11 @@ static void gfar_detect_errata(struct gfar_private *priv)
949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
950 priv->errata |= GFAR_ERRATA_A002; 950 priv->errata |= GFAR_ERRATA_A002;
951 951
952 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
953 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
954 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
955 priv->errata |= GFAR_ERRATA_12;
956
952 if (priv->errata) 957 if (priv->errata)
953 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 958 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
954 priv->errata); 959 priv->errata);
@@ -2154,8 +2159,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2154 /* Set up checksumming */ 2159 /* Set up checksumming */
2155 if (CHECKSUM_PARTIAL == skb->ip_summed) { 2160 if (CHECKSUM_PARTIAL == skb->ip_summed) {
2156 fcb = gfar_add_fcb(skb); 2161 fcb = gfar_add_fcb(skb);
2157 lstatus |= BD_LFLAG(TXBD_TOE); 2162 /* as specified by errata */
2158 gfar_tx_checksum(skb, fcb); 2163 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2164 && ((unsigned long)fcb % 0x20) > 0x18)) {
2165 __skb_pull(skb, GMAC_FCB_LEN);
2166 skb_checksum_help(skb);
2167 } else {
2168 lstatus |= BD_LFLAG(TXBD_TOE);
2169 gfar_tx_checksum(skb, fcb);
2170 }
2159 } 2171 }
2160 2172
2161 if (vlan_tx_tag_present(skb)) { 2173 if (vlan_tx_tag_present(skb)) {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 54de4135e93..ec5d595ce2e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1039,6 +1039,7 @@ enum gfar_errata {
1039 GFAR_ERRATA_74 = 0x01, 1039 GFAR_ERRATA_74 = 0x01,
1040 GFAR_ERRATA_76 = 0x02, 1040 GFAR_ERRATA_76 = 0x02,
1041 GFAR_ERRATA_A002 = 0x04, 1041 GFAR_ERRATA_A002 = 0x04,
1042 GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
1042}; 1043};
1043 1044
1044/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1045/* Struct stolen almost completely (and shamelessly) from the FCC enet source
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 67c0ad42d81..186cd28a61c 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -75,15 +75,9 @@ static int dongle_id = 0; /* default: probe */
75/* We can't guess the type of connected dongle, user *must* supply it. */ 75/* We can't guess the type of connected dongle, user *must* supply it. */
76module_param(dongle_id, int, 0); 76module_param(dongle_id, int, 0);
77 77
78/* FIXME : we should not need this, because instances should be automatically
79 * managed by the PCI layer. Especially that we seem to only be using the
80 * first entry. Jean II */
81/* Max 4 instances for now */
82static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
83
84/* Some prototypes */ 78/* Some prototypes */
85static int via_ircc_open(int i, chipio_t * info, unsigned int id); 79static int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
86static int via_ircc_close(struct via_ircc_cb *self); 80 unsigned int id);
87static int via_ircc_dma_receive(struct via_ircc_cb *self); 81static int via_ircc_dma_receive(struct via_ircc_cb *self);
88static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, 82static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
89 int iobase); 83 int iobase);
@@ -215,7 +209,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
215 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); 209 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
216 pci_write_config_byte(pcidev,0x5a,0xc0); 210 pci_write_config_byte(pcidev,0x5a,0xc0);
217 WriteLPCReg(0x28, 0x70 ); 211 WriteLPCReg(0x28, 0x70 );
218 if (via_ircc_open(0, &info,0x3076) == 0) 212 if (via_ircc_open(pcidev, &info, 0x3076) == 0)
219 rc=0; 213 rc=0;
220 } else 214 } else
221 rc = -ENODEV; //IR not turn on 215 rc = -ENODEV; //IR not turn on
@@ -254,7 +248,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
254 info.irq=FirIRQ; 248 info.irq=FirIRQ;
255 info.dma=FirDRQ1; 249 info.dma=FirDRQ1;
256 info.dma2=FirDRQ0; 250 info.dma2=FirDRQ0;
257 if (via_ircc_open(0, &info,0x3096) == 0) 251 if (via_ircc_open(pcidev, &info, 0x3096) == 0)
258 rc=0; 252 rc=0;
259 } else 253 } else
260 rc = -ENODEV; //IR not turn on !!!!! 254 rc = -ENODEV; //IR not turn on !!!!!
@@ -264,48 +258,10 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
264 return rc; 258 return rc;
265} 259}
266 260
267/*
268 * Function via_ircc_clean ()
269 *
270 * Close all configured chips
271 *
272 */
273static void via_ircc_clean(void)
274{
275 int i;
276
277 IRDA_DEBUG(3, "%s()\n", __func__);
278
279 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
280 if (dev_self[i])
281 via_ircc_close(dev_self[i]);
282 }
283}
284
285static void __devexit via_remove_one (struct pci_dev *pdev)
286{
287 IRDA_DEBUG(3, "%s()\n", __func__);
288
289 /* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
290 * to get our driver instance and call directly via_ircc_close().
291 * See vlsi_ir for details...
292 * Jean II */
293 via_ircc_clean();
294
295 /* FIXME : This should be in via_ircc_close(), because here we may
296 * theoritically disable still configured devices :-( - Jean II */
297 pci_disable_device(pdev);
298}
299
300static void __exit via_ircc_cleanup(void) 261static void __exit via_ircc_cleanup(void)
301{ 262{
302 IRDA_DEBUG(3, "%s()\n", __func__); 263 IRDA_DEBUG(3, "%s()\n", __func__);
303 264
304 /* FIXME : This should be redundant, as pci_unregister_driver()
305 * should call via_remove_one() on each device.
306 * Jean II */
307 via_ircc_clean();
308
309 /* Cleanup all instances of the driver */ 265 /* Cleanup all instances of the driver */
310 pci_unregister_driver (&via_driver); 266 pci_unregister_driver (&via_driver);
311} 267}
@@ -324,12 +280,13 @@ static const struct net_device_ops via_ircc_fir_ops = {
324}; 280};
325 281
326/* 282/*
327 * Function via_ircc_open (iobase, irq) 283 * Function via_ircc_open(pdev, iobase, irq)
328 * 284 *
329 * Open driver instance 285 * Open driver instance
330 * 286 *
331 */ 287 */
332static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id) 288static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
289 unsigned int id)
333{ 290{
334 struct net_device *dev; 291 struct net_device *dev;
335 struct via_ircc_cb *self; 292 struct via_ircc_cb *self;
@@ -337,9 +294,6 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
337 294
338 IRDA_DEBUG(3, "%s()\n", __func__); 295 IRDA_DEBUG(3, "%s()\n", __func__);
339 296
340 if (i >= ARRAY_SIZE(dev_self))
341 return -ENOMEM;
342
343 /* Allocate new instance of the driver */ 297 /* Allocate new instance of the driver */
344 dev = alloc_irdadev(sizeof(struct via_ircc_cb)); 298 dev = alloc_irdadev(sizeof(struct via_ircc_cb));
345 if (dev == NULL) 299 if (dev == NULL)
@@ -349,13 +303,8 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
349 self->netdev = dev; 303 self->netdev = dev;
350 spin_lock_init(&self->lock); 304 spin_lock_init(&self->lock);
351 305
352 /* FIXME : We should store our driver instance in the PCI layer, 306 pci_set_drvdata(pdev, self);
353 * using pci_set_drvdata(), not in this array. 307
354 * See vlsi_ir for details... - Jean II */
355 /* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */
356 /* Need to store self somewhere */
357 dev_self[i] = self;
358 self->index = i;
359 /* Initialize Resource */ 308 /* Initialize Resource */
360 self->io.cfg_base = info->cfg_base; 309 self->io.cfg_base = info->cfg_base;
361 self->io.fir_base = info->fir_base; 310 self->io.fir_base = info->fir_base;
@@ -414,7 +363,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
414 363
415 /* Allocate memory if needed */ 364 /* Allocate memory if needed */
416 self->rx_buff.head = 365 self->rx_buff.head =
417 dma_alloc_coherent(NULL, self->rx_buff.truesize, 366 dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
418 &self->rx_buff_dma, GFP_KERNEL); 367 &self->rx_buff_dma, GFP_KERNEL);
419 if (self->rx_buff.head == NULL) { 368 if (self->rx_buff.head == NULL) {
420 err = -ENOMEM; 369 err = -ENOMEM;
@@ -423,7 +372,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
423 memset(self->rx_buff.head, 0, self->rx_buff.truesize); 372 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
424 373
425 self->tx_buff.head = 374 self->tx_buff.head =
426 dma_alloc_coherent(NULL, self->tx_buff.truesize, 375 dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
427 &self->tx_buff_dma, GFP_KERNEL); 376 &self->tx_buff_dma, GFP_KERNEL);
428 if (self->tx_buff.head == NULL) { 377 if (self->tx_buff.head == NULL) {
429 err = -ENOMEM; 378 err = -ENOMEM;
@@ -455,33 +404,32 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
455 via_hw_init(self); 404 via_hw_init(self);
456 return 0; 405 return 0;
457 err_out4: 406 err_out4:
458 dma_free_coherent(NULL, self->tx_buff.truesize, 407 dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
459 self->tx_buff.head, self->tx_buff_dma); 408 self->tx_buff.head, self->tx_buff_dma);
460 err_out3: 409 err_out3:
461 dma_free_coherent(NULL, self->rx_buff.truesize, 410 dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
462 self->rx_buff.head, self->rx_buff_dma); 411 self->rx_buff.head, self->rx_buff_dma);
463 err_out2: 412 err_out2:
464 release_region(self->io.fir_base, self->io.fir_ext); 413 release_region(self->io.fir_base, self->io.fir_ext);
465 err_out1: 414 err_out1:
415 pci_set_drvdata(pdev, NULL);
466 free_netdev(dev); 416 free_netdev(dev);
467 dev_self[i] = NULL;
468 return err; 417 return err;
469} 418}
470 419
471/* 420/*
472 * Function via_ircc_close (self) 421 * Function via_remove_one(pdev)
473 * 422 *
474 * Close driver instance 423 * Close driver instance
475 * 424 *
476 */ 425 */
477static int via_ircc_close(struct via_ircc_cb *self) 426static void __devexit via_remove_one(struct pci_dev *pdev)
478{ 427{
428 struct via_ircc_cb *self = pci_get_drvdata(pdev);
479 int iobase; 429 int iobase;
480 430
481 IRDA_DEBUG(3, "%s()\n", __func__); 431 IRDA_DEBUG(3, "%s()\n", __func__);
482 432
483 IRDA_ASSERT(self != NULL, return -1;);
484
485 iobase = self->io.fir_base; 433 iobase = self->io.fir_base;
486 434
487 ResetChip(iobase, 5); //hardware reset. 435 ResetChip(iobase, 5); //hardware reset.
@@ -493,16 +441,16 @@ static int via_ircc_close(struct via_ircc_cb *self)
493 __func__, self->io.fir_base); 441 __func__, self->io.fir_base);
494 release_region(self->io.fir_base, self->io.fir_ext); 442 release_region(self->io.fir_base, self->io.fir_ext);
495 if (self->tx_buff.head) 443 if (self->tx_buff.head)
496 dma_free_coherent(NULL, self->tx_buff.truesize, 444 dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
497 self->tx_buff.head, self->tx_buff_dma); 445 self->tx_buff.head, self->tx_buff_dma);
498 if (self->rx_buff.head) 446 if (self->rx_buff.head)
499 dma_free_coherent(NULL, self->rx_buff.truesize, 447 dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
500 self->rx_buff.head, self->rx_buff_dma); 448 self->rx_buff.head, self->rx_buff_dma);
501 dev_self[self->index] = NULL; 449 pci_set_drvdata(pdev, NULL);
502 450
503 free_netdev(self->netdev); 451 free_netdev(self->netdev);
504 452
505 return 0; 453 pci_disable_device(pdev);
506} 454}
507 455
508/* 456/*
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index f690474f440..994c80939c7 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -273,7 +273,7 @@ jme_clear_pm(struct jme_adapter *jme)
273{ 273{
274 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); 274 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
275 pci_set_power_state(jme->pdev, PCI_D0); 275 pci_set_power_state(jme->pdev, PCI_D0);
276 pci_enable_wake(jme->pdev, PCI_D0, false); 276 device_set_wakeup_enable(&jme->pdev->dev, false);
277} 277}
278 278
279static int 279static int
@@ -2538,6 +2538,8 @@ jme_set_wol(struct net_device *netdev,
2538 2538
2539 jwrite32(jme, JME_PMCS, jme->reg_pmcs); 2539 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2540 2540
2541 device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs);
2542
2541 return 0; 2543 return 0;
2542} 2544}
2543 2545
@@ -3172,9 +3174,9 @@ jme_shutdown(struct pci_dev *pdev)
3172} 3174}
3173 3175
3174#ifdef CONFIG_PM 3176#ifdef CONFIG_PM
3175static int 3177static int jme_suspend(struct device *dev)
3176jme_suspend(struct pci_dev *pdev, pm_message_t state)
3177{ 3178{
3179 struct pci_dev *pdev = to_pci_dev(dev);
3178 struct net_device *netdev = pci_get_drvdata(pdev); 3180 struct net_device *netdev = pci_get_drvdata(pdev);
3179 struct jme_adapter *jme = netdev_priv(netdev); 3181 struct jme_adapter *jme = netdev_priv(netdev);
3180 3182
@@ -3206,22 +3208,18 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
3206 tasklet_hi_enable(&jme->rxclean_task); 3208 tasklet_hi_enable(&jme->rxclean_task);
3207 tasklet_hi_enable(&jme->rxempty_task); 3209 tasklet_hi_enable(&jme->rxempty_task);
3208 3210
3209 pci_save_state(pdev);
3210 jme_powersave_phy(jme); 3211 jme_powersave_phy(jme);
3211 pci_enable_wake(jme->pdev, PCI_D3hot, true);
3212 pci_set_power_state(pdev, PCI_D3hot);
3213 3212
3214 return 0; 3213 return 0;
3215} 3214}
3216 3215
3217static int 3216static int jme_resume(struct device *dev)
3218jme_resume(struct pci_dev *pdev)
3219{ 3217{
3218 struct pci_dev *pdev = to_pci_dev(dev);
3220 struct net_device *netdev = pci_get_drvdata(pdev); 3219 struct net_device *netdev = pci_get_drvdata(pdev);
3221 struct jme_adapter *jme = netdev_priv(netdev); 3220 struct jme_adapter *jme = netdev_priv(netdev);
3222 3221
3223 jme_clear_pm(jme); 3222 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
3224 pci_restore_state(pdev);
3225 3223
3226 jme_phy_on(jme); 3224 jme_phy_on(jme);
3227 if (test_bit(JME_FLAG_SSET, &jme->flags)) 3225 if (test_bit(JME_FLAG_SSET, &jme->flags))
@@ -3238,6 +3236,13 @@ jme_resume(struct pci_dev *pdev)
3238 3236
3239 return 0; 3237 return 0;
3240} 3238}
3239
3240static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
3241#define JME_PM_OPS (&jme_pm_ops)
3242
3243#else
3244
3245#define JME_PM_OPS NULL
3241#endif 3246#endif
3242 3247
3243static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { 3248static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
@@ -3251,11 +3256,8 @@ static struct pci_driver jme_driver = {
3251 .id_table = jme_pci_tbl, 3256 .id_table = jme_pci_tbl,
3252 .probe = jme_init_one, 3257 .probe = jme_init_one,
3253 .remove = __devexit_p(jme_remove_one), 3258 .remove = __devexit_p(jme_remove_one),
3254#ifdef CONFIG_PM
3255 .suspend = jme_suspend,
3256 .resume = jme_resume,
3257#endif /* CONFIG_PM */
3258 .shutdown = jme_shutdown, 3259 .shutdown = jme_shutdown,
3260 .driver.pm = JME_PM_OPS,
3259}; 3261};
3260 3262
3261static int __init 3263static int __init
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 928b2b83cef..efd44afeae8 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/mfd/core.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
30#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
31#include <linux/ethtool.h> 32#include <linux/ethtool.h>
@@ -1145,7 +1146,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
1145 struct resource *iomem; 1146 struct resource *iomem;
1146 struct net_device *netdev; 1147 struct net_device *netdev;
1147 struct ks8842_adapter *adapter; 1148 struct ks8842_adapter *adapter;
1148 struct ks8842_platform_data *pdata = pdev->dev.platform_data; 1149 struct ks8842_platform_data *pdata = mfd_get_data(pdev);
1149 u16 id; 1150 u16 id;
1150 unsigned i; 1151 unsigned i;
1151 1152
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 540a8dcbcc4..7f7d5708a65 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -4898,7 +4898,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4898 goto unlock; 4898 goto unlock;
4899 } 4899 }
4900 skb_copy_and_csum_dev(org_skb, skb->data); 4900 skb_copy_and_csum_dev(org_skb, skb->data);
4901 org_skb->ip_summed = 0; 4901 org_skb->ip_summed = CHECKSUM_NONE;
4902 skb->len = org_skb->len; 4902 skb->len = org_skb->len;
4903 copy_old_skb(org_skb, skb); 4903 copy_old_skb(org_skb, skb);
4904 } 4904 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 5b37d3c191e..78e34e9e4f0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,8 +39,11 @@ struct macvlan_port {
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu; 40 struct rcu_head rcu;
41 bool passthru; 41 bool passthru;
42 int count;
42}; 43};
43 44
45static void macvlan_port_destroy(struct net_device *dev);
46
44#define macvlan_port_get_rcu(dev) \ 47#define macvlan_port_get_rcu(dev) \
45 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) 48 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
46#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) 49#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
@@ -457,8 +460,13 @@ static int macvlan_init(struct net_device *dev)
457static void macvlan_uninit(struct net_device *dev) 460static void macvlan_uninit(struct net_device *dev)
458{ 461{
459 struct macvlan_dev *vlan = netdev_priv(dev); 462 struct macvlan_dev *vlan = netdev_priv(dev);
463 struct macvlan_port *port = vlan->port;
460 464
461 free_percpu(vlan->pcpu_stats); 465 free_percpu(vlan->pcpu_stats);
466
467 port->count -= 1;
468 if (!port->count)
469 macvlan_port_destroy(port->dev);
462} 470}
463 471
464static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, 472static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -691,12 +699,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
691 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 699 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
692 700
693 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 701 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
694 if (!list_empty(&port->vlans)) 702 if (port->count)
695 return -EINVAL; 703 return -EINVAL;
696 port->passthru = true; 704 port->passthru = true;
697 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); 705 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
698 } 706 }
699 707
708 port->count += 1;
700 err = register_netdevice(dev); 709 err = register_netdevice(dev);
701 if (err < 0) 710 if (err < 0)
702 goto destroy_port; 711 goto destroy_port;
@@ -707,7 +716,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
707 return 0; 716 return 0;
708 717
709destroy_port: 718destroy_port:
710 if (list_empty(&port->vlans)) 719 port->count -= 1;
720 if (!port->count)
711 macvlan_port_destroy(lowerdev); 721 macvlan_port_destroy(lowerdev);
712 722
713 return err; 723 return err;
@@ -725,13 +735,9 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
725void macvlan_dellink(struct net_device *dev, struct list_head *head) 735void macvlan_dellink(struct net_device *dev, struct list_head *head)
726{ 736{
727 struct macvlan_dev *vlan = netdev_priv(dev); 737 struct macvlan_dev *vlan = netdev_priv(dev);
728 struct macvlan_port *port = vlan->port;
729 738
730 list_del(&vlan->list); 739 list_del(&vlan->list);
731 unregister_netdevice_queue(dev, head); 740 unregister_netdevice_queue(dev, head);
732
733 if (list_empty(&port->vlans))
734 macvlan_port_destroy(port->dev);
735} 741}
736EXPORT_SYMBOL_GPL(macvlan_dellink); 742EXPORT_SYMBOL_GPL(macvlan_dellink);
737 743
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 3a4277f6fac..116cae334da 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
62 } else 62 } else
63 obj = -1; 63 obj = -1;
64 64
65 if (obj != -1)
66 --bitmap->avail;
67
65 spin_unlock(&bitmap->lock); 68 spin_unlock(&bitmap->lock);
66 69
67 return obj; 70 return obj;
@@ -101,11 +104,19 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
101 } else 104 } else
102 obj = -1; 105 obj = -1;
103 106
107 if (obj != -1)
108 bitmap->avail -= cnt;
109
104 spin_unlock(&bitmap->lock); 110 spin_unlock(&bitmap->lock);
105 111
106 return obj; 112 return obj;
107} 113}
108 114
115u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
116{
117 return bitmap->avail;
118}
119
109void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) 120void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
110{ 121{
111 obj &= bitmap->max + bitmap->reserved_top - 1; 122 obj &= bitmap->max + bitmap->reserved_top - 1;
@@ -115,6 +126,7 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
115 bitmap->last = min(bitmap->last, obj); 126 bitmap->last = min(bitmap->last, obj);
116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 127 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
117 & bitmap->mask; 128 & bitmap->mask;
129 bitmap->avail += cnt;
118 spin_unlock(&bitmap->lock); 130 spin_unlock(&bitmap->lock);
119} 131}
120 132
@@ -130,6 +142,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
130 bitmap->max = num - reserved_top; 142 bitmap->max = num - reserved_top;
131 bitmap->mask = mask; 143 bitmap->mask = mask;
132 bitmap->reserved_top = reserved_top; 144 bitmap->reserved_top = reserved_top;
145 bitmap->avail = num - reserved_top - reserved_bot;
133 spin_lock_init(&bitmap->lock); 146 spin_lock_init(&bitmap->lock);
134 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * 147 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
135 sizeof (long), GFP_KERNEL); 148 sizeof (long), GFP_KERNEL);
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 7cd34e9c7c7..bd8ef9f2fa7 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
198 u64 mtt_addr; 198 u64 mtt_addr;
199 int err; 199 int err;
200 200
201 if (vector >= dev->caps.num_comp_vectors) 201 if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
202 return -EINVAL; 202 return -EINVAL;
203 203
204 cq->vector = vector; 204 cq->vector = vector;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 21786ad4455..ec4b6d047fe 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
51 int err; 51 int err;
52 52
53 cq->size = entries; 53 cq->size = entries;
54 if (mode == RX) { 54 if (mode == RX)
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe); 55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
56 cq->vector = ring % mdev->dev->caps.num_comp_vectors; 56 else
57 } else {
58 cq->buf_size = sizeof(struct mlx4_cqe); 57 cq->buf_size = sizeof(struct mlx4_cqe);
59 cq->vector = 0;
60 }
61 58
62 cq->ring = ring; 59 cq->ring = ring;
63 cq->is_tx = mode; 60 cq->is_tx = mode;
@@ -80,7 +77,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
80int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 77int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
81{ 78{
82 struct mlx4_en_dev *mdev = priv->mdev; 79 struct mlx4_en_dev *mdev = priv->mdev;
83 int err; 80 int err = 0;
81 char name[25];
84 82
85 cq->dev = mdev->pndev[priv->port]; 83 cq->dev = mdev->pndev[priv->port];
86 cq->mcq.set_ci_db = cq->wqres.db.db; 84 cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
89 *cq->mcq.arm_db = 0; 87 *cq->mcq.arm_db = 0;
90 memset(cq->buf, 0, cq->buf_size); 88 memset(cq->buf, 0, cq->buf_size);
91 89
90 if (cq->is_tx == RX) {
91 if (mdev->dev->caps.comp_pool) {
92 if (!cq->vector) {
93 sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
94 if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
95 cq->vector = (cq->ring + 1 + priv->port) %
96 mdev->dev->caps.num_comp_vectors;
97 mlx4_warn(mdev, "Failed Assigning an EQ to "
98 "%s_rx-%d ,Falling back to legacy EQ's\n",
99 priv->dev->name, cq->ring);
100 }
101 }
102 } else {
103 cq->vector = (cq->ring + 1 + priv->port) %
104 mdev->dev->caps.num_comp_vectors;
105 }
106 } else {
107 if (!cq->vector || !mdev->dev->caps.comp_pool) {
108 /*Fallback to legacy pool in case of error*/
109 cq->vector = 0;
110 }
111 }
112
92 if (!cq->is_tx) 113 if (!cq->is_tx)
93 cq->size = priv->rx_ring[cq->ring].actual_size; 114 cq->size = priv->rx_ring[cq->ring].actual_size;
94 115
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
112 return 0; 133 return 0;
113} 134}
114 135
115void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 136void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
137 bool reserve_vectors)
116{ 138{
117 struct mlx4_en_dev *mdev = priv->mdev; 139 struct mlx4_en_dev *mdev = priv->mdev;
118 140
119 mlx4_en_unmap_buffer(&cq->wqres.buf); 141 mlx4_en_unmap_buffer(&cq->wqres.buf);
120 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 142 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
143 if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
144 mlx4_release_eq(priv->mdev->dev, cq->vector);
121 cq->buf_size = 0; 145 cq->buf_size = 0;
122 cq->buf = NULL; 146 cq->buf = NULL;
123} 147}
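In the en_cq.c change above, an RX CQ first tries to take a dedicated interrupt vector from the completion-vector pool via mlx4_assign_eq(), and only when that fails does it fall back to spreading rings across the shared legacy vectors. A self-contained sketch of just that fallback rule follows; the numbers used in main() are illustrative.

#include <stdio.h>

/* Legacy spread: ring index and port pick one of the shared vectors,
 * matching "cq->vector = (cq->ring + 1 + priv->port) % num_comp_vectors". */
int legacy_rx_vector(int ring, int port, int num_comp_vectors)
{
    return (ring + 1 + port) % num_comp_vectors;
}

int main(void)
{
    int ring;

    /* e.g. 4 shared completion vectors, port 1: rings rotate over 0..3 */
    for (ring = 0; ring < 8; ring++)
        printf("ring %d -> vector %d\n", ring,
               legacy_rx_vector(ring, 1, 4));
    return 0;
}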
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 056152b3ff5..d54b7abf022 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -45,7 +45,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
45 struct mlx4_en_priv *priv = netdev_priv(dev); 45 struct mlx4_en_priv *priv = netdev_priv(dev);
46 struct mlx4_en_dev *mdev = priv->mdev; 46 struct mlx4_en_dev *mdev = priv->mdev;
47 47
48 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id); 48 strncpy(drvinfo->driver, DRV_NAME, 32);
49 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); 49 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
50 sprintf(drvinfo->fw_version, "%d.%d.%d", 50 sprintf(drvinfo->fw_version, "%d.%d.%d",
51 (u16) (mdev->dev->caps.fw_ver >> 32), 51 (u16) (mdev->dev->caps.fw_ver >> 32),
@@ -131,8 +131,65 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
131static void mlx4_en_get_wol(struct net_device *netdev, 131static void mlx4_en_get_wol(struct net_device *netdev,
132 struct ethtool_wolinfo *wol) 132 struct ethtool_wolinfo *wol)
133{ 133{
134 wol->supported = 0; 134 struct mlx4_en_priv *priv = netdev_priv(netdev);
135 wol->wolopts = 0; 135 int err = 0;
136 u64 config = 0;
137
138 if (!priv->mdev->dev->caps.wol) {
139 wol->supported = 0;
140 wol->wolopts = 0;
141 return;
142 }
143
144 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
145 if (err) {
146 en_err(priv, "Failed to get WoL information\n");
147 return;
148 }
149
150 if (config & MLX4_EN_WOL_MAGIC)
151 wol->supported = WAKE_MAGIC;
152 else
153 wol->supported = 0;
154
155 if (config & MLX4_EN_WOL_ENABLED)
156 wol->wolopts = WAKE_MAGIC;
157 else
158 wol->wolopts = 0;
159}
160
161static int mlx4_en_set_wol(struct net_device *netdev,
162 struct ethtool_wolinfo *wol)
163{
164 struct mlx4_en_priv *priv = netdev_priv(netdev);
165 u64 config = 0;
166 int err = 0;
167
168 if (!priv->mdev->dev->caps.wol)
169 return -EOPNOTSUPP;
170
171 if (wol->supported & ~WAKE_MAGIC)
172 return -EINVAL;
173
174 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
175 if (err) {
176 en_err(priv, "Failed to get WoL info, unable to modify\n");
177 return err;
178 }
179
180 if (wol->wolopts & WAKE_MAGIC) {
181 config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
182 MLX4_EN_WOL_MAGIC;
183 } else {
184 config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
185 config |= MLX4_EN_WOL_DO_MODIFY;
186 }
187
188 err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
189 if (err)
190 en_err(priv, "Failed to set WoL information\n");
191
192 return err;
136} 193}
137 194
138static int mlx4_en_get_sset_count(struct net_device *dev, int sset) 195static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
@@ -388,7 +445,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
388 mlx4_en_stop_port(dev); 445 mlx4_en_stop_port(dev);
389 } 446 }
390 447
391 mlx4_en_free_resources(priv); 448 mlx4_en_free_resources(priv, true);
392 449
393 priv->prof->tx_ring_size = tx_size; 450 priv->prof->tx_ring_size = tx_size;
394 priv->prof->rx_ring_size = rx_size; 451 priv->prof->rx_ring_size = rx_size;
@@ -442,6 +499,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
442 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 499 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
443 .self_test = mlx4_en_self_test, 500 .self_test = mlx4_en_self_test,
444 .get_wol = mlx4_en_get_wol, 501 .get_wol = mlx4_en_get_wol,
502 .set_wol = mlx4_en_set_wol,
445 .get_msglevel = mlx4_en_get_msglevel, 503 .get_msglevel = mlx4_en_get_msglevel,
446 .set_msglevel = mlx4_en_set_msglevel, 504 .set_msglevel = mlx4_en_set_msglevel,
447 .get_coalesce = mlx4_en_get_coalesce, 505 .get_coalesce = mlx4_en_get_coalesce,
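The new set_wol handler only toggles magic-packet wake-up by editing a 64-bit configuration word read back from firmware. A self-contained sketch of that bit logic follows; the WOL_* bit positions here are placeholders chosen for illustration, while the real MLX4_EN_WOL_* definitions live in the driver headers.

#include <stdint.h>
#include <stdbool.h>

/* Illustrative placeholders; actual bit positions come from mlx4_en.h. */
#define WOL_MAGIC      (1ULL << 61)
#define WOL_ENABLED    (1ULL << 62)
#define WOL_DO_MODIFY  (1ULL << 63)

/* Mirror of mlx4_en_set_wol(): enable or disable magic-packet wake-up
 * in a config word previously read from the device. */
uint64_t update_wol_config(uint64_t config, bool want_magic)
{
    if (want_magic) {
        config |= WOL_DO_MODIFY | WOL_ENABLED | WOL_MAGIC;
    } else {
        config &= ~(WOL_ENABLED | WOL_MAGIC);
        config |= WOL_DO_MODIFY;
    }
    return config;
}

From user space this path would typically be exercised with something like "ethtool -s ethN wol g" to enable magic-packet wake-up and "ethtool -s ethN wol d" to disable it.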
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 1ff6ca6466e..9317b61a75b 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -241,16 +241,18 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
241 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) 241 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
242 mdev->port_cnt++; 242 mdev->port_cnt++;
243 243
244 /* If we did not receive an explicit number of Rx rings, default to
245 * the number of completion vectors populated by the mlx4_core */
246 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 244 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
247 mlx4_info(mdev, "Using %d tx rings for port:%d\n", 245 if (!dev->caps.comp_pool) {
248 mdev->profile.prof[i].tx_ring_num, i); 246 mdev->profile.prof[i].rx_ring_num =
249 mdev->profile.prof[i].rx_ring_num = min_t(int, 247 rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
250 roundup_pow_of_two(dev->caps.num_comp_vectors), 248 min_t(int,
251 MAX_RX_RINGS); 249 dev->caps.num_comp_vectors,
252 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", 250 MAX_RX_RINGS)));
253 mdev->profile.prof[i].rx_ring_num, i); 251 } else {
252 mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
253 min_t(int, dev->caps.comp_pool/
254 dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
255 }
254 } 256 }
255 257
256 /* Create our own workqueue for reset/multicast tasks 258 /* Create our own workqueue for reset/multicast tasks
@@ -294,7 +296,7 @@ static struct mlx4_interface mlx4_en_interface = {
294 .remove = mlx4_en_remove, 296 .remove = mlx4_en_remove,
295 .event = mlx4_en_event, 297 .event = mlx4_en_event,
296 .get_dev = mlx4_en_get_netdev, 298 .get_dev = mlx4_en_get_netdev,
297 .protocol = MLX4_PROTOCOL_EN, 299 .protocol = MLX4_PROT_ETH,
298}; 300};
299 301
300static int __init mlx4_en_init(void) 302static int __init mlx4_en_init(void)
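The en_main.c hunk changes how the default number of RX rings per port is derived: without a dedicated completion-vector pool the count is bounded by the shared vectors, while with a pool it is carved out of the pool per port. A self-contained sketch of the two formulas follows; MIN_RX_RINGS, MAX_RX_RINGS and MAX_MSIX_P_PORT are given illustrative values here, the real constants come from mlx4_en.h.

#include <stdio.h>

#define MIN_RX_RINGS    4      /* illustrative */
#define MAX_RX_RINGS    16     /* illustrative */
#define MAX_MSIX_P_PORT 17     /* illustrative */

static int rounddown_pow2(int n)
{
    int p = 1;
    while (p * 2 <= n)
        p *= 2;
    return p;
}

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

/* No dedicated pool: bound the ring count by the shared completion vectors. */
int rx_rings_legacy(int num_comp_vectors)
{
    return rounddown_pow2(max_i(MIN_RX_RINGS,
                                min_i(num_comp_vectors, MAX_RX_RINGS)));
}

/* Dedicated pool: split the pool between the ports. */
int rx_rings_pooled(int comp_pool, int num_ports)
{
    return rounddown_pow2(min_i(comp_pool / num_ports - 1,
                                MAX_MSIX_P_PORT - 1));
}

int main(void)
{
    printf("legacy, 4 vectors    : %d rings\n", rx_rings_legacy(4));
    printf("pooled, 32 vec/2 port: %d rings\n", rx_rings_pooled(32, 2));
    return 0;
}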
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 897f576b8b1..4f158baa024 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
156 mutex_lock(&mdev->state_lock); 156 mutex_lock(&mdev->state_lock);
157 if (priv->port_up) { 157 if (priv->port_up) {
158 /* Remove old MAC and insert the new one */ 158 /* Remove old MAC and insert the new one */
159 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 159 err = mlx4_replace_mac(mdev->dev, priv->port,
160 err = mlx4_register_mac(mdev->dev, priv->port, 160 priv->base_qpn, priv->mac, 0);
161 priv->mac, &priv->mac_index);
162 if (err) 161 if (err)
163 en_err(priv, "Failed changing HW MAC address\n"); 162 en_err(priv, "Failed changing HW MAC address\n");
164 } else 163 } else
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
214 struct mlx4_en_dev *mdev = priv->mdev; 213 struct mlx4_en_dev *mdev = priv->mdev;
215 struct net_device *dev = priv->dev; 214 struct net_device *dev = priv->dev;
216 u64 mcast_addr = 0; 215 u64 mcast_addr = 0;
216 u8 mc_list[16] = {0};
217 int err; 217 int err;
218 218
219 mutex_lock(&mdev->state_lock); 219 mutex_lock(&mdev->state_lock);
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
239 priv->flags |= MLX4_EN_FLAG_PROMISC; 239 priv->flags |= MLX4_EN_FLAG_PROMISC;
240 240
241 /* Enable promiscouos mode */ 241 /* Enable promiscouos mode */
242 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 242 if (!mdev->dev->caps.vep_uc_steering)
243 priv->base_qpn, 1); 243 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
244 priv->base_qpn, 1);
245 else
246 err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
247 priv->port);
244 if (err) 248 if (err)
245 en_err(priv, "Failed enabling " 249 en_err(priv, "Failed enabling "
246 "promiscous mode\n"); 250 "promiscous mode\n");
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
252 en_err(priv, "Failed disabling " 256 en_err(priv, "Failed disabling "
253 "multicast filter\n"); 257 "multicast filter\n");
254 258
255 /* Disable port VLAN filter */ 259 /* Add the default qp number as multicast promisc */
256 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); 260 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
257 if (err) 261 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
258 en_err(priv, "Failed disabling VLAN filter\n"); 262 priv->port);
263 if (err)
264 en_err(priv, "Failed entering multicast promisc mode\n");
265 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
266 }
267
268 if (priv->vlgrp) {
269 /* Disable port VLAN filter */
270 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
271 if (err)
272 en_err(priv, "Failed disabling VLAN filter\n");
273 }
259 } 274 }
260 goto out; 275 goto out;
261 } 276 }
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
270 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 285 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
271 286
272 /* Disable promiscouos mode */ 287 /* Disable promiscouos mode */
273 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 288 if (!mdev->dev->caps.vep_uc_steering)
274 priv->base_qpn, 0); 289 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
290 priv->base_qpn, 0);
291 else
292 err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
293 priv->port);
275 if (err) 294 if (err)
276 en_err(priv, "Failed disabling promiscous mode\n"); 295 en_err(priv, "Failed disabling promiscous mode\n");
277 296
297 /* Disable Multicast promisc */
298 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
299 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
300 priv->port);
301 if (err)
302 en_err(priv, "Failed disabling multicast promiscous mode\n");
303 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
304 }
305
278 /* Enable port VLAN filter */ 306 /* Enable port VLAN filter */
279 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 307 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
280 if (err) 308 if (err)
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
287 0, MLX4_MCAST_DISABLE); 315 0, MLX4_MCAST_DISABLE);
288 if (err) 316 if (err)
289 en_err(priv, "Failed disabling multicast filter\n"); 317 en_err(priv, "Failed disabling multicast filter\n");
318
319 /* Add the default qp number as multicast promisc */
320 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
321 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
322 priv->port);
323 if (err)
324 en_err(priv, "Failed entering multicast promisc mode\n");
325 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
326 }
290 } else { 327 } else {
291 int i; 328 int i;
329 /* Disable Multicast promisc */
330 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
331 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
332 priv->port);
333 if (err)
334 en_err(priv, "Failed disabling multicast promiscous mode\n");
335 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
336 }
292 337
293 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 338 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
294 0, MLX4_MCAST_DISABLE); 339 0, MLX4_MCAST_DISABLE);
295 if (err) 340 if (err)
296 en_err(priv, "Failed disabling multicast filter\n"); 341 en_err(priv, "Failed disabling multicast filter\n");
297 342
343 /* Detach our qp from all the multicast addresses */
344 for (i = 0; i < priv->mc_addrs_cnt; i++) {
345 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
346 mc_list[5] = priv->port;
347 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
348 mc_list, MLX4_PROT_ETH);
349 }
298 /* Flush mcast filter and init it with broadcast address */ 350 /* Flush mcast filter and init it with broadcast address */
299 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 351 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
300 1, MLX4_MCAST_CONFIG); 352 1, MLX4_MCAST_CONFIG);
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
307 for (i = 0; i < priv->mc_addrs_cnt; i++) { 359 for (i = 0; i < priv->mc_addrs_cnt; i++) {
308 mcast_addr = 360 mcast_addr =
309 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); 361 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
362 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
363 mc_list[5] = priv->port;
364 mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
365 mc_list, 0, MLX4_PROT_ETH);
310 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 366 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
311 mcast_addr, 0, MLX4_MCAST_CONFIG); 367 mcast_addr, 0, MLX4_MCAST_CONFIG);
312 } 368 }
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
314 0, MLX4_MCAST_ENABLE); 370 0, MLX4_MCAST_ENABLE);
315 if (err) 371 if (err)
316 en_err(priv, "Failed enabling multicast filter\n"); 372 en_err(priv, "Failed enabling multicast filter\n");
317
318 mlx4_en_clear_list(dev);
319 } 373 }
320out: 374out:
321 mutex_unlock(&mdev->state_lock); 375 mutex_unlock(&mdev->state_lock);
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
417 unsigned long avg_pkt_size; 471 unsigned long avg_pkt_size;
418 unsigned long rx_packets; 472 unsigned long rx_packets;
419 unsigned long rx_bytes; 473 unsigned long rx_bytes;
420 unsigned long rx_byte_diff;
421 unsigned long tx_packets; 474 unsigned long tx_packets;
422 unsigned long tx_pkt_diff; 475 unsigned long tx_pkt_diff;
423 unsigned long rx_pkt_diff; 476 unsigned long rx_pkt_diff;
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
441 rx_pkt_diff = ((unsigned long) (rx_packets - 494 rx_pkt_diff = ((unsigned long) (rx_packets -
442 priv->last_moder_packets)); 495 priv->last_moder_packets));
443 packets = max(tx_pkt_diff, rx_pkt_diff); 496 packets = max(tx_pkt_diff, rx_pkt_diff);
444 rx_byte_diff = rx_bytes - priv->last_moder_bytes;
445 rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
446 rate = packets * HZ / period; 497 rate = packets * HZ / period;
447 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 498 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
448 priv->last_moder_bytes)) / packets : 0; 499 priv->last_moder_bytes)) / packets : 0;
449 500
450 /* Apply auto-moderation only when packet rate exceeds a rate that 501 /* Apply auto-moderation only when packet rate exceeds a rate that
451 * it matters */ 502 * it matters */
452 if (rate > MLX4_EN_RX_RATE_THRESH) { 503 if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
453 /* If tx and rx packet rates are not balanced, assume that 504 /* If tx and rx packet rates are not balanced, assume that
454 * traffic is mainly BW bound and apply maximum moderation. 505 * traffic is mainly BW bound and apply maximum moderation.
455 * Otherwise, moderate according to packet rate */ 506 * Otherwise, moderate according to packet rate */
456 if (2 * tx_pkt_diff > 3 * rx_pkt_diff && 507 if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
457 rx_pkt_diff / rx_byte_diff < 508 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
458 MLX4_EN_SMALL_PKT_SIZE)
459 moder_time = priv->rx_usecs_low;
460 else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
461 moder_time = priv->rx_usecs_high; 509 moder_time = priv->rx_usecs_high;
462 else { 510 } else {
463 if (rate < priv->pkt_rate_low) 511 if (rate < priv->pkt_rate_low)
464 moder_time = priv->rx_usecs_low; 512 moder_time = priv->rx_usecs_low;
465 else if (rate > priv->pkt_rate_high) 513 else if (rate > priv->pkt_rate_high)
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
471 priv->rx_usecs_low; 519 priv->rx_usecs_low;
472 } 520 }
473 } else { 521 } else {
474 /* When packet rate is low, use default moderation rather than 522 moder_time = priv->rx_usecs_low;
475 * 0 to prevent interrupt storms if traffic suddenly increases */
476 moder_time = priv->rx_usecs;
477 } 523 }
478 524
479 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", 525 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
565 int err = 0; 611 int err = 0;
566 int i; 612 int i;
567 int j; 613 int j;
614 u8 mc_list[16] = {0};
615 char name[32];
568 616
569 if (priv->port_up) { 617 if (priv->port_up) {
570 en_dbg(DRV, priv, "start port called while port already up\n"); 618 en_dbg(DRV, priv, "start port called while port already up\n");
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
603 ++rx_index; 651 ++rx_index;
604 } 652 }
605 653
654 /* Set port mac number */
655 en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
656 err = mlx4_register_mac(mdev->dev, priv->port,
657 priv->mac, &priv->base_qpn, 0);
658 if (err) {
659 en_err(priv, "Failed setting port mac\n");
660 goto cq_err;
661 }
662 mdev->mac_removed[priv->port] = 0;
663
606 err = mlx4_en_config_rss_steer(priv); 664 err = mlx4_en_config_rss_steer(priv);
607 if (err) { 665 if (err) {
608 en_err(priv, "Failed configuring rss steering\n"); 666 en_err(priv, "Failed configuring rss steering\n");
609 goto cq_err; 667 goto mac_err;
610 } 668 }
611 669
670 if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
671 sprintf(name , "%s-tx", priv->dev->name);
672 if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
673 mlx4_warn(mdev, "Failed Assigning an EQ to "
674 "%s_tx ,Falling back to legacy "
675 "EQ's\n", priv->dev->name);
676 }
677 }
612 /* Configure tx cq's and rings */ 678 /* Configure tx cq's and rings */
613 for (i = 0; i < priv->tx_ring_num; i++) { 679 for (i = 0; i < priv->tx_ring_num; i++) {
614 /* Configure cq */ 680 /* Configure cq */
615 cq = &priv->tx_cq[i]; 681 cq = &priv->tx_cq[i];
682 cq->vector = priv->tx_vector;
616 err = mlx4_en_activate_cq(priv, cq); 683 err = mlx4_en_activate_cq(priv, cq);
617 if (err) { 684 if (err) {
618 en_err(priv, "Failed allocating Tx CQ\n"); 685 en_err(priv, "Failed allocating Tx CQ\n");
@@ -659,24 +726,25 @@ int mlx4_en_start_port(struct net_device *dev)
659 en_err(priv, "Failed setting default qp numbers\n"); 726 en_err(priv, "Failed setting default qp numbers\n");
660 goto tx_err; 727 goto tx_err;
661 } 728 }
662 /* Set port mac number */
663 en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
664 err = mlx4_register_mac(mdev->dev, priv->port,
665 priv->mac, &priv->mac_index);
666 if (err) {
667 en_err(priv, "Failed setting port mac\n");
668 goto tx_err;
669 }
670 mdev->mac_removed[priv->port] = 0;
671 729
672 /* Init port */ 730 /* Init port */
673 en_dbg(HW, priv, "Initializing port\n"); 731 en_dbg(HW, priv, "Initializing port\n");
674 err = mlx4_INIT_PORT(mdev->dev, priv->port); 732 err = mlx4_INIT_PORT(mdev->dev, priv->port);
675 if (err) { 733 if (err) {
676 en_err(priv, "Failed Initializing port\n"); 734 en_err(priv, "Failed Initializing port\n");
677 goto mac_err; 735 goto tx_err;
678 } 736 }
679 737
 738 /* Attach rx QP to broadcast address */
739 memset(&mc_list[10], 0xff, ETH_ALEN);
740 mc_list[5] = priv->port;
741 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
742 0, MLX4_PROT_ETH))
743 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
744
745 /* Must redo promiscuous mode setup. */
746 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
747
680 /* Schedule multicast task to populate multicast list */ 748 /* Schedule multicast task to populate multicast list */
681 queue_work(mdev->workqueue, &priv->mcast_task); 749 queue_work(mdev->workqueue, &priv->mcast_task);
682 750
@@ -684,8 +752,6 @@ int mlx4_en_start_port(struct net_device *dev)
684 netif_tx_start_all_queues(dev); 752 netif_tx_start_all_queues(dev);
685 return 0; 753 return 0;
686 754
687mac_err:
688 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
689tx_err: 755tx_err:
690 while (tx_index--) { 756 while (tx_index--) {
691 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); 757 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -693,6 +759,8 @@ tx_err:
693 } 759 }
694 760
695 mlx4_en_release_rss_steer(priv); 761 mlx4_en_release_rss_steer(priv);
762mac_err:
763 mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
696cq_err: 764cq_err:
697 while (rx_index--) 765 while (rx_index--)
698 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); 766 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -708,6 +776,7 @@ void mlx4_en_stop_port(struct net_device *dev)
708 struct mlx4_en_priv *priv = netdev_priv(dev); 776 struct mlx4_en_priv *priv = netdev_priv(dev);
709 struct mlx4_en_dev *mdev = priv->mdev; 777 struct mlx4_en_dev *mdev = priv->mdev;
710 int i; 778 int i;
779 u8 mc_list[16] = {0};
711 780
712 if (!priv->port_up) { 781 if (!priv->port_up) {
713 en_dbg(DRV, priv, "stop port called while port already down\n"); 782 en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -722,8 +791,23 @@ void mlx4_en_stop_port(struct net_device *dev)
722 /* Set port as not active */ 791 /* Set port as not active */
723 priv->port_up = false; 792 priv->port_up = false;
724 793
794 /* Detach All multicasts */
795 memset(&mc_list[10], 0xff, ETH_ALEN);
796 mc_list[5] = priv->port;
797 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
798 MLX4_PROT_ETH);
799 for (i = 0; i < priv->mc_addrs_cnt; i++) {
800 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
801 mc_list[5] = priv->port;
802 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
803 mc_list, MLX4_PROT_ETH);
804 }
805 mlx4_en_clear_list(dev);
806 /* Flush multicast filter */
807 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
808
725 /* Unregister Mac address for the port */ 809 /* Unregister Mac address for the port */
726 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 810 mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
727 mdev->mac_removed[priv->port] = 1; 811 mdev->mac_removed[priv->port] = 1;
728 812
729 /* Free TX Rings */ 813 /* Free TX Rings */
@@ -801,7 +885,6 @@ static int mlx4_en_open(struct net_device *dev)
801 priv->rx_ring[i].packets = 0; 885 priv->rx_ring[i].packets = 0;
802 } 886 }
803 887
804 mlx4_en_set_default_moderation(priv);
805 err = mlx4_en_start_port(dev); 888 err = mlx4_en_start_port(dev);
806 if (err) 889 if (err)
807 en_err(priv, "Failed starting port:%d\n", priv->port); 890 en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -828,7 +911,7 @@ static int mlx4_en_close(struct net_device *dev)
828 return 0; 911 return 0;
829} 912}
830 913
831void mlx4_en_free_resources(struct mlx4_en_priv *priv) 914void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
832{ 915{
833 int i; 916 int i;
834 917
@@ -836,14 +919,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
836 if (priv->tx_ring[i].tx_info) 919 if (priv->tx_ring[i].tx_info)
837 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 920 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
838 if (priv->tx_cq[i].buf) 921 if (priv->tx_cq[i].buf)
839 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 922 mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
840 } 923 }
841 924
842 for (i = 0; i < priv->rx_ring_num; i++) { 925 for (i = 0; i < priv->rx_ring_num; i++) {
843 if (priv->rx_ring[i].rx_info) 926 if (priv->rx_ring[i].rx_info)
844 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); 927 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
845 if (priv->rx_cq[i].buf) 928 if (priv->rx_cq[i].buf)
846 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 929 mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
847 } 930 }
848} 931}
849 932
@@ -851,6 +934,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
851{ 934{
852 struct mlx4_en_port_profile *prof = priv->prof; 935 struct mlx4_en_port_profile *prof = priv->prof;
853 int i; 936 int i;
937 int base_tx_qpn, err;
938
939 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
940 if (err) {
941 en_err(priv, "failed reserving range for TX rings\n");
942 return err;
943 }
854 944
855 /* Create tx Rings */ 945 /* Create tx Rings */
856 for (i = 0; i < priv->tx_ring_num; i++) { 946 for (i = 0; i < priv->tx_ring_num; i++) {
@@ -858,7 +948,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
858 prof->tx_ring_size, i, TX)) 948 prof->tx_ring_size, i, TX))
859 goto err; 949 goto err;
860 950
861 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 951 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
862 prof->tx_ring_size, TXBB_SIZE)) 952 prof->tx_ring_size, TXBB_SIZE))
863 goto err; 953 goto err;
864 } 954 }
@@ -878,6 +968,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
878 968
879err: 969err:
880 en_err(priv, "Failed to allocate NIC resources\n"); 970 en_err(priv, "Failed to allocate NIC resources\n");
971 mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
881 return -ENOMEM; 972 return -ENOMEM;
882} 973}
883 974
@@ -905,7 +996,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
905 mdev->pndev[priv->port] = NULL; 996 mdev->pndev[priv->port] = NULL;
906 mutex_unlock(&mdev->state_lock); 997 mutex_unlock(&mdev->state_lock);
907 998
908 mlx4_en_free_resources(priv); 999 mlx4_en_free_resources(priv, false);
909 free_netdev(dev); 1000 free_netdev(dev);
910} 1001}
911 1002
@@ -932,7 +1023,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
932 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 1023 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
933 } else { 1024 } else {
934 mlx4_en_stop_port(dev); 1025 mlx4_en_stop_port(dev);
935 mlx4_en_set_default_moderation(priv);
936 err = mlx4_en_start_port(dev); 1026 err = mlx4_en_start_port(dev);
937 if (err) { 1027 if (err) {
938 en_err(priv, "Failed restarting port:%d\n", 1028 en_err(priv, "Failed restarting port:%d\n",
@@ -1079,7 +1169,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1079 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 1169 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
1080 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 1170 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
1081 1171
1172 /* Configure port */
1173 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1174 MLX4_EN_MIN_MTU,
1175 0, 0, 0, 0);
1176 if (err) {
1177 en_err(priv, "Failed setting port general configurations "
1178 "for port %d, with error %d\n", priv->port, err);
1179 goto out;
1180 }
1181
1182 /* Init port */
1183 en_warn(priv, "Initializing port\n");
1184 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1185 if (err) {
1186 en_err(priv, "Failed Initializing port\n");
1187 goto out;
1188 }
1082 priv->registered = 1; 1189 priv->registered = 1;
1190 mlx4_en_set_default_moderation(priv);
1083 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1191 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1084 return 0; 1192 return 0;
1085 1193
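Several of the en_netdev.c hunks build a 16-byte multicast GID by hand before calling mlx4_multicast_attach()/mlx4_multicast_detach(): the Ethernet MAC is copied into the last six bytes and the port number is written into byte 5. A minimal, self-contained sketch of that layout follows (ETH_ALEN is 6; the helper names are illustrative).

#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Build the 16-byte GID used for Ethernet multicast steering:
 * byte 5 carries the port, bytes 10..15 carry the MAC address. */
void build_mc_gid(uint8_t gid[16], const uint8_t mac[ETH_ALEN], uint8_t port)
{
    memset(gid, 0, 16);
    gid[5] = port;
    memcpy(&gid[10], mac, ETH_ALEN);
}

/* The broadcast case in mlx4_en_start_port() simply uses ff:ff:ff:ff:ff:ff. */
void build_bcast_gid(uint8_t gid[16], uint8_t port)
{
    static const uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    build_mc_gid(gid, bcast, port);
}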
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 7f5a3221e0c..f2a4f5dd313 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
119 struct mlx4_set_port_rqp_calc_context *context; 119 struct mlx4_set_port_rqp_calc_context *context;
120 int err; 120 int err;
121 u32 in_mod; 121 u32 in_mod;
122 u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
123
124 if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
125 return 0;
122 126
123 mailbox = mlx4_alloc_cmd_mailbox(dev); 127 mailbox = mlx4_alloc_cmd_mailbox(dev);
124 if (IS_ERR(mailbox)) 128 if (IS_ERR(mailbox))
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
127 memset(context, 0, sizeof *context); 131 memset(context, 0, sizeof *context);
128 132
129 context->base_qpn = cpu_to_be32(base_qpn); 133 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn); 134 context->n_mac = 0x7;
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn); 135 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
136 base_qpn);
137 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
138 base_qpn);
132 context->intra_no_vlan = 0; 139 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX; 140 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0; 141 context->intra_vlan_miss = 0;
@@ -206,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
206 } 213 }
207 stats->tx_packets = 0; 214 stats->tx_packets = 0;
208 stats->tx_bytes = 0; 215 stats->tx_bytes = 0;
209 for (i = 0; i <= priv->tx_ring_num; i++) { 216 for (i = 0; i < priv->tx_ring_num; i++) {
210 stats->tx_packets += priv->tx_ring[i].packets; 217 stats->tx_packets += priv->tx_ring[i].packets;
211 stats->tx_bytes += priv->tx_ring[i].bytes; 218 stats->tx_bytes += priv->tx_ring[i].bytes;
212 } 219 }
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index 092e814b198..e3d73e41c56 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,8 +36,8 @@
36 36
37 37
38#define SET_PORT_GEN_ALL_VALID 0x7 38#define SET_PORT_GEN_ALL_VALID 0x7
39#define SET_PORT_PROMISC_EN_SHIFT 31 39#define SET_PORT_PROMISC_SHIFT 31
40#define SET_PORT_PROMISC_MODE_SHIFT 30 40#define SET_PORT_MC_PROMISC_SHIFT 30
41 41
42enum { 42enum {
43 MLX4_CMD_SET_VLAN_FLTR = 0x47, 43 MLX4_CMD_SET_VLAN_FLTR = 0x47,
@@ -45,6 +45,12 @@ enum {
45 MLX4_CMD_DUMP_ETH_STATS = 0x49, 45 MLX4_CMD_DUMP_ETH_STATS = 0x49,
46}; 46};
47 47
48enum {
49 MCAST_DIRECT_ONLY = 0,
50 MCAST_DIRECT = 1,
51 MCAST_DEFAULT = 2
52};
53
48struct mlx4_set_port_general_context { 54struct mlx4_set_port_general_context {
49 u8 reserved[3]; 55 u8 reserved[3];
50 u8 flags; 56 u8 flags;
@@ -60,14 +66,17 @@ struct mlx4_set_port_general_context {
60 66
61struct mlx4_set_port_rqp_calc_context { 67struct mlx4_set_port_rqp_calc_context {
62 __be32 base_qpn; 68 __be32 base_qpn;
63 __be32 flags; 69 u8 rererved;
64 u8 reserved[3]; 70 u8 n_mac;
71 u8 n_vlan;
72 u8 n_prio;
73 u8 reserved2[3];
65 u8 mac_miss; 74 u8 mac_miss;
66 u8 intra_no_vlan; 75 u8 intra_no_vlan;
67 u8 no_vlan; 76 u8 no_vlan;
68 u8 intra_vlan_miss; 77 u8 intra_vlan_miss;
69 u8 vlan_miss; 78 u8 vlan_miss;
70 u8 reserved2[3]; 79 u8 reserved3[3];
71 u8 no_vlan_prio; 80 u8 no_vlan_prio;
72 __be32 promisc; 81 __be32 promisc;
73 __be32 mcast; 82 __be32 mcast;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 570f2508fb3..05998ee297c 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -845,16 +845,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
845 } 845 }
846 846
847 /* Configure RSS indirection qp */ 847 /* Configure RSS indirection qp */
848 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
849 if (err) {
850 en_err(priv, "Failed to reserve range for RSS "
851 "indirection qp\n");
852 goto rss_err;
853 }
854 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); 848 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
855 if (err) { 849 if (err) {
856 en_err(priv, "Failed to allocate RSS indirection QP\n"); 850 en_err(priv, "Failed to allocate RSS indirection QP\n");
857 goto reserve_err; 851 goto rss_err;
858 } 852 }
859 rss_map->indir_qp.event = mlx4_en_sqp_event; 853 rss_map->indir_qp.event = mlx4_en_sqp_event;
860 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 854 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
@@ -881,8 +875,6 @@ indir_err:
881 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 875 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
882 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 876 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
883 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 877 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
884reserve_err:
885 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
886rss_err: 878rss_err:
887 for (i = 0; i < good_qps; i++) { 879 for (i = 0; i < good_qps; i++) {
888 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], 880 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -904,7 +896,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
904 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 896 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
905 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 897 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
906 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 898 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
907 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
908 899
909 for (i = 0; i < priv->rx_ring_num; i++) { 900 for (i = 0; i < priv->rx_ring_num; i++) {
910 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], 901 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index a680cd4a5ab..01feb8fd42a 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -44,6 +44,7 @@
44 44
45enum { 45enum {
46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ 46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
47 MAX_BF = 256,
47}; 48};
48 49
49static int inline_thold __read_mostly = MAX_INLINE; 50static int inline_thold __read_mostly = MAX_INLINE;
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444);
52MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); 53MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
53 54
54int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 55int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
55 struct mlx4_en_tx_ring *ring, u32 size, 56 struct mlx4_en_tx_ring *ring, int qpn, u32 size,
56 u16 stride) 57 u16 stride)
57{ 58{
58 struct mlx4_en_dev *mdev = priv->mdev; 59 struct mlx4_en_dev *mdev = priv->mdev;
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
103 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 104 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
104 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 105 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
105 106
106 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); 107 ring->qpn = qpn;
107 if (err) {
108 en_err(priv, "Failed reserving qp for tx ring.\n");
109 goto err_map;
110 }
111
112 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 108 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
113 if (err) { 109 if (err) {
114 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 110 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
115 goto err_reserve; 111 goto err_map;
116 } 112 }
117 ring->qp.event = mlx4_en_sqp_event; 113 ring->qp.event = mlx4_en_sqp_event;
118 114
115 err = mlx4_bf_alloc(mdev->dev, &ring->bf);
116 if (err) {
117 en_dbg(DRV, priv, "working without blueflame (%d)", err);
118 ring->bf.uar = &mdev->priv_uar;
119 ring->bf.uar->map = mdev->uar_map;
120 ring->bf_enabled = false;
121 } else
122 ring->bf_enabled = true;
123
119 return 0; 124 return 0;
120 125
121err_reserve:
122 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
123err_map: 126err_map:
124 mlx4_en_unmap_buffer(&ring->wqres.buf); 127 mlx4_en_unmap_buffer(&ring->wqres.buf);
125err_hwq_res: 128err_hwq_res:
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
139 struct mlx4_en_dev *mdev = priv->mdev; 142 struct mlx4_en_dev *mdev = priv->mdev;
140 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 143 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
141 144
145 if (ring->bf_enabled)
146 mlx4_bf_free(mdev->dev, &ring->bf);
142 mlx4_qp_remove(mdev->dev, &ring->qp); 147 mlx4_qp_remove(mdev->dev, &ring->qp);
143 mlx4_qp_free(mdev->dev, &ring->qp); 148 mlx4_qp_free(mdev->dev, &ring->qp);
144 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); 149 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
171 176
172 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 177 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
173 ring->cqn, &ring->context); 178 ring->cqn, &ring->context);
179 if (ring->bf_enabled)
180 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
174 181
175 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 182 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
176 &ring->qp, &ring->qp_state); 183 &ring->qp, &ring->qp_state);
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
591 return skb_tx_hash(dev, skb); 598 return skb_tx_hash(dev, skb);
592} 599}
593 600
601static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
602{
603 __iowrite64_copy(dst, src, bytecnt / 8);
604}
605
594netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 606netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
595{ 607{
596 struct mlx4_en_priv *priv = netdev_priv(dev); 608 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
609 int desc_size; 621 int desc_size;
610 int real_size; 622 int real_size;
611 dma_addr_t dma; 623 dma_addr_t dma;
612 u32 index; 624 u32 index, bf_index;
613 __be32 op_own; 625 __be32 op_own;
614 u16 vlan_tag = 0; 626 u16 vlan_tag = 0;
615 int i; 627 int i;
616 int lso_header_size; 628 int lso_header_size;
617 void *fragptr; 629 void *fragptr;
630 bool bounce = false;
618 631
619 if (!priv->port_up) 632 if (!priv->port_up)
620 goto tx_drop; 633 goto tx_drop;
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
657 670
658 /* Packet is good - grab an index and transmit it */ 671 /* Packet is good - grab an index and transmit it */
659 index = ring->prod & ring->size_mask; 672 index = ring->prod & ring->size_mask;
673 bf_index = ring->prod;
660 674
661 /* See if we have enough space for whole descriptor TXBB for setting 675 /* See if we have enough space for whole descriptor TXBB for setting
662 * SW ownership on next descriptor; if not, use a bounce buffer. */ 676 * SW ownership on next descriptor; if not, use a bounce buffer. */
663 if (likely(index + nr_txbb <= ring->size)) 677 if (likely(index + nr_txbb <= ring->size))
664 tx_desc = ring->buf + index * TXBB_SIZE; 678 tx_desc = ring->buf + index * TXBB_SIZE;
665 else 679 else {
666 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; 680 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
681 bounce = true;
682 }
667 683
668 /* Save skb in tx_info ring */ 684 /* Save skb in tx_info ring */
669 tx_info = &ring->tx_info[index]; 685 tx_info = &ring->tx_info[index];
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
768 ring->prod += nr_txbb; 784 ring->prod += nr_txbb;
769 785
770 /* If we used a bounce buffer then copy descriptor back into place */ 786 /* If we used a bounce buffer then copy descriptor back into place */
771 if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) 787 if (bounce)
772 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 788 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
773 789
774 /* Run destructor before passing skb to HW */ 790 /* Run destructor before passing skb to HW */
775 if (likely(!skb_shared(skb))) 791 if (likely(!skb_shared(skb)))
776 skb_orphan(skb); 792 skb_orphan(skb);
777 793
778 /* Ensure new descirptor hits memory 794 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
779 * before setting ownership of this descriptor to HW */ 795 *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
780 wmb(); 796 op_own |= htonl((bf_index & 0xffff) << 8);
781 tx_desc->ctrl.owner_opcode = op_own; 797 /* Ensure new descriptor hits memory
798 * before setting ownership of this descriptor to HW */
799 wmb();
800 tx_desc->ctrl.owner_opcode = op_own;
782 801
783 /* Ring doorbell! */ 802 wmb();
784 wmb(); 803
785 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); 804 mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
805 desc_size);
806
807 wmb();
808
809 ring->bf.offset ^= ring->bf.buf_size;
810 } else {
 811 /* Ensure new descriptor hits memory
812 * before setting ownership of this descriptor to HW */
813 wmb();
814 tx_desc->ctrl.owner_opcode = op_own;
815 wmb();
816 writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
817 }
786 818
787 /* Poll CQ here */ 819 /* Poll CQ here */
788 mlx4_en_xmit_poll(priv, tx_ind); 820 mlx4_en_xmit_poll(priv, tx_ind);
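The transmit path now has two doorbell variants: when BlueFlame is available and the descriptor is small, untagged and built in place, the whole descriptor is copied through a write-combining BlueFlame page; otherwise the classic UAR doorbell write is used. A self-contained sketch of the gating condition follows; MAX_BF (256) is taken from the hunk above and the struct is a stand-in for the ring state.

#include <stdbool.h>

#define MAX_BF 256   /* from the en_tx.c hunk above */

struct tx_state {
    bool bf_enabled;   /* BlueFlame register was successfully allocated */
    bool bounced;      /* descriptor was built in the bounce buffer */
    int  desc_size;    /* bytes occupied by this descriptor */
    int  vlan_tag;     /* non-zero if a VLAN tag must be inserted */
};

/* Mirror of the condition guarding the BlueFlame path in mlx4_en_xmit():
 * small, in-place, untagged descriptors may be pushed directly through the
 * write-combining BlueFlame page instead of ringing the regular doorbell. */
bool use_blueflame(const struct tx_state *s)
{
    return s->bf_enabled && s->desc_size <= MAX_BF &&
           !s->bounced && !s->vlan_tag;
}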
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 552d0fce6f6..1ad1f6029af 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -42,7 +42,7 @@
42#include "fw.h" 42#include "fw.h"
43 43
44enum { 44enum {
45 MLX4_IRQNAME_SIZE = 64 45 MLX4_IRQNAME_SIZE = 32
46}; 46};
47 47
48enum { 48enum {
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
317 * we need to map, take the difference of highest index and 317 * we need to map, take the difference of highest index and
318 * the lowest index we'll use and add 1. 318 * the lowest index we'll use and add 1.
319 */ 319 */
320 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - 320 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
321 dev->caps.reserved_eqs / 4 + 1; 321 dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
322} 322}
323 323
324static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) 324static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
496static void mlx4_free_irqs(struct mlx4_dev *dev) 496static void mlx4_free_irqs(struct mlx4_dev *dev)
497{ 497{
498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; 498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
499 int i; 499 struct mlx4_priv *priv = mlx4_priv(dev);
500 int i, vec;
500 501
501 if (eq_table->have_irq) 502 if (eq_table->have_irq)
502 free_irq(dev->pdev->irq, dev); 503 free_irq(dev->pdev->irq, dev);
504
503 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 505 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
504 if (eq_table->eq[i].have_irq) { 506 if (eq_table->eq[i].have_irq) {
505 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 507 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
506 eq_table->eq[i].have_irq = 0; 508 eq_table->eq[i].have_irq = 0;
507 } 509 }
508 510
511 for (i = 0; i < dev->caps.comp_pool; i++) {
512 /*
 513 * Freeing the assigned IRQs;
514 * all bits should be 0, but we need to validate
515 */
516 if (priv->msix_ctl.pool_bm & 1ULL << i) {
 517 /* No need for protection */
518 vec = dev->caps.num_comp_vectors + 1 + i;
519 free_irq(priv->eq_table.eq[vec].irq,
520 &priv->eq_table.eq[vec]);
521 }
522 }
523
524
509 kfree(eq_table->irq_names); 525 kfree(eq_table->irq_names);
510} 526}
511 527
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
578 (priv->eq_table.inta_pin < 32 ? 4 : 0); 594 (priv->eq_table.inta_pin < 32 ? 4 : 0);
579 595
580 priv->eq_table.irq_names = 596 priv->eq_table.irq_names =
581 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), 597 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
598 dev->caps.comp_pool),
582 GFP_KERNEL); 599 GFP_KERNEL);
583 if (!priv->eq_table.irq_names) { 600 if (!priv->eq_table.irq_names) {
584 err = -ENOMEM; 601 err = -ENOMEM;
@@ -586,7 +603,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
586 } 603 }
587 604
588 for (i = 0; i < dev->caps.num_comp_vectors; ++i) { 605 for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
589 err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, 606 err = mlx4_create_eq(dev, dev->caps.num_cqs -
607 dev->caps.reserved_cqs +
608 MLX4_NUM_SPARE_EQE,
590 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, 609 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
591 &priv->eq_table.eq[i]); 610 &priv->eq_table.eq[i]);
592 if (err) { 611 if (err) {
@@ -601,6 +620,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
601 if (err) 620 if (err)
602 goto err_out_comp; 621 goto err_out_comp;
603 622
 623 /* if the additional completion vector pool size is 0, this loop will not run */
624 for (i = dev->caps.num_comp_vectors + 1;
625 i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
626
627 err = mlx4_create_eq(dev, dev->caps.num_cqs -
628 dev->caps.reserved_cqs +
629 MLX4_NUM_SPARE_EQE,
630 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
631 &priv->eq_table.eq[i]);
632 if (err) {
633 --i;
634 goto err_out_unmap;
635 }
636 }
637
638
604 if (dev->flags & MLX4_FLAG_MSI_X) { 639 if (dev->flags & MLX4_FLAG_MSI_X) {
605 const char *eq_name; 640 const char *eq_name;
606 641
@@ -686,7 +721,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
686 721
687 mlx4_free_irqs(dev); 722 mlx4_free_irqs(dev);
688 723
689 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 724 for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
690 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 725 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
691 726
692 mlx4_unmap_clr_int(dev); 727 mlx4_unmap_clr_int(dev);
@@ -743,3 +778,65 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
743 return err; 778 return err;
744} 779}
745EXPORT_SYMBOL(mlx4_test_interrupts); 780EXPORT_SYMBOL(mlx4_test_interrupts);
781
782int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
783{
784
785 struct mlx4_priv *priv = mlx4_priv(dev);
786 int vec = 0, err = 0, i;
787
788 spin_lock(&priv->msix_ctl.pool_lock);
789 for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
790 if (~priv->msix_ctl.pool_bm & 1ULL << i) {
791 priv->msix_ctl.pool_bm |= 1ULL << i;
792 vec = dev->caps.num_comp_vectors + 1 + i;
793 snprintf(priv->eq_table.irq_names +
794 vec * MLX4_IRQNAME_SIZE,
795 MLX4_IRQNAME_SIZE, "%s", name);
796 err = request_irq(priv->eq_table.eq[vec].irq,
797 mlx4_msi_x_interrupt, 0,
798 &priv->eq_table.irq_names[vec<<5],
799 priv->eq_table.eq + vec);
800 if (err) {
 801 /* zero out the bit by flipping it */
802 priv->msix_ctl.pool_bm ^= 1 << i;
803 vec = 0;
804 continue;
 805 /* we don't want to break here */
806 }
807 eq_set_ci(&priv->eq_table.eq[vec], 1);
808 }
809 }
810 spin_unlock(&priv->msix_ctl.pool_lock);
811
812 if (vec) {
813 *vector = vec;
814 } else {
815 *vector = 0;
816 err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
817 }
818 return err;
819}
820EXPORT_SYMBOL(mlx4_assign_eq);
821
822void mlx4_release_eq(struct mlx4_dev *dev, int vec)
823{
824 struct mlx4_priv *priv = mlx4_priv(dev);
825 /*bm index*/
826 int i = vec - dev->caps.num_comp_vectors - 1;
827
828 if (likely(i >= 0)) {
 829 /* sanity check, making sure we're not trying to free IRQs
 830 belonging to a legacy EQ */
831 spin_lock(&priv->msix_ctl.pool_lock);
832 if (priv->msix_ctl.pool_bm & 1ULL << i) {
833 free_irq(priv->eq_table.eq[vec].irq,
834 &priv->eq_table.eq[vec]);
835 priv->msix_ctl.pool_bm &= ~(1ULL << i);
836 }
837 spin_unlock(&priv->msix_ctl.pool_lock);
838 }
839
840}
841EXPORT_SYMBOL(mlx4_release_eq);
842
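mlx4_assign_eq() and mlx4_release_eq() above track the pooled completion vectors with a 64-bit bitmap guarded by a spinlock: a clear bit means the pool slot is free, and the absolute vector number is the slot index offset by the legacy vectors. A self-contained user-space sketch of that bookkeeping follows; the mutex stands in for the kernel spinlock and the field names are illustrative.

#include <stdint.h>
#include <pthread.h>

struct eq_pool {
    uint64_t bm;               /* bit i set => pool vector i in use */
    int legacy_vectors;        /* num_comp_vectors + 1 in the driver */
    int pool_size;             /* dev->caps.comp_pool */
    pthread_mutex_t lock;
};

/* Claim a free pooled vector; returns the absolute vector number or -1. */
int pool_assign(struct eq_pool *p)
{
    int i, vec = -1;

    pthread_mutex_lock(&p->lock);
    for (i = 0; i < p->pool_size; i++) {
        if (!(p->bm & (1ULL << i))) {
            p->bm |= 1ULL << i;
            vec = p->legacy_vectors + i;
            break;
        }
    }
    pthread_mutex_unlock(&p->lock);
    return vec;
}

/* Release a vector previously returned by pool_assign(). */
void pool_release(struct eq_pool *p, int vec)
{
    int i = vec - p->legacy_vectors;

    if (i < 0)      /* legacy vectors are never pooled */
        return;
    pthread_mutex_lock(&p->lock);
    p->bm &= ~(1ULL << i);
    pthread_mutex_unlock(&p->lock);
}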
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 5de1db89783..67a209ba939 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -274,8 +274,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
274 dev_cap->stat_rate_support = stat_rate; 274 dev_cap->stat_rate_support = stat_rate;
275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); 275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
276 dev_cap->udp_rss = field & 0x1; 276 dev_cap->udp_rss = field & 0x1;
277 dev_cap->vep_uc_steering = field & 0x2;
278 dev_cap->vep_mc_steering = field & 0x4;
277 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); 279 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
278 dev_cap->loopback_support = field & 0x1; 280 dev_cap->loopback_support = field & 0x1;
281 dev_cap->wol = field & 0x40;
279 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 282 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
280 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 283 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
281 dev_cap->reserved_uars = field >> 4; 284 dev_cap->reserved_uars = field >> 4;
@@ -737,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
737#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 740#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
738#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 741#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
739#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 742#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
743#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
740#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 744#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
741#define INIT_HCA_TPT_OFFSET 0x0f0 745#define INIT_HCA_TPT_OFFSET 0x0f0
742#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 746#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
@@ -797,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
797 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 801 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
798 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 802 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
799 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 803 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
804 if (dev->caps.vep_mc_steering)
805 MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
800 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 806 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
801 807
802 /* TPT attributes */ 808 /* TPT attributes */
@@ -908,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev)
908 /* Input modifier of 0x1f means "finish as soon as possible." */ 914 /* Input modifier of 0x1f means "finish as soon as possible." */
909 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); 915 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
910} 916}
917
918#define MLX4_WOL_SETUP_MODE (5 << 28)
919int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
920{
921 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
922
923 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
924 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
925}
926EXPORT_SYMBOL_GPL(mlx4_wol_read);
927
928int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
929{
930 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
931
932 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
933 MLX4_CMD_TIME_CLASS_A);
934}
935EXPORT_SYMBOL_GPL(mlx4_wol_write);
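mlx4_wol_read() and mlx4_wol_write() differ only in direction: both build the same MOD_STAT_CFG input modifier from the WoL setup mode and the port number. A tiny self-contained sketch of the modifier construction follows; the 5 << 28 value is taken from the hunk above, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define WOL_SETUP_MODE (5u << 28)   /* MLX4_WOL_SETUP_MODE in fw.c */

/* in_mod = setup mode in the high bits, port number in bits 8..15 */
uint32_t wol_in_mod(int port)
{
    return WOL_SETUP_MODE | (uint32_t)port << 8;
}

int main(void)
{
    printf("port 1: in_mod = 0x%08x\n", wol_in_mod(1));
    printf("port 2: in_mod = 0x%08x\n", wol_in_mod(2));
    return 0;
}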
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 65cc72eb899..88003ebc618 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -80,6 +80,9 @@ struct mlx4_dev_cap {
80 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int udp_rss; 81 int udp_rss;
82 int loopback_support; 82 int loopback_support;
83 int vep_uc_steering;
84 int vep_mc_steering;
85 int wol;
83 u32 flags; 86 u32 flags;
84 int reserved_uars; 87 int reserved_uars;
85 int uar_size; 88 int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 2765a3ce9c2..62fa7eec5f0 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -39,6 +39,7 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h>
42 43
43#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
44#include <linux/mlx4/doorbell.h> 45#include <linux/mlx4/doorbell.h>
@@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
227 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 228 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
228 dev->caps.udp_rss = dev_cap->udp_rss; 229 dev->caps.udp_rss = dev_cap->udp_rss;
229 dev->caps.loopback_support = dev_cap->loopback_support; 230 dev->caps.loopback_support = dev_cap->loopback_support;
231 dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
232 dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
233 dev->caps.wol = dev_cap->wol;
230 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 234 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
231 235
232 dev->caps.log_num_macs = log_num_mac; 236 dev->caps.log_num_macs = log_num_mac;
@@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
718 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 722 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
719} 723}
720 724
725static int map_bf_area(struct mlx4_dev *dev)
726{
727 struct mlx4_priv *priv = mlx4_priv(dev);
728 resource_size_t bf_start;
729 resource_size_t bf_len;
730 int err = 0;
731
732 bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
733 bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
734 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
735 if (!priv->bf_mapping)
736 err = -ENOMEM;
737
738 return err;
739}
740
741static void unmap_bf_area(struct mlx4_dev *dev)
742{
743 if (mlx4_priv(dev)->bf_mapping)
744 io_mapping_free(mlx4_priv(dev)->bf_mapping);
745}
746
721static void mlx4_close_hca(struct mlx4_dev *dev) 747static void mlx4_close_hca(struct mlx4_dev *dev)
722{ 748{
749 unmap_bf_area(dev);
723 mlx4_CLOSE_HCA(dev, 0); 750 mlx4_CLOSE_HCA(dev, 0);
724 mlx4_free_icms(dev); 751 mlx4_free_icms(dev);
725 mlx4_UNMAP_FA(dev); 752 mlx4_UNMAP_FA(dev);
@@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
772 goto err_stop_fw; 799 goto err_stop_fw;
773 } 800 }
774 801
802 if (map_bf_area(dev))
803 mlx4_dbg(dev, "Failed to map blue flame area\n");
804
775 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 805 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
776 806
777 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 807 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
@@ -802,6 +832,7 @@ err_free_icm:
802 mlx4_free_icms(dev); 832 mlx4_free_icms(dev);
803 833
804err_stop_fw: 834err_stop_fw:
835 unmap_bf_area(dev);
805 mlx4_UNMAP_FA(dev); 836 mlx4_UNMAP_FA(dev);
806 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 837 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
807 838
@@ -969,13 +1000,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
969{ 1000{
970 struct mlx4_priv *priv = mlx4_priv(dev); 1001 struct mlx4_priv *priv = mlx4_priv(dev);
971 struct msix_entry *entries; 1002 struct msix_entry *entries;
972 int nreq; 1003 int nreq = min_t(int, dev->caps.num_ports *
1004 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
1005 + MSIX_LEGACY_SZ, MAX_MSIX);
973 int err; 1006 int err;
974 int i; 1007 int i;
975 1008
976 if (msi_x) { 1009 if (msi_x) {
977 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 1010 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
978 num_possible_cpus() + 1); 1011 nreq);
979 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1012 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
980 if (!entries) 1013 if (!entries)
981 goto no_msi; 1014 goto no_msi;
@@ -998,7 +1031,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
998 goto no_msi; 1031 goto no_msi;
999 } 1032 }
1000 1033
1001 dev->caps.num_comp_vectors = nreq - 1; 1034 if (nreq <
1035 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
1036 /* Working in legacy mode, all EQs are shared */
1037 dev->caps.comp_pool = 0;
1038 dev->caps.num_comp_vectors = nreq - 1;
1039 } else {
1040 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
1041 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
1042 }
1002 for (i = 0; i < nreq; ++i) 1043 for (i = 0; i < nreq; ++i)
1003 priv->eq_table.eq[i].irq = entries[i].vector; 1044 priv->eq_table.eq[i].irq = entries[i].vector;
1004 1045
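
The new vector budget above is capped by MSIX_LEGACY_SZ, MIN_MSIX_P_PORT, MAX_MSIX_P_PORT and MAX_MSIX, whose definitions are not part of this hunk. A rough standalone model of the calculation with guessed placeholder values for those constants, showing how nreq is capped and then split between the shared legacy completion vectors and the per-ring pool:

#include <stdio.h>

/* Placeholder values -- the real constants live in mlx4.h and may differ. */
#define MAX_MSIX_P_PORT 8
#define MIN_MSIX_P_PORT 2
#define MSIX_LEGACY_SZ  4
#define MAX_MSIX        64

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
    int num_ports = 2, num_online_cpus = 8;
    int num_eqs = 512, reserved_eqs = 4;

    /* Request enough vectors for every port plus the legacy set, capped. */
    int nreq = min_int(num_ports *
                       min_int(num_online_cpus + 1, MAX_MSIX_P_PORT)
                       + MSIX_LEGACY_SZ, MAX_MSIX);
    nreq = min_int(num_eqs - reserved_eqs, nreq);

    int comp_pool, num_comp_vectors;
    if (nreq < MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT) {
        /* Not enough vectors: fall back to fully shared EQs. */
        comp_pool = 0;
        num_comp_vectors = nreq - 1;
    } else {
        /* Keep the legacy vectors shared, hand the rest out per ring. */
        comp_pool = nreq - MSIX_LEGACY_SZ;
        num_comp_vectors = MSIX_LEGACY_SZ - 1;
    }

    printf("nreq=%d comp_pool=%d num_comp_vectors=%d\n",
           nreq, comp_pool, num_comp_vectors);
    return 0;
}
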
@@ -1010,6 +1051,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1010 1051
1011no_msi: 1052no_msi:
1012 dev->caps.num_comp_vectors = 1; 1053 dev->caps.num_comp_vectors = 1;
1054 dev->caps.comp_pool = 0;
1013 1055
1014 for (i = 0; i < 2; ++i) 1056 for (i = 0; i < 2; ++i)
1015 priv->eq_table.eq[i].irq = dev->pdev->irq; 1057 priv->eq_table.eq[i].irq = dev->pdev->irq;
@@ -1049,6 +1091,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
1049 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1091 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1050} 1092}
1051 1093
1094static int mlx4_init_steering(struct mlx4_dev *dev)
1095{
1096 struct mlx4_priv *priv = mlx4_priv(dev);
1097 int num_entries = dev->caps.num_ports;
1098 int i, j;
1099
1100 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
1101 if (!priv->steer)
1102 return -ENOMEM;
1103
1104 for (i = 0; i < num_entries; i++) {
1105 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1106 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
1107 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
1108 }
1109 INIT_LIST_HEAD(&priv->steer[i].high_prios);
1110 }
1111 return 0;
1112}
1113
1114static void mlx4_clear_steering(struct mlx4_dev *dev)
1115{
1116 struct mlx4_priv *priv = mlx4_priv(dev);
1117 struct mlx4_steer_index *entry, *tmp_entry;
1118 struct mlx4_promisc_qp *pqp, *tmp_pqp;
1119 int num_entries = dev->caps.num_ports;
1120 int i, j;
1121
1122 for (i = 0; i < num_entries; i++) {
1123 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1124 list_for_each_entry_safe(pqp, tmp_pqp,
1125 &priv->steer[i].promisc_qps[j],
1126 list) {
1127 list_del(&pqp->list);
1128 kfree(pqp);
1129 }
1130 list_for_each_entry_safe(entry, tmp_entry,
1131 &priv->steer[i].steer_entries[j],
1132 list) {
1133 list_del(&entry->list);
1134 list_for_each_entry_safe(pqp, tmp_pqp,
1135 &entry->duplicates,
1136 list) {
1137 list_del(&pqp->list);
1138 kfree(pqp);
1139 }
1140 kfree(entry);
1141 }
1142 }
1143 }
1144 kfree(priv->steer);
1145}
1146
1052static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1147static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1053{ 1148{
1054 struct mlx4_priv *priv; 1149 struct mlx4_priv *priv;
@@ -1109,6 +1204,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1109 } 1204 }
1110 } 1205 }
1111 1206
1207 /* Allow large DMA segments, up to the firmware limit of 1 GB */
1208 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
1209
1112 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1210 priv = kzalloc(sizeof *priv, GFP_KERNEL);
1113 if (!priv) { 1211 if (!priv) {
1114 dev_err(&pdev->dev, "Device struct alloc failed, " 1212 dev_err(&pdev->dev, "Device struct alloc failed, "
@@ -1127,6 +1225,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1127 INIT_LIST_HEAD(&priv->pgdir_list); 1225 INIT_LIST_HEAD(&priv->pgdir_list);
1128 mutex_init(&priv->pgdir_mutex); 1226 mutex_init(&priv->pgdir_mutex);
1129 1227
1228 pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
1229
1230 INIT_LIST_HEAD(&priv->bf_list);
1231 mutex_init(&priv->bf_mutex);
1232
1130 /* 1233 /*
1131 * Now reset the HCA before we touch the PCI capabilities or 1234 * Now reset the HCA before we touch the PCI capabilities or
1132 * attempt a firmware command, since a boot ROM may have left 1235 * attempt a firmware command, since a boot ROM may have left
@@ -1151,8 +1254,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1151 if (err) 1254 if (err)
1152 goto err_close; 1255 goto err_close;
1153 1256
1257 priv->msix_ctl.pool_bm = 0;
1258 spin_lock_init(&priv->msix_ctl.pool_lock);
1259
1154 mlx4_enable_msi_x(dev); 1260 mlx4_enable_msi_x(dev);
1155 1261
1262 err = mlx4_init_steering(dev);
1263 if (err)
1264 goto err_free_eq;
1265
1156 err = mlx4_setup_hca(dev); 1266 err = mlx4_setup_hca(dev);
1157 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { 1267 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
1158 dev->flags &= ~MLX4_FLAG_MSI_X; 1268 dev->flags &= ~MLX4_FLAG_MSI_X;
@@ -1161,7 +1271,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1161 } 1271 }
1162 1272
1163 if (err) 1273 if (err)
1164 goto err_free_eq; 1274 goto err_steer;
1165 1275
1166 for (port = 1; port <= dev->caps.num_ports; port++) { 1276 for (port = 1; port <= dev->caps.num_ports; port++) {
1167 err = mlx4_init_port_info(dev, port); 1277 err = mlx4_init_port_info(dev, port);
@@ -1194,6 +1304,9 @@ err_port:
1194 mlx4_cleanup_pd_table(dev); 1304 mlx4_cleanup_pd_table(dev);
1195 mlx4_cleanup_uar_table(dev); 1305 mlx4_cleanup_uar_table(dev);
1196 1306
1307err_steer:
1308 mlx4_clear_steering(dev);
1309
1197err_free_eq: 1310err_free_eq:
1198 mlx4_free_eq_table(dev); 1311 mlx4_free_eq_table(dev);
1199 1312
@@ -1253,6 +1366,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1253 iounmap(priv->kar); 1366 iounmap(priv->kar);
1254 mlx4_uar_free(dev, &priv->driver_uar); 1367 mlx4_uar_free(dev, &priv->driver_uar);
1255 mlx4_cleanup_uar_table(dev); 1368 mlx4_cleanup_uar_table(dev);
1369 mlx4_clear_steering(dev);
1256 mlx4_free_eq_table(dev); 1370 mlx4_free_eq_table(dev);
1257 mlx4_close_hca(dev); 1371 mlx4_close_hca(dev);
1258 mlx4_cmd_cleanup(dev); 1372 mlx4_cmd_cleanup(dev);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 79cf42db2ea..c6d336aed2d 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/etherdevice.h>
35 36
36#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
37 38
@@ -40,38 +41,40 @@
40#define MGM_QPN_MASK 0x00FFFFFF 41#define MGM_QPN_MASK 0x00FFFFFF
41#define MGM_BLCK_LB_BIT 30 42#define MGM_BLCK_LB_BIT 30
42 43
43struct mlx4_mgm {
44 __be32 next_gid_index;
45 __be32 members_count;
46 u32 reserved[2];
47 u8 gid[16];
48 __be32 qp[MLX4_QP_PER_MGM];
49};
50
51static const u8 zero_gid[16]; /* automatically initialized to 0 */ 44static const u8 zero_gid[16]; /* automatically initialized to 0 */
52 45
53static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, 46static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
54 struct mlx4_cmd_mailbox *mailbox) 47 struct mlx4_cmd_mailbox *mailbox)
55{ 48{
56 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 49 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
57 MLX4_CMD_TIME_CLASS_A); 50 MLX4_CMD_TIME_CLASS_A);
58} 51}
59 52
60static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, 53static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
61 struct mlx4_cmd_mailbox *mailbox) 54 struct mlx4_cmd_mailbox *mailbox)
62{ 55{
63 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 56 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
64 MLX4_CMD_TIME_CLASS_A); 57 MLX4_CMD_TIME_CLASS_A);
65} 58}
66 59
67static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 60static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
68 u16 *hash) 61 struct mlx4_cmd_mailbox *mailbox)
62{
63 u32 in_mod;
64
65 in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
66 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
67 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
68}
69
70static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
71 u16 *hash, u8 op_mod)
69{ 72{
70 u64 imm; 73 u64 imm;
71 int err; 74 int err;
72 75
73 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, 76 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
74 MLX4_CMD_TIME_CLASS_A); 77 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
75 78
76 if (!err) 79 if (!err)
77 *hash = imm; 80 *hash = imm;
@@ -79,6 +82,458 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
79 return err; 82 return err;
80} 83}
81 84
85static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
86 enum mlx4_steer_type steer,
87 u32 qpn)
88{
89 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
90 struct mlx4_promisc_qp *pqp;
91
92 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
93 if (pqp->qpn == qpn)
94 return pqp;
95 }
96 /* not found */
97 return NULL;
98}
99
100/*
101 * Add new entry to steering data structure.
102 * All promisc QPs should be added as well
103 */
104static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
105 enum mlx4_steer_type steer,
106 unsigned int index, u32 qpn)
107{
108 struct mlx4_steer *s_steer;
109 struct mlx4_cmd_mailbox *mailbox;
110 struct mlx4_mgm *mgm;
111 u32 members_count;
112 struct mlx4_steer_index *new_entry;
113 struct mlx4_promisc_qp *pqp;
114 struct mlx4_promisc_qp *dqp = NULL;
115 u32 prot;
116 int err;
117 u8 pf_num;
118
119 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
120 s_steer = &mlx4_priv(dev)->steer[pf_num];
121 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
122 if (!new_entry)
123 return -ENOMEM;
124
125 INIT_LIST_HEAD(&new_entry->duplicates);
126 new_entry->index = index;
127 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
128
129 /* If the given qpn is also a promisc qp,
130 * it should be inserted to duplicates list
131 */
132 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
133 if (pqp) {
134 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
135 if (!dqp) {
136 err = -ENOMEM;
137 goto out_alloc;
138 }
139 dqp->qpn = qpn;
140 list_add_tail(&dqp->list, &new_entry->duplicates);
141 }
142
143 /* if no promisc qps for this vep, we are done */
144 if (list_empty(&s_steer->promisc_qps[steer]))
145 return 0;
146
147 /* now need to add all the promisc qps to the new
148 * steering entry, as they should also receive the packets
149 * destined to this address */
150 mailbox = mlx4_alloc_cmd_mailbox(dev);
151 if (IS_ERR(mailbox)) {
152 err = -ENOMEM;
153 goto out_alloc;
154 }
155 mgm = mailbox->buf;
156
157 err = mlx4_READ_ENTRY(dev, index, mailbox);
158 if (err)
159 goto out_mailbox;
160
161 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
162 prot = be32_to_cpu(mgm->members_count) >> 30;
163 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
164 /* don't add already existing qpn */
165 if (pqp->qpn == qpn)
166 continue;
167 if (members_count == MLX4_QP_PER_MGM) {
168 /* out of space */
169 err = -ENOMEM;
170 goto out_mailbox;
171 }
172
173 /* add the qpn */
174 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
175 }
176 /* update the qp count and update the entry with all the promisc qps */
177 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
178 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
179
180out_mailbox:
181 mlx4_free_cmd_mailbox(dev, mailbox);
182 if (!err)
183 return 0;
184out_alloc:
185 if (dqp) {
186 list_del(&dqp->list);
187 kfree(dqp);
188 }
189 list_del(&new_entry->list);
190 kfree(new_entry);
191 return err;
192}
193
194/* update the data structures with existing steering entry */
195static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
196 enum mlx4_steer_type steer,
197 unsigned int index, u32 qpn)
198{
199 struct mlx4_steer *s_steer;
200 struct mlx4_steer_index *tmp_entry, *entry = NULL;
201 struct mlx4_promisc_qp *pqp;
202 struct mlx4_promisc_qp *dqp;
203 u8 pf_num;
204
205 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
206 s_steer = &mlx4_priv(dev)->steer[pf_num];
207
208 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
209 if (!pqp)
210 return 0; /* nothing to do */
211
212 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
213 if (tmp_entry->index == index) {
214 entry = tmp_entry;
215 break;
216 }
217 }
218 if (unlikely(!entry)) {
219 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
220 return -EINVAL;
221 }
222
223 /* the given qpn is listed as a promisc qpn,
224 * we need to add it as a duplicate to this entry
225 * for future reference */
226 list_for_each_entry(dqp, &entry->duplicates, list) {
227 if (qpn == dqp->qpn)
228 return 0; /* qp is already duplicated */
229 }
230
231 /* add the qp as a duplicate on this index */
232 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
233 if (!dqp)
234 return -ENOMEM;
235 dqp->qpn = qpn;
236 list_add_tail(&dqp->list, &entry->duplicates);
237
238 return 0;
239}
240
241/* Check whether a qpn is a duplicate on steering entry
242 * If so, it should not be removed from mgm */
243static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
244 enum mlx4_steer_type steer,
245 unsigned int index, u32 qpn)
246{
247 struct mlx4_steer *s_steer;
248 struct mlx4_steer_index *tmp_entry, *entry = NULL;
249 struct mlx4_promisc_qp *dqp, *tmp_dqp;
250 u8 pf_num;
251
252 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
253 s_steer = &mlx4_priv(dev)->steer[pf_num];
254
255 /* if qp is not promisc, it cannot be duplicated */
256 if (!get_promisc_qp(dev, pf_num, steer, qpn))
257 return false;
258
259 /* The qp is promisc qp so it is a duplicate on this index
260 * Find the index entry, and remove the duplicate */
261 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
262 if (tmp_entry->index == index) {
263 entry = tmp_entry;
264 break;
265 }
266 }
267 if (unlikely(!entry)) {
268 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
269 return false;
270 }
271 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
272 if (dqp->qpn == qpn) {
273 list_del(&dqp->list);
274 kfree(dqp);
275 }
276 }
277 return true;
278}
279
280/* If a steering entry contains only promisc QPs, it can be removed. */
281static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
282 enum mlx4_steer_type steer,
283 unsigned int index, u32 tqpn)
284{
285 struct mlx4_steer *s_steer;
286 struct mlx4_cmd_mailbox *mailbox;
287 struct mlx4_mgm *mgm;
288 struct mlx4_steer_index *entry = NULL, *tmp_entry;
289 u32 qpn;
290 u32 members_count;
291 bool ret = false;
292 int i;
293 u8 pf_num;
294
295 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
296 s_steer = &mlx4_priv(dev)->steer[pf_num];
297
298 mailbox = mlx4_alloc_cmd_mailbox(dev);
299 if (IS_ERR(mailbox))
300 return false;
301 mgm = mailbox->buf;
302
303 if (mlx4_READ_ENTRY(dev, index, mailbox))
304 goto out;
305 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
306 for (i = 0; i < members_count; i++) {
307 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
308 if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
309 /* the qp is not promisc, the entry can't be removed */
310 goto out;
311 }
312 }
313 /* All the qps currently registered for this entry are promiscuous,
314 * Checking for duplicates */
315 ret = true;
316 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
317 if (entry->index == index) {
318 if (list_empty(&entry->duplicates)) {
319 list_del(&entry->list);
320 kfree(entry);
321 } else {
322 /* This entry contains duplicates so it shouldn't be removed */
323 ret = false;
324 goto out;
325 }
326 }
327 }
328
329out:
330 mlx4_free_cmd_mailbox(dev, mailbox);
331 return ret;
332}
333
334static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
335 enum mlx4_steer_type steer, u32 qpn)
336{
337 struct mlx4_steer *s_steer;
338 struct mlx4_cmd_mailbox *mailbox;
339 struct mlx4_mgm *mgm;
340 struct mlx4_steer_index *entry;
341 struct mlx4_promisc_qp *pqp;
342 struct mlx4_promisc_qp *dqp;
343 u32 members_count;
344 u32 prot;
345 int i;
346 bool found;
347 int last_index;
348 int err;
349 u8 pf_num;
350 struct mlx4_priv *priv = mlx4_priv(dev);
351 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
352 s_steer = &mlx4_priv(dev)->steer[pf_num];
353
354 mutex_lock(&priv->mcg_table.mutex);
355
356 if (get_promisc_qp(dev, pf_num, steer, qpn)) {
357 err = 0; /* Nothing to do, already exists */
358 goto out_mutex;
359 }
360
361 pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
362 if (!pqp) {
363 err = -ENOMEM;
364 goto out_mutex;
365 }
366 pqp->qpn = qpn;
367
368 mailbox = mlx4_alloc_cmd_mailbox(dev);
369 if (IS_ERR(mailbox)) {
370 err = -ENOMEM;
371 goto out_alloc;
372 }
373 mgm = mailbox->buf;
374
375 /* the promisc qp needs to be added for each one of the steering
376 * entries; if it already exists there, it needs to be added as a
377 * duplicate for this entry */
378 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
379 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
380 if (err)
381 goto out_mailbox;
382
383 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
384 prot = be32_to_cpu(mgm->members_count) >> 30;
385 found = false;
386 for (i = 0; i < members_count; i++) {
387 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
388 /* Entry already exists, add to duplicates */
389 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
390 if (!dqp)
391 goto out_mailbox;
392 dqp->qpn = qpn;
393 list_add_tail(&dqp->list, &entry->duplicates);
394 found = true;
395 }
396 }
397 if (!found) {
398 /* Need to add the qpn to mgm */
399 if (members_count == MLX4_QP_PER_MGM) {
400 /* entry is full */
401 err = -ENOMEM;
402 goto out_mailbox;
403 }
404 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
405 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
406 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
407 if (err)
408 goto out_mailbox;
409 }
410 last_index = entry->index;
411 }
412
413 /* add the new qpn to list of promisc qps */
414 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
415 /* now need to add all the promisc qps to default entry */
416 memset(mgm, 0, sizeof *mgm);
417 members_count = 0;
418 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
419 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
420 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
421
422 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
423 if (err)
424 goto out_list;
425
426 mlx4_free_cmd_mailbox(dev, mailbox);
427 mutex_unlock(&priv->mcg_table.mutex);
428 return 0;
429
430out_list:
431 list_del(&pqp->list);
432out_mailbox:
433 mlx4_free_cmd_mailbox(dev, mailbox);
434out_alloc:
435 kfree(pqp);
436out_mutex:
437 mutex_unlock(&priv->mcg_table.mutex);
438 return err;
439}
440
441static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
442 enum mlx4_steer_type steer, u32 qpn)
443{
444 struct mlx4_priv *priv = mlx4_priv(dev);
445 struct mlx4_steer *s_steer;
446 struct mlx4_cmd_mailbox *mailbox;
447 struct mlx4_mgm *mgm;
448 struct mlx4_steer_index *entry;
449 struct mlx4_promisc_qp *pqp;
450 struct mlx4_promisc_qp *dqp;
451 u32 members_count;
452 bool found;
453 bool back_to_list = false;
454 int loc, i;
455 int err;
456 u8 pf_num;
457
458 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
459 s_steer = &mlx4_priv(dev)->steer[pf_num];
460 mutex_lock(&priv->mcg_table.mutex);
461
462 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
463 if (unlikely(!pqp)) {
464 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
465 /* nothing to do */
466 err = 0;
467 goto out_mutex;
468 }
469
470 /* remove from list of promisc qps */
471 list_del(&pqp->list);
472
473 /* set the default entry not to include the removed one */
474 mailbox = mlx4_alloc_cmd_mailbox(dev);
475 if (IS_ERR(mailbox)) {
476 err = -ENOMEM;
477 back_to_list = true;
478 goto out_list;
479 }
480 mgm = mailbox->buf;
481 members_count = 0;
482 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
483 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
484 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
485
486 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
487 if (err)
488 goto out_mailbox;
489
490 /* remove the qp from all the steering entries*/
491 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
492 found = false;
493 list_for_each_entry(dqp, &entry->duplicates, list) {
494 if (dqp->qpn == qpn) {
495 found = true;
496 break;
497 }
498 }
499 if (found) {
500 /* a duplicate, no need to change the mgm,
501 * only update the duplicates list */
502 list_del(&dqp->list);
503 kfree(dqp);
504 } else {
505 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
506 if (err)
507 goto out_mailbox;
508 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
509 for (loc = -1, i = 0; i < members_count; ++i)
510 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
511 loc = i;
512
513 mgm->members_count = cpu_to_be32(--members_count |
514 (MLX4_PROT_ETH << 30));
515 mgm->qp[loc] = mgm->qp[i - 1];
516 mgm->qp[i - 1] = 0;
517
518 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
519 if (err)
520 goto out_mailbox;
521 }
522
523 }
524
525out_mailbox:
526 mlx4_free_cmd_mailbox(dev, mailbox);
527out_list:
528 if (back_to_list)
529 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
530 else
531 kfree(pqp);
532out_mutex:
533 mutex_unlock(&priv->mcg_table.mutex);
534 return err;
535}
536
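
remove_promisc_qp() and the duplicate tracking above boil down to one rule: an MGM entry may only be dropped when every QP still listed in it is there purely because it is promiscuous, and none of them was also attached explicitly (a duplicate). A small self-contained model of that decision using plain arrays instead of the driver's linked lists; the QP numbers are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Model: a steering entry lists member QPNs; some QPNs are promiscuous;
 * QPNs that were both promiscuous and explicitly attached are "duplicates". */
static bool is_in(const unsigned *set, int n, unsigned qpn)
{
    for (int i = 0; i < n; i++)
        if (set[i] == qpn)
            return true;
    return false;
}

/* Mirrors can_remove_steering_entry(): removable only if every remaining
 * member (other than the one being detached) is promisc and the entry has
 * no recorded duplicates. */
static bool can_remove_entry(const unsigned *members, int n_members,
                             const unsigned *promisc, int n_promisc,
                             int n_duplicates, unsigned detached_qpn)
{
    for (int i = 0; i < n_members; i++) {
        unsigned qpn = members[i];
        if (qpn != detached_qpn && !is_in(promisc, n_promisc, qpn))
            return false;
    }
    return n_duplicates == 0;
}

int main(void)
{
    unsigned members[] = { 0x40, 0x41, 0x42 };
    unsigned promisc[] = { 0x41, 0x42 };

    /* QP 0x40 detaches; the rest are promisc-only, so the entry can go. */
    printf("%d\n", can_remove_entry(members, 3, promisc, 2, 0, 0x40));
    /* Same, but 0x41 was also attached explicitly -> keep the entry. */
    printf("%d\n", can_remove_entry(members, 3, promisc, 2, 1, 0x40));
    return 0;
}
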
82/* 537/*
83 * Caller must hold MCG table semaphore. gid and mgm parameters must 538 * Caller must hold MCG table semaphore. gid and mgm parameters must
84 * be properly aligned for command interface. 539 * be properly aligned for command interface.
@@ -94,15 +549,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
94 * If no AMGM exists for given gid, *index = -1, *prev = index of last 549 * If no AMGM exists for given gid, *index = -1, *prev = index of last
95 * entry in hash chain and *mgm holds end of hash chain. 550 * entry in hash chain and *mgm holds end of hash chain.
96 */ 551 */
97static int find_mgm(struct mlx4_dev *dev, 552static int find_entry(struct mlx4_dev *dev, u8 port,
98 u8 *gid, enum mlx4_protocol protocol, 553 u8 *gid, enum mlx4_protocol prot,
99 struct mlx4_cmd_mailbox *mgm_mailbox, 554 enum mlx4_steer_type steer,
100 u16 *hash, int *prev, int *index) 555 struct mlx4_cmd_mailbox *mgm_mailbox,
556 u16 *hash, int *prev, int *index)
101{ 557{
102 struct mlx4_cmd_mailbox *mailbox; 558 struct mlx4_cmd_mailbox *mailbox;
103 struct mlx4_mgm *mgm = mgm_mailbox->buf; 559 struct mlx4_mgm *mgm = mgm_mailbox->buf;
104 u8 *mgid; 560 u8 *mgid;
105 int err; 561 int err;
562 u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
106 563
107 mailbox = mlx4_alloc_cmd_mailbox(dev); 564 mailbox = mlx4_alloc_cmd_mailbox(dev);
108 if (IS_ERR(mailbox)) 565 if (IS_ERR(mailbox))
@@ -111,7 +568,7 @@ static int find_mgm(struct mlx4_dev *dev,
111 568
112 memcpy(mgid, gid, 16); 569 memcpy(mgid, gid, 16);
113 570
114 err = mlx4_MGID_HASH(dev, mailbox, hash); 571 err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
115 mlx4_free_cmd_mailbox(dev, mailbox); 572 mlx4_free_cmd_mailbox(dev, mailbox);
116 if (err) 573 if (err)
117 return err; 574 return err;
@@ -123,11 +580,11 @@ static int find_mgm(struct mlx4_dev *dev,
123 *prev = -1; 580 *prev = -1;
124 581
125 do { 582 do {
126 err = mlx4_READ_MCG(dev, *index, mgm_mailbox); 583 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
127 if (err) 584 if (err)
128 return err; 585 return err;
129 586
130 if (!memcmp(mgm->gid, zero_gid, 16)) { 587 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
131 if (*index != *hash) { 588 if (*index != *hash) {
132 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 589 mlx4_err(dev, "Found zero MGID in AMGM.\n");
133 err = -EINVAL; 590 err = -EINVAL;
@@ -136,7 +593,7 @@ static int find_mgm(struct mlx4_dev *dev,
136 } 593 }
137 594
138 if (!memcmp(mgm->gid, gid, 16) && 595 if (!memcmp(mgm->gid, gid, 16) &&
139 be32_to_cpu(mgm->members_count) >> 30 == protocol) 596 be32_to_cpu(mgm->members_count) >> 30 == prot)
140 return err; 597 return err;
141 598
142 *prev = *index; 599 *prev = *index;
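
find_entry() (formerly find_mgm()) resolves a GID by hashing it and walking the AMGM overflow chain: a slot whose low 24 members_count bits are zero is free, and the next slot in the chain is carried in next_gid_index shifted left by 6. A compact userspace simulation of that walk over an in-memory table; the table size, bucket and slot numbers are invented for the example.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct mgm {
    uint32_t next_gid_index;   /* AMGM index << 6, 0 terminates the chain */
    uint32_t members_count;    /* low 24 bits: members; 0 means free slot */
    uint8_t  gid[16];
};

/* Walk the chain starting at the hash bucket; return slot index or -1. */
static int find_entry(const struct mgm *table, int hash, const uint8_t *gid)
{
    int index = hash;

    while (1) {
        if (!(table[index].members_count & 0xffffff))
            return -1;                          /* free slot: not found */
        if (!memcmp(table[index].gid, gid, 16))
            return index;                       /* match */
        index = table[index].next_gid_index >> 6;
        if (!index)
            return -1;                          /* end of chain */
    }
}

int main(void)
{
    struct mgm table[32] = { 0 };
    uint8_t gid[16] = { [5] = 1, [10] = 0xaa };

    /* Pretend the GID hashes to bucket 3 but lives in AMGM slot 17. */
    table[3].members_count = 1;
    table[3].next_gid_index = 17 << 6;
    table[17].members_count = 2;
    memcpy(table[17].gid, gid, 16);

    printf("found at %d\n", find_entry(table, 3, gid));
    return 0;
}
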
@@ -147,8 +604,9 @@ static int find_mgm(struct mlx4_dev *dev,
147 return err; 604 return err;
148} 605}
149 606
150int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 607int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
151 int block_mcast_loopback, enum mlx4_protocol protocol) 608 int block_mcast_loopback, enum mlx4_protocol prot,
609 enum mlx4_steer_type steer)
152{ 610{
153 struct mlx4_priv *priv = mlx4_priv(dev); 611 struct mlx4_priv *priv = mlx4_priv(dev);
154 struct mlx4_cmd_mailbox *mailbox; 612 struct mlx4_cmd_mailbox *mailbox;
@@ -159,6 +617,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
159 int link = 0; 617 int link = 0;
160 int i; 618 int i;
161 int err; 619 int err;
620 u8 port = gid[5];
621 u8 new_entry = 0;
162 622
163 mailbox = mlx4_alloc_cmd_mailbox(dev); 623 mailbox = mlx4_alloc_cmd_mailbox(dev);
164 if (IS_ERR(mailbox)) 624 if (IS_ERR(mailbox))
@@ -166,14 +626,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
166 mgm = mailbox->buf; 626 mgm = mailbox->buf;
167 627
168 mutex_lock(&priv->mcg_table.mutex); 628 mutex_lock(&priv->mcg_table.mutex);
169 629 err = find_entry(dev, port, gid, prot, steer,
170 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 630 mailbox, &hash, &prev, &index);
171 if (err) 631 if (err)
172 goto out; 632 goto out;
173 633
174 if (index != -1) { 634 if (index != -1) {
175 if (!memcmp(mgm->gid, zero_gid, 16)) 635 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
636 new_entry = 1;
176 memcpy(mgm->gid, gid, 16); 637 memcpy(mgm->gid, gid, 16);
638 }
177 } else { 639 } else {
178 link = 1; 640 link = 1;
179 641
@@ -209,26 +671,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
209 else 671 else
210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 672 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
211 673
212 mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); 674 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
213 675
214 err = mlx4_WRITE_MCG(dev, index, mailbox); 676 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
215 if (err) 677 if (err)
216 goto out; 678 goto out;
217 679
218 if (!link) 680 if (!link)
219 goto out; 681 goto out;
220 682
221 err = mlx4_READ_MCG(dev, prev, mailbox); 683 err = mlx4_READ_ENTRY(dev, prev, mailbox);
222 if (err) 684 if (err)
223 goto out; 685 goto out;
224 686
225 mgm->next_gid_index = cpu_to_be32(index << 6); 687 mgm->next_gid_index = cpu_to_be32(index << 6);
226 688
227 err = mlx4_WRITE_MCG(dev, prev, mailbox); 689 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
228 if (err) 690 if (err)
229 goto out; 691 goto out;
230 692
231out: 693out:
694 if (prot == MLX4_PROT_ETH) {
695 /* manage the steering entry for promisc mode */
696 if (new_entry)
697 new_steering_entry(dev, 0, port, steer, index, qp->qpn);
698 else
699 existing_steering_entry(dev, 0, port, steer,
700 index, qp->qpn);
701 }
232 if (err && link && index != -1) { 702 if (err && link && index != -1) {
233 if (index < dev->caps.num_mgms) 703 if (index < dev->caps.num_mgms)
234 mlx4_warn(dev, "Got AMGM index %d < %d", 704 mlx4_warn(dev, "Got AMGM index %d < %d",
@@ -242,10 +712,9 @@ out:
242 mlx4_free_cmd_mailbox(dev, mailbox); 712 mlx4_free_cmd_mailbox(dev, mailbox);
243 return err; 713 return err;
244} 714}
245EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
246 715
247int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 716int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
248 enum mlx4_protocol protocol) 717 enum mlx4_protocol prot, enum mlx4_steer_type steer)
249{ 718{
250 struct mlx4_priv *priv = mlx4_priv(dev); 719 struct mlx4_priv *priv = mlx4_priv(dev);
251 struct mlx4_cmd_mailbox *mailbox; 720 struct mlx4_cmd_mailbox *mailbox;
@@ -255,6 +724,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
255 int prev, index; 724 int prev, index;
256 int i, loc; 725 int i, loc;
257 int err; 726 int err;
727 u8 port = gid[5];
728 bool removed_entry = false;
258 729
259 mailbox = mlx4_alloc_cmd_mailbox(dev); 730 mailbox = mlx4_alloc_cmd_mailbox(dev);
260 if (IS_ERR(mailbox)) 731 if (IS_ERR(mailbox))
@@ -263,7 +734,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
263 734
264 mutex_lock(&priv->mcg_table.mutex); 735 mutex_lock(&priv->mcg_table.mutex);
265 736
266 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 737 err = find_entry(dev, port, gid, prot, steer,
738 mailbox, &hash, &prev, &index);
267 if (err) 739 if (err)
268 goto out; 740 goto out;
269 741
@@ -273,6 +745,11 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
273 goto out; 745 goto out;
274 } 746 }
275 747
748 /* if this qp is also a promisc qp, it shouldn't be removed */
749 if (prot == MLX4_PROT_ETH &&
750 check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
751 goto out;
752
276 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 753 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
277 for (loc = -1, i = 0; i < members_count; ++i) 754 for (loc = -1, i = 0; i < members_count; ++i)
278 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 755 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
@@ -285,26 +762,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
285 } 762 }
286 763
287 764
288 mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); 765 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
289 mgm->qp[loc] = mgm->qp[i - 1]; 766 mgm->qp[loc] = mgm->qp[i - 1];
290 mgm->qp[i - 1] = 0; 767 mgm->qp[i - 1] = 0;
291 768
292 if (i != 1) { 769 if (prot == MLX4_PROT_ETH)
293 err = mlx4_WRITE_MCG(dev, index, mailbox); 770 removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
771 if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
772 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
294 goto out; 773 goto out;
295 } 774 }
296 775
776 /* We are going to delete the entry, members count should be 0 */
777 mgm->members_count = cpu_to_be32((u32) prot << 30);
778
297 if (prev == -1) { 779 if (prev == -1) {
298 /* Remove entry from MGM */ 780 /* Remove entry from MGM */
299 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 781 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
300 if (amgm_index) { 782 if (amgm_index) {
301 err = mlx4_READ_MCG(dev, amgm_index, mailbox); 783 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
302 if (err) 784 if (err)
303 goto out; 785 goto out;
304 } else 786 } else
305 memset(mgm->gid, 0, 16); 787 memset(mgm->gid, 0, 16);
306 788
307 err = mlx4_WRITE_MCG(dev, index, mailbox); 789 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
308 if (err) 790 if (err)
309 goto out; 791 goto out;
310 792
@@ -319,13 +801,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
319 } else { 801 } else {
320 /* Remove entry from AMGM */ 802 /* Remove entry from AMGM */
321 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 803 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
322 err = mlx4_READ_MCG(dev, prev, mailbox); 804 err = mlx4_READ_ENTRY(dev, prev, mailbox);
323 if (err) 805 if (err)
324 goto out; 806 goto out;
325 807
326 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 808 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
327 809
328 err = mlx4_WRITE_MCG(dev, prev, mailbox); 810 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
329 if (err) 811 if (err)
330 goto out; 812 goto out;
331 813
@@ -343,8 +825,85 @@ out:
343 mlx4_free_cmd_mailbox(dev, mailbox); 825 mlx4_free_cmd_mailbox(dev, mailbox);
344 return err; 826 return err;
345} 827}
828
829
830int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
831 int block_mcast_loopback, enum mlx4_protocol prot)
832{
833 enum mlx4_steer_type steer;
834
835 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
836
837 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
838 return 0;
839
840 if (prot == MLX4_PROT_ETH)
841 gid[7] |= (steer << 1);
842
843 return mlx4_qp_attach_common(dev, qp, gid,
844 block_mcast_loopback, prot,
845 steer);
846}
847EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
848
849int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
850 enum mlx4_protocol prot)
851{
852 enum mlx4_steer_type steer;
853
854 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
855
856 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
857 return 0;
858
859 if (prot == MLX4_PROT_ETH) {
860 gid[7] |= (steer << 1);
861 }
862
863 return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
864}
346EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 865EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
347 866
867
868int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
869{
870 if (!dev->caps.vep_mc_steering)
871 return 0;
872
873
874 return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
875}
876EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
877
878int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
879{
880 if (!dev->caps.vep_mc_steering)
881 return 0;
882
883
884 return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
885}
886EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
887
888int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
889{
890 if (!dev->caps.vep_mc_steering)
891 return 0;
892
893
894 return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
895}
896EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
897
898int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
899{
900 if (!dev->caps.vep_mc_steering)
901 return 0;
902
903 return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
904}
905EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
906
348int mlx4_init_mcg_table(struct mlx4_dev *dev) 907int mlx4_init_mcg_table(struct mlx4_dev *dev)
349{ 908{
350 struct mlx4_priv *priv = mlx4_priv(dev); 909 struct mlx4_priv *priv = mlx4_priv(dev);
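
The mlx4_multicast_attach()/detach() wrappers added above pick the steering table from the MAC stored in gid[10..15]: a valid unicast address selects MLX4_UC_STEER, anything else MLX4_MC_STEER, and for Ethernet the chosen type is also folded into gid[7] before the common attach/detach path runs. A hedged sketch of just that classification step; the enum values and the simplified address check are stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum steer_type { UC_STEER = 0, MC_STEER = 1 };   /* illustrative values */

/* Roughly what is_valid_ether_addr() checks: not multicast, not all zero. */
static bool valid_unicast_mac(const uint8_t *mac)
{
    static const uint8_t zero[6];
    return !(mac[0] & 0x01) && memcmp(mac, zero, 6) != 0;
}

static enum steer_type classify(uint8_t *gid)
{
    enum steer_type steer =
        valid_unicast_mac(&gid[10]) ? UC_STEER : MC_STEER;

    gid[7] |= steer << 1;     /* same encoding the wrappers apply */
    return steer;
}

int main(void)
{
    uint8_t uc_gid[16] = { [10] = 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
    uint8_t mc_gid[16] = { [10] = 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

    printf("unicast   -> %s\n", classify(uc_gid) == UC_STEER ? "UC" : "MC");
    printf("multicast -> %s\n", classify(mc_gid) == UC_STEER ? "UC" : "MC");
    return 0;
}
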
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 0da5bb7285b..c1e0e5f1bcd 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -105,6 +105,7 @@ struct mlx4_bitmap {
105 u32 max; 105 u32 max;
106 u32 reserved_top; 106 u32 reserved_top;
107 u32 mask; 107 u32 mask;
108 u32 avail;
108 spinlock_t lock; 109 spinlock_t lock;
109 unsigned long *table; 110 unsigned long *table;
110}; 111};
@@ -162,6 +163,27 @@ struct mlx4_fw {
162 u8 catas_bar; 163 u8 catas_bar;
163}; 164};
164 165
166#define MGM_QPN_MASK 0x00FFFFFF
167#define MGM_BLCK_LB_BIT 30
168
169struct mlx4_promisc_qp {
170 struct list_head list;
171 u32 qpn;
172};
173
174struct mlx4_steer_index {
175 struct list_head list;
176 unsigned int index;
177 struct list_head duplicates;
178};
179
180struct mlx4_mgm {
181 __be32 next_gid_index;
182 __be32 members_count;
183 u32 reserved[2];
184 u8 gid[16];
185 __be32 qp[MLX4_QP_PER_MGM];
186};
165struct mlx4_cmd { 187struct mlx4_cmd {
166 struct pci_pool *pool; 188 struct pci_pool *pool;
167 void __iomem *hcr; 189 void __iomem *hcr;
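
The mlx4_mgm layout that moved into mlx4.h packs the protocol into the top two bits of members_count and the member count into the low 24 bits, which is why mcg.c keeps masking with 0xffffff and shifting by 30 (the real field is a big-endian __be32, converted with be32_to_cpu() first). A tiny sketch of the packing; the protocol value is a placeholder.

#include <stdint.h>
#include <stdio.h>

/* Protocol value here is a placeholder; only the packing scheme matters. */
#define PROT_ETH 3u

static uint32_t pack_members(uint32_t count, uint32_t prot)
{
    return (count & 0xffffff) | (prot << 30);
}

int main(void)
{
    uint32_t v = pack_members(5, PROT_ETH);

    printf("count=%u prot=%u\n", v & 0xffffff, v >> 30);  /* 5, 3 */
    return 0;
}
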
@@ -265,6 +287,10 @@ struct mlx4_vlan_table {
265 int max; 287 int max;
266}; 288};
267 289
290struct mlx4_mac_entry {
291 u64 mac;
292};
293
268struct mlx4_port_info { 294struct mlx4_port_info {
269 struct mlx4_dev *dev; 295 struct mlx4_dev *dev;
270 int port; 296 int port;
@@ -272,7 +298,9 @@ struct mlx4_port_info {
272 struct device_attribute port_attr; 298 struct device_attribute port_attr;
273 enum mlx4_port_type tmp_type; 299 enum mlx4_port_type tmp_type;
274 struct mlx4_mac_table mac_table; 300 struct mlx4_mac_table mac_table;
301 struct radix_tree_root mac_tree;
275 struct mlx4_vlan_table vlan_table; 302 struct mlx4_vlan_table vlan_table;
303 int base_qpn;
276}; 304};
277 305
278struct mlx4_sense { 306struct mlx4_sense {
@@ -282,6 +310,17 @@ struct mlx4_sense {
282 struct delayed_work sense_poll; 310 struct delayed_work sense_poll;
283}; 311};
284 312
313struct mlx4_msix_ctl {
314 u64 pool_bm;
315 spinlock_t pool_lock;
316};
317
318struct mlx4_steer {
319 struct list_head promisc_qps[MLX4_NUM_STEERS];
320 struct list_head steer_entries[MLX4_NUM_STEERS];
321 struct list_head high_prios;
322};
323
285struct mlx4_priv { 324struct mlx4_priv {
286 struct mlx4_dev dev; 325 struct mlx4_dev dev;
287 326
@@ -313,6 +352,11 @@ struct mlx4_priv {
313 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 352 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
314 struct mlx4_sense sense; 353 struct mlx4_sense sense;
315 struct mutex port_mutex; 354 struct mutex port_mutex;
355 struct mlx4_msix_ctl msix_ctl;
356 struct mlx4_steer *steer;
357 struct list_head bf_list;
358 struct mutex bf_mutex;
359 struct io_mapping *bf_mapping;
316}; 360};
317 361
318static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 362static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
328void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 372void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
329u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 373u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
330void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); 374void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
375u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
331int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 376int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
332 u32 reserved_bot, u32 reserved_top); 377 u32 reserved_bot, u32 reserved_top);
333void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); 378void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
@@ -403,4 +448,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
403int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); 448int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
404int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); 449int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
405 450
451int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
452 enum mlx4_protocol prot, enum mlx4_steer_type steer);
453int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
454 int block_mcast_loopback, enum mlx4_protocol prot,
455 enum mlx4_steer_type steer);
406#endif /* MLX4_H */ 456#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index dfed6a07c2d..e30f6099c0d 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,8 +49,8 @@
49#include "en_port.h" 49#include "en_port.h"
50 50
51#define DRV_NAME "mlx4_en" 51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.5.1.6" 52#define DRV_VERSION "1.5.4.1"
53#define DRV_RELDATE "August 2010" 53#define DRV_RELDATE "March 2011"
54 54
55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
56 56
@@ -62,6 +62,7 @@
62#define MLX4_EN_PAGE_SHIFT 12 62#define MLX4_EN_PAGE_SHIFT 12
63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
64#define MAX_RX_RINGS 16 64#define MAX_RX_RINGS 16
65#define MIN_RX_RINGS 4
65#define TXBB_SIZE 64 66#define TXBB_SIZE 64
66#define HEADROOM (2048 / TXBB_SIZE + 1) 67#define HEADROOM (2048 / TXBB_SIZE + 1)
67#define STAMP_STRIDE 64 68#define STAMP_STRIDE 64
@@ -124,6 +125,7 @@ enum {
124#define MLX4_EN_RX_SIZE_THRESH 1024 125#define MLX4_EN_RX_SIZE_THRESH 1024
125#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) 126#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
126#define MLX4_EN_SAMPLE_INTERVAL 0 127#define MLX4_EN_SAMPLE_INTERVAL 0
128#define MLX4_EN_AVG_PKT_SMALL 256
127 129
128#define MLX4_EN_AUTO_CONF 0xffff 130#define MLX4_EN_AUTO_CONF 0xffff
129 131
@@ -214,6 +216,9 @@ struct mlx4_en_tx_desc {
214 216
215#define MLX4_EN_USE_SRQ 0x01000000 217#define MLX4_EN_USE_SRQ 0x01000000
216 218
219#define MLX4_EN_CX3_LOW_ID 0x1000
220#define MLX4_EN_CX3_HIGH_ID 0x1005
221
217struct mlx4_en_rx_alloc { 222struct mlx4_en_rx_alloc {
218 struct page *page; 223 struct page *page;
219 u16 offset; 224 u16 offset;
@@ -243,6 +248,8 @@ struct mlx4_en_tx_ring {
243 unsigned long bytes; 248 unsigned long bytes;
244 unsigned long packets; 249 unsigned long packets;
245 spinlock_t comp_lock; 250 spinlock_t comp_lock;
251 struct mlx4_bf bf;
252 bool bf_enabled;
246}; 253};
247 254
248struct mlx4_en_rx_desc { 255struct mlx4_en_rx_desc {
@@ -453,6 +460,7 @@ struct mlx4_en_priv {
453 struct mlx4_en_rss_map rss_map; 460 struct mlx4_en_rss_map rss_map;
454 u32 flags; 461 u32 flags;
455#define MLX4_EN_FLAG_PROMISC 0x1 462#define MLX4_EN_FLAG_PROMISC 0x1
463#define MLX4_EN_FLAG_MC_PROMISC 0x2
456 u32 tx_ring_num; 464 u32 tx_ring_num;
457 u32 rx_ring_num; 465 u32 rx_ring_num;
458 u32 rx_skb_size; 466 u32 rx_skb_size;
@@ -461,6 +469,7 @@ struct mlx4_en_priv {
461 u16 log_rx_info; 469 u16 log_rx_info;
462 470
463 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; 471 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
472 int tx_vector;
464 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 473 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
465 struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; 474 struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
466 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 475 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
@@ -476,6 +485,13 @@ struct mlx4_en_priv {
476 int mc_addrs_cnt; 485 int mc_addrs_cnt;
477 struct mlx4_en_stat_out_mbox hw_stats; 486 struct mlx4_en_stat_out_mbox hw_stats;
478 int vids[128]; 487 int vids[128];
488 bool wol;
489};
490
491enum mlx4_en_wol {
492 MLX4_EN_WOL_MAGIC = (1ULL << 61),
493 MLX4_EN_WOL_ENABLED = (1ULL << 62),
494 MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
479}; 495};
480 496
481 497
@@ -486,12 +502,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
486int mlx4_en_start_port(struct net_device *dev); 502int mlx4_en_start_port(struct net_device *dev);
487void mlx4_en_stop_port(struct net_device *dev); 503void mlx4_en_stop_port(struct net_device *dev);
488 504
489void mlx4_en_free_resources(struct mlx4_en_priv *priv); 505void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
490int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 506int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
491 507
492int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 508int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
493 int entries, int ring, enum cq_type mode); 509 int entries, int ring, enum cq_type mode);
494void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 510void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
511 bool reserve_vectors);
495int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 512int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
496void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 513void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
497int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 514int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -503,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
503netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 520netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
504 521
505int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 522int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
506 u32 size, u16 stride); 523 int qpn, u32 size, u16 stride);
507void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 524void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
508int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 525int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
509 struct mlx4_en_tx_ring *ring, 526 struct mlx4_en_tx_ring *ring,
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index c4988d6bd5b..1286b886dce 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -32,12 +32,17 @@
32 */ 32 */
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/io-mapping.h>
35 36
36#include <asm/page.h> 37#include <asm/page.h>
37 38
38#include "mlx4.h" 39#include "mlx4.h"
39#include "icm.h" 40#include "icm.h"
40 41
42enum {
43 MLX4_NUM_RESERVED_UARS = 8
44};
45
41int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) 46int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
42{ 47{
43 struct mlx4_priv *priv = mlx4_priv(dev); 48 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
77 return -ENOMEM; 82 return -ENOMEM;
78 83
79 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; 84 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
85 uar->map = NULL;
80 86
81 return 0; 87 return 0;
82} 88}
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
88} 94}
89EXPORT_SYMBOL_GPL(mlx4_uar_free); 95EXPORT_SYMBOL_GPL(mlx4_uar_free);
90 96
97int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
98{
99 struct mlx4_priv *priv = mlx4_priv(dev);
100 struct mlx4_uar *uar;
101 int err = 0;
102 int idx;
103
104 if (!priv->bf_mapping)
105 return -ENOMEM;
106
107 mutex_lock(&priv->bf_mutex);
108 if (!list_empty(&priv->bf_list))
109 uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
110 else {
111 if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
112 err = -ENOMEM;
113 goto out;
114 }
115 uar = kmalloc(sizeof *uar, GFP_KERNEL);
116 if (!uar) {
117 err = -ENOMEM;
118 goto out;
119 }
120 err = mlx4_uar_alloc(dev, uar);
121 if (err)
122 goto free_kmalloc;
123
124 uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
125 if (!uar->map) {
126 err = -ENOMEM;
127 goto free_uar;
128 }
129
130 uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
131 if (!uar->bf_map) {
132 err = -ENOMEM;
133 goto unmap_uar;
134 }
135 uar->free_bf_bmap = 0;
136 list_add(&uar->bf_list, &priv->bf_list);
137 }
138
139 bf->uar = uar;
140 idx = ffz(uar->free_bf_bmap);
141 uar->free_bf_bmap |= 1 << idx;
142 bf->uar = uar;
143 bf->offset = 0;
144 bf->buf_size = dev->caps.bf_reg_size / 2;
145 bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
146 if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
147 list_del_init(&uar->bf_list);
148
149 goto out;
150
151unmap_uar:
152 bf->uar = NULL;
153 iounmap(uar->map);
154
155free_uar:
156 mlx4_uar_free(dev, uar);
157
158free_kmalloc:
159 kfree(uar);
160
161out:
162 mutex_unlock(&priv->bf_mutex);
163 return err;
164}
165EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
166
167void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
168{
169 struct mlx4_priv *priv = mlx4_priv(dev);
170 int idx;
171
172 if (!bf->uar || !bf->uar->bf_map)
173 return;
174
175 mutex_lock(&priv->bf_mutex);
176 idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
177 bf->uar->free_bf_bmap &= ~(1 << idx);
178 if (!bf->uar->free_bf_bmap) {
179 if (!list_empty(&bf->uar->bf_list))
180 list_del(&bf->uar->bf_list);
181
182 io_mapping_unmap(bf->uar->bf_map);
183 iounmap(bf->uar->map);
184 mlx4_uar_free(dev, bf->uar);
185 kfree(bf->uar);
186 } else if (list_empty(&bf->uar->bf_list))
187 list_add(&bf->uar->bf_list, &priv->bf_list);
188
189 mutex_unlock(&priv->bf_mutex);
190}
191EXPORT_SYMBOL_GPL(mlx4_bf_free);
192
91int mlx4_init_uar_table(struct mlx4_dev *dev) 193int mlx4_init_uar_table(struct mlx4_dev *dev)
92{ 194{
93 if (dev->caps.num_uars <= 128) { 195 if (dev->caps.num_uars <= 128) {
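
mlx4_bf_alloc()/mlx4_bf_free() above hand out blue flame register slots inside one UAR page through free_bf_bmap: ffz() finds the first clear bit, the bit stays set while the slot is in use, and the UAR leaves the candidate list once every slot is taken. A standalone model of that slot bookkeeping; the number of registers per page is an assumed example, the real value comes from the firmware's bf_regs_per_page.

#include <stdint.h>
#include <stdio.h>

#define BF_REGS_PER_PAGE 8   /* example only; the real value comes from FW */

/* First zero bit, like the kernel's ffz(). */
static int first_zero_bit(uint32_t map)
{
    for (int i = 0; i < 32; i++)
        if (!(map & (1u << i)))
            return i;
    return -1;
}

static int bf_slot_alloc(uint32_t *map)
{
    if (*map == (1u << BF_REGS_PER_PAGE) - 1)
        return -1;                    /* page full, would move to a new UAR */
    int idx = first_zero_bit(*map);
    *map |= 1u << idx;
    return idx;
}

static void bf_slot_free(uint32_t *map, int idx)
{
    *map &= ~(1u << idx);
}

int main(void)
{
    uint32_t map = 0;
    int a = bf_slot_alloc(&map);
    int b = bf_slot_alloc(&map);

    printf("allocated slots %d and %d, map=0x%x\n", a, b, map);
    bf_slot_free(&map, a);
    printf("after free, map=0x%x\n", map);
    return 0;
}
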
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 451339559bd..eca7d8596f8 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
90 return err; 90 return err;
91} 91}
92 92
93int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) 93static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
94 u64 mac, int *qpn, u8 reserve)
94{ 95{
95 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 96 struct mlx4_qp qp;
97 u8 gid[16] = {0};
98 int err;
99
100 if (reserve) {
101 err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
102 if (err) {
103 mlx4_err(dev, "Failed to reserve qp for mac registration\n");
104 return err;
105 }
106 }
107 qp.qpn = *qpn;
108
109 mac &= 0xffffffffffffULL;
110 mac = cpu_to_be64(mac << 16);
111 memcpy(&gid[10], &mac, ETH_ALEN);
112 gid[5] = port;
113 gid[7] = MLX4_UC_STEER << 1;
114
115 err = mlx4_qp_attach_common(dev, &qp, gid, 0,
116 MLX4_PROT_ETH, MLX4_UC_STEER);
117 if (err && reserve)
118 mlx4_qp_release_range(dev, *qpn, 1);
119
120 return err;
121}
122
123static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
124 u64 mac, int qpn, u8 free)
125{
126 struct mlx4_qp qp;
127 u8 gid[16] = {0};
128
129 qp.qpn = qpn;
130 mac &= 0xffffffffffffULL;
131 mac = cpu_to_be64(mac << 16);
132 memcpy(&gid[10], &mac, ETH_ALEN);
133 gid[5] = port;
134 gid[7] = MLX4_UC_STEER << 1;
135
136 mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
137 if (free)
138 mlx4_qp_release_range(dev, qpn, 1);
139}
140
141int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
142{
143 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
144 struct mlx4_mac_table *table = &info->mac_table;
145 struct mlx4_mac_entry *entry;
96 int i, err = 0; 146 int i, err = 0;
97 int free = -1; 147 int free = -1;
98 148
149 if (dev->caps.vep_uc_steering) {
150 err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
151 if (!err) {
152 entry = kmalloc(sizeof *entry, GFP_KERNEL);
153 if (!entry) {
154 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
155 return -ENOMEM;
156 }
157 entry->mac = mac;
158 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
159 if (err) {
160 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
161 return err;
162 }
163 } else
164 return err;
165 }
99 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); 166 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
100 mutex_lock(&table->mutex); 167 mutex_lock(&table->mutex);
101 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { 168 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
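
mlx4_uc_steer_add()/release() above build a 16-byte pseudo-GID for the unicast steering table: the port in gid[5], the steering type shifted into gid[7], and the 48-bit MAC in network byte order in gid[10..15] (the cpu_to_be64(mac << 16) plus memcpy pattern). A portable sketch of the same layout that extracts the MAC bytes explicitly instead of depending on host endianness; the MAC and port values are examples.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UC_STEER 0   /* illustrative; stands in for MLX4_UC_STEER */

static void build_uc_gid(uint8_t gid[16], uint8_t port, uint64_t mac)
{
    memset(gid, 0, 16);
    gid[5] = port;
    gid[7] = UC_STEER << 1;

    /* MAC most-significant byte first, same result as the be64 copy. */
    for (int i = 0; i < 6; i++)
        gid[10 + i] = (uint8_t)(mac >> (40 - 8 * i));
}

int main(void)
{
    uint8_t gid[16];

    build_uc_gid(gid, 1, 0x0002c9123456ULL);
    for (int i = 0; i < 16; i++)
        printf("%02x%s", gid[i], i == 15 ? "\n" : ":");
    return 0;
}
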
@@ -106,7 +173,6 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
106 173
107 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 174 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
108 /* MAC already registered, increase reference count */ 175 /* MAC already registered, increase reference count */
109 *index = i;
110 ++table->refs[i]; 176 ++table->refs[i];
111 goto out; 177 goto out;
112 } 178 }
@@ -137,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
137 goto out; 203 goto out;
138 } 204 }
139 205
140 *index = free; 206 if (!dev->caps.vep_uc_steering)
207 *qpn = info->base_qpn + free;
141 ++table->total; 208 ++table->total;
142out: 209out:
143 mutex_unlock(&table->mutex); 210 mutex_unlock(&table->mutex);
@@ -145,20 +212,52 @@ out:
145} 212}
146EXPORT_SYMBOL_GPL(mlx4_register_mac); 213EXPORT_SYMBOL_GPL(mlx4_register_mac);
147 214
148void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) 215static int validate_index(struct mlx4_dev *dev,
216 struct mlx4_mac_table *table, int index)
149{ 217{
150 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 218 int err = 0;
151 219
152 mutex_lock(&table->mutex); 220 if (index < 0 || index >= table->max || !table->entries[index]) {
153 if (!table->refs[index]) { 221 mlx4_warn(dev, "No valid MAC entry for the given index\n");
154 mlx4_warn(dev, "No MAC entry for index %d\n", index); 222 err = -EINVAL;
155 goto out;
156 } 223 }
157 if (--table->refs[index]) { 224 return err;
158 mlx4_warn(dev, "Have more references for index %d," 225}
159 "no need to modify MAC table\n", index); 226
160 goto out; 227static int find_index(struct mlx4_dev *dev,
228 struct mlx4_mac_table *table, u64 mac)
229{
230 int i;
231 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
232 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
233 return i;
161 } 234 }
235 /* MAC not found */
236 return -EINVAL;
237}
238
239void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
240{
241 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
242 struct mlx4_mac_table *table = &info->mac_table;
243 int index = qpn - info->base_qpn;
244 struct mlx4_mac_entry *entry;
245
246 if (dev->caps.vep_uc_steering) {
247 entry = radix_tree_lookup(&info->mac_tree, qpn);
248 if (entry) {
249 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
250 radix_tree_delete(&info->mac_tree, qpn);
251 index = find_index(dev, table, entry->mac);
252 kfree(entry);
253 }
254 }
255
256 mutex_lock(&table->mutex);
257
258 if (validate_index(dev, table, index))
259 goto out;
260
162 table->entries[index] = 0; 261 table->entries[index] = 0;
163 mlx4_set_port_mac_table(dev, port, table->entries); 262 mlx4_set_port_mac_table(dev, port, table->entries);
164 --table->total; 263 --table->total;
@@ -167,6 +266,44 @@ out:
167} 266}
168EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 267EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
169 268
269int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
270{
271 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
272 struct mlx4_mac_table *table = &info->mac_table;
273 int index = qpn - info->base_qpn;
274 struct mlx4_mac_entry *entry;
275 int err;
276
277 if (dev->caps.vep_uc_steering) {
278 entry = radix_tree_lookup(&info->mac_tree, qpn);
279 if (!entry)
280 return -EINVAL;
281 index = find_index(dev, table, entry->mac);
282 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
283 entry->mac = new_mac;
284 err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
285 if (err || index < 0)
286 return err;
287 }
288
289 mutex_lock(&table->mutex);
290
291 err = validate_index(dev, table, index);
292 if (err)
293 goto out;
294
295 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
296
297 err = mlx4_set_port_mac_table(dev, port, table->entries);
298 if (unlikely(err)) {
299 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
300 table->entries[index] = 0;
301 }
302out:
303 mutex_unlock(&table->mutex);
304 return err;
305}
306EXPORT_SYMBOL_GPL(mlx4_replace_mac);
170static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 307static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
171 __be32 *entries) 308 __be32 *entries)
172{ 309{
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index e749f82865f..b967647d0c7 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
107 profile[MLX4_RES_AUXC].num = request->num_qp; 107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq; 108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq; 109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
111 dev_cap->reserved_eqs +
112 num_possible_cpus() + 1);
113 profile[MLX4_RES_DMPT].num = request->num_mpt; 111 profile[MLX4_RES_DMPT].num = request->num_mpt;
114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
115 profile[MLX4_RES_MTT].num = request->num_mtt; 113 profile[MLX4_RES_MTT].num = request->num_mtt;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a7f2eed9a08..673dc600c89 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1312,17 +1312,26 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
1312 * page into an skb */ 1312 * page into an skb */
1313 1313
1314static inline int 1314static inline int
1315myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, 1315myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
1316 int bytes, int len, __wsum csum) 1316 int lro_enabled)
1317{ 1317{
1318 struct myri10ge_priv *mgp = ss->mgp; 1318 struct myri10ge_priv *mgp = ss->mgp;
1319 struct sk_buff *skb; 1319 struct sk_buff *skb;
1320 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; 1320 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
1321 int i, idx, hlen, remainder; 1321 struct myri10ge_rx_buf *rx;
1322 int i, idx, hlen, remainder, bytes;
1322 struct pci_dev *pdev = mgp->pdev; 1323 struct pci_dev *pdev = mgp->pdev;
1323 struct net_device *dev = mgp->dev; 1324 struct net_device *dev = mgp->dev;
1324 u8 *va; 1325 u8 *va;
1325 1326
1327 if (len <= mgp->small_bytes) {
1328 rx = &ss->rx_small;
1329 bytes = mgp->small_bytes;
1330 } else {
1331 rx = &ss->rx_big;
1332 bytes = mgp->big_bytes;
1333 }
1334
1326 len += MXGEFW_PAD; 1335 len += MXGEFW_PAD;
1327 idx = rx->cnt & rx->mask; 1336 idx = rx->cnt & rx->mask;
1328 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; 1337 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
@@ -1341,7 +1350,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
1341 remainder -= MYRI10GE_ALLOC_SIZE; 1350 remainder -= MYRI10GE_ALLOC_SIZE;
1342 } 1351 }
1343 1352
1344 if (dev->features & NETIF_F_LRO) { 1353 if (lro_enabled) {
1345 rx_frags[0].page_offset += MXGEFW_PAD; 1354 rx_frags[0].page_offset += MXGEFW_PAD;
1346 rx_frags[0].size -= MXGEFW_PAD; 1355 rx_frags[0].size -= MXGEFW_PAD;
1347 len -= MXGEFW_PAD; 1356 len -= MXGEFW_PAD;
@@ -1463,7 +1472,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1463{ 1472{
1464 struct myri10ge_rx_done *rx_done = &ss->rx_done; 1473 struct myri10ge_rx_done *rx_done = &ss->rx_done;
1465 struct myri10ge_priv *mgp = ss->mgp; 1474 struct myri10ge_priv *mgp = ss->mgp;
1466 struct net_device *netdev = mgp->dev; 1475
1467 unsigned long rx_bytes = 0; 1476 unsigned long rx_bytes = 0;
1468 unsigned long rx_packets = 0; 1477 unsigned long rx_packets = 0;
1469 unsigned long rx_ok; 1478 unsigned long rx_ok;
@@ -1474,18 +1483,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1474 u16 length; 1483 u16 length;
1475 __wsum checksum; 1484 __wsum checksum;
1476 1485
1486 /*
1487 * Prevent compiler from generating more than one ->features memory
1488 * access to avoid theoretical race condition with functions that
1489 * change NETIF_F_LRO flag at runtime.
1490 */
1491 bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
1492
1477 while (rx_done->entry[idx].length != 0 && work_done < budget) { 1493 while (rx_done->entry[idx].length != 0 && work_done < budget) {
1478 length = ntohs(rx_done->entry[idx].length); 1494 length = ntohs(rx_done->entry[idx].length);
1479 rx_done->entry[idx].length = 0; 1495 rx_done->entry[idx].length = 0;
1480 checksum = csum_unfold(rx_done->entry[idx].checksum); 1496 checksum = csum_unfold(rx_done->entry[idx].checksum);
1481 if (length <= mgp->small_bytes) 1497 rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
1482 rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
1483 mgp->small_bytes,
1484 length, checksum);
1485 else
1486 rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
1487 mgp->big_bytes,
1488 length, checksum);
1489 rx_packets += rx_ok; 1498 rx_packets += rx_ok;
1490 rx_bytes += rx_ok * (unsigned long)length; 1499 rx_bytes += rx_ok * (unsigned long)length;
1491 cnt++; 1500 cnt++;
@@ -1497,7 +1506,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1497 ss->stats.rx_packets += rx_packets; 1506 ss->stats.rx_packets += rx_packets;
1498 ss->stats.rx_bytes += rx_bytes; 1507 ss->stats.rx_bytes += rx_bytes;
1499 1508
1500 if (netdev->features & NETIF_F_LRO) 1509 if (lro_enabled)
1501 lro_flush_all(&rx_done->lro_mgr); 1510 lro_flush_all(&rx_done->lro_mgr);
1502 1511
1503 /* restock receive rings if needed */ 1512 /* restock receive rings if needed */
@@ -3645,6 +3654,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3645 dma_free_coherent(&pdev->dev, bytes, 3654 dma_free_coherent(&pdev->dev, bytes,
3646 ss->fw_stats, ss->fw_stats_bus); 3655 ss->fw_stats, ss->fw_stats_bus);
3647 ss->fw_stats = NULL; 3656 ss->fw_stats = NULL;
3657 netif_napi_del(&ss->napi);
3648 } 3658 }
3649 } 3659 }
3650 kfree(mgp->ss); 3660 kfree(mgp->ss);
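
The myri10ge change above samples dev->features exactly once per poll pass. Below is a minimal stand-alone sketch of that single-read pattern in plain C; READ_FLAGS_ONCE() stands in for the kernel's ACCESS_ONCE() and all other names are made up:

    #include <stdbool.h>

    /* force exactly one volatile read of the flags word so a concurrent
     * update cannot be observed twice within the same pass */
    #define READ_FLAGS_ONCE(x)  (*(volatile unsigned int *)&(x))
    #define DEMO_LRO_FLAG       0x1u

    static unsigned int demo_features;   /* may be flipped by another thread */

    static bool demo_poll_pass(void)
    {
            /* every later test in this pass uses the cached value, so a
             * mid-pass flag change cannot split the LRO decision */
            bool lro_enabled = READ_FLAGS_ONCE(demo_features) & DEMO_LRO_FLAG;

            return lro_enabled;
    }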
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 653d308e0f5..3bdcc803ec6 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -871,7 +871,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
871 struct netxen_adapter *adapter = netdev_priv(netdev); 871 struct netxen_adapter *adapter = netdev_priv(netdev);
872 int hw_lro; 872 int hw_lro;
873 873
874 if (data & ~ETH_FLAG_LRO) 874 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
875 return -EINVAL; 875 return -EINVAL;
876 876
877 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) 877 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 40fa59e2fd5..32678b6c6b3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9501,7 +9501,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
9501 struct niu_parent *p; 9501 struct niu_parent *p;
9502 int i; 9502 int i;
9503 9503
9504 plat_dev = platform_device_register_simple("niu", niu_parent_index, 9504 plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
9505 NULL, 0); 9505 NULL, 0);
9506 if (IS_ERR(plat_dev)) 9506 if (IS_ERR(plat_dev))
9507 return NULL; 9507 return NULL;
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 8c66e22c3a0..50986840c99 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -2441,7 +2441,7 @@ static struct pci_error_handlers pch_gbe_err_handler = {
2441 .resume = pch_gbe_io_resume 2441 .resume = pch_gbe_io_resume
2442}; 2442};
2443 2443
2444static struct pci_driver pch_gbe_pcidev = { 2444static struct pci_driver pch_gbe_driver = {
2445 .name = KBUILD_MODNAME, 2445 .name = KBUILD_MODNAME,
2446 .id_table = pch_gbe_pcidev_id, 2446 .id_table = pch_gbe_pcidev_id,
2447 .probe = pch_gbe_probe, 2447 .probe = pch_gbe_probe,
@@ -2458,7 +2458,7 @@ static int __init pch_gbe_init_module(void)
2458{ 2458{
2459 int ret; 2459 int ret;
2460 2460
2461 ret = pci_register_driver(&pch_gbe_pcidev); 2461 ret = pci_register_driver(&pch_gbe_driver);
2462 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { 2462 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2463 if (copybreak == 0) { 2463 if (copybreak == 0) {
2464 pr_info("copybreak disabled\n"); 2464 pr_info("copybreak disabled\n");
@@ -2472,7 +2472,7 @@ static int __init pch_gbe_init_module(void)
2472 2472
2473static void __exit pch_gbe_exit_module(void) 2473static void __exit pch_gbe_exit_module(void)
2474{ 2474{
2475 pci_unregister_driver(&pch_gbe_pcidev); 2475 pci_unregister_driver(&pch_gbe_driver);
2476} 2476}
2477 2477
2478module_init(pch_gbe_init_module); 2478module_init(pch_gbe_init_module);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 993c52c82ae..e870c0698bb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -442,11 +442,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
442 u32 flags, phy_interface_t interface) 442 u32 flags, phy_interface_t interface)
443{ 443{
444 struct device *d = &phydev->dev; 444 struct device *d = &phydev->dev;
445 int err;
445 446
446 /* Assume that if there is no driver, that it doesn't 447 /* Assume that if there is no driver, that it doesn't
447 * exist, and we should use the genphy driver. */ 448 * exist, and we should use the genphy driver. */
448 if (NULL == d->driver) { 449 if (NULL == d->driver) {
449 int err;
450 d->driver = &genphy_driver.driver; 450 d->driver = &genphy_driver.driver;
451 451
452 err = d->driver->probe(d); 452 err = d->driver->probe(d);
@@ -474,7 +474,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
474 /* Do initial configuration here, now that 474 /* Do initial configuration here, now that
475 * we have certain key parameters 475 * we have certain key parameters
476 * (dev_flags and interface) */ 476 * (dev_flags and interface) */
477 return phy_init_hw(phydev); 477 err = phy_init_hw(phydev);
478 if (err)
479 phy_detach(phydev);
480
481 return err;
478} 482}
479 483
480/** 484/**
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 43583309a65..31e9407a073 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -129,7 +129,7 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
129 129
130 state->strm.next_in = NULL; 130 state->strm.next_in = NULL;
131 state->w_size = w_size; 131 state->w_size = w_size;
132 state->strm.workspace = vmalloc(zlib_deflate_workspacesize()); 132 state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
133 if (state->strm.workspace == NULL) 133 if (state->strm.workspace == NULL)
134 goto out_free; 134 goto out_free;
135 135
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 4c14510e2a8..45b2755d6cb 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
1003 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1003 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1004 int hw_lro; 1004 int hw_lro;
1005 1005
1006 if (data & ~ETH_FLAG_LRO) 1006 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
1007 return -EINVAL; 1007 return -EINVAL;
1008 1008
1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) 1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 5e403511289..493b0de3848 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2685,9 +2685,9 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2685 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, 2685 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
2686 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 2686 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2687 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 2687 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
2688 tp->mii.supports_gmii ? 2688 (tp->mii.supports_gmii ?
2689 ADVERTISED_1000baseT_Half | 2689 ADVERTISED_1000baseT_Half |
2690 ADVERTISED_1000baseT_Full : 0); 2690 ADVERTISED_1000baseT_Full : 0));
2691 2691
2692 if (RTL_R8(PHYstatus) & TBI_Enable) 2692 if (RTL_R8(PHYstatus) & TBI_Enable)
2693 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 2693 netif_info(tp, link, dev, "TBI auto-negotiating\n");
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 44150f2f7bf..26afbaae23f 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -382,7 +382,7 @@ static void rionet_remove(struct rio_dev *rdev)
382 struct rionet_peer *peer, *tmp; 382 struct rionet_peer *peer, *tmp;
383 383
384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
385 __ilog2(sizeof(void *)) + 4 : 0); 385 __fls(sizeof(void *)) + 4 : 0);
386 unregister_netdev(ndev); 386 unregister_netdev(ndev);
387 free_netdev(ndev); 387 free_netdev(ndev);
388 388
@@ -450,7 +450,7 @@ static int rionet_setup_netdev(struct rio_mport *mport)
450 } 450 }
451 451
452 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, 452 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
453 mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0); 453 mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
454 if (!rionet_active) { 454 if (!rionet_active) {
455 rc = -ENOMEM; 455 rc = -ENOMEM;
456 goto out; 456 goto out;
@@ -571,5 +571,5 @@ static void __exit rionet_exit(void)
571 rio_unregister_driver(&rionet_driver); 571 rio_unregister_driver(&rionet_driver);
572} 572}
573 573
574module_init(rionet_init); 574late_initcall(rionet_init);
575module_exit(rionet_exit); 575module_exit(rionet_exit);
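
A quick check of the rionet allocation order above (assuming 4 KiB pages, and that sys_size selects 16-bit RapidIO device IDs, i.e. a table of 65536 pointers):

    64-bit: __fls(sizeof(void *)) + 4 = __fls(8) + 4 = 7  ->  2^7 pages = 512 KiB = 65536 * 8 bytes
    32-bit: __fls(sizeof(void *)) + 4 = __fls(4) + 4 = 6  ->  2^6 pages = 256 KiB = 65536 * 4 bytes

For these power-of-two sizes __fls() gives the same order as the __ilog2() it replaces, so the allocation size itself is unchanged.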
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 2ad6364103e..356e74d20b8 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6726 int rc = 0; 6726 int rc = 0;
6727 int changed = 0; 6727 int changed = 0;
6728 6728
6729 if (data & ~ETH_FLAG_LRO) 6729 if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
6730 return -EINVAL; 6730 return -EINVAL;
6731 6731
6732 if (data & ETH_FLAG_LRO) { 6732 if (data & ETH_FLAG_LRO) {
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index b8bd936374f..d890679e4c4 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1054,6 +1054,7 @@ static int efx_init_io(struct efx_nic *efx)
1054{ 1054{
1055 struct pci_dev *pci_dev = efx->pci_dev; 1055 struct pci_dev *pci_dev = efx->pci_dev;
1056 dma_addr_t dma_mask = efx->type->max_dma_mask; 1056 dma_addr_t dma_mask = efx->type->max_dma_mask;
1057 bool use_wc;
1057 int rc; 1058 int rc;
1058 1059
1059 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1060 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1104,8 +1105,21 @@ static int efx_init_io(struct efx_nic *efx)
1104 rc = -EIO; 1105 rc = -EIO;
1105 goto fail3; 1106 goto fail3;
1106 } 1107 }
1107 efx->membase = ioremap_wc(efx->membase_phys, 1108
1108 efx->type->mem_map_size); 1109 /* bug22643: If SR-IOV is enabled then tx push over a write combined
1110 * mapping is unsafe. We need to disable write combining in this case.
1111 * MSI is unsupported when SR-IOV is enabled, and the firmware will
1112 * have removed the MSI capability. So write combining is safe if
1113 * there is an MSI capability.
1114 */
1115 use_wc = (!EFX_WORKAROUND_22643(efx) ||
1116 pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
1117 if (use_wc)
1118 efx->membase = ioremap_wc(efx->membase_phys,
1119 efx->type->mem_map_size);
1120 else
1121 efx->membase = ioremap_nocache(efx->membase_phys,
1122 efx->type->mem_map_size);
1109 if (!efx->membase) { 1123 if (!efx->membase) {
1110 netif_err(efx, probe, efx->net_dev, 1124 netif_err(efx, probe, efx->net_dev,
1111 "could not map memory BAR at %llx+%x\n", 1125 "could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index e4dd3a7f304..99ff11400ce 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -38,6 +38,8 @@
38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
39/* Legacy interrupt storm when interrupt fifo fills */ 39/* Legacy interrupt storm when interrupt fifo fills */
40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41/* Write combining and sriov=enabled are incompatible */
42#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
41 43
42/* Spurious parity errors in TSORT buffers */ 44/* Spurious parity errors in TSORT buffers */
43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
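
The efx_init_io() hunk above chooses the BAR mapping type at runtime; a minimal sketch of that decision follows (the helper name is made up, the real driver folds this directly into efx_init_io()):

    #include <linux/pci.h>
    #include <linux/io.h>

    static void __iomem *demo_map_membase(struct pci_dev *pci_dev,
                                          resource_size_t phys, unsigned int len,
                                          bool wc_unsafe_with_sriov)
    {
            /* write combining is only safe while the MSI capability is still
             * present, i.e. the firmware has not enabled SR-IOV */
            if (!wc_unsafe_with_sriov ||
                pci_find_capability(pci_dev, PCI_CAP_ID_MSI))
                    return ioremap_wc(phys, len);

            return ioremap_nocache(phys, len);
    }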
diff --git a/drivers/net/skfp/Makefile b/drivers/net/skfp/Makefile
index cb23580fcff..b0be0234abf 100644
--- a/drivers/net/skfp/Makefile
+++ b/drivers/net/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
17# projects. To keep the source common for all those drivers (and 17# projects. To keep the source common for all those drivers (and
18# thus simplify fixes to it), please do not clean it up! 18# thus simplify fixes to it), please do not clean it up!
19 19
20EXTRA_CFLAGS += -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes 20ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index a4f2bd52e54..36045f3b032 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -144,11 +144,7 @@ static int full_duplex[MAX_UNITS] = {0, };
144/* Time in jiffies before concluding the transmitter is hung. */ 144/* Time in jiffies before concluding the transmitter is hung. */
145#define TX_TIMEOUT (2 * HZ) 145#define TX_TIMEOUT (2 * HZ)
146 146
147/* 147#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
148 * This SUCKS.
149 * We need a much better method to determine if dma_addr_t is 64-bit.
150 */
151#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || (defined(CONFIG_MIPS) && ((defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || defined(CONFIG_64BIT))) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
152/* 64-bit dma_addr_t */ 148/* 64-bit dma_addr_t */
153#define ADDR_64BITS /* This chip uses 64 bit addresses. */ 149#define ADDR_64BITS /* This chip uses 64 bit addresses. */
154#define netdrv_addr_t __le64 150#define netdrv_addr_t __le64
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ebec88882c3..73c942d85f0 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -48,9 +48,9 @@
48#include <net/ip.h> 48#include <net/ip.h>
49 49
50#include <asm/system.h> 50#include <asm/system.h>
51#include <asm/io.h> 51#include <linux/io.h>
52#include <asm/byteorder.h> 52#include <asm/byteorder.h>
53#include <asm/uaccess.h> 53#include <linux/uaccess.h>
54 54
55#ifdef CONFIG_SPARC 55#ifdef CONFIG_SPARC
56#include <asm/idprom.h> 56#include <asm/idprom.h>
@@ -13118,7 +13118,7 @@ done:
13118 13118
13119static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 13119static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13120 13120
13121static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13121static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
13122{ 13122{
13123 dev->vlan_features |= flags; 13123 dev->vlan_features |= flags;
13124} 13124}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 6f600cced6e..3ec22c30779 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -433,4 +433,19 @@ config USB_SIERRA_NET
433 To compile this driver as a module, choose M here: the 433 To compile this driver as a module, choose M here: the
434 module will be called sierra_net. 434 module will be called sierra_net.
435 435
436config USB_VL600
437 tristate "LG VL600 modem dongle"
438 depends on USB_NET_CDCETHER
439 select USB_ACM
440 help
 441	  Select this if you want to use an LG Electronics 4G/LTE USB modem
442 called VL600. This driver only handles the ethernet
443 interface exposed by the modem firmware. To establish a connection
444 you will first need a userspace program that sends the right
445 command to the modem through its CDC ACM port, and most
446 likely also a DHCP client. See this thread about using the
447 4G modem from Verizon:
448
449 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
450
436endmenu 451endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index cac17030118..c7ec8a5f0a9 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -27,4 +27,5 @@ obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
29obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o 29obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
30obj-$(CONFIG_USB_VL600) += lg-vl600.o
30 31
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 5f3b97668e6..8f128541656 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -340,7 +340,7 @@ next:
340 340
341static const struct driver_info eem_info = { 341static const struct driver_info eem_info = {
342 .description = "CDC EEM Device", 342 .description = "CDC EEM Device",
343 .flags = FLAG_ETHER, 343 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
344 .bind = eem_bind, 344 .bind = eem_bind,
345 .rx_fixup = eem_rx_fixup, 345 .rx_fixup = eem_rx_fixup,
346 .tx_fixup = eem_tx_fixup, 346 .tx_fixup = eem_tx_fixup,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9a60e415d76..341f7056a80 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -378,7 +378,7 @@ static void dumpspeed(struct usbnet *dev, __le32 *speeds)
378 __le32_to_cpu(speeds[1]) / 1000); 378 __le32_to_cpu(speeds[1]) / 1000);
379} 379}
380 380
381static void cdc_status(struct usbnet *dev, struct urb *urb) 381void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
382{ 382{
383 struct usb_cdc_notification *event; 383 struct usb_cdc_notification *event;
384 384
@@ -418,8 +418,9 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
418 break; 418 break;
419 } 419 }
420} 420}
421EXPORT_SYMBOL_GPL(usbnet_cdc_status);
421 422
422static int cdc_bind(struct usbnet *dev, struct usb_interface *intf) 423int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
423{ 424{
424 int status; 425 int status;
425 struct cdc_state *info = (void *) &dev->data; 426 struct cdc_state *info = (void *) &dev->data;
@@ -441,6 +442,7 @@ static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
441 */ 442 */
442 return 0; 443 return 0;
443} 444}
445EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
444 446
445static int cdc_manage_power(struct usbnet *dev, int on) 447static int cdc_manage_power(struct usbnet *dev, int on)
446{ 448{
@@ -450,20 +452,20 @@ static int cdc_manage_power(struct usbnet *dev, int on)
450 452
451static const struct driver_info cdc_info = { 453static const struct driver_info cdc_info = {
452 .description = "CDC Ethernet Device", 454 .description = "CDC Ethernet Device",
453 .flags = FLAG_ETHER, 455 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
454 // .check_connect = cdc_check_connect, 456 // .check_connect = cdc_check_connect,
455 .bind = cdc_bind, 457 .bind = usbnet_cdc_bind,
456 .unbind = usbnet_cdc_unbind, 458 .unbind = usbnet_cdc_unbind,
457 .status = cdc_status, 459 .status = usbnet_cdc_status,
458 .manage_power = cdc_manage_power, 460 .manage_power = cdc_manage_power,
459}; 461};
460 462
461static const struct driver_info mbm_info = { 463static const struct driver_info mbm_info = {
462 .description = "Mobile Broadband Network Device", 464 .description = "Mobile Broadband Network Device",
463 .flags = FLAG_WWAN, 465 .flags = FLAG_WWAN,
464 .bind = cdc_bind, 466 .bind = usbnet_cdc_bind,
465 .unbind = usbnet_cdc_unbind, 467 .unbind = usbnet_cdc_unbind,
466 .status = cdc_status, 468 .status = usbnet_cdc_status,
467 .manage_power = cdc_manage_power, 469 .manage_power = cdc_manage_power,
468}; 470};
469 471
@@ -560,6 +562,13 @@ static const struct usb_device_id products [] = {
560 .driver_info = 0, 562 .driver_info = 0,
561}, 563},
562 564
565/* LG Electronics VL600 wants additional headers on every frame */
566{
567 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
568 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
569 .driver_info = 0,
570},
571
563/* 572/*
564 * WHITELIST!!! 573 * WHITELIST!!!
565 * 574 *
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 7113168473c..967371f0445 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1237,7 +1237,7 @@ static int cdc_ncm_manage_power(struct usbnet *dev, int status)
1237 1237
1238static const struct driver_info cdc_ncm_info = { 1238static const struct driver_info cdc_ncm_info = {
1239 .description = "CDC NCM", 1239 .description = "CDC NCM",
1240 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET, 1240 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
1241 .bind = cdc_ncm_bind, 1241 .bind = cdc_ncm_bind,
1242 .unbind = cdc_ncm_unbind, 1242 .unbind = cdc_ncm_unbind,
1243 .check_connect = cdc_ncm_check_connect, 1243 .check_connect = cdc_ncm_check_connect,
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index ca39ace0b0e..fc5f13d47ad 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -89,6 +89,7 @@ static int always_connected (struct usbnet *dev)
89 89
90static const struct driver_info ali_m5632_info = { 90static const struct driver_info ali_m5632_info = {
91 .description = "ALi M5632", 91 .description = "ALi M5632",
92 .flags = FLAG_POINTTOPOINT,
92}; 93};
93 94
94#endif 95#endif
@@ -110,6 +111,7 @@ static const struct driver_info ali_m5632_info = {
110 111
111static const struct driver_info an2720_info = { 112static const struct driver_info an2720_info = {
112 .description = "AnchorChips/Cypress 2720", 113 .description = "AnchorChips/Cypress 2720",
114 .flags = FLAG_POINTTOPOINT,
113 // no reset available! 115 // no reset available!
114 // no check_connect available! 116 // no check_connect available!
115 117
@@ -132,6 +134,7 @@ static const struct driver_info an2720_info = {
132 134
133static const struct driver_info belkin_info = { 135static const struct driver_info belkin_info = {
134 .description = "Belkin, eTEK, or compatible", 136 .description = "Belkin, eTEK, or compatible",
137 .flags = FLAG_POINTTOPOINT,
135}; 138};
136 139
137#endif /* CONFIG_USB_BELKIN */ 140#endif /* CONFIG_USB_BELKIN */
@@ -157,6 +160,7 @@ static const struct driver_info belkin_info = {
157static const struct driver_info epson2888_info = { 160static const struct driver_info epson2888_info = {
158 .description = "Epson USB Device", 161 .description = "Epson USB Device",
159 .check_connect = always_connected, 162 .check_connect = always_connected,
163 .flags = FLAG_POINTTOPOINT,
160 164
161 .in = 4, .out = 3, 165 .in = 4, .out = 3,
162}; 166};
@@ -173,6 +177,7 @@ static const struct driver_info epson2888_info = {
173#define HAVE_HARDWARE 177#define HAVE_HARDWARE
174static const struct driver_info kc2190_info = { 178static const struct driver_info kc2190_info = {
175 .description = "KC Technology KC-190", 179 .description = "KC Technology KC-190",
180 .flags = FLAG_POINTTOPOINT,
176}; 181};
177#endif /* CONFIG_USB_KC2190 */ 182#endif /* CONFIG_USB_KC2190 */
178 183
@@ -200,16 +205,19 @@ static const struct driver_info kc2190_info = {
200static const struct driver_info linuxdev_info = { 205static const struct driver_info linuxdev_info = {
201 .description = "Linux Device", 206 .description = "Linux Device",
202 .check_connect = always_connected, 207 .check_connect = always_connected,
208 .flags = FLAG_POINTTOPOINT,
203}; 209};
204 210
205static const struct driver_info yopy_info = { 211static const struct driver_info yopy_info = {
206 .description = "Yopy", 212 .description = "Yopy",
207 .check_connect = always_connected, 213 .check_connect = always_connected,
214 .flags = FLAG_POINTTOPOINT,
208}; 215};
209 216
210static const struct driver_info blob_info = { 217static const struct driver_info blob_info = {
211 .description = "Boot Loader OBject", 218 .description = "Boot Loader OBject",
212 .check_connect = always_connected, 219 .check_connect = always_connected,
220 .flags = FLAG_POINTTOPOINT,
213}; 221};
214 222
215#endif /* CONFIG_USB_ARMLINUX */ 223#endif /* CONFIG_USB_ARMLINUX */
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index dcd57c37ef7..c4cfd1dea88 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -193,7 +193,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
193 193
194static const struct driver_info genelink_info = { 194static const struct driver_info genelink_info = {
195 .description = "Genesys GeneLink", 195 .description = "Genesys GeneLink",
196 .flags = FLAG_FRAMING_GL | FLAG_NO_SETINT, 196 .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_GL | FLAG_NO_SETINT,
197 .bind = genelink_bind, 197 .bind = genelink_bind,
198 .rx_fixup = genelink_rx_fixup, 198 .rx_fixup = genelink_rx_fixup,
199 .tx_fixup = genelink_tx_fixup, 199 .tx_fixup = genelink_tx_fixup,
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
new file mode 100644
index 00000000000..1d83ccfd727
--- /dev/null
+++ b/drivers/net/usb/lg-vl600.c
@@ -0,0 +1,346 @@
1/*
2 * Ethernet interface part of the LG VL600 LTE modem (4G dongle)
3 *
4 * Copyright (C) 2011 Intel Corporation
5 * Author: Andrzej Zaborowski <balrogg@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21#include <linux/etherdevice.h>
22#include <linux/ethtool.h>
23#include <linux/mii.h>
24#include <linux/usb.h>
25#include <linux/usb/cdc.h>
26#include <linux/usb/usbnet.h>
27#include <linux/if_ether.h>
28#include <linux/if_arp.h>
29#include <linux/inetdevice.h>
30
31/*
32 * The device has a CDC ACM port for modem control (it claims to be
33 * CDC ACM anyway) and a CDC Ethernet port for actual network data.
34 * It will however ignore data on both ports that is not encapsulated
35 * in a specific way, any data returned is also encapsulated the same
36 * way. The headers don't seem to follow any popular standard.
37 *
38 * This driver adds and strips these headers from the ethernet frames
39 * sent/received from the CDC Ethernet port. The proprietary header
40 * replaces the standard ethernet header in a packet so only actual
41 * ethernet frames are allowed. The headers allow some form of
42 * multiplexing by using non standard values of the .h_proto field.
43 * Windows/Mac drivers do send a couple of such frames to the device
44 * during initialisation, with protocol set to 0x0906 or 0x0b06 and (what
45 * seems to be) a flag in the .dummy_flags. This doesn't seem necessary
 46 * for modem operation but can possibly be used for GPS or other functions.
47 */
48
49struct vl600_frame_hdr {
50 __le32 len;
51 __le32 serial;
52 __le32 pkt_cnt;
53 __le32 dummy_flags;
54 __le32 dummy;
55 __le32 magic;
56} __attribute__((packed));
57
58struct vl600_pkt_hdr {
59 __le32 dummy[2];
60 __le32 len;
61 __be16 h_proto;
62} __attribute__((packed));
63
64struct vl600_state {
65 struct sk_buff *current_rx_buf;
66};
67
68static int vl600_bind(struct usbnet *dev, struct usb_interface *intf)
69{
70 int ret;
71 struct vl600_state *s = kzalloc(sizeof(struct vl600_state), GFP_KERNEL);
72
73 if (!s)
74 return -ENOMEM;
75
76 ret = usbnet_cdc_bind(dev, intf);
77 if (ret) {
78 kfree(s);
79 return ret;
80 }
81
82 dev->driver_priv = s;
83
84 /* ARP packets don't go through, but they're also of no use. The
85 * subnet has only two hosts anyway: us and the gateway / DHCP
86 * server (probably simulated by modem firmware or network operator)
 87	 * whose address changes every time we connect to the intarwebz and
88 * who doesn't bother answering ARP requests either. So hardware
89 * addresses have no meaning, the destination and the source of every
90 * packet depend only on whether it is on the IN or OUT endpoint. */
91 dev->net->flags |= IFF_NOARP;
92
93 return ret;
94}
95
96static void vl600_unbind(struct usbnet *dev, struct usb_interface *intf)
97{
98 struct vl600_state *s = dev->driver_priv;
99
100 if (s->current_rx_buf)
101 dev_kfree_skb(s->current_rx_buf);
102
103 kfree(s);
104
105 return usbnet_cdc_unbind(dev, intf);
106}
107
108static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
109{
110 struct vl600_frame_hdr *frame;
111 struct vl600_pkt_hdr *packet;
112 struct ethhdr *ethhdr;
113 int packet_len, count;
114 struct sk_buff *buf = skb;
115 struct sk_buff *clone;
116 struct vl600_state *s = dev->driver_priv;
117
 118	/* Frame lengths are generally 4-byte multiples, but every couple of
 119	 * hours there's a correct frame with an odd number of bytes, so
 120	 * don't require this. */
121
122 /* Allow a packet (or multiple packets batched together) to be
123 * split across many frames. We don't allow a new batch to
124 * begin in the same frame another one is ending however, and no
125 * leading or trailing pad bytes. */
126 if (s->current_rx_buf) {
127 frame = (struct vl600_frame_hdr *) s->current_rx_buf->data;
128 if (skb->len + s->current_rx_buf->len >
129 le32_to_cpup(&frame->len)) {
130 netif_err(dev, ifup, dev->net, "Fragment too long\n");
131 dev->net->stats.rx_length_errors++;
132 goto error;
133 }
134
135 buf = s->current_rx_buf;
136 memcpy(skb_put(buf, skb->len), skb->data, skb->len);
137 } else if (skb->len < 4) {
138 netif_err(dev, ifup, dev->net, "Frame too short\n");
139 dev->net->stats.rx_length_errors++;
140 goto error;
141 }
142
143 frame = (struct vl600_frame_hdr *) buf->data;
144 /* NOTE: Should check that frame->magic == 0x53544448?
145 * Otherwise if we receive garbage at the beginning of the frame
146 * we may end up allocating a huge buffer and saving all the
147 * future incoming data into it. */
148
149 if (buf->len < sizeof(*frame) ||
150 buf->len != le32_to_cpup(&frame->len)) {
151 /* Save this fragment for later assembly */
152 if (s->current_rx_buf)
153 return 0;
154
155 s->current_rx_buf = skb_copy_expand(skb, 0,
156 le32_to_cpup(&frame->len), GFP_ATOMIC);
157 if (!s->current_rx_buf) {
158 netif_err(dev, ifup, dev->net, "Reserving %i bytes "
159 "for packet assembly failed.\n",
160 le32_to_cpup(&frame->len));
161 dev->net->stats.rx_errors++;
162 }
163
164 return 0;
165 }
166
167 count = le32_to_cpup(&frame->pkt_cnt);
168
169 skb_pull(buf, sizeof(*frame));
170
171 while (count--) {
172 if (buf->len < sizeof(*packet)) {
173 netif_err(dev, ifup, dev->net, "Packet too short\n");
174 goto error;
175 }
176
177 packet = (struct vl600_pkt_hdr *) buf->data;
178 packet_len = sizeof(*packet) + le32_to_cpup(&packet->len);
179 if (packet_len > buf->len) {
180 netif_err(dev, ifup, dev->net,
181 "Bad packet length stored in header\n");
182 goto error;
183 }
184
185 /* Packet header is same size as the ethernet header
186 * (sizeof(*packet) == sizeof(*ethhdr)), additionally
187 * the h_proto field is in the same place so we just leave it
188 * alone and fill in the remaining fields.
189 */
190 ethhdr = (struct ethhdr *) skb->data;
191 if (be16_to_cpup(&ethhdr->h_proto) == ETH_P_ARP &&
192 buf->len > 0x26) {
193 /* Copy the addresses from packet contents */
194 memcpy(ethhdr->h_source,
195 &buf->data[sizeof(*ethhdr) + 0x8],
196 ETH_ALEN);
197 memcpy(ethhdr->h_dest,
198 &buf->data[sizeof(*ethhdr) + 0x12],
199 ETH_ALEN);
200 } else {
201 memset(ethhdr->h_source, 0, ETH_ALEN);
202 memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN);
203 }
204
205 if (count) {
206 /* Not the last packet in this batch */
207 clone = skb_clone(buf, GFP_ATOMIC);
208 if (!clone)
209 goto error;
210
211 skb_trim(clone, packet_len);
212 usbnet_skb_return(dev, clone);
213
214 skb_pull(buf, (packet_len + 3) & ~3);
215 } else {
216 skb_trim(buf, packet_len);
217
218 if (s->current_rx_buf) {
219 usbnet_skb_return(dev, buf);
220 s->current_rx_buf = NULL;
221 return 0;
222 }
223
224 return 1;
225 }
226 }
227
228error:
229 if (s->current_rx_buf) {
230 dev_kfree_skb_any(s->current_rx_buf);
231 s->current_rx_buf = NULL;
232 }
233 dev->net->stats.rx_errors++;
234 return 0;
235}
236
237static struct sk_buff *vl600_tx_fixup(struct usbnet *dev,
238 struct sk_buff *skb, gfp_t flags)
239{
240 struct sk_buff *ret;
241 struct vl600_frame_hdr *frame;
242 struct vl600_pkt_hdr *packet;
243 static uint32_t serial = 1;
244 int orig_len = skb->len - sizeof(struct ethhdr);
245 int full_len = (skb->len + sizeof(struct vl600_frame_hdr) + 3) & ~3;
246
247 frame = (struct vl600_frame_hdr *) skb->data;
248 if (skb->len > sizeof(*frame) && skb->len == le32_to_cpup(&frame->len))
249 return skb; /* Already encapsulated? */
250
251 if (skb->len < sizeof(struct ethhdr))
252 /* Drop, device can only deal with ethernet packets */
253 return NULL;
254
255 if (!skb_cloned(skb)) {
256 int headroom = skb_headroom(skb);
257 int tailroom = skb_tailroom(skb);
258
259 if (tailroom >= full_len - skb->len - sizeof(*frame) &&
260 headroom >= sizeof(*frame))
261 /* There's enough head and tail room */
262 goto encapsulate;
263
264 if (headroom + tailroom + skb->len >= full_len) {
265 /* There's enough total room, just readjust */
266 skb->data = memmove(skb->head + sizeof(*frame),
267 skb->data, skb->len);
268 skb_set_tail_pointer(skb, skb->len);
269 goto encapsulate;
270 }
271 }
272
273 /* Alloc a new skb with the required size */
274 ret = skb_copy_expand(skb, sizeof(struct vl600_frame_hdr), full_len -
275 skb->len - sizeof(struct vl600_frame_hdr), flags);
276 dev_kfree_skb_any(skb);
277 if (!ret)
278 return ret;
279 skb = ret;
280
281encapsulate:
282 /* Packet header is same size as ethernet packet header
283 * (sizeof(*packet) == sizeof(struct ethhdr)), additionally the
284 * h_proto field is in the same place so we just leave it alone and
285 * overwrite the remaining fields.
286 */
287 packet = (struct vl600_pkt_hdr *) skb->data;
288 memset(&packet->dummy, 0, sizeof(packet->dummy));
289 packet->len = cpu_to_le32(orig_len);
290
291 frame = (struct vl600_frame_hdr *) skb_push(skb, sizeof(*frame));
292 memset(frame, 0, sizeof(*frame));
293 frame->len = cpu_to_le32(full_len);
294 frame->serial = cpu_to_le32(serial++);
295 frame->pkt_cnt = cpu_to_le32(1);
296
297 if (skb->len < full_len) /* Pad */
298 skb_put(skb, full_len - skb->len);
299
300 return skb;
301}
302
303static const struct driver_info vl600_info = {
304 .description = "LG VL600 modem",
305 .flags = FLAG_ETHER | FLAG_RX_ASSEMBLE,
306 .bind = vl600_bind,
307 .unbind = vl600_unbind,
308 .status = usbnet_cdc_status,
309 .rx_fixup = vl600_rx_fixup,
310 .tx_fixup = vl600_tx_fixup,
311};
312
313static const struct usb_device_id products[] = {
314 {
315 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
316 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
317 .driver_info = (unsigned long) &vl600_info,
318 },
319 {}, /* End */
320};
321MODULE_DEVICE_TABLE(usb, products);
322
323static struct usb_driver lg_vl600_driver = {
324 .name = "lg-vl600",
325 .id_table = products,
326 .probe = usbnet_probe,
327 .disconnect = usbnet_disconnect,
328 .suspend = usbnet_suspend,
329 .resume = usbnet_resume,
330};
331
332static int __init vl600_init(void)
333{
334 return usb_register(&lg_vl600_driver);
335}
336module_init(vl600_init);
337
338static void __exit vl600_exit(void)
339{
340 usb_deregister(&lg_vl600_driver);
341}
342module_exit(vl600_exit);
343
 344MODULE_AUTHOR("Andrzej Zaborowski");
345MODULE_DESCRIPTION("LG-VL600 modem's ethernet link");
346MODULE_LICENSE("GPL");
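
For reference, a small user-space sketch of the framing arithmetic vl600_tx_fixup() uses above (header layout copied from the driver; the helper name is made up):

    #include <stdint.h>
    #include <stddef.h>

    struct demo_vl600_frame_hdr {        /* 24 bytes, little-endian on the wire */
            uint32_t len;
            uint32_t serial;
            uint32_t pkt_cnt;
            uint32_t dummy_flags;
            uint32_t dummy;
            uint32_t magic;
    } __attribute__((packed));

    /* total frame length for one ethernet packet of eth_len bytes, mirroring
     * full_len = (skb->len + sizeof(struct vl600_frame_hdr) + 3) & ~3 */
    static size_t demo_vl600_frame_len(size_t eth_len)
    {
            return (eth_len + sizeof(struct demo_vl600_frame_hdr) + 3) & ~(size_t)3;
    }

    /* e.g. a 60-byte ethernet frame -> (60 + 24 + 3) & ~3 = 84 bytes on the wire */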
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index ba72a7281cb..01db4602a39 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -560,7 +560,7 @@ static int net1080_bind(struct usbnet *dev, struct usb_interface *intf)
560 560
561static const struct driver_info net1080_info = { 561static const struct driver_info net1080_info = {
562 .description = "NetChip TurboCONNECT", 562 .description = "NetChip TurboCONNECT",
563 .flags = FLAG_FRAMING_NC, 563 .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_NC,
564 .bind = net1080_bind, 564 .bind = net1080_bind,
565 .reset = net1080_reset, 565 .reset = net1080_reset,
566 .check_connect = net1080_check_connect, 566 .check_connect = net1080_check_connect,
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 08ad269f6b4..823c5375130 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -96,7 +96,7 @@ static int pl_reset(struct usbnet *dev)
96 96
97static const struct driver_info prolific_info = { 97static const struct driver_info prolific_info = {
98 .description = "Prolific PL-2301/PL-2302", 98 .description = "Prolific PL-2301/PL-2302",
99 .flags = FLAG_NO_SETINT, 99 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
100 /* some PL-2302 versions seem to fail usb_set_interface() */ 100 /* some PL-2302 versions seem to fail usb_set_interface() */
101 .reset = pl_reset, 101 .reset = pl_reset,
102}; 102};
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index dd8a4adf48c..5994a25c56a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(rndis_tx_fixup);
573 573
574static const struct driver_info rndis_info = { 574static const struct driver_info rndis_info = {
575 .description = "RNDIS device", 575 .description = "RNDIS device",
576 .flags = FLAG_ETHER | FLAG_FRAMING_RN | FLAG_NO_SETINT, 576 .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
577 .bind = rndis_bind, 577 .bind = rndis_bind,
578 .unbind = rndis_unbind, 578 .unbind = rndis_unbind,
579 .status = rndis_status, 579 .status = rndis_status,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bc86f4b6ecc..727874d9deb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -49,6 +49,8 @@
49 49
50struct smsc95xx_priv { 50struct smsc95xx_priv {
51 u32 mac_cr; 51 u32 mac_cr;
52 u32 hash_hi;
53 u32 hash_lo;
52 spinlock_t mac_cr_lock; 54 spinlock_t mac_cr_lock;
53 bool use_tx_csum; 55 bool use_tx_csum;
54 bool use_rx_csum; 56 bool use_rx_csum;
@@ -370,10 +372,11 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
370{ 372{
371 struct usbnet *dev = netdev_priv(netdev); 373 struct usbnet *dev = netdev_priv(netdev);
372 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 374 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
373 u32 hash_hi = 0;
374 u32 hash_lo = 0;
375 unsigned long flags; 375 unsigned long flags;
376 376
377 pdata->hash_hi = 0;
378 pdata->hash_lo = 0;
379
377 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 380 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
378 381
379 if (dev->net->flags & IFF_PROMISC) { 382 if (dev->net->flags & IFF_PROMISC) {
@@ -394,13 +397,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
394 u32 bitnum = smsc95xx_hash(ha->addr); 397 u32 bitnum = smsc95xx_hash(ha->addr);
395 u32 mask = 0x01 << (bitnum & 0x1F); 398 u32 mask = 0x01 << (bitnum & 0x1F);
396 if (bitnum & 0x20) 399 if (bitnum & 0x20)
397 hash_hi |= mask; 400 pdata->hash_hi |= mask;
398 else 401 else
399 hash_lo |= mask; 402 pdata->hash_lo |= mask;
400 } 403 }
401 404
402 netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", 405 netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
403 hash_hi, hash_lo); 406 pdata->hash_hi, pdata->hash_lo);
404 } else { 407 } else {
405 netif_dbg(dev, drv, dev->net, "receive own packets only\n"); 408 netif_dbg(dev, drv, dev->net, "receive own packets only\n");
406 pdata->mac_cr &= 409 pdata->mac_cr &=
@@ -410,8 +413,8 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
410 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 413 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
411 414
412 /* Initiate async writes, as we can't wait for completion here */ 415 /* Initiate async writes, as we can't wait for completion here */
413 smsc95xx_write_reg_async(dev, HASHH, &hash_hi); 416 smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
414 smsc95xx_write_reg_async(dev, HASHL, &hash_lo); 417 smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
415 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 418 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
416} 419}
417 420
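
The smsc95xx hunk above moves hash_hi/hash_lo from the stack into the private data. A plausible reading (an assumption, not stated in the hunk) is that smsc95xx_write_reg_async() keeps using the buffer after smsc95xx_set_multicast() returns, so the values need storage that outlives the call; a tiny sketch of that rule with made-up names:

    #include <linux/types.h>

    struct demo_priv {
            u32 hash_hi;          /* still valid when the async write completes */
    };

    static void demo_set_multicast(struct demo_priv *pd)
    {
            pd->hash_hi = 0x80000000;
            /* write_reg_async(dev, HASHH, &pd->hash_hi);  the buffer handed to
             * an asynchronous write must not live on this function's stack */
    }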
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 95c41d56631..069c1cf0fdf 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -387,8 +387,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
387static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) 387static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
388{ 388{
389 if (dev->driver_info->rx_fixup && 389 if (dev->driver_info->rx_fixup &&
390 !dev->driver_info->rx_fixup (dev, skb)) 390 !dev->driver_info->rx_fixup (dev, skb)) {
391 goto error; 391 /* With RX_ASSEMBLE, rx_fixup() must update counters */
392 if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
393 dev->net->stats.rx_errors++;
394 goto done;
395 }
392 // else network stack removes extra byte if we forced a short packet 396 // else network stack removes extra byte if we forced a short packet
393 397
394 if (skb->len) { 398 if (skb->len) {
@@ -401,8 +405,8 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
401 } 405 }
402 406
403 netif_dbg(dev, rx_err, dev->net, "drop\n"); 407 netif_dbg(dev, rx_err, dev->net, "drop\n");
404error:
405 dev->net->stats.rx_errors++; 408 dev->net->stats.rx_errors++;
409done:
406 skb_queue_tail(&dev->done, skb); 410 skb_queue_tail(&dev->done, skb);
407} 411}
408 412
@@ -1376,7 +1380,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1376 // else "eth%d" when there's reasonable doubt. userspace 1380 // else "eth%d" when there's reasonable doubt. userspace
1377 // can rename the link if it knows better. 1381 // can rename the link if it knows better.
1378 if ((dev->driver_info->flags & FLAG_ETHER) != 0 && 1382 if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
1379 (net->dev_addr [0] & 0x02) == 0) 1383 ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
1384 (net->dev_addr [0] & 0x02) == 0))
1380 strcpy (net->name, "eth%d"); 1385 strcpy (net->name, "eth%d");
1381 /* WLAN devices should always be named "wlan%d" */ 1386 /* WLAN devices should always be named "wlan%d" */
1382 if ((dev->driver_info->flags & FLAG_WLAN) != 0) 1387 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
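
The usbnet_probe() hunk above widens the "eth%d" naming test; restated as a small helper (hypothetical, flags as defined in linux/usb/usbnet.h):

    #include <linux/usb/usbnet.h>

    /* keep the "eth%d" name only for ethernet-like interfaces that are either
     * not point-to-point links or carry a globally administered MAC address
     * (locally-administered bit, 0x02 in the first octet, clear) */
    static bool demo_wants_eth_name(u32 info_flags, const u8 *dev_addr)
    {
            if (!(info_flags & FLAG_ETHER))
                    return false;
            return !(info_flags & FLAG_POINTTOPOINT) || !(dev_addr[0] & 0x02);
    }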
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 3eb0b167b5b..241756e0e86 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -102,7 +102,7 @@ static int always_connected (struct usbnet *dev)
102 102
103static const struct driver_info zaurus_sl5x00_info = { 103static const struct driver_info zaurus_sl5x00_info = {
104 .description = "Sharp Zaurus SL-5x00", 104 .description = "Sharp Zaurus SL-5x00",
105 .flags = FLAG_FRAMING_Z, 105 .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
106 .check_connect = always_connected, 106 .check_connect = always_connected,
107 .bind = zaurus_bind, 107 .bind = zaurus_bind,
108 .unbind = usbnet_cdc_unbind, 108 .unbind = usbnet_cdc_unbind,
@@ -112,7 +112,7 @@ static const struct driver_info zaurus_sl5x00_info = {
112 112
113static const struct driver_info zaurus_pxa_info = { 113static const struct driver_info zaurus_pxa_info = {
114 .description = "Sharp Zaurus, PXA-2xx based", 114 .description = "Sharp Zaurus, PXA-2xx based",
115 .flags = FLAG_FRAMING_Z, 115 .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
116 .check_connect = always_connected, 116 .check_connect = always_connected,
117 .bind = zaurus_bind, 117 .bind = zaurus_bind,
118 .unbind = usbnet_cdc_unbind, 118 .unbind = usbnet_cdc_unbind,
@@ -122,7 +122,7 @@ static const struct driver_info zaurus_pxa_info = {
122 122
123static const struct driver_info olympus_mxl_info = { 123static const struct driver_info olympus_mxl_info = {
124 .description = "Olympus R1000", 124 .description = "Olympus R1000",
125 .flags = FLAG_FRAMING_Z, 125 .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
126 .check_connect = always_connected, 126 .check_connect = always_connected,
127 .bind = zaurus_bind, 127 .bind = zaurus_bind,
128 .unbind = usbnet_cdc_unbind, 128 .unbind = usbnet_cdc_unbind,
@@ -258,7 +258,7 @@ bad_desc:
258 258
259static const struct driver_info bogus_mdlm_info = { 259static const struct driver_info bogus_mdlm_info = {
260 .description = "pseudo-MDLM (BLAN) device", 260 .description = "pseudo-MDLM (BLAN) device",
261 .flags = FLAG_FRAMING_Z, 261 .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
262 .check_connect = always_connected, 262 .check_connect = always_connected,
263 .tx_fixup = zaurus_tx_fixup, 263 .tx_fixup = zaurus_tx_fixup,
264 .bind = blan_mdlm_bind, 264 .bind = blan_mdlm_bind,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 105d7f0630c..2de9b90c5f8 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -171,7 +171,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
171 if (skb->ip_summed == CHECKSUM_NONE) 171 if (skb->ip_summed == CHECKSUM_NONE)
172 skb->ip_summed = rcv_priv->ip_summed; 172 skb->ip_summed = rcv_priv->ip_summed;
173 173
174 length = skb->len + ETH_HLEN; 174 length = skb->len;
175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) 175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
176 goto rx_drop; 176 goto rx_drop;
177 177
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 81254be85b9..51f2ef142a5 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -304,8 +304,8 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
304 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; 304 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
305 unsigned long flags; 305 unsigned long flags;
306 306
307 if (data & ~ETH_FLAG_LRO) 307 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
308 return -EOPNOTSUPP; 308 return -EINVAL;
309 309
310 if (lro_requested ^ lro_present) { 310 if (lro_requested ^ lro_present) {
311 /* toggle the LRO feature*/ 311 /* toggle the LRO feature*/
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 1dd3a21b3a4..c5eb034107f 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data)
1117 struct vxgedev *vdev = netdev_priv(dev); 1117 struct vxgedev *vdev = netdev_priv(dev);
1118 enum vxge_hw_status status; 1118 enum vxge_hw_status status;
1119 1119
1120 if (data & ~ETH_FLAG_RXHASH) 1120 if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
1121 return -EOPNOTSUPP; 1121 return -EINVAL;
1122 1122
1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) 1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1124 return 0; 1124 return 0;
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
index dabdcfed4ef..609710d64eb 100644
--- a/drivers/net/wan/lmc/Makefile
+++ b/drivers/net/wan/lmc/Makefile
@@ -14,4 +14,4 @@ lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
14# -DDEBUG \ 14# -DDEBUG \
15# -DLMC_PACKET_LOG 15# -DLMC_PACKET_LOG
16 16
17EXTRA_CFLAGS += -I. $(DBGDEF) 17ccflags-y := -I. $(DBGDEF)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115f162c617..dddb85de622 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1048,6 +1048,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1048 "Starting driver with initial channel: %d MHz\n", 1048 "Starting driver with initial channel: %d MHz\n",
1049 curchan->center_freq); 1049 curchan->center_freq);
1050 1050
1051 ath9k_ps_wakeup(sc);
1052
1051 mutex_lock(&sc->mutex); 1053 mutex_lock(&sc->mutex);
1052 1054
1053 /* setup initial channel */ 1055 /* setup initial channel */
@@ -1143,6 +1145,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1143mutex_unlock: 1145mutex_unlock:
1144 mutex_unlock(&sc->mutex); 1146 mutex_unlock(&sc->mutex);
1145 1147
1148 ath9k_ps_restore(sc);
1149
1146 return r; 1150 return r;
1147} 1151}
1148 1152
@@ -2160,6 +2164,8 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2160 if (!ath_drain_all_txq(sc, false)) 2164 if (!ath_drain_all_txq(sc, false))
2161 ath_reset(sc, false); 2165 ath_reset(sc, false);
2162 2166
2167 ieee80211_wake_queues(hw);
2168
2163out: 2169out:
2164 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); 2170 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
2165 mutex_unlock(&sc->mutex); 2171 mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 960d717ca7c..a3241cd089b 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1328 1328
1329 hdr = (struct ieee80211_hdr *)skb->data; 1329 hdr = (struct ieee80211_hdr *)skb->data;
1330 fc = hdr->frame_control; 1330 fc = hdr->frame_control;
1331 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 1331 for (i = 0; i < sc->hw->max_rates; i++) {
1332 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; 1332 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
1333 if (!rate->count) 1333 if (!rate->count)
1334 break; 1334 break;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index ef22096d40c..26734e53b37 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,8 +1725,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1725 u8 tidno; 1725 u8 tidno;
1726 1726
1727 spin_lock_bh(&txctl->txq->axq_lock); 1727 spin_lock_bh(&txctl->txq->axq_lock);
1728 1728 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1729 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) { 1729 ieee80211_is_data_qos(hdr->frame_control)) {
1730 tidno = ieee80211_get_qos_ctl(hdr)[0] & 1730 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1731 IEEE80211_QOS_CTL_TID_MASK; 1731 IEEE80211_QOS_CTL_TID_MASK;
1732 tid = ATH_AN_2_TID(txctl->an, tidno); 1732 tid = ATH_AN_2_TID(txctl->an, tidno);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index c6a5fae634a..9cad061cc1d 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -443,6 +443,7 @@ struct carl9170_ba_stats {
443 u8 ampdu_len; 443 u8 ampdu_len;
444 u8 ampdu_ack_len; 444 u8 ampdu_ack_len;
445 bool clear; 445 bool clear;
446 bool req;
446}; 447};
447 448
448struct carl9170_sta_info { 449struct carl9170_sta_info {
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index ede3d7e5a04..89fe60accf8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1355,6 +1355,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1355 tid_info = rcu_dereference(sta_info->agg[tid]); 1355 tid_info = rcu_dereference(sta_info->agg[tid]);
1356 1356
1357 sta_info->stats[tid].clear = true; 1357 sta_info->stats[tid].clear = true;
1358 sta_info->stats[tid].req = false;
1358 1359
1359 if (tid_info) { 1360 if (tid_info) {
1360 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE); 1361 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 0ef70b6fc51..cb70ed7ec5c 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -383,6 +383,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
383 383
384 if (sta_info->stats[tid].clear) { 384 if (sta_info->stats[tid].clear) {
385 sta_info->stats[tid].clear = false; 385 sta_info->stats[tid].clear = false;
386 sta_info->stats[tid].req = false;
386 sta_info->stats[tid].ampdu_len = 0; 387 sta_info->stats[tid].ampdu_len = 0;
387 sta_info->stats[tid].ampdu_ack_len = 0; 388 sta_info->stats[tid].ampdu_ack_len = 0;
388 } 389 }
@@ -391,10 +392,16 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
391 if (txinfo->status.rates[0].count == 1) 392 if (txinfo->status.rates[0].count == 1)
392 sta_info->stats[tid].ampdu_ack_len++; 393 sta_info->stats[tid].ampdu_ack_len++;
393 394
395 if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
396 sta_info->stats[tid].req = true;
397
394 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) { 398 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
395 super->s.rix = sta_info->stats[tid].ampdu_len; 399 super->s.rix = sta_info->stats[tid].ampdu_len;
396 super->s.cnt = sta_info->stats[tid].ampdu_ack_len; 400 super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
397 txinfo->flags |= IEEE80211_TX_STAT_AMPDU; 401 txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
402 if (sta_info->stats[tid].req)
403 txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
404
398 sta_info->stats[tid].clear = true; 405 sta_info->stats[tid].clear = true;
399 } 406 }
400 spin_unlock_bh(&tid_info->lock); 407 spin_unlock_bh(&tid_info->lock);
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 30acd39d76a..2c8f71f0ed4 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -30,9 +30,9 @@
30 30
31/* Following defines can be used to remove unneeded parts of the driver, e.g., 31/* Following defines can be used to remove unneeded parts of the driver, e.g.,
32 * to limit the size of the kernel module. Definitions can be added here in 32 * to limit the size of the kernel module. Definitions can be added here in
33 * hostap_config.h or they can be added to make command with EXTRA_CFLAGS, 33 * hostap_config.h or they can be added to make command with ccflags-y,
34 * e.g., 34 * e.g.,
35 * 'make pccard EXTRA_CFLAGS="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"' 35 * 'make pccard ccflags-y="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
36 */ 36 */
37 37
38/* Do not include debug messages into the driver */ 38/* Do not include debug messages into the driver */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index d418b647be8..a209a0e76bf 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -1805,6 +1805,15 @@ iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
1805 1805
1806 mutex_lock(&priv->mutex); 1806 mutex_lock(&priv->mutex);
1807 1807
1808 if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
1809 /*
1810 * Huh? But wait ... this can maybe happen when
1811 * we're in the middle of a firmware restart!
1812 */
1813 err = -EBUSY;
1814 goto out;
1815 }
1816
1808 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; 1817 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1809 1818
1810 if (!(interface_modes & BIT(newtype))) { 1819 if (!(interface_modes & BIT(newtype))) {
@@ -1832,6 +1841,7 @@ iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
1832 /* success */ 1841 /* success */
1833 iwl_legacy_teardown_interface(priv, vif, true); 1842 iwl_legacy_teardown_interface(priv, vif, true);
1834 vif->type = newtype; 1843 vif->type = newtype;
1844 vif->p2p = newp2p;
1835 err = iwl_legacy_setup_interface(priv, ctx); 1845 err = iwl_legacy_setup_interface(priv, ctx);
1836 WARN_ON(err); 1846 WARN_ON(err);
1837 /* 1847 /*
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index ab87e1b7352..28eb3d885ba 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -93,6 +93,7 @@ MODULE_LICENSE("GPL");
93struct iwl_mod_params iwl3945_mod_params = { 93struct iwl_mod_params iwl3945_mod_params = {
94 .sw_crypto = 1, 94 .sw_crypto = 1,
95 .restart_fw = 1, 95 .restart_fw = 1,
96 .disable_hw_scan = 1,
96 /* the rest are 0 by default */ 97 /* the rest are 0 by default */
97}; 98};
98 99
@@ -3960,8 +3961,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3960 * "the hard way", rather than using device's scan. 3961 * "the hard way", rather than using device's scan.
3961 */ 3962 */
3962 if (iwl3945_mod_params.disable_hw_scan) { 3963 if (iwl3945_mod_params.disable_hw_scan) {
3963 dev_printk(KERN_DEBUG, &(pdev->dev), 3964 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
3964 "sw scan support is deprecated\n");
3965 iwl3945_hw_ops.hw_scan = NULL; 3965 iwl3945_hw_ops.hw_scan = NULL;
3966 } 3966 }
3967 3967
@@ -4280,8 +4280,7 @@ MODULE_PARM_DESC(swcrypto,
4280 "using software crypto (default 1 [software])"); 4280 "using software crypto (default 1 [software])");
4281module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, 4281module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4282 int, S_IRUGO); 4282 int, S_IRUGO);
4283MODULE_PARM_DESC(disable_hw_scan, 4283MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
4284 "disable hardware scanning (default 0) (deprecated)");
4285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG 4284#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
4286module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); 4285module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
4287MODULE_PARM_DESC(debug, "debug output mask"); 4286MODULE_PARM_DESC(debug, "debug output mask");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 2003c1d4295..08ccb9496f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -2265,7 +2265,7 @@ signed long iwlagn_wait_notification(struct iwl_priv *priv,
2265 int ret; 2265 int ret;
2266 2266
2267 ret = wait_event_timeout(priv->_agn.notif_waitq, 2267 ret = wait_event_timeout(priv->_agn.notif_waitq,
2268 &wait_entry->triggered, 2268 wait_entry->triggered,
2269 timeout); 2269 timeout);
2270 2270
2271 spin_lock_bh(&priv->_agn.notif_wait_lock); 2271 spin_lock_bh(&priv->_agn.notif_wait_lock);
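The iwl-agn-lib.c hunk above fixes a wait_event_timeout() condition that passed &wait_entry->triggered: the address of a struct member is always non-zero, so the condition was immediately true and the call never actually waited for the notification. Below is a minimal, self-contained C sketch of the same mistake, using an ordinary flag instead of the kernel wait-queue machinery; all names are illustrative, not from the driver.

#include <stdbool.h>
#include <stdio.h>

struct notif_wait_entry {
	bool triggered;		/* set by the notification handler */
};

int main(void)
{
	struct notif_wait_entry wait_entry = { .triggered = false };

	/* Buggy condition: &wait_entry.triggered is a valid, non-NULL
	 * address, so this test is always true even though the flag is
	 * still false -- the "wait" would return immediately. */
	if (&wait_entry.triggered)
		printf("buggy condition is true despite triggered == false\n");

	/* Fixed condition: evaluate the flag itself. */
	if (wait_entry.triggered)
		printf("fixed condition only fires once the flag is set\n");
	else
		printf("fixed condition correctly reports: not triggered yet\n");

	return 0;
}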
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 581dc9f1027..321b18b5913 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3009,14 +3009,17 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
3009 3009
3010 mutex_lock(&priv->mutex); 3010 mutex_lock(&priv->mutex);
3011 3011
3012 if (!priv->_agn.offchan_tx_skb) 3012 if (!priv->_agn.offchan_tx_skb) {
3013 return -EINVAL; 3013 ret = -EINVAL;
3014 goto unlock;
3015 }
3014 3016
3015 priv->_agn.offchan_tx_skb = NULL; 3017 priv->_agn.offchan_tx_skb = NULL;
3016 3018
3017 ret = iwl_scan_cancel_timeout(priv, 200); 3019 ret = iwl_scan_cancel_timeout(priv, 200);
3018 if (ret) 3020 if (ret)
3019 ret = -EIO; 3021 ret = -EIO;
3022unlock:
3020 mutex_unlock(&priv->mutex); 3023 mutex_unlock(&priv->mutex);
3021 3024
3022 return ret; 3025 return ret;
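The iwl-agn.c hunk above replaces an early return that left priv->mutex locked with the usual goto-unlock error path. Here is a small userspace sketch of the same pattern, using a pthread mutex in place of the kernel mutex; the function and variable names are illustrative stand-ins.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *pending_skb;	/* stands in for priv->_agn.offchan_tx_skb */

/* Cancel a pending off-channel frame; every exit path drops the lock. */
static int cancel_pending(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);

	if (!pending_skb) {
		ret = -EINVAL;
		goto unlock;	/* early exit, but the lock is still released */
	}

	pending_skb = NULL;
	/* ... further cleanup that may also set ret ... */

unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("cancel_pending() = %d\n", cancel_pending());
	return 0;
}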
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 09fae2f0ea0..736bbb9bd1d 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -153,6 +153,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
153 priv->scan_request = request; 153 priv->scan_request = request;
154 154
155 err = orinoco_hw_trigger_scan(priv, request->ssids); 155 err = orinoco_hw_trigger_scan(priv, request->ssids);
 156 /* On error we aren't processing the request */
157 if (err)
158 priv->scan_request = NULL;
156 159
157 return err; 160 return err;
158} 161}
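The orinoco cfg.c hunk above clears priv->scan_request when orinoco_hw_trigger_scan() fails, so later completion paths do not act on a request that was never started. The following is a hedged sketch of that "undo the cached state on failure" pattern; hw_trigger_scan() and the struct layouts are hypothetical stand-ins, not the driver's types.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct scan_request {
	int n_ssids;			/* minimal, illustrative */
};

struct priv {
	struct scan_request *scan_request;	/* request currently being serviced */
};

/* Hypothetical helper standing in for orinoco_hw_trigger_scan(). */
static int hw_trigger_scan(struct scan_request *req)
{
	(void)req;
	return -EIO;			/* pretend the hardware rejected the scan */
}

static int start_scan(struct priv *priv, struct scan_request *req)
{
	int err;

	priv->scan_request = req;

	err = hw_trigger_scan(req);
	if (err)
		priv->scan_request = NULL;	/* nothing will ever complete it */

	return err;
}

int main(void)
{
	struct scan_request req = { .n_ssids = 1 };
	struct priv priv = { .scan_request = NULL };

	printf("start_scan() = %d, scan_request = %p\n",
	       start_scan(&priv, &req), (void *)priv.scan_request);
	return 0;
}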
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index f3d396e7544..62c6b2b37db 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1376,13 +1376,13 @@ static void orinoco_process_scan_results(struct work_struct *work)
1376 1376
1377 spin_lock_irqsave(&priv->scan_lock, flags); 1377 spin_lock_irqsave(&priv->scan_lock, flags);
1378 list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { 1378 list_for_each_entry_safe(sd, temp, &priv->scan_list, list) {
1379 spin_unlock_irqrestore(&priv->scan_lock, flags);
1380 1379
1381 buf = sd->buf; 1380 buf = sd->buf;
1382 len = sd->len; 1381 len = sd->len;
1383 type = sd->type; 1382 type = sd->type;
1384 1383
1385 list_del(&sd->list); 1384 list_del(&sd->list);
1385 spin_unlock_irqrestore(&priv->scan_lock, flags);
1386 kfree(sd); 1386 kfree(sd);
1387 1387
1388 if (len > 0) { 1388 if (len > 0) {
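The orinoco main.c hunk above moves the spin_unlock_irqrestore() from the top of the loop body to after the entry's fields are read and it is unlinked with list_del(): dropping the lock first allowed a concurrent writer to unlink or free the same entry. Below is a userspace sketch of the "unlink under the lock, process outside it" pattern, using a pthread mutex and a plain singly linked list; the types and names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct scan_item {
	struct scan_item *next;
	void *buf;
	size_t len;
};

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static struct scan_item *scan_list;

static void process(void *buf, size_t len)
{
	printf("processing %zu bytes at %p\n", len, buf);
}

/* Drain scan_list: copy the fields and unlink each entry while the lock is
 * held, then drop the lock for the potentially slow processing and free(). */
static void process_scan_results(void)
{
	pthread_mutex_lock(&scan_lock);
	while (scan_list) {
		struct scan_item *sd = scan_list;
		void *buf = sd->buf;
		size_t len = sd->len;

		scan_list = sd->next;		/* the list_del() step */
		pthread_mutex_unlock(&scan_lock);

		free(sd);
		process(buf, len);

		pthread_mutex_lock(&scan_lock);
	}
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	struct scan_item *sd = calloc(1, sizeof(*sd));

	if (!sd)
		return 1;
	sd->len = 16;
	scan_list = sd;
	process_scan_results();
	return 0;
}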
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 18d24b7b1e3..7ecc0bda57b 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -649,8 +649,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
649 goto err_free_common; 649 goto err_free_common;
650 } 650 }
651 651
652 set_irq_type(gpio_to_irq(p54spi_gpio_irq), 652 irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
653 IRQ_TYPE_EDGE_RISING);
654 653
655 disable_irq(gpio_to_irq(p54spi_gpio_irq)); 654 disable_irq(gpio_to_irq(p54spi_gpio_irq));
656 655
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1a92144996..37509d01991 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -719,6 +719,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
719 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 719 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
720 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 720 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
721 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, 721 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
722 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
722 /* AzureWave */ 723 /* AzureWave */
723 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 724 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
724 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, 725 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -729,8 +730,12 @@ static struct usb_device_id rt2800usb_device_table[] = {
729 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, 730 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
730 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, 731 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
731 { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) }, 732 { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) },
733 { USB_DEVICE(0x050d, 0x825b), USB_DEVICE_DATA(&rt2800usb_ops) },
734 { USB_DEVICE(0x050d, 0x935a), USB_DEVICE_DATA(&rt2800usb_ops) },
735 { USB_DEVICE(0x050d, 0x935b), USB_DEVICE_DATA(&rt2800usb_ops) },
732 /* Buffalo */ 736 /* Buffalo */
733 { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) }, 737 { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
738 { USB_DEVICE(0x0411, 0x016f), USB_DEVICE_DATA(&rt2800usb_ops) },
734 /* Conceptronic */ 739 /* Conceptronic */
735 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) }, 740 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
736 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) }, 741 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -817,6 +822,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
817 /* Pegatron */ 822 /* Pegatron */
818 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, 823 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
819 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) }, 824 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
825 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
820 /* Philips */ 826 /* Philips */
821 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, 827 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
822 /* Planex */ 828 /* Planex */
@@ -898,6 +904,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
898 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, 904 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
899 /* Sitecom */ 905 /* Sitecom */
900 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, 906 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
907 /* Toshiba */
908 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
901 /* Zinwell */ 909 /* Zinwell */
902 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, 910 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
903#endif 911#endif
@@ -913,7 +921,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
913 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 921 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
914 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 922 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
915 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, 923 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
916 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
917 /* AzureWave */ 924 /* AzureWave */
918 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 925 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
919 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, 926 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -937,6 +944,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
937 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 944 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
938 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 945 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
939 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, 946 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
947 /* Edimax */
948 { USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) },
940 /* Encore */ 949 /* Encore */
941 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 950 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
942 /* Gemtek */ 951 /* Gemtek */
@@ -959,8 +968,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
959 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) }, 968 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
960 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 969 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
961 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 970 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
962 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
963 /* Planex */ 971 /* Planex */
972 { USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) },
964 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 973 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
965 /* Qcom */ 974 /* Qcom */
966 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, 975 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 4f92cba6810..f74a8701c67 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -410,8 +410,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
410 410
411 if (!efuse_shadow_update_chk(hw)) { 411 if (!efuse_shadow_update_chk(hw)) {
412 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 412 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
413 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 413 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
414 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 414 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
415 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 415 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
416 416
417 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 417 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
@@ -446,9 +446,9 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
446 446
447 if (word_en != 0x0F) { 447 if (word_en != 0x0F) {
448 u8 tmpdata[8]; 448 u8 tmpdata[8];
449 memcpy((void *)tmpdata, 449 memcpy(tmpdata,
450 (void *)(&rtlefuse-> 450 &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
451 efuse_map[EFUSE_MODIFY_MAP][base]), 8); 451 8);
452 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, 452 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
453 ("U-efuse\n"), tmpdata, 8); 453 ("U-efuse\n"), tmpdata, 8);
454 454
@@ -465,8 +465,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
465 efuse_power_switch(hw, true, false); 465 efuse_power_switch(hw, true, false);
466 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 466 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
467 467
468 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 468 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
469 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 469 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
470 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 470 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
471 471
472 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); 472 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n"));
@@ -479,13 +479,12 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
480 480
481 if (rtlefuse->autoload_failflag == true) { 481 if (rtlefuse->autoload_failflag == true) {
482 memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 128, 482 memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128);
483 0xFF);
484 } else 483 } else
485 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 484 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
486 485
487 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 486 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
488 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 487 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
489 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 488 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
490 489
491} 490}
@@ -694,8 +693,8 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
694 if (offset > 15) 693 if (offset > 15)
695 return false; 694 return false;
696 695
697 memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff); 696 memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
698 memset((void *)tmpdata, PGPKT_DATA_SIZE * sizeof(u8), 0xff); 697 memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
699 698
700 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { 699 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
701 if (readstate & PG_STATE_HEADER) { 700 if (readstate & PG_STATE_HEADER) {
@@ -862,7 +861,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
862 861
863 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); 862 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
864 863
865 memset((void *)originaldata, 8 * sizeof(u8), 0xff); 864 memset(originaldata, 0xff, 8 * sizeof(u8));
866 865
867 if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { 866 if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
868 badworden = efuse_word_enable_data_write(hw, 867 badworden = efuse_word_enable_data_write(hw,
@@ -917,7 +916,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
917 target_pkt.offset = offset; 916 target_pkt.offset = offset;
918 target_pkt.word_en = word_en; 917 target_pkt.word_en = word_en;
919 918
920 memset((void *)target_pkt.data, 8 * sizeof(u8), 0xFF); 919 memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
921 920
922 efuse_word_enable_data_read(word_en, data, target_pkt.data); 921 efuse_word_enable_data_read(word_en, data, target_pkt.data);
923 target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); 922 target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
@@ -1022,7 +1021,7 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
1022 u8 badworden = 0x0F; 1021 u8 badworden = 0x0F;
1023 u8 tmpdata[8]; 1022 u8 tmpdata[8];
1024 1023
1025 memset((void *)tmpdata, PGPKT_DATA_SIZE, 0xff); 1024 memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
1026 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 1025 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
1027 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); 1026 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
1028 1027
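Several of the efuse.c hunks above fix swapped memset() arguments: memset() takes (destination, fill byte, length), so the old calls such as memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff) wrote 0xff (255) bytes of the size value instead of filling the buffer with 0xff, overrunning the 8-byte arrays. A small standalone sketch of the correct call order; PGPKT_DATA_SIZE is reproduced here only for illustration.

#include <stdio.h>
#include <string.h>

#define PGPKT_DATA_SIZE 8	/* matches the 8-byte packet buffers above */

int main(void)
{
	unsigned char data[PGPKT_DATA_SIZE];

	/* Correct argument order: memset(dest, fill_byte, length). */
	memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(unsigned char));

	/* The buggy form, memset(data, PGPKT_DATA_SIZE, 0xff), would instead
	 * write 255 bytes of the value 0x08 -- the wrong fill byte and a
	 * buffer overrun. */

	for (size_t i = 0; i < sizeof(data); i++)
		printf("%02x ", data[i]);
	printf("\n");
	return 0;
}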
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index d550b5e68d3..f51a0241a44 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -265,7 +265,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
265 goto disable; 265 goto disable;
266 } 266 }
267 267
268 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 268 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
269 disable_irq(wl->irq); 269 disable_irq(wl->irq);
270 270
271 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; 271 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index ac872b38960..af6448c4d3e 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -286,7 +286,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
286 goto out_free; 286 goto out_free;
287 } 287 }
288 288
289 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 289 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
290 290
291 disable_irq(wl->irq); 291 disable_irq(wl->irq);
292 292
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index 1907eafb9b1..5728a918e50 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -5,7 +5,5 @@ zd1211rw-objs := zd_chip.o zd_mac.o \
5 zd_rf_al7230b.o zd_rf_uw2453.o \ 5 zd_rf_al7230b.o zd_rf_uw2453.o \
6 zd_rf.o zd_usb.o 6 zd_rf.o zd_usb.o
7 7
8ifeq ($(CONFIG_ZD1211RW_DEBUG),y) 8ccflags-$(CONFIG_ZD1211RW_DEBUG) := -DDEBUG
9EXTRA_CFLAGS += -DDEBUG
10endif
11 9
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 81e80489a05..58236e6d092 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -60,6 +60,7 @@ static struct usb_device_id usb_ids[] = {
60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
62 { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, 62 { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 },
63 { USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 },
63 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, 64 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
64 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 65 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
65 /* ZD1211B */ 66 /* ZD1211B */