Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c | 6
-rw-r--r--  drivers/net/8139cp.c | 10
-rw-r--r--  drivers/net/Kconfig | 18
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 2
-rw-r--r--  drivers/net/au1000_eth.c | 10
-rw-r--r--  drivers/net/b44.c | 11
-rw-r--r--  drivers/net/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/benet/be_main.c | 6
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 42
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 21
-rw-r--r--  drivers/net/bonding/bonding.h | 12
-rw-r--r--  drivers/net/caif/caif_shm_u5500.c | 2
-rw-r--r--  drivers/net/caif/caif_shmcore.c | 2
-rw-r--r--  drivers/net/caif/caif_spi.c | 4
-rw-r--r--  drivers/net/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 88
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 94
-rw-r--r--  drivers/net/e1000/e1000_main.c | 12
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c | 9
-rw-r--r--  drivers/net/ehea/ehea_main.c | 25
-rw-r--r--  drivers/net/enic/enic_main.c | 3
-rw-r--r--  drivers/net/gianfar.c | 7
-rw-r--r--  drivers/net/ifb.c | 2
-rw-r--r--  drivers/net/ipg.c | 6
-rw-r--r--  drivers/net/irda/sh_sir.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/mlx4/fw.c | 4
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 6
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_param.c | 8
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/phy/icplus.c | 59
-rw-r--r--  drivers/net/phy/marvell.c | 164
-rw-r--r--  drivers/net/ppp_generic.c | 43
-rw-r--r--  drivers/net/pppoe.c | 2
-rw-r--r--  drivers/net/qlge/qlge.h | 1
-rw-r--r--  drivers/net/qlge/qlge_main.c | 7
-rw-r--r--  drivers/net/qlge/qlge_mpi.c | 12
-rw-r--r--  drivers/net/r8169.c | 29
-rw-r--r--  drivers/net/sfc/efx.c | 43
-rw-r--r--  drivers/net/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/sfc/nic.c | 6
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/tile/Makefile | 10
-rw-r--r--  drivers/net/tile/tilepro.c | 2406
-rw-r--r--  drivers/net/tulip/dmfe.c | 6
-rw-r--r--  drivers/net/ucc_geth.h | 3
-rw-r--r--  drivers/net/usb/hso.c | 14
-rw-r--r--  drivers/net/wan/hd64572.c | 5
-rw-r--r--  drivers/net/wan/x25_asy.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 73
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h | 27
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_9287.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 28
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 22
-rw-r--r--  drivers/net/wireless/ath/carl9170/fw.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 4
-rw-r--r--  drivers/net/wireless/b43/sdio.c | 1
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 1
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 1
-rw-r--r--  drivers/net/wireless/libertas/main.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/main.c | 18
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_cs.c | 14
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_usb.c | 1
-rw-r--r--  drivers/net/wireless/orinoco/scan.c | 8
-rw-r--r--  drivers/net/wireless/orinoco/scan.h | 1
-rw-r--r--  drivers/net/wireless/orinoco/spectrum_cs.c | 14
-rw-r--r--  drivers/net/wireless/orinoco/wext.c | 4
-rw-r--r--  drivers/net/xen-netfront.c | 4
89 files changed, 3206 insertions(+), 421 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258bbfb7..0a92436f0538 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
699#define DEVICE_PCI(dev) NULL 699#define DEVICE_PCI(dev) NULL
700#endif 700#endif
701 701
702#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL) 702#define VORTEX_PCI(vp) \
703 ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
703 704
704#ifdef CONFIG_EISA 705#ifdef CONFIG_EISA
705#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) 706#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
707#define DEVICE_EISA(dev) NULL 708#define DEVICE_EISA(dev) NULL
708#endif 709#endif
709 710
710#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL) 711#define VORTEX_EISA(vp) \
712 ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
711 713
712/* The action to take with a media selection timer tick. 714/* The action to take with a media selection timer tick.
713 Note that we deviate from the 3Com order by checking 10base2 before AUI. 715 Note that we deviate from the 3Com order by checking 10base2 before AUI.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd332ea..dd16e83933a2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
490{ 490{
491 unsigned int protocol = (status >> 16) & 0x3; 491 unsigned int protocol = (status >> 16) & 0x3;
492 492
493 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail)))) 493 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
494 ((protocol == RxProtoUDP) && !(status & UDPFail)))
494 return 1; 495 return 1;
495 else if ((protocol == RxProtoUDP) && (!(status & UDPFail))) 496 else
496 return 1; 497 return 0;
497 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
498 return 1;
499 return 0;
500} 498}
501 499
502static int cp_rx_poll(struct napi_struct *napi, int budget) 500static int cp_rx_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cdaac85..4f1755bddf6b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2543,10 +2543,10 @@ config PCH_GBE
2543 depends on PCI 2543 depends on PCI
2544 select MII 2544 select MII
2545 ---help--- 2545 ---help---
2546 This is a gigabit ethernet driver for Topcliff PCH. 2546 This is a gigabit ethernet driver for EG20T PCH.
2547 Topcliff PCH is the platform controller hub that is used in Intel's 2547 EG20T PCH is the platform controller hub that is used in Intel's
2548 general embedded platform. 2548 general embedded platform.
2549 Topcliff PCH has Gigabit Ethernet interface. 2549 EG20T PCH has Gigabit Ethernet interface.
2550 Using this interface, it is able to access system devices connected 2550 Using this interface, it is able to access system devices connected
2551 to Gigabit Ethernet. 2551 to Gigabit Ethernet.
2552 This driver enables Gigabit Ethernet function. 2552 This driver enables Gigabit Ethernet function.
@@ -2945,6 +2945,18 @@ source "drivers/s390/net/Kconfig"
2945 2945
2946source "drivers/net/caif/Kconfig" 2946source "drivers/net/caif/Kconfig"
2947 2947
2948config TILE_NET
2949 tristate "Tilera GBE/XGBE network driver support"
2950 depends on TILE
2951 default y
2952 select CRC32
2953 help
2954 This is a standard Linux network device driver for the
2955 on-chip Tilera Gigabit Ethernet and XAUI interfaces.
2956
2957 To compile this driver as a module, choose M here: the module
2958 will be called tile_net.
2959
2948config XEN_NETDEV_FRONTEND 2960config XEN_NETDEV_FRONTEND
2949 tristate "Xen network device frontend driver" 2961 tristate "Xen network device frontend driver"
2950 depends on XEN 2962 depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 652fc6b98039..b90738d13994 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/
301 301
302obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ 302obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
303obj-$(CONFIG_PCH_GBE) += pch_gbe/ 303obj-$(CONFIG_PCH_GBE) += pch_gbe/
304obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b2c3a5..1bf672009948 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
82 addr[0] = addr[1] = 0; 82 addr[0] = addr[1] = 0;
83 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); 83 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
84 if (atl1c_check_eeprom_exist(hw)) { 84 if (atl1c_check_eeprom_exist(hw)) {
85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) { 85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
86 /* Enable OTP CLK */ 86 /* Enable OTP CLK */
87 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { 87 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
88 otp_ctrl_data |= OTP_CTRL_CLK_EN; 88 otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f89c142..53eff9ba6e95 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
155 spin_lock_irqsave(&aup->lock, flags); 155 spin_lock_irqsave(&aup->lock, flags);
156 156
157 if (force_reset || (!aup->mac_enabled)) { 157 if (force_reset || (!aup->mac_enabled)) {
158 writel(MAC_EN_CLOCK_ENABLE, &aup->enable); 158 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
159 au_sync_delay(2); 159 au_sync_delay(2);
160 writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 160 writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
161 | MAC_EN_CLOCK_ENABLE), &aup->enable); 161 | MAC_EN_CLOCK_ENABLE), aup->enable);
162 au_sync_delay(2); 162 au_sync_delay(2);
163 163
164 aup->mac_enabled = 1; 164 aup->mac_enabled = 1;
@@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
503 503
504 au1000_hard_stop(dev); 504 au1000_hard_stop(dev);
505 505
506 writel(MAC_EN_CLOCK_ENABLE, &aup->enable); 506 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
507 au_sync_delay(2); 507 au_sync_delay(2);
508 writel(0, &aup->enable); 508 writel(0, aup->enable);
509 au_sync_delay(2); 509 au_sync_delay(2);
510 510
511 aup->tx_full = 0; 511 aup->tx_full = 0;
@@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1119 /* set a random MAC now in case platform_data doesn't provide one */ 1119 /* set a random MAC now in case platform_data doesn't provide one */
1120 random_ether_addr(dev->dev_addr); 1120 random_ether_addr(dev->dev_addr);
1121 1121
1122 writel(0, &aup->enable); 1122 writel(0, aup->enable);
1123 aup->mac_enabled = 0; 1123 aup->mac_enabled = 0;
1124 1124
1125 pd = pdev->dev.platform_data; 1125 pd = pdev->dev.platform_data;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c6e86315b3f8..2e2b76258ab4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
381 __b44_set_flow_ctrl(bp, pause_enab); 381 __b44_set_flow_ctrl(bp, pause_enab);
382} 382}
383 383
384#ifdef SSB_DRIVER_MIPS 384#ifdef CONFIG_BCM47XX
385extern char *nvram_get(char *name); 385#include <asm/mach-bcm47xx/nvram.h>
386static void b44_wap54g10_workaround(struct b44 *bp) 386static void b44_wap54g10_workaround(struct b44 *bp)
387{ 387{
388 const char *str; 388 char buf[20];
389 u32 val; 389 u32 val;
390 int err; 390 int err;
391 391
@@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp)
394 * see https://dev.openwrt.org/ticket/146 394 * see https://dev.openwrt.org/ticket/146
395 * check and reset bit "isolate" 395 * check and reset bit "isolate"
396 */ 396 */
397 str = nvram_get("boardnum"); 397 if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
398 if (!str)
399 return; 398 return;
400 if (simple_strtoul(str, NULL, 0) == 2) { 399 if (simple_strtoul(buf, NULL, 0) == 2) {
401 err = __b44_readphy(bp, 0, MII_BMCR, &val); 400 err = __b44_readphy(bp, 0, MII_BMCR, &val);
402 if (err) 401 if (err)
403 goto error; 402 goto error;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1ce75d4..e4465d222a7d 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1235,7 +1235,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1235 1235
1236 i = 0; 1236 i = 0;
1237 netdev_for_each_mc_addr(ha, netdev) 1237 netdev_for_each_mc_addr(ha, netdev)
1238 memcpy(req->mac[i].byte, ha->addr, ETH_ALEN); 1238 memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
1239 } else { 1239 } else {
1240 req->promiscuous = 1; 1240 req->promiscuous = 1;
1241 } 1241 }
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index c36cd2ffbadc..93354eee2cfd 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -2458,6 +2458,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2458 int status, i = 0, num_imgs = 0; 2458 int status, i = 0, num_imgs = 0;
2459 const u8 *p; 2459 const u8 *p;
2460 2460
2461 if (!netif_running(adapter->netdev)) {
2462 dev_err(&adapter->pdev->dev,
2463 "Firmware load not allowed (interface is down)\n");
2464 return -EPERM;
2465 }
2466
2461 strcpy(fw_file, func); 2467 strcpy(fw_file, func);
2462 2468
2463 status = request_firmware(&fw, fw_file, &adapter->pdev->dev); 2469 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 863e73a85fbe..d255428122fc 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.60.00-4" 23#define DRV_MODULE_VERSION "1.60.01-0"
24#define DRV_MODULE_RELDATE "2010/11/01" 24#define DRV_MODULE_RELDATE "2010/11/12"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#define BNX2X_MULTI_QUEUE 27#define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 94d5f59d5a6f..0af361e4e3d1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1782,15 +1782,15 @@ exit_lbl:
1782} 1782}
1783#endif 1783#endif
1784 1784
1785static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, 1785static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1786 struct eth_tx_parse_bd_e2 *pbd, 1786 u32 xmit_type)
1787 u32 xmit_type)
1788{ 1787{
1789 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) << 1788 *parsing_data |= (skb_shinfo(skb)->gso_size <<
1790 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT; 1789 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1790 ETH_TX_PARSE_BD_E2_LSO_MSS;
1791 if ((xmit_type & XMIT_GSO_V6) && 1791 if ((xmit_type & XMIT_GSO_V6) &&
1792 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) 1792 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1793 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 1793 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1794} 1794}
1795 1795
1796/** 1796/**
@@ -1835,15 +1835,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1835 * @return header len 1835 * @return header len
1836 */ 1836 */
1837static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 1837static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1838 struct eth_tx_parse_bd_e2 *pbd, 1838 u32 *parsing_data, u32 xmit_type)
1839 u32 xmit_type)
1840{ 1839{
1841 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) << 1840 *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1842 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT; 1841 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1842 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
1843 1843
1844 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) - 1844 *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1845 skb->data) / 2) << 1845 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1846 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT; 1846 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
1847 1847
1848 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 1848 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1849} 1849}
@@ -1912,6 +1912,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1912 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 1912 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1913 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 1913 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1914 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 1914 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1915 u32 pbd_e2_parsing_data = 0;
1915 u16 pkt_prod, bd_prod; 1916 u16 pkt_prod, bd_prod;
1916 int nbd, fp_index; 1917 int nbd, fp_index;
1917 dma_addr_t mapping; 1918 dma_addr_t mapping;
@@ -2033,8 +2034,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2033 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 2034 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2034 /* Set PBD in checksum offload case */ 2035 /* Set PBD in checksum offload case */
2035 if (xmit_type & XMIT_CSUM) 2036 if (xmit_type & XMIT_CSUM)
2036 hlen = bnx2x_set_pbd_csum_e2(bp, 2037 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2037 skb, pbd_e2, xmit_type); 2038 &pbd_e2_parsing_data,
2039 xmit_type);
2038 } else { 2040 } else {
2039 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; 2041 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2040 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 2042 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2076,10 +2078,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2076 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2078 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2077 hlen, bd_prod, ++nbd); 2079 hlen, bd_prod, ++nbd);
2078 if (CHIP_IS_E2(bp)) 2080 if (CHIP_IS_E2(bp))
2079 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type); 2081 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2082 xmit_type);
2080 else 2083 else
2081 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); 2084 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2082 } 2085 }
2086
2087 /* Set the PBD's parsing_data field if not zero
2088 * (for the chips newer than 57711).
2089 */
2090 if (pbd_e2_parsing_data)
2091 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2092
2083 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2093 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2084 2094
2085 /* Handle fragmented skb */ 2095 /* Handle fragmented skb */
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index a306b0e46b61..66df29fcf751 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -838,7 +838,7 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
838/**************************************************************************** 838/****************************************************************************
839* SRC initializations 839* SRC initializations
840****************************************************************************/ 840****************************************************************************/
841 841#ifdef BCM_CNIC
842/* called during init func stage */ 842/* called during init func stage */
843static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, 843static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
844 dma_addr_t t2_mapping, int src_cid_count) 844 dma_addr_t t2_mapping, int src_cid_count)
@@ -862,5 +862,5 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
862 U64_HI((u64)t2_mapping + 862 U64_HI((u64)t2_mapping +
863 (src_cid_count-1) * sizeof(struct src_ent))); 863 (src_cid_count-1) * sizeof(struct src_ent)));
864} 864}
865 865#endif
866#endif /* BNX2X_INIT_OPS_H */ 866#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index e9ad16f00b56..9709b8569666 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -9064,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9064 default: 9064 default:
9065 pr_err("Unknown board_type (%ld), aborting\n", 9065 pr_err("Unknown board_type (%ld), aborting\n",
9066 ent->driver_data); 9066 ent->driver_data);
9067 return ENODEV; 9067 return -ENODEV;
9068 } 9068 }
9069 9069
9070 cid_count += CNIC_CONTEXT_USE; 9070 cid_count += CNIC_CONTEXT_USE;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdb68a600382..d0ea760ce419 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -171,7 +171,7 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
171/*----------------------------- Global variables ----------------------------*/ 171/*----------------------------- Global variables ----------------------------*/
172 172
173#ifdef CONFIG_NET_POLL_CONTROLLER 173#ifdef CONFIG_NET_POLL_CONTROLLER
174cpumask_var_t netpoll_block_tx; 174atomic_t netpoll_block_tx = ATOMIC_INIT(0);
175#endif 175#endif
176 176
177static const char * const version = 177static const char * const version =
@@ -878,8 +878,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
878 rcu_read_lock(); 878 rcu_read_lock();
879 in_dev = __in_dev_get_rcu(dev); 879 in_dev = __in_dev_get_rcu(dev);
880 if (in_dev) { 880 if (in_dev) {
881 read_lock(&in_dev->mc_list_lock);
881 for (im = in_dev->mc_list; im; im = im->next) 882 for (im = in_dev->mc_list; im; im = im->next)
882 ip_mc_rejoin_group(im); 883 ip_mc_rejoin_group(im);
884 read_unlock(&in_dev->mc_list_lock);
883 } 885 }
884 886
885 rcu_read_unlock(); 887 rcu_read_unlock();
@@ -1574,7 +1576,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1574 1576
1575 /* If this is the first slave, then we need to set the master's hardware 1577 /* If this is the first slave, then we need to set the master's hardware
1576 * address to be the same as the slave's. */ 1578 * address to be the same as the slave's. */
1577 if (bond->slave_cnt == 0) 1579 if (is_zero_ether_addr(bond->dev->dev_addr))
1578 memcpy(bond->dev->dev_addr, slave_dev->dev_addr, 1580 memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
1579 slave_dev->addr_len); 1581 slave_dev->addr_len);
1580 1582
@@ -5297,13 +5299,6 @@ static int __init bonding_init(void)
5297 if (res) 5299 if (res)
5298 goto out; 5300 goto out;
5299 5301
5300#ifdef CONFIG_NET_POLL_CONTROLLER
5301 if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) {
5302 res = -ENOMEM;
5303 goto out;
5304 }
5305#endif
5306
5307 res = register_pernet_subsys(&bond_net_ops); 5302 res = register_pernet_subsys(&bond_net_ops);
5308 if (res) 5303 if (res)
5309 goto out; 5304 goto out;
@@ -5332,9 +5327,6 @@ err:
5332 rtnl_link_unregister(&bond_link_ops); 5327 rtnl_link_unregister(&bond_link_ops);
5333err_link: 5328err_link:
5334 unregister_pernet_subsys(&bond_net_ops); 5329 unregister_pernet_subsys(&bond_net_ops);
5335#ifdef CONFIG_NET_POLL_CONTROLLER
5336 free_cpumask_var(netpoll_block_tx);
5337#endif
5338 goto out; 5330 goto out;
5339 5331
5340} 5332}
@@ -5351,7 +5343,10 @@ static void __exit bonding_exit(void)
5351 unregister_pernet_subsys(&bond_net_ops); 5343 unregister_pernet_subsys(&bond_net_ops);
5352 5344
5353#ifdef CONFIG_NET_POLL_CONTROLLER 5345#ifdef CONFIG_NET_POLL_CONTROLLER
5354 free_cpumask_var(netpoll_block_tx); 5346 /*
5347 * Make sure we don't have an imbalance on our netpoll blocking
5348 */
5349 WARN_ON(atomic_read(&netpoll_block_tx));
5355#endif 5350#endif
5356} 5351}
5357 5352
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12df6ca..c2f081352a03 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -119,26 +119,22 @@
119 119
120 120
121#ifdef CONFIG_NET_POLL_CONTROLLER 121#ifdef CONFIG_NET_POLL_CONTROLLER
122extern cpumask_var_t netpoll_block_tx; 122extern atomic_t netpoll_block_tx;
123 123
124static inline void block_netpoll_tx(void) 124static inline void block_netpoll_tx(void)
125{ 125{
126 preempt_disable(); 126 atomic_inc(&netpoll_block_tx);
127 BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(),
128 netpoll_block_tx));
129} 127}
130 128
131static inline void unblock_netpoll_tx(void) 129static inline void unblock_netpoll_tx(void)
132{ 130{
133 BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(), 131 atomic_dec(&netpoll_block_tx);
134 netpoll_block_tx));
135 preempt_enable();
136} 132}
137 133
138static inline int is_netpoll_tx_blocked(struct net_device *dev) 134static inline int is_netpoll_tx_blocked(struct net_device *dev)
139{ 135{
140 if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) 136 if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
141 return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx); 137 return atomic_read(&netpoll_block_tx);
142 return 0; 138 return 0;
143} 139}
144#else 140#else
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 1cd90da86f13..32b1c6fb2de1 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -5,7 +5,7 @@
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt 8#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
9 9
10#include <linux/version.h> 10#include <linux/version.h>
11#include <linux/init.h> 11#include <linux/init.h>
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 19f9c0656667..80511167f35b 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -6,7 +6,7 @@
6 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt 9#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
10 10
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8b4cea57a6c5..20da1996d354 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -635,8 +635,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
635 635
636 ndev = alloc_netdev(sizeof(struct cfspi), 636 ndev = alloc_netdev(sizeof(struct cfspi),
637 "cfspi%d", cfspi_setup); 637 "cfspi%d", cfspi_setup);
638 if (!dev) 638 if (!ndev)
639 return -ENODEV; 639 return -ENOMEM;
640 640
641 cfspi = netdev_priv(ndev); 641 cfspi = netdev_priv(ndev);
642 netif_stop_queue(ndev); 642 netif_stop_queue(ndev);
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index bb813d94aea8..e97521c801ea 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -2408,7 +2408,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2408 if (index < NEXACT_MAC) 2408 if (index < NEXACT_MAC)
2409 ret++; 2409 ret++;
2410 else if (hash) 2410 else if (hash)
2411 *hash |= (1 << hash_mac_addr(addr[i])); 2411 *hash |= (1ULL << hash_mac_addr(addr[i]));
2412 } 2412 }
2413 return ret; 2413 return ret;
2414} 2414}
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index c3449bbc585a..6bf464afa90e 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -816,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
816} 816}
817 817
818/* 818/*
819 * Collect up to maxaddrs worth of a netdevice's unicast addresses into an 819 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
820 * array of addrss pointers and return the number collected. 820 * at a specified offset within the list, into an array of addrss pointers and
821 * return the number collected.
821 */ 822 */
822static inline int collect_netdev_uc_list_addrs(const struct net_device *dev, 823static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
823 const u8 **addr, 824 const u8 **addr,
824 unsigned int maxaddrs) 825 unsigned int offset,
826 unsigned int maxaddrs)
825{ 827{
828 unsigned int index = 0;
826 unsigned int naddr = 0; 829 unsigned int naddr = 0;
827 const struct netdev_hw_addr *ha; 830 const struct netdev_hw_addr *ha;
828 831
829 for_each_dev_addr(dev, ha) { 832 for_each_dev_addr(dev, ha)
830 addr[naddr++] = ha->addr; 833 if (index++ >= offset) {
831 if (naddr >= maxaddrs) 834 addr[naddr++] = ha->addr;
832 break; 835 if (naddr >= maxaddrs)
833 } 836 break;
837 }
834 return naddr; 838 return naddr;
835} 839}
836 840
837/* 841/*
838 * Collect up to maxaddrs worth of a netdevice's multicast addresses into an 842 * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
839 * array of addrss pointers and return the number collected. 843 * at a specified offset within the list, into an array of addrss pointers and
844 * return the number collected.
840 */ 845 */
841static inline int collect_netdev_mc_list_addrs(const struct net_device *dev, 846static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
842 const u8 **addr, 847 const u8 **addr,
843 unsigned int maxaddrs) 848 unsigned int offset,
849 unsigned int maxaddrs)
844{ 850{
851 unsigned int index = 0;
845 unsigned int naddr = 0; 852 unsigned int naddr = 0;
846 const struct netdev_hw_addr *ha; 853 const struct netdev_hw_addr *ha;
847 854
848 netdev_for_each_mc_addr(ha, dev) { 855 netdev_for_each_mc_addr(ha, dev)
849 addr[naddr++] = ha->addr; 856 if (index++ >= offset) {
850 if (naddr >= maxaddrs) 857 addr[naddr++] = ha->addr;
851 break; 858 if (naddr >= maxaddrs)
852 } 859 break;
860 }
853 return naddr; 861 return naddr;
854} 862}
855 863
@@ -862,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
862 u64 mhash = 0; 870 u64 mhash = 0;
863 u64 uhash = 0; 871 u64 uhash = 0;
864 bool free = true; 872 bool free = true;
865 u16 filt_idx[7]; 873 unsigned int offset, naddr;
866 const u8 *addr[7]; 874 const u8 *addr[7];
867 int ret, naddr = 0; 875 int ret;
868 const struct port_info *pi = netdev_priv(dev); 876 const struct port_info *pi = netdev_priv(dev);
869 877
870 /* first do the secondary unicast addresses */ 878 /* first do the secondary unicast addresses */
871 naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr)); 879 for (offset = 0; ; offset += naddr) {
872 if (naddr > 0) { 880 naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
881 ARRAY_SIZE(addr));
882 if (naddr == 0)
883 break;
884
873 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, 885 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
874 naddr, addr, filt_idx, &uhash, sleep); 886 naddr, addr, NULL, &uhash, sleep);
875 if (ret < 0) 887 if (ret < 0)
876 return ret; 888 return ret;
877 889
@@ -879,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
879 } 891 }
880 892
881 /* next set up the multicast addresses */ 893 /* next set up the multicast addresses */
882 naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr)); 894 for (offset = 0; ; offset += naddr) {
883 if (naddr > 0) { 895 naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
896 ARRAY_SIZE(addr));
897 if (naddr == 0)
898 break;
899
884 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, 900 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
885 naddr, addr, filt_idx, &mhash, sleep); 901 naddr, addr, NULL, &mhash, sleep);
886 if (ret < 0) 902 if (ret < 0)
887 return ret; 903 return ret;
904 free = false;
888 } 905 }
889 906
890 return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0, 907 return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -2252,6 +2269,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
2252{ 2269{
2253 struct sge *s = &adapter->sge; 2270 struct sge *s = &adapter->sge;
2254 int q10g, n10g, qidx, pidx, qs; 2271 int q10g, n10g, qidx, pidx, qs;
2272 size_t iqe_size;
2255 2273
2256 /* 2274 /*
2257 * We should not be called till we know how many Queue Sets we can 2275 * We should not be called till we know how many Queue Sets we can
@@ -2296,6 +2314,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
2296 s->ethqsets = qidx; 2314 s->ethqsets = qidx;
2297 2315
2298 /* 2316 /*
2317 * The Ingress Queue Entry Size for our various Response Queues needs
2318 * to be big enough to accommodate the largest message we can receive
2319 * from the chip/firmware; which is 64 bytes ...
2320 */
2321 iqe_size = 64;
2322
2323 /*
2299 * Set up default Queue Set parameters ... Start off with the 2324 * Set up default Queue Set parameters ... Start off with the
2300 * shortest interrupt holdoff timer. 2325 * shortest interrupt holdoff timer.
2301 */ 2326 */
@@ -2303,7 +2328,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
2303 struct sge_eth_rxq *rxq = &s->ethrxq[qs]; 2328 struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2304 struct sge_eth_txq *txq = &s->ethtxq[qs]; 2329 struct sge_eth_txq *txq = &s->ethtxq[qs];
2305 2330
2306 init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES); 2331 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2307 rxq->fl.size = 72; 2332 rxq->fl.size = 72;
2308 txq->q.size = 1024; 2333 txq->q.size = 1024;
2309 } 2334 }
@@ -2312,8 +2337,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
2312 * The firmware event queue is used for link state changes and 2337 * The firmware event queue is used for link state changes and
2313 * notifications of TX DMA completions. 2338 * notifications of TX DMA completions.
2314 */ 2339 */
2315 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, 2340 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2316 L1_CACHE_BYTES);
2317 2341
2318 /* 2342 /*
2319 * The forwarded interrupt queue is used when we're in MSI interrupt 2343 * The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2329,7 +2353,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
2329 * any time ... 2353 * any time ...
2330 */ 2354 */
2331 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1, 2355 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2332 L1_CACHE_BYTES); 2356 iqe_size);
2333} 2357}
2334 2358
2335/* 2359/*
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e306c20dfaee..19520afe1a12 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -1014,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1014 unsigned int naddr, const u8 **addr, u16 *idx, 1014 unsigned int naddr, const u8 **addr, u16 *idx,
1015 u64 *hash, bool sleep_ok) 1015 u64 *hash, bool sleep_ok)
1016{ 1016{
1017 int i, ret; 1017 int offset, ret = 0;
1018 unsigned nfilters = 0;
1019 unsigned int rem = naddr;
1018 struct fw_vi_mac_cmd cmd, rpl; 1020 struct fw_vi_mac_cmd cmd, rpl;
1019 struct fw_vi_mac_exact *p;
1020 size_t len16;
1021 1021
1022 if (naddr > ARRAY_SIZE(cmd.u.exact)) 1022 if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
1023 return -EINVAL; 1023 return -EINVAL;
1024 len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1025 u.exact[naddr]), 16);
1026 1024
1027 memset(&cmd, 0, sizeof(cmd)); 1025 for (offset = 0; offset < naddr; /**/) {
1028 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | 1026 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
1029 FW_CMD_REQUEST | 1027 ? rem
1030 FW_CMD_WRITE | 1028 : ARRAY_SIZE(cmd.u.exact));
1031 (free ? FW_CMD_EXEC : 0) | 1029 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1032 FW_VI_MAC_CMD_VIID(viid)); 1030 u.exact[fw_naddr]), 16);
1033 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | 1031 struct fw_vi_mac_exact *p;
1034 FW_CMD_LEN16(len16)); 1032 int i;
1035 1033
1036 for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) { 1034 memset(&cmd, 0, sizeof(cmd));
1037 p->valid_to_idx = 1035 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
1038 cpu_to_be16(FW_VI_MAC_CMD_VALID | 1036 FW_CMD_REQUEST |
1039 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 1037 FW_CMD_WRITE |
1040 memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); 1038 (free ? FW_CMD_EXEC : 0) |
1041 } 1039 FW_VI_MAC_CMD_VIID(viid));
1040 cmd.freemacs_to_len16 =
1041 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
1042 FW_CMD_LEN16(len16));
1043
1044 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1045 p->valid_to_idx = cpu_to_be16(
1046 FW_VI_MAC_CMD_VALID |
1047 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
1048 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1049 }
1050
1051
1052 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
1053 sleep_ok);
1054 if (ret && ret != -ENOMEM)
1055 break;
1042 1056
1043 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok); 1057 for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
1044 if (ret) 1058 u16 index = FW_VI_MAC_CMD_IDX_GET(
1045 return ret; 1059 be16_to_cpu(p->valid_to_idx));
1046 1060
1047 for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) { 1061 if (idx)
1048 u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); 1062 idx[offset+i] =
1049 1063 (index >= FW_CLS_TCAM_NUM_ENTRIES
1050 if (idx) 1064 ? 0xffff
1051 idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES 1065 : index);
1052 ? 0xffff 1066 if (index < FW_CLS_TCAM_NUM_ENTRIES)
1053 : index); 1067 nfilters++;
1054 if (index < FW_CLS_TCAM_NUM_ENTRIES) 1068 else if (hash)
1055 ret++; 1069 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
1056 else if (hash) 1070 }
1057 *hash |= (1 << hash_mac_addr(addr[i])); 1071
1072 free = false;
1073 offset += fw_naddr;
1074 rem -= fw_naddr;
1058 } 1075 }
1076
1077 /*
1078 * If there were no errors or we merely ran out of room in our MAC
1079 * address arena, return the number of filters actually written.
1080 */
1081 if (ret == 0 || ret == -ENOMEM)
1082 ret = nfilters;
1059 return ret; 1083 return ret;
1060} 1084}
1061 1085
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4686c3983fc3..4d62f7bfa036 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.21-k6-NAPI" 34#define DRV_VERSION "7.3.21-k8-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
485 struct net_device *netdev = adapter->netdev; 485 struct net_device *netdev = adapter->netdev;
486 u32 rctl, tctl; 486 u32 rctl, tctl;
487 487
488 /* signal that we're down so the interrupt handler does not
489 * reschedule our watchdog timer */
490 set_bit(__E1000_DOWN, &adapter->flags);
491 488
492 /* disable receives in the hardware */ 489 /* disable receives in the hardware */
493 rctl = er32(RCTL); 490 rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
508 505
509 e1000_irq_disable(adapter); 506 e1000_irq_disable(adapter);
510 507
508 /*
509 * Setting DOWN must be after irq_disable to prevent
510 * a screaming interrupt. Setting DOWN also prevents
511 * timers and tasks from rescheduling.
512 */
513 set_bit(__E1000_DOWN, &adapter->flags);
514
511 del_timer_sync(&adapter->tx_fifo_stall_timer); 515 del_timer_sync(&adapter->tx_fifo_stall_timer);
512 del_timer_sync(&adapter->watchdog_timer); 516 del_timer_sync(&adapter->watchdog_timer);
513 del_timer_sync(&adapter->phy_info_timer); 517 del_timer_sync(&adapter->phy_info_timer);
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 75b099ce49c9..1f37ee6b2a26 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -261,6 +261,13 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
261 261
262} 262}
263 263
264static int ehea_set_flags(struct net_device *dev, u32 data)
265{
266 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
267 | ETH_FLAG_TXVLAN
268 | ETH_FLAG_RXVLAN);
269}
270
264const struct ethtool_ops ehea_ethtool_ops = { 271const struct ethtool_ops ehea_ethtool_ops = {
265 .get_settings = ehea_get_settings, 272 .get_settings = ehea_get_settings,
266 .get_drvinfo = ehea_get_drvinfo, 273 .get_drvinfo = ehea_get_drvinfo,
@@ -273,6 +280,8 @@ const struct ethtool_ops ehea_ethtool_ops = {
273 .get_ethtool_stats = ehea_get_ethtool_stats, 280 .get_ethtool_stats = ehea_get_ethtool_stats,
274 .get_rx_csum = ehea_get_rx_csum, 281 .get_rx_csum = ehea_get_rx_csum,
275 .set_settings = ehea_set_settings, 282 .set_settings = ehea_set_settings,
283 .get_flags = ethtool_op_get_flags,
284 .set_flags = ehea_set_flags,
276 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ 285 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */
277}; 286};
278 287
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 182b2a7be8dc..b95f087cd5a9 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -400,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
400 skb_arr_rq1[index] = netdev_alloc_skb(dev, 400 skb_arr_rq1[index] = netdev_alloc_skb(dev,
401 EHEA_L_PKT_SIZE); 401 EHEA_L_PKT_SIZE);
402 if (!skb_arr_rq1[index]) { 402 if (!skb_arr_rq1[index]) {
403 ehea_info("Unable to allocate enough skb in the array\n");
403 pr->rq1_skba.os_skbs = fill_wqes - i; 404 pr->rq1_skba.os_skbs = fill_wqes - i;
404 break; 405 break;
405 } 406 }
@@ -422,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
422 struct net_device *dev = pr->port->netdev; 423 struct net_device *dev = pr->port->netdev;
423 int i; 424 int i;
424 425
425 for (i = 0; i < pr->rq1_skba.len; i++) { 426 if (nr_rq1a > pr->rq1_skba.len) {
427 ehea_error("NR_RQ1A bigger than skb array len\n");
428 return;
429 }
430
431 for (i = 0; i < nr_rq1a; i++) {
426 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 432 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
427 if (!skb_arr_rq1[i]) 433 if (!skb_arr_rq1[i]) {
434 ehea_info("No enough memory to allocate skb array\n");
428 break; 435 break;
436 }
429 } 437 }
430 /* Ring doorbell */ 438 /* Ring doorbell */
431 ehea_update_rq1a(pr->qp, nr_rq1a); 439 ehea_update_rq1a(pr->qp, i);
432} 440}
433 441
434static int ehea_refill_rq_def(struct ehea_port_res *pr, 442static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -675,7 +683,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
675 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) && 683 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
676 pr->port->vgrp); 684 pr->port->vgrp);
677 685
678 if (use_lro) { 686 if (skb->dev->features & NETIF_F_LRO) {
679 if (vlan_extracted) 687 if (vlan_extracted)
680 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, 688 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
681 pr->port->vgrp, 689 pr->port->vgrp,
@@ -735,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
735 743
736 skb = netdev_alloc_skb(dev, 744 skb = netdev_alloc_skb(dev,
737 EHEA_L_PKT_SIZE); 745 EHEA_L_PKT_SIZE);
738 if (!skb) 746 if (!skb) {
747 ehea_info("Not enough memory to allocate skb\n");
739 break; 748 break;
749 }
740 } 750 }
741 skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 751 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
742 cqe->num_bytes_transfered - 4); 752 cqe->num_bytes_transfered - 4);
@@ -777,7 +787,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
777 } 787 }
778 cqe = ehea_poll_rq1(qp, &wqe_index); 788 cqe = ehea_poll_rq1(qp, &wqe_index);
779 } 789 }
780 if (use_lro) 790 if (dev->features & NETIF_F_LRO)
781 lro_flush_all(&pr->lro_mgr); 791 lro_flush_all(&pr->lro_mgr);
782 792
783 pr->rx_packets += processed; 793 pr->rx_packets += processed;
@@ -3268,6 +3278,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3268 | NETIF_F_LLTX; 3278 | NETIF_F_LLTX;
3269 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3279 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3270 3280
3281 if (use_lro)
3282 dev->features |= NETIF_F_LRO;
3283
3271 INIT_WORK(&port->reset_task, ehea_reset_port); 3284 INIT_WORK(&port->reset_task, ehea_reset_port);
3272 3285
3273 ret = register_netdev(dev); 3286 ret = register_netdev(dev);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef91dd43..aa28b270c045 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1962,7 +1962,8 @@ static void enic_poll_controller(struct net_device *netdev)
1962 case VNIC_DEV_INTR_MODE_MSIX: 1962 case VNIC_DEV_INTR_MODE_MSIX:
1963 for (i = 0; i < enic->rq_count; i++) { 1963 for (i = 0; i < enic->rq_count; i++) {
1964 intr = enic_msix_rq_intr(enic, i); 1964 intr = enic_msix_rq_intr(enic, i);
1965 enic_isr_msix_rq(enic->msix_entry[intr].vector, enic); 1965 enic_isr_msix_rq(enic->msix_entry[intr].vector,
1966 &enic->napi[i]);
1966 } 1967 }
1967 intr = enic_msix_wq_intr(enic, i); 1968 intr = enic_msix_wq_intr(enic, i);
1968 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic); 1969 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 49e4ce1246a7..d1bec6269173 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np,
577 irq_of_parse_and_map(np, 1); 577 irq_of_parse_and_map(np, 1);
578 priv->gfargrp[priv->num_grps].interruptError = 578 priv->gfargrp[priv->num_grps].interruptError =
579 irq_of_parse_and_map(np,2); 579 irq_of_parse_and_map(np,2);
580 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || 580 if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
581 priv->gfargrp[priv->num_grps].interruptReceive < 0 || 581 priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
582 priv->gfargrp[priv->num_grps].interruptError < 0) { 582 priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
583 return -EINVAL; 583 return -EINVAL;
584 }
585 } 584 }
586 585
587 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; 586 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ab9f675c5b8b..fe337bd121aa 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -104,6 +104,8 @@ static void ri_tasklet(unsigned long dev)
104 rcu_read_unlock(); 104 rcu_read_unlock();
105 dev_kfree_skb(skb); 105 dev_kfree_skb(skb);
106 stats->tx_dropped++; 106 stats->tx_dropped++;
107 if (skb_queue_len(&dp->tq) != 0)
108 goto resched;
107 break; 109 break;
108 } 110 }
109 rcu_read_unlock(); 111 rcu_read_unlock();
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc0198092343..aa93655c3aa7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
88 "IC PLUS IP1000 1000/100/10 based NIC", 88 "IC PLUS IP1000 1000/100/10 based NIC",
89 "Sundance Technology ST2021 based NIC", 89 "Sundance Technology ST2021 based NIC",
90 "Tamarack Microelectronics TC9020/9021 based NIC", 90 "Tamarack Microelectronics TC9020/9021 based NIC",
91 "Tamarack Microelectronics TC9020/9021 based NIC",
92 "D-Link NIC IP1000A" 91 "D-Link NIC IP1000A"
93}; 92};
94 93
95static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { 94static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 95 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 96 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 97 { PCI_VDEVICE(DLINK, 0x9021), 2 },
99 { PCI_VDEVICE(DLINK, 0x9021), 3 }, 98 { PCI_VDEVICE(DLINK, 0x4020), 3 },
100 { PCI_VDEVICE(DLINK, 0x4020), 4 },
101 { 0, } 99 { 0, }
102}; 100};
103 101
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bccd6d0..52a7c86af663 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
258 258
259 /* Baud Rate Error Correction x 10000 */ 259 /* Baud Rate Error Correction x 10000 */
260 u32 rate_err_array[] = { 260 u32 rate_err_array[] = {
261 0000, 0625, 1250, 1875, 261 0, 625, 1250, 1875,
262 2500, 3125, 3750, 4375, 262 2500, 3125, 3750, 4375,
263 5000, 5625, 6250, 6875, 263 5000, 5625, 6250, 6875,
264 7500, 8125, 8750, 9375, 264 7500, 8125, 8750, 9375,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index fbad4d819608..eee0b298bd36 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4771,6 +4771,9 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4771 adapter->rx_ring[i] = NULL; 4771 adapter->rx_ring[i] = NULL;
4772 } 4772 }
4773 4773
4774 adapter->num_tx_queues = 0;
4775 adapter->num_rx_queues = 0;
4776
4774 ixgbe_free_q_vectors(adapter); 4777 ixgbe_free_q_vectors(adapter);
4775 ixgbe_reset_interrupt_capability(adapter); 4778 ixgbe_reset_interrupt_capability(adapter);
4776} 4779}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b68eee2414c2..7a7e18ba278a 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
290 dev_cap->bf_reg_size = 1 << (field & 0x1f); 290 dev_cap->bf_reg_size = 1 << (field & 0x1f);
291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
293 mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
294 field = 3;
295 }
292 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 296 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
293 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 297 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
294 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 298 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b47440..03a1d280105f 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation. 2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. 3 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
4 * 4 *
5 * This code was derived from the Intel e1000e Linux driver. 5 * This code was derived from the Intel e1000e Linux driver.
6 * 6 *
@@ -2464,8 +2464,8 @@ static void __exit pch_gbe_exit_module(void)
2464module_init(pch_gbe_init_module); 2464module_init(pch_gbe_init_module);
2465module_exit(pch_gbe_exit_module); 2465module_exit(pch_gbe_exit_module);
2466 2466
2467MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver"); 2467MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
2468MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>"); 2468MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
2469MODULE_LICENSE("GPL"); 2469MODULE_LICENSE("GPL");
2470MODULE_VERSION(DRV_VERSION); 2470MODULE_VERSION(DRV_VERSION);
2471MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); 2471MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146fc560..ef0996a0eaaa 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
434 .err = "using default of " 434 .err = "using default of "
435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD), 435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
436 .def = PCH_GBE_DEFAULT_TXD, 436 .def = PCH_GBE_DEFAULT_TXD,
437 .arg = { .r = { .min = PCH_GBE_MIN_TXD } }, 437 .arg = { .r = { .min = PCH_GBE_MIN_TXD,
438 .arg = { .r = { .max = PCH_GBE_MAX_TXD } } 438 .max = PCH_GBE_MAX_TXD } }
439 }; 439 };
440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; 440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
441 tx_ring->count = TxDescriptors; 441 tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
450 .err = "using default of " 450 .err = "using default of "
451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD), 451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
452 .def = PCH_GBE_DEFAULT_RXD, 452 .def = PCH_GBE_DEFAULT_RXD,
453 .arg = { .r = { .min = PCH_GBE_MIN_RXD } }, 453 .arg = { .r = { .min = PCH_GBE_MIN_RXD,
454 .arg = { .r = { .max = PCH_GBE_MAX_RXD } } 454 .max = PCH_GBE_MAX_RXD } }
455 }; 455 };
456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
457 rx_ring->count = RxDescriptors; 457 rx_ring->count = RxDescriptors;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cb3d13e4e074..35fda5ac8120 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -64,7 +64,7 @@ config BCM63XX_PHY
64config ICPLUS_PHY 64config ICPLUS_PHY
65 tristate "Drivers for ICPlus PHYs" 65 tristate "Drivers for ICPlus PHYs"
66 ---help--- 66 ---help---
67 Currently supports the IP175C PHY. 67 Currently supports the IP175C and IP1001 PHYs.
68 68
69config REALTEK_PHY 69config REALTEK_PHY
70 tristate "Drivers for Realtek PHYs" 70 tristate "Drivers for Realtek PHYs"
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index c1d2d251fe8b..9a09e24c30bc 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -30,7 +30,7 @@
30#include <asm/irq.h> 30#include <asm/irq.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32 32
33MODULE_DESCRIPTION("ICPlus IP175C PHY driver"); 33MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers");
34MODULE_AUTHOR("Michael Barkowski"); 34MODULE_AUTHOR("Michael Barkowski");
35MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36 36
@@ -89,6 +89,33 @@ static int ip175c_config_init(struct phy_device *phydev)
89 return 0; 89 return 0;
90} 90}
91 91
92static int ip1001_config_init(struct phy_device *phydev)
93{
94 int err, value;
95
96 /* Software Reset PHY */
97 value = phy_read(phydev, MII_BMCR);
98 value |= BMCR_RESET;
99 err = phy_write(phydev, MII_BMCR, value);
100 if (err < 0)
101 return err;
102
103 do {
104 value = phy_read(phydev, MII_BMCR);
105 } while (value & BMCR_RESET);
106
107 /* Additional delay (2ns) used to adjust RX clock phase
108 * at GMII/ RGMII interface */
109 value = phy_read(phydev, 16);
110 value |= 0x3;
111
112 err = phy_write(phydev, 16, value);
113 if (err < 0)
114 return err;
115
116 return err;
117}
118
92static int ip175c_read_status(struct phy_device *phydev) 119static int ip175c_read_status(struct phy_device *phydev)
93{ 120{
94 if (phydev->addr == 4) /* WAN port */ 121 if (phydev->addr == 4) /* WAN port */
@@ -121,21 +148,43 @@ static struct phy_driver ip175c_driver = {
121 .driver = { .owner = THIS_MODULE,}, 148 .driver = { .owner = THIS_MODULE,},
122}; 149};
123 150
124static int __init ip175c_init(void) 151static struct phy_driver ip1001_driver = {
152 .phy_id = 0x02430d90,
153 .name = "ICPlus IP1001",
154 .phy_id_mask = 0x0ffffff0,
155 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
156 SUPPORTED_Asym_Pause,
157 .config_init = &ip1001_config_init,
158 .config_aneg = &genphy_config_aneg,
159 .read_status = &genphy_read_status,
160 .suspend = genphy_suspend,
161 .resume = genphy_resume,
162 .driver = { .owner = THIS_MODULE,},
163};
164
165static int __init icplus_init(void)
125{ 166{
167 int ret = 0;
168
169 ret = phy_driver_register(&ip1001_driver);
170 if (ret < 0)
171 return -ENODEV;
172
126 return phy_driver_register(&ip175c_driver); 173 return phy_driver_register(&ip175c_driver);
127} 174}
128 175
129static void __exit ip175c_exit(void) 176static void __exit icplus_exit(void)
130{ 177{
178 phy_driver_unregister(&ip1001_driver);
131 phy_driver_unregister(&ip175c_driver); 179 phy_driver_unregister(&ip175c_driver);
132} 180}
133 181
134module_init(ip175c_init); 182module_init(icplus_init);
135module_exit(ip175c_exit); 183module_exit(icplus_exit);
136 184
137static struct mdio_device_id __maybe_unused icplus_tbl[] = { 185static struct mdio_device_id __maybe_unused icplus_tbl[] = {
138 { 0x02430d80, 0x0ffffff0 }, 186 { 0x02430d80, 0x0ffffff0 },
187 { 0x02430d90, 0x0ffffff0 },
139 { } 188 { }
140}; 189};
141 190
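
A minimal sketch (not from the patch) of how an <id, mask> pair such as the new { 0x02430d90, 0x0ffffff0 } entry matches a probed PHY identifier, assuming the usual mask-then-compare rule:

/* Illustrative only: the low revision nibble is masked off, so an IP1001
 * reporting e.g. 0x02430d91 still matches { 0x02430d90, 0x0ffffff0 }. */
static bool icplus_id_matches(u32 probed_id, u32 table_id, u32 table_mask)
{
	return (probed_id & table_mask) == (table_id & table_mask);
}
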
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f0bd1a1aba3a..e8b9c53c304b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/marvell_phy.h> 32#include <linux/marvell_phy.h>
33#include <linux/of.h>
33 34
34#include <asm/io.h> 35#include <asm/io.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37 38
39#define MII_MARVELL_PHY_PAGE 22
40
38#define MII_M1011_IEVENT 0x13 41#define MII_M1011_IEVENT 0x13
39#define MII_M1011_IEVENT_CLEAR 0x0000 42#define MII_M1011_IEVENT_CLEAR 0x0000
40 43
@@ -80,7 +83,6 @@
80#define MII_88E1121_PHY_LED_CTRL 16 83#define MII_88E1121_PHY_LED_CTRL 16
81#define MII_88E1121_PHY_LED_PAGE 3 84#define MII_88E1121_PHY_LED_PAGE 3
82#define MII_88E1121_PHY_LED_DEF 0x0030 85#define MII_88E1121_PHY_LED_DEF 0x0030
83#define MII_88E1121_PHY_PAGE 22
84 86
85#define MII_M1011_PHY_STATUS 0x11 87#define MII_M1011_PHY_STATUS 0x11
86#define MII_M1011_PHY_STATUS_1000 0x8000 88#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
186 return 0; 188 return 0;
187} 189}
188 190
191#ifdef CONFIG_OF_MDIO
192/*
193 * Set and/or override some configuration registers based on the
194 * marvell,reg-init property stored in the of_node for the phydev.
195 *
196 * marvell,reg-init = <reg-page reg mask value>,...;
197 *
198 * There may be one or more sets of <reg-page reg mask value>:
199 *
200 * reg-page: which register bank to use.
201 * reg: the register.
202 * mask: if non-zero, ANDed with existing register value.
203 * value: ORed with the masked value and written to the register.
204 *
205 */
206static int marvell_of_reg_init(struct phy_device *phydev)
207{
208 const __be32 *paddr;
209 int len, i, saved_page, current_page, page_changed, ret;
210
211 if (!phydev->dev.of_node)
212 return 0;
213
214 paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
215 if (!paddr || len < (4 * sizeof(*paddr)))
216 return 0;
217
218 saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
219 if (saved_page < 0)
220 return saved_page;
221 page_changed = 0;
222 current_page = saved_page;
223
224 ret = 0;
225 len /= sizeof(*paddr);
226 for (i = 0; i < len - 3; i += 4) {
227 u16 reg_page = be32_to_cpup(paddr + i);
228 u16 reg = be32_to_cpup(paddr + i + 1);
229 u16 mask = be32_to_cpup(paddr + i + 2);
230 u16 val_bits = be32_to_cpup(paddr + i + 3);
231 int val;
232
233 if (reg_page != current_page) {
234 current_page = reg_page;
235 page_changed = 1;
236 ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
237 if (ret < 0)
238 goto err;
239 }
240
241 val = 0;
242 if (mask) {
243 val = phy_read(phydev, reg);
244 if (val < 0) {
245 ret = val;
246 goto err;
247 }
248 val &= mask;
249 }
250 val |= val_bits;
251
252 ret = phy_write(phydev, reg, val);
253 if (ret < 0)
254 goto err;
255
256 }
257err:
258 if (page_changed) {
259 i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
260 if (ret == 0)
261 ret = i;
262 }
263 return ret;
264}
265#else
266static int marvell_of_reg_init(struct phy_device *phydev)
267{
268 return 0;
269}
270#endif /* CONFIG_OF_MDIO */
271
189static int m88e1121_config_aneg(struct phy_device *phydev) 272static int m88e1121_config_aneg(struct phy_device *phydev)
190{ 273{
191 int err, oldpage, mscr; 274 int err, oldpage, mscr;
192 275
193 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 276 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
194 277
195 err = phy_write(phydev, MII_88E1121_PHY_PAGE, 278 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
196 MII_88E1121_PHY_MSCR_PAGE); 279 MII_88E1121_PHY_MSCR_PAGE);
197 if (err < 0) 280 if (err < 0)
198 return err; 281 return err;
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
218 return err; 301 return err;
219 } 302 }
220 303
221 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 304 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
222 305
223 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 306 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
224 if (err < 0) 307 if (err < 0)
@@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
229 if (err < 0) 312 if (err < 0)
230 return err; 313 return err;
231 314
232 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 315 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
233 316
234 phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); 317 phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
235 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); 318 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
236 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 319 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
237 320
238 err = genphy_config_aneg(phydev); 321 err = genphy_config_aneg(phydev);
239 322
@@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
244{ 327{
245 int err, oldpage, mscr; 328 int err, oldpage, mscr;
246 329
247 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 330 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
248 331
249 err = phy_write(phydev, MII_88E1121_PHY_PAGE, 332 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
250 MII_88E1121_PHY_MSCR_PAGE); 333 MII_88E1121_PHY_MSCR_PAGE);
251 if (err < 0) 334 if (err < 0)
252 return err; 335 return err;
@@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
258 if (err < 0) 341 if (err < 0)
259 return err; 342 return err;
260 343
261 err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 344 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
262 if (err < 0) 345 if (err < 0)
263 return err; 346 return err;
264 347
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
368 return err; 451 return err;
369 } 452 }
370 453
454 err = marvell_of_reg_init(phydev);
455 if (err < 0)
456 return err;
371 457
372 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 458 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
373 if (err < 0) 459 if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
398 int err; 484 int err;
399 485
400 /* Change address */ 486 /* Change address */
401 err = phy_write(phydev, 0x16, 0x0002); 487 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
402 if (err < 0) 488 if (err < 0)
403 return err; 489 return err;
404 490
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
408 return err; 494 return err;
409 495
410 /* Change address */ 496 /* Change address */
411 err = phy_write(phydev, 0x16, 0x0003); 497 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
412 if (err < 0) 498 if (err < 0)
413 return err; 499 return err;
414 500
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
420 if (err < 0) 506 if (err < 0)
421 return err; 507 return err;
422 508
509 err = marvell_of_reg_init(phydev);
510 if (err < 0)
511 return err;
512
423 /* Reset address */ 513 /* Reset address */
424 err = phy_write(phydev, 0x16, 0x0); 514 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
515 if (err < 0)
516 return err;
517
518 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
519 if (err < 0)
520 return err;
521
522 return 0;
523}
524
525static int m88e1149_config_init(struct phy_device *phydev)
526{
527 int err;
528
529 /* Change address */
530 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
531 if (err < 0)
532 return err;
533
534 /* Enable 1000 Mbit */
535 err = phy_write(phydev, 0x15, 0x1048);
536 if (err < 0)
537 return err;
538
539 err = marvell_of_reg_init(phydev);
540 if (err < 0)
541 return err;
542
543 /* Reset address */
544 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
425 if (err < 0) 545 if (err < 0)
426 return err; 546 return err;
427 547
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
491 } 611 }
492 } 612 }
493 613
614 err = marvell_of_reg_init(phydev);
615 if (err < 0)
616 return err;
617
494 return 0; 618 return 0;
495} 619}
496 620
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = {
685 .driver = { .owner = THIS_MODULE }, 809 .driver = { .owner = THIS_MODULE },
686 }, 810 },
687 { 811 {
812 .phy_id = MARVELL_PHY_ID_88E1149R,
813 .phy_id_mask = MARVELL_PHY_ID_MASK,
814 .name = "Marvell 88E1149R",
815 .features = PHY_GBIT_FEATURES,
816 .flags = PHY_HAS_INTERRUPT,
817 .config_init = &m88e1149_config_init,
818 .config_aneg = &m88e1118_config_aneg,
819 .read_status = &genphy_read_status,
820 .ack_interrupt = &marvell_ack_interrupt,
821 .config_intr = &marvell_config_intr,
822 .driver = { .owner = THIS_MODULE },
823 },
824 {
688 .phy_id = MARVELL_PHY_ID_88E1240, 825 .phy_id = MARVELL_PHY_ID_88E1240,
689 .phy_id_mask = MARVELL_PHY_ID_MASK, 826 .phy_id_mask = MARVELL_PHY_ID_MASK,
690 .name = "Marvell 88E1240", 827 .name = "Marvell 88E1240",
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
735 { 0x01410e10, 0xfffffff0 }, 872 { 0x01410e10, 0xfffffff0 },
736 { 0x01410cb0, 0xfffffff0 }, 873 { 0x01410cb0, 0xfffffff0 },
737 { 0x01410cd0, 0xfffffff0 }, 874 { 0x01410cd0, 0xfffffff0 },
875 { 0x01410e50, 0xfffffff0 },
738 { 0x01410e30, 0xfffffff0 }, 876 { 0x01410e30, 0xfffffff0 },
739 { 0x01410e90, 0xfffffff0 }, 877 { 0x01410e90, 0xfffffff0 },
740 { } 878 { }
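
A hypothetical device-tree fragment (values invented purely for illustration) showing the marvell,reg-init property that marvell_of_reg_init() above parses:

/*
 *	phy0: ethernet-phy@0 {
 *		reg = <0>;
 *		marvell,reg-init = <3 0x10 0 0x1177>,
 *				   <0 0x14 0xffef 0x0000>;
 *	};
 *
 * First tuple: select page 3, ignore the old contents of register 16
 * (mask 0) and write 0x1177.  Second tuple: select page 0, AND register 20
 * with 0xffef (clearing bit 4), OR in nothing, and write the result back.
 */
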
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d0416a..39659976a1ac 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2584,16 +2584,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2584 */ 2584 */
2585 dev_net_set(dev, net); 2585 dev_net_set(dev, net);
2586 2586
2587 ret = -EEXIST;
2588 mutex_lock(&pn->all_ppp_mutex); 2587 mutex_lock(&pn->all_ppp_mutex);
2589 2588
2590 if (unit < 0) { 2589 if (unit < 0) {
2591 unit = unit_get(&pn->units_idr, ppp); 2590 unit = unit_get(&pn->units_idr, ppp);
2592 if (unit < 0) { 2591 if (unit < 0) {
2593 *retp = unit; 2592 ret = unit;
2594 goto out2; 2593 goto out2;
2595 } 2594 }
2596 } else { 2595 } else {
2596 ret = -EEXIST;
2597 if (unit_find(&pn->units_idr, unit)) 2597 if (unit_find(&pn->units_idr, unit))
2598 goto out2; /* unit already exists */ 2598 goto out2; /* unit already exists */
2599 /* 2599 /*
@@ -2668,10 +2668,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2668 ppp->closing = 1; 2668 ppp->closing = 1;
2669 ppp_unlock(ppp); 2669 ppp_unlock(ppp);
2670 unregister_netdev(ppp->dev); 2670 unregister_netdev(ppp->dev);
2671 unit_put(&pn->units_idr, ppp->file.index);
2671 } else 2672 } else
2672 ppp_unlock(ppp); 2673 ppp_unlock(ppp);
2673 2674
2674 unit_put(&pn->units_idr, ppp->file.index);
2675 ppp->file.dead = 1; 2675 ppp->file.dead = 1;
2676 ppp->owner = NULL; 2676 ppp->owner = NULL;
2677 wake_up_interruptible(&ppp->file.rwait); 2677 wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2859,7 @@ static void __exit ppp_cleanup(void)
2859 * by holding all_ppp_mutex 2859 * by holding all_ppp_mutex
2860 */ 2860 */
2861 2861
2862/* associate pointer with specified number */ 2862static int __unit_alloc(struct idr *p, void *ptr, int n)
2863static int unit_set(struct idr *p, void *ptr, int n)
2864{ 2863{
2865 int unit, err; 2864 int unit, err;
2866 2865
@@ -2871,10 +2870,24 @@ again:
2871 } 2870 }
2872 2871
2873 err = idr_get_new_above(p, ptr, n, &unit); 2872 err = idr_get_new_above(p, ptr, n, &unit);
2874 if (err == -EAGAIN) 2873 if (err < 0) {
2875 goto again; 2874 if (err == -EAGAIN)
2875 goto again;
2876 return err;
2877 }
2878
2879 return unit;
2880}
2881
2882/* associate pointer with specified number */
2883static int unit_set(struct idr *p, void *ptr, int n)
2884{
2885 int unit;
2876 2886
2877 if (unit != n) { 2887 unit = __unit_alloc(p, ptr, n);
2888 if (unit < 0)
2889 return unit;
2890 else if (unit != n) {
2878 idr_remove(p, unit); 2891 idr_remove(p, unit);
2879 return -EINVAL; 2892 return -EINVAL;
2880 } 2893 }
@@ -2885,19 +2898,7 @@ again:
2885/* get new free unit number and associate pointer with it */ 2898/* get new free unit number and associate pointer with it */
2886static int unit_get(struct idr *p, void *ptr) 2899static int unit_get(struct idr *p, void *ptr)
2887{ 2900{
2888 int unit, err; 2901 return __unit_alloc(p, ptr, 0);
2889
2890again:
2891 if (!idr_pre_get(p, GFP_KERNEL)) {
2892 printk(KERN_ERR "PPP: No free memory for idr\n");
2893 return -ENOMEM;
2894 }
2895
2896 err = idr_get_new_above(p, ptr, 0, &unit);
2897 if (err == -EAGAIN)
2898 goto again;
2899
2900 return unit;
2901} 2902}
2902 2903
2903/* put unit number back to a pool */ 2904/* put unit number back to a pool */
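
A self-contained sketch (not taken from the patch) of the idr_pre_get()/idr_get_new_above() retry idiom that __unit_alloc() above centralizes, assuming the idr API of this kernel generation:

/* Illustrative only.  idr_pre_get() preloads memory and returns 0 on
 * failure; idr_get_new_above() may still return -EAGAIN if a concurrent
 * allocation consumed the preloaded memory, so the caller retries. */
static int example_alloc_id(struct idr *idr, void *ptr, int starting_id)
{
	int id, err;

again:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	err = idr_get_new_above(idr, ptr, starting_id, &id);
	if (err == -EAGAIN)
		goto again;
	if (err < 0)
		return err;

	return id;	/* may be larger than starting_id */
}
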
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index d72fb0519a2a..78c0e3c9b2b5 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -948,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
948 948
949abort: 949abort:
950 kfree_skb(skb); 950 kfree_skb(skb);
951 return 0; 951 return 1;
952} 952}
953 953
954/************************************************************************ 954/************************************************************************
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc63..9787dff90d3f 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2083,6 +2083,7 @@ struct ql_adapter {
2083 u32 mailbox_in; 2083 u32 mailbox_in;
2084 u32 mailbox_out; 2084 u32 mailbox_out;
2085 struct mbox_params idc_mbc; 2085 struct mbox_params idc_mbc;
2086 struct mutex mpi_mutex;
2086 2087
2087 int tx_ring_size; 2088 int tx_ring_size;
2088 int rx_ring_size; 2089 int rx_ring_size;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c30e0fe55a31..2555b1d34f34 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@ static const u32 default_msg =
62/* NETIF_MSG_PKTDATA | */ 62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0; 63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64 64
65static int debug = 0x00007fff; /* defaults above */ 65static int debug = -1; /* defaults above */
66module_param(debug, int, 0); 66module_param(debug, int, 0664);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68 68
69#define MSIX_IRQ 0 69#define MSIX_IRQ 0
70#define MSI_IRQ 1 70#define MSI_IRQ 1
71#define LEG_IRQ 2 71#define LEG_IRQ 2
72static int qlge_irq_type = MSIX_IRQ; 72static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, 0664);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static int qlge_mpi_coredump; 76static int qlge_mpi_coredump;
@@ -4629,6 +4629,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4629 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4629 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4630 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); 4630 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4631 init_completion(&qdev->ide_completion); 4631 init_completion(&qdev->ide_completion);
4632 mutex_init(&qdev->mpi_mutex);
4632 4633
4633 if (!cards_found) { 4634 if (!cards_found) {
4634 dev_info(&pdev->dev, "%s\n", DRV_STRING); 4635 dev_info(&pdev->dev, "%s\n", DRV_STRING);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee164..a2e919bcb3c6 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -534,6 +534,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
534 int status; 534 int status;
535 unsigned long count; 535 unsigned long count;
536 536
537 mutex_lock(&qdev->mpi_mutex);
537 538
538 /* Begin polled mode for MPI */ 539 /* Begin polled mode for MPI */
539 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 540 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -603,6 +604,7 @@ done:
603end: 604end:
604 /* End polled mode for MPI */ 605 /* End polled mode for MPI */
605 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 606 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
607 mutex_unlock(&qdev->mpi_mutex);
606 return status; 608 return status;
607} 609}
608 610
@@ -1099,9 +1101,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
1099static int ql_set_port_cfg(struct ql_adapter *qdev) 1101static int ql_set_port_cfg(struct ql_adapter *qdev)
1100{ 1102{
1101 int status; 1103 int status;
1102 rtnl_lock();
1103 status = ql_mb_set_port_cfg(qdev); 1104 status = ql_mb_set_port_cfg(qdev);
1104 rtnl_unlock();
1105 if (status) 1105 if (status)
1106 return status; 1106 return status;
1107 status = ql_idc_wait(qdev); 1107 status = ql_idc_wait(qdev);
@@ -1122,9 +1122,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
1122 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 1122 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
1123 int status; 1123 int status;
1124 1124
1125 rtnl_lock();
1126 status = ql_mb_get_port_cfg(qdev); 1125 status = ql_mb_get_port_cfg(qdev);
1127 rtnl_unlock();
1128 if (status) { 1126 if (status) {
1129 netif_err(qdev, drv, qdev->ndev, 1127 netif_err(qdev, drv, qdev->ndev,
1130 "Bug: Failed to get port config data.\n"); 1128 "Bug: Failed to get port config data.\n");
@@ -1167,7 +1165,6 @@ void ql_mpi_idc_work(struct work_struct *work)
1167 u32 aen; 1165 u32 aen;
1168 int timeout; 1166 int timeout;
1169 1167
1170 rtnl_lock();
1171 aen = mbcp->mbox_out[1] >> 16; 1168 aen = mbcp->mbox_out[1] >> 16;
1172 timeout = (mbcp->mbox_out[1] >> 8) & 0xf; 1169 timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
1173 1170
@@ -1231,7 +1228,6 @@ void ql_mpi_idc_work(struct work_struct *work)
1231 } 1228 }
1232 break; 1229 break;
1233 } 1230 }
1234 rtnl_unlock();
1235} 1231}
1236 1232
1237void ql_mpi_work(struct work_struct *work) 1233void ql_mpi_work(struct work_struct *work)
@@ -1242,7 +1238,7 @@ void ql_mpi_work(struct work_struct *work)
1242 struct mbox_params *mbcp = &mbc; 1238 struct mbox_params *mbcp = &mbc;
1243 int err = 0; 1239 int err = 0;
1244 1240
1245 rtnl_lock(); 1241 mutex_lock(&qdev->mpi_mutex);
1246 /* Begin polled mode for MPI */ 1242 /* Begin polled mode for MPI */
1247 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 1243 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
1248 1244
@@ -1259,7 +1255,7 @@ void ql_mpi_work(struct work_struct *work)
1259 1255
1260 /* End polled mode for MPI */ 1256 /* End polled mode for MPI */
1261 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 1257 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
1262 rtnl_unlock(); 1258 mutex_unlock(&qdev->mpi_mutex);
1263 ql_enable_completion_interrupt(qdev, 0); 1259 ql_enable_completion_interrupt(qdev, 0);
1264} 1260}
1265 1261
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4c4d16905efb..53b13deade95 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -744,26 +744,36 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
744 mdio_write(ioaddr, MII_BMCR, val & 0xffff); 744 mdio_write(ioaddr, MII_BMCR, val & 0xffff);
745} 745}
746 746
747static void rtl8169_check_link_status(struct net_device *dev, 747static void __rtl8169_check_link_status(struct net_device *dev,
748 struct rtl8169_private *tp, 748 struct rtl8169_private *tp,
749 void __iomem *ioaddr) 749 void __iomem *ioaddr,
750 bool pm)
750{ 751{
751 unsigned long flags; 752 unsigned long flags;
752 753
753 spin_lock_irqsave(&tp->lock, flags); 754 spin_lock_irqsave(&tp->lock, flags);
754 if (tp->link_ok(ioaddr)) { 755 if (tp->link_ok(ioaddr)) {
755 /* This is to cancel a scheduled suspend if there's one. */ 756 /* This is to cancel a scheduled suspend if there's one. */
756 pm_request_resume(&tp->pci_dev->dev); 757 if (pm)
758 pm_request_resume(&tp->pci_dev->dev);
757 netif_carrier_on(dev); 759 netif_carrier_on(dev);
758 netif_info(tp, ifup, dev, "link up\n"); 760 netif_info(tp, ifup, dev, "link up\n");
759 } else { 761 } else {
760 netif_carrier_off(dev); 762 netif_carrier_off(dev);
761 netif_info(tp, ifdown, dev, "link down\n"); 763 netif_info(tp, ifdown, dev, "link down\n");
762 pm_schedule_suspend(&tp->pci_dev->dev, 100); 764 if (pm)
765 pm_schedule_suspend(&tp->pci_dev->dev, 100);
763 } 766 }
764 spin_unlock_irqrestore(&tp->lock, flags); 767 spin_unlock_irqrestore(&tp->lock, flags);
765} 768}
766 769
770static void rtl8169_check_link_status(struct net_device *dev,
771 struct rtl8169_private *tp,
772 void __iomem *ioaddr)
773{
774 __rtl8169_check_link_status(dev, tp, ioaddr, false);
775}
776
767#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) 777#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
768 778
769static u32 __rtl8169_get_wol(struct rtl8169_private *tp) 779static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -4440,8 +4450,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4440 u32 status = opts1 & RxProtoMask; 4450 u32 status = opts1 & RxProtoMask;
4441 4451
4442 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || 4452 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
4443 ((status == RxProtoUDP) && !(opts1 & UDPFail)) || 4453 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
4444 ((status == RxProtoIP) && !(opts1 & IPFail)))
4445 skb->ip_summed = CHECKSUM_UNNECESSARY; 4454 skb->ip_summed = CHECKSUM_UNNECESSARY;
4446 else 4455 else
4447 skb_checksum_none_assert(skb); 4456 skb_checksum_none_assert(skb);
@@ -4601,7 +4610,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4601 } 4610 }
4602 4611
4603 if (status & LinkChg) 4612 if (status & LinkChg)
4604 rtl8169_check_link_status(dev, tp, ioaddr); 4613 __rtl8169_check_link_status(dev, tp, ioaddr, true);
4605 4614
4606 /* We need to see the lastest version of tp->intr_mask to 4615 /* We need to see the lastest version of tp->intr_mask to
4607 * avoid ignoring an MSI interrupt and having to wait for 4616 * avoid ignoring an MSI interrupt and having to wait for
@@ -4891,11 +4900,7 @@ static int rtl8169_runtime_idle(struct device *device)
4891 struct net_device *dev = pci_get_drvdata(pdev); 4900 struct net_device *dev = pci_get_drvdata(pdev);
4892 struct rtl8169_private *tp = netdev_priv(dev); 4901 struct rtl8169_private *tp = netdev_priv(dev);
4893 4902
4894 if (!tp->TxDescArray) 4903 return tp->TxDescArray ? -EBUSY : 0;
4895 return 0;
4896
4897 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
4898 return -EBUSY;
4899} 4904}
4900 4905
4901static const struct dev_pm_ops rtl8169_pm_ops = { 4906static const struct dev_pm_ops rtl8169_pm_ops = {
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 05df20e47976..fb83cdd94643 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -197,7 +197,9 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
197 197
198static void efx_remove_channels(struct efx_nic *efx); 198static void efx_remove_channels(struct efx_nic *efx);
199static void efx_remove_port(struct efx_nic *efx); 199static void efx_remove_port(struct efx_nic *efx);
200static void efx_init_napi(struct efx_nic *efx);
200static void efx_fini_napi(struct efx_nic *efx); 201static void efx_fini_napi(struct efx_nic *efx);
202static void efx_fini_napi_channel(struct efx_channel *channel);
201static void efx_fini_struct(struct efx_nic *efx); 203static void efx_fini_struct(struct efx_nic *efx);
202static void efx_start_all(struct efx_nic *efx); 204static void efx_start_all(struct efx_nic *efx);
203static void efx_stop_all(struct efx_nic *efx); 205static void efx_stop_all(struct efx_nic *efx);
@@ -335,8 +337,10 @@ void efx_process_channel_now(struct efx_channel *channel)
335 337
336 /* Disable interrupts and wait for ISRs to complete */ 338 /* Disable interrupts and wait for ISRs to complete */
337 efx_nic_disable_interrupts(efx); 339 efx_nic_disable_interrupts(efx);
338 if (efx->legacy_irq) 340 if (efx->legacy_irq) {
339 synchronize_irq(efx->legacy_irq); 341 synchronize_irq(efx->legacy_irq);
342 efx->legacy_irq_enabled = false;
343 }
340 if (channel->irq) 344 if (channel->irq)
341 synchronize_irq(channel->irq); 345 synchronize_irq(channel->irq);
342 346
@@ -351,6 +355,8 @@ void efx_process_channel_now(struct efx_channel *channel)
351 efx_channel_processed(channel); 355 efx_channel_processed(channel);
352 356
353 napi_enable(&channel->napi_str); 357 napi_enable(&channel->napi_str);
358 if (efx->legacy_irq)
359 efx->legacy_irq_enabled = true;
354 efx_nic_enable_interrupts(efx); 360 efx_nic_enable_interrupts(efx);
355} 361}
356 362
@@ -426,6 +432,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
426 432
427 *channel = *old_channel; 433 *channel = *old_channel;
428 434
435 channel->napi_dev = NULL;
429 memset(&channel->eventq, 0, sizeof(channel->eventq)); 436 memset(&channel->eventq, 0, sizeof(channel->eventq));
430 437
431 rx_queue = &channel->rx_queue; 438 rx_queue = &channel->rx_queue;
@@ -736,9 +743,13 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
736 if (rc) 743 if (rc)
737 goto rollback; 744 goto rollback;
738 745
746 efx_init_napi(efx);
747
739 /* Destroy old channels */ 748 /* Destroy old channels */
740 for (i = 0; i < efx->n_channels; i++) 749 for (i = 0; i < efx->n_channels; i++) {
750 efx_fini_napi_channel(other_channel[i]);
741 efx_remove_channel(other_channel[i]); 751 efx_remove_channel(other_channel[i]);
752 }
742out: 753out:
743 /* Free unused channel structures */ 754 /* Free unused channel structures */
744 for (i = 0; i < efx->n_channels; i++) 755 for (i = 0; i < efx->n_channels; i++)
@@ -1400,6 +1411,8 @@ static void efx_start_all(struct efx_nic *efx)
1400 efx_start_channel(channel); 1411 efx_start_channel(channel);
1401 } 1412 }
1402 1413
1414 if (efx->legacy_irq)
1415 efx->legacy_irq_enabled = true;
1403 efx_nic_enable_interrupts(efx); 1416 efx_nic_enable_interrupts(efx);
1404 1417
1405 /* Switch to event based MCDI completions after enabling interrupts. 1418 /* Switch to event based MCDI completions after enabling interrupts.
@@ -1460,8 +1473,10 @@ static void efx_stop_all(struct efx_nic *efx)
1460 1473
1461 /* Disable interrupts and wait for ISR to complete */ 1474 /* Disable interrupts and wait for ISR to complete */
1462 efx_nic_disable_interrupts(efx); 1475 efx_nic_disable_interrupts(efx);
1463 if (efx->legacy_irq) 1476 if (efx->legacy_irq) {
1464 synchronize_irq(efx->legacy_irq); 1477 synchronize_irq(efx->legacy_irq);
1478 efx->legacy_irq_enabled = false;
1479 }
1465 efx_for_each_channel(channel, efx) { 1480 efx_for_each_channel(channel, efx) {
1466 if (channel->irq) 1481 if (channel->irq)
1467 synchronize_irq(channel->irq); 1482 synchronize_irq(channel->irq);
@@ -1593,7 +1608,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1593 * 1608 *
1594 **************************************************************************/ 1609 **************************************************************************/
1595 1610
1596static int efx_init_napi(struct efx_nic *efx) 1611static void efx_init_napi(struct efx_nic *efx)
1597{ 1612{
1598 struct efx_channel *channel; 1613 struct efx_channel *channel;
1599 1614
@@ -1602,18 +1617,21 @@ static int efx_init_napi(struct efx_nic *efx)
1602 netif_napi_add(channel->napi_dev, &channel->napi_str, 1617 netif_napi_add(channel->napi_dev, &channel->napi_str,
1603 efx_poll, napi_weight); 1618 efx_poll, napi_weight);
1604 } 1619 }
1605 return 0; 1620}
1621
1622static void efx_fini_napi_channel(struct efx_channel *channel)
1623{
1624 if (channel->napi_dev)
1625 netif_napi_del(&channel->napi_str);
1626 channel->napi_dev = NULL;
1606} 1627}
1607 1628
1608static void efx_fini_napi(struct efx_nic *efx) 1629static void efx_fini_napi(struct efx_nic *efx)
1609{ 1630{
1610 struct efx_channel *channel; 1631 struct efx_channel *channel;
1611 1632
1612 efx_for_each_channel(channel, efx) { 1633 efx_for_each_channel(channel, efx)
1613 if (channel->napi_dev) 1634 efx_fini_napi_channel(channel);
1614 netif_napi_del(&channel->napi_str);
1615 channel->napi_dev = NULL;
1616 }
1617} 1635}
1618 1636
1619/************************************************************************** 1637/**************************************************************************
@@ -2335,9 +2353,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2335 if (rc) 2353 if (rc)
2336 goto fail1; 2354 goto fail1;
2337 2355
2338 rc = efx_init_napi(efx); 2356 efx_init_napi(efx);
2339 if (rc)
2340 goto fail2;
2341 2357
2342 rc = efx->type->init(efx); 2358 rc = efx->type->init(efx);
2343 if (rc) { 2359 if (rc) {
@@ -2368,7 +2384,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2368 efx->type->fini(efx); 2384 efx->type->fini(efx);
2369 fail3: 2385 fail3:
2370 efx_fini_napi(efx); 2386 efx_fini_napi(efx);
2371 fail2:
2372 efx_remove_all(efx); 2387 efx_remove_all(efx);
2373 fail1: 2388 fail1:
2374 return rc; 2389 return rc;
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 0a7e26d73b52..b137c889152b 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -621,6 +621,7 @@ struct efx_filter_state;
621 * @pci_dev: The PCI device 621 * @pci_dev: The PCI device
622 * @type: Controller type attributes 622 * @type: Controller type attributes
623 * @legacy_irq: IRQ number 623 * @legacy_irq: IRQ number
624 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
624 * @workqueue: Workqueue for port reconfigures and the HW monitor. 625 * @workqueue: Workqueue for port reconfigures and the HW monitor.
625 * Work items do not hold and must not acquire RTNL. 626 * Work items do not hold and must not acquire RTNL.
626 * @workqueue_name: Name of workqueue 627 * @workqueue_name: Name of workqueue
@@ -709,6 +710,7 @@ struct efx_nic {
709 struct pci_dev *pci_dev; 710 struct pci_dev *pci_dev;
710 const struct efx_nic_type *type; 711 const struct efx_nic_type *type;
711 int legacy_irq; 712 int legacy_irq;
713 bool legacy_irq_enabled;
712 struct workqueue_struct *workqueue; 714 struct workqueue_struct *workqueue;
713 char workqueue_name[16]; 715 char workqueue_name[16];
714 struct work_struct reset_work; 716 struct work_struct reset_work;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 41c36b9a4244..67cb0c96838c 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1418,6 +1418,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1418 u32 queues; 1418 u32 queues;
1419 int syserr; 1419 int syserr;
1420 1420
1421 /* Could this be ours? If interrupts are disabled then the
1422 * channel state may not be valid.
1423 */
1424 if (!efx->legacy_irq_enabled)
1425 return result;
1426
1421 /* Read the ISR which also ACKs the interrupts */ 1427 /* Read the ISR which also ACKs the interrupts */
1422 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1428 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1423 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1429 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 06bc6034ce81..2114837809e7 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1509,6 +1509,8 @@ static int stmmac_probe(struct net_device *dev)
1509 pr_warning("\tno valid MAC address;" 1509 pr_warning("\tno valid MAC address;"
1510 "please, use ifconfig or nwhwconfig!\n"); 1510 "please, use ifconfig or nwhwconfig!\n");
1511 1511
1512 spin_lock_init(&priv->lock);
1513
1512 ret = register_netdev(dev); 1514 ret = register_netdev(dev);
1513 if (ret) { 1515 if (ret) {
1514 pr_err("%s: ERROR %i registering the device\n", 1516 pr_err("%s: ERROR %i registering the device\n",
@@ -1520,8 +1522,6 @@ static int stmmac_probe(struct net_device *dev)
1520 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", 1522 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
1521 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); 1523 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
1522 1524
1523 spin_lock_init(&priv->lock);
1524
1525 return ret; 1525 return ret;
1526} 1526}
1527 1527
diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile
new file mode 100644
index 000000000000..f634f142cab4
--- /dev/null
+++ b/drivers/net/tile/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the TILE on-chip networking support.
3#
4
5obj-$(CONFIG_TILE_NET) += tile_net.o
6ifdef CONFIG_TILEGX
7tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
8else
9tile_net-objs := tilepro.o
10endif
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
new file mode 100644
index 000000000000..0e6bac5ec65b
--- /dev/null
+++ b/drivers/net/tile/tilepro.c
@@ -0,0 +1,2406 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <linux/sched.h>
19#include <linux/kernel.h> /* printk() */
20#include <linux/slab.h> /* kmalloc() */
21#include <linux/errno.h> /* error codes */
22#include <linux/types.h> /* size_t */
23#include <linux/interrupt.h>
24#include <linux/in.h>
25#include <linux/netdevice.h> /* struct device, and other headers */
26#include <linux/etherdevice.h> /* eth_type_trans */
27#include <linux/skbuff.h>
28#include <linux/ioctl.h>
29#include <linux/cdev.h>
30#include <linux/hugetlb.h>
31#include <linux/in6.h>
32#include <linux/timer.h>
33#include <linux/io.h>
34#include <asm/checksum.h>
35#include <asm/homecache.h>
36
37#include <hv/drv_xgbe_intf.h>
38#include <hv/drv_xgbe_impl.h>
39#include <hv/hypervisor.h>
40#include <hv/netio_intf.h>
41
42/* For TSO */
43#include <linux/ip.h>
44#include <linux/tcp.h>
45
46
47/* There is no singlethread_cpu, so schedule work on the current cpu. */
48#define singlethread_cpu -1
49
50
51/*
52 * First, "tile_net_init_module()" initializes all four "devices" which
53 * can be used by linux.
54 *
55 * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
56 * the network cpus, then uses "tile_net_open_aux()" to initialize
57 * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
58 * the tiles, provide buffers to LIPP, allow ingress to start, and
59 * turn on hypervisor interrupt handling (and NAPI) on all tiles.
60 *
61 * If registration fails due to the link being down, then "retry_work"
62 * is used to keep calling "tile_net_open_inner()" until it succeeds.
63 *
64 * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
65 * stop egress, drain the LIPP buffers, unregister all the tiles, stop
66 * LIPP/LEPP, and wipe the LEPP queue.
67 *
68 * We start out with the ingress interrupt enabled on each CPU. When
69 * this interrupt fires, we disable it, and call "napi_schedule()".
70 * This will cause "tile_net_poll()" to be called, which will pull
71 * packets from the netio queue, filtering them out, or passing them
72 * to "netif_receive_skb()". If our budget is exhausted, we will
73 * return, knowing we will be called again later. Otherwise, we
74 * reenable the ingress interrupt, and call "napi_complete()".
75 *
76 *
77 * NOTE: The use of "native_driver" ensures that EPP exists, and that
78 * "epp_sendv" is legal, and that "LIPP" is being used.
79 *
80 * NOTE: Failing to free completions for an arbitrarily long time
81 * (which is defined to be illegal) does in fact cause bizarre
82 * problems. The "egress_timer" helps prevent this from happening.
83 *
84 * NOTE: The egress code can be interrupted by the interrupt handler.
85 */
86
87
88/* HACK: Allow use of "jumbo" packets. */
89/* This should be 1500 if "jumbo" is not set in LIPP. */
90/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
91/* ISSUE: This has not been thoroughly tested (except at 1500). */
92#define TILE_NET_MTU 1500
93
94/* HACK: Define to support GSO. */
95/* ISSUE: This may actually hurt performance of the TCP blaster. */
96/* #define TILE_NET_GSO */
97
98/* Define this to collapse "duplicate" acks. */
99/* #define IGNORE_DUP_ACKS */
100
101/* HACK: Define this to verify incoming packets. */
102/* #define TILE_NET_VERIFY_INGRESS */
103
104/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
105#define TILE_NET_TX_QUEUE_LEN 0
106
107/* Define to dump packets (prints out the whole packet on tx and rx). */
108/* #define TILE_NET_DUMP_PACKETS */
109
110/* Define to enable debug spew (all PDEBUG's are enabled). */
111/* #define TILE_NET_DEBUG */
112
113
114/* Define to activate paranoia checks. */
115/* #define TILE_NET_PARANOIA */
116
117/* Default transmit lockup timeout period, in jiffies. */
118#define TILE_NET_TIMEOUT (5 * HZ)
119
120/* Default retry interval for bringing up the NetIO interface, in jiffies. */
121#define TILE_NET_RETRY_INTERVAL (5 * HZ)
122
123/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
124#define TILE_NET_DEVS 4
125
126
127
128/* Paranoia. */
129#if NET_IP_ALIGN != LIPP_PACKET_PADDING
130#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
131#endif
132
133
134/* Debug print. */
135#ifdef TILE_NET_DEBUG
136#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
137#else
138#define PDEBUG(fmt, args...)
139#endif
140
141
142MODULE_AUTHOR("Tilera");
143MODULE_LICENSE("GPL");
144
145
146#define IS_MULTICAST(mac_addr) \
147 (((u8 *)(mac_addr))[0] & 0x01)
148
149#define IS_BROADCAST(mac_addr) \
150 (((u16 *)(mac_addr))[0] == 0xffff)
151
152
153/*
154 * Queue of incoming packets for a specific cpu and device.
155 *
156 * Includes a pointer to the "system" data, and the actual "user" data.
157 */
158struct tile_netio_queue {
159 netio_queue_impl_t *__system_part;
160 netio_queue_user_impl_t __user_part;
161
162};
163
164
165/*
166 * Statistics counters for a specific cpu and device.
167 */
168struct tile_net_stats_t {
169 u32 rx_packets;
170 u32 rx_bytes;
171 u32 tx_packets;
172 u32 tx_bytes;
173};
174
175
176/*
177 * Info for a specific cpu and device.
178 *
179 * ISSUE: There is a "dev" pointer in "napi" as well.
180 */
181struct tile_net_cpu {
182 /* The NAPI struct. */
183 struct napi_struct napi;
184 /* Packet queue. */
185 struct tile_netio_queue queue;
186 /* Statistics. */
187 struct tile_net_stats_t stats;
188 /* ISSUE: Is this needed? */
189 bool napi_enabled;
190 /* True if this tile has successfully registered with the IPP. */
191 bool registered;
192 /* True if the link was down last time we tried to register. */
193 bool link_down;
194 /* True if "egress_timer" is scheduled. */
195 bool egress_timer_scheduled;
196 /* Number of small sk_buffs which must still be provided. */
197 unsigned int num_needed_small_buffers;
198 /* Number of large sk_buffs which must still be provided. */
199 unsigned int num_needed_large_buffers;
200 /* A timer for handling egress completions. */
201 struct timer_list egress_timer;
202};
203
204
205/*
206 * Info for a specific device.
207 */
208struct tile_net_priv {
209 /* Our network device. */
210 struct net_device *dev;
211 /* The actual egress queue. */
212 lepp_queue_t *epp_queue;
213 /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
214 spinlock_t cmd_lock;
215 /* Protects "epp_queue->comp_head". */
216 spinlock_t comp_lock;
217 /* The hypervisor handle for this interface. */
218 int hv_devhdl;
219 /* The intr bit mask that IDs this device. */
220 u32 intr_id;
221 /* True iff "tile_net_open_aux()" has succeeded. */
222 int partly_opened;
223 /* True iff "tile_net_open_inner()" has succeeded. */
224 int fully_opened;
225 /* Effective network cpus. */
226 struct cpumask network_cpus_map;
227 /* Number of network cpus. */
228 int network_cpus_count;
229 /* Credits per network cpu. */
230 int network_cpus_credits;
231 /* Network stats. */
232 struct net_device_stats stats;
233 /* For NetIO bringup retries. */
234 struct delayed_work retry_work;
235 /* Quick access to per cpu data. */
236 struct tile_net_cpu *cpu[NR_CPUS];
237};
238
239
240/*
241 * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
242 */
243static struct net_device *tile_net_devs[TILE_NET_DEVS];
244
245/*
246 * The "tile_net_cpu" structures for each device.
247 */
248static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
249static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
250static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
251static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);
252
253
254/*
255 * True if "network_cpus" was specified.
256 */
257static bool network_cpus_used;
258
259/*
260 * The actual cpus in "network_cpus".
261 */
262static struct cpumask network_cpus_map;
263
264
265
266#ifdef TILE_NET_DEBUG
267/*
268 * printk with extra stuff.
269 *
270 * We print the CPU we're running on in brackets.
271 */
272static void net_printk(char *fmt, ...)
273{
274 int i;
275 int len;
276 va_list args;
277 static char buf[256];
278
279 len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
280 va_start(args, fmt);
281 i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
282 va_end(args);
283 buf[255] = '\0';
284 pr_notice(buf);
285}
286#endif
287
288
289#ifdef TILE_NET_DUMP_PACKETS
290/*
291 * Dump a packet.
292 */
293static void dump_packet(unsigned char *data, unsigned long length, char *s)
294{
295 unsigned long i;
296 static unsigned int count;
297
298 pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
299 data, length, s, count++);
300
301 pr_info("\n");
302
303 for (i = 0; i < length; i++) {
304 if ((i & 0xf) == 0)
305 sprintf(buf, "%8.8lx:", i);
306 sprintf(buf + strlen(buf), " %2.2x", data[i]);
307 if ((i & 0xf) == 0xf || i == length - 1)
308 pr_info("%s\n", buf);
309 }
310}
311#endif
312
313
314/*
315 * Provide support for the __netio_fastio1() swint
316 * (see <hv/drv_xgbe_intf.h> for how it is used).
317 *
318 * The fastio swint2 call may clobber all the caller-saved registers.
319 * It rarely clobbers memory, but we allow for the possibility in
320 * the signature just to be on the safe side.
321 *
322 * Also, gcc doesn't seem to allow an input operand to be
323 * clobbered, so we fake it with dummy outputs.
324 *
325 * This function can't be static because of the way it is declared
326 * in the netio header.
327 */
328inline int __netio_fastio1(u32 fastio_index, u32 arg0)
329{
330 long result, clobber_r1, clobber_r10;
331 asm volatile("swint2"
332 : "=R00" (result),
333 "=R01" (clobber_r1), "=R10" (clobber_r10)
334 : "R10" (fastio_index), "R01" (arg0)
335 : "memory", "r2", "r3", "r4",
336 "r5", "r6", "r7", "r8", "r9",
337 "r11", "r12", "r13", "r14",
338 "r15", "r16", "r17", "r18", "r19",
339 "r20", "r21", "r22", "r23", "r24",
340 "r25", "r26", "r27", "r28", "r29");
341 return result;
342}
343
344
345/*
346 * Provide a linux buffer to LIPP.
347 */
348static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
349 void *va, bool small)
350{
351 struct tile_netio_queue *queue = &info->queue;
352
353 /* Convert "va" and "small" to "linux_buffer_t". */
354 unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
355
356 __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
357}
358
359
360/*
361 * Provide a linux buffer for LIPP.
362 */
363static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
364 bool small)
365{
366 /* ISSUE: What should we use here? */
367 unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
368
369 /* Round up to avoid "false sharing" with the last cache line. */
370 unsigned int buffer_size =
371 (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
372 CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
373
374 /*
375 * ISSUE: Since CPAs are 38 bits, and we can only encode the
376 * high 31 bits in a "linux_buffer_t", the low 7 bits must be
377 * zero, and thus, we must align the actual "va" mod 128.
378 */
379 const unsigned long align = 128;
380
381 struct sk_buff *skb;
382 void *va;
383
384 struct sk_buff **skb_ptr;
385
386 /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
387 /* and also "reserves" that many bytes. */
388 /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
389 int len = sizeof(*skb_ptr) + align + buffer_size;
390
391 while (1) {
392
393 /* Allocate (or fail). */
394 skb = dev_alloc_skb(len);
395 if (skb == NULL)
396 return false;
397
398 /* Make room for a back-pointer to 'skb'. */
399 skb_reserve(skb, sizeof(*skb_ptr));
400
401 /* Make sure we are aligned. */
402 skb_reserve(skb, -(long)skb->data & (align - 1));
403
404 /* This address is given to IPP. */
405 va = skb->data;
406
407 if (small)
408 break;
409
410 /* ISSUE: This has never been observed! */
411 /* Large buffers must not span a huge page. */
412 if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
413 break;
414 pr_err("Leaking unaligned linux buffer at %p.\n", va);
415 }
416
417 /* Skip two bytes to satisfy LIPP assumptions. */
418 /* Note that this aligns IP on a 16 byte boundary. */
419 /* ISSUE: Do this when the packet arrives? */
420 skb_reserve(skb, NET_IP_ALIGN);
421
422 /* Save a back-pointer to 'skb'. */
423 skb_ptr = va - sizeof(*skb_ptr);
424 *skb_ptr = skb;
425
426 /* Invalidate the packet buffer. */
427 if (!hash_default)
428 __inv_buffer(skb->data, buffer_size);
429
430 /* Make sure "skb_ptr" has been flushed. */
431 __insn_mf();
432
433#ifdef TILE_NET_PARANOIA
434#if CHIP_HAS_CBOX_HOME_MAP()
435 if (hash_default) {
436 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
437 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
438 panic("Non-coherent ingress buffer!");
439 }
440#endif
441#endif
442
443 /* Provide the new buffer. */
444 tile_net_provide_linux_buffer(info, va, small);
445
446 return true;
447}
448
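
A short sketch (not part of the driver) of the "linux_buffer_t" encoding used by the two functions above: a 128-byte-aligned client physical address keeps only its high 31 bits, and bit 0 carries the small/large flag:

/* Illustrative only.  With 38-bit CPAs, forcing the low 7 bits to zero
 * (128-byte alignment) leaves 31 significant bits, which fit in a u32
 * once shifted right by 7; bit 0 is then free for the small/large flag. */
static inline unsigned int pack_linux_buffer(unsigned long cpa, bool small)
{
	return ((unsigned int)(cpa >> 7) << 1) | (small ? 1 : 0);
}

static inline unsigned long unpack_linux_buffer_cpa(unsigned int buffer)
{
	return (unsigned long)(buffer >> 1) << 7;	/* low 7 bits are zero again */
}
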
449
450/*
451 * Provide linux buffers for LIPP.
452 */
453static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
454{
455 while (info->num_needed_small_buffers != 0) {
456 if (!tile_net_provide_needed_buffer(info, true))
457 goto oops;
458 info->num_needed_small_buffers--;
459 }
460
461 while (info->num_needed_large_buffers != 0) {
462 if (!tile_net_provide_needed_buffer(info, false))
463 goto oops;
464 info->num_needed_large_buffers--;
465 }
466
467 return;
468
469oops:
470
471 /* Add a description to the page allocation failure dump. */
472 pr_notice("Could not provide a linux buffer to LIPP.\n");
473}
474
475
476/*
477 * Grab some LEPP completions, and store them in "comps", of size
478 * "comps_size", and return the number of completions which were
479 * stored, so the caller can free them.
480 *
481 * If "pending" is not NULL, it will be set to true if there might
482 * still be some pending completions caused by this tile, else false.
483 */
484static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
485 struct sk_buff *comps[],
486 unsigned int comps_size,
487 bool *pending)
488{
489 struct tile_net_priv *priv = netdev_priv(dev);
490
491 lepp_queue_t *eq = priv->epp_queue;
492
493 unsigned int n = 0;
494
495 unsigned int comp_head;
496 unsigned int comp_busy;
497 unsigned int comp_tail;
498
499 spin_lock(&priv->comp_lock);
500
501 comp_head = eq->comp_head;
502 comp_busy = eq->comp_busy;
503 comp_tail = eq->comp_tail;
504
505 while (comp_head != comp_busy && n < comps_size) {
506 comps[n++] = eq->comps[comp_head];
507 LEPP_QINC(comp_head);
508 }
509
510 if (pending != NULL)
511 *pending = (comp_head != comp_tail);
512
513 eq->comp_head = comp_head;
514
515 spin_unlock(&priv->comp_lock);
516
517 return n;
518}
519
520
521/*
522 * Make sure the egress timer is scheduled.
523 *
524 * Note that we use "schedule if not scheduled" logic instead of the more
525 * obvious "reschedule" logic, because "reschedule" is fairly expensive.
526 */
527static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
528{
529 if (!info->egress_timer_scheduled) {
530 mod_timer_pinned(&info->egress_timer, jiffies + 1);
531 info->egress_timer_scheduled = true;
532 }
533}
534
535
536/*
537 * The "function" for "info->egress_timer".
538 *
539 * This timer will reschedule itself as long as there are any pending
540 * completions expected (on behalf of any tile).
541 *
542 * ISSUE: Realistically, will the timer ever stop scheduling itself?
543 *
544 * ISSUE: This timer is almost never actually needed, so just use a global
545 * timer that can run on any tile.
546 *
547 * ISSUE: Maybe instead track number of expected completions, and free
548 * only that many, resetting to zero if "pending" is ever false.
549 */
550static void tile_net_handle_egress_timer(unsigned long arg)
551{
552 struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
553 struct net_device *dev = info->napi.dev;
554
555 struct sk_buff *olds[32];
556 unsigned int wanted = 32;
557 unsigned int i, nolds = 0;
558 bool pending;
559
560 /* The timer is no longer scheduled. */
561 info->egress_timer_scheduled = false;
562
563 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);
564
565 for (i = 0; i < nolds; i++)
566 kfree_skb(olds[i]);
567
568 /* Reschedule timer if needed. */
569 if (pending)
570 tile_net_schedule_egress_timer(info);
571}
572
573
574#ifdef IGNORE_DUP_ACKS
575
576/*
577 * Help detect "duplicate" ACKs. These are sequential packets (for a
578 * given flow) which are exactly 66 bytes long, sharing everything but
579 * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
580 * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
581 * +N, and the Tstamps are usually identical.
582 *
583 * NOTE: Apparently truly duplicate acks (with identical "ack" values),
584 * should not be collapsed, as they are used for some kind of flow control.
585 */
586static bool is_dup_ack(char *s1, char *s2, unsigned int len)
587{
588 int i;
589
590 unsigned long long ignorable = 0;
591
592 /* Identification. */
593 ignorable |= (1ULL << 0x12);
594 ignorable |= (1ULL << 0x13);
595
596 /* Header checksum. */
597 ignorable |= (1ULL << 0x18);
598 ignorable |= (1ULL << 0x19);
599
600 /* ACK. */
601 ignorable |= (1ULL << 0x2a);
602 ignorable |= (1ULL << 0x2b);
603 ignorable |= (1ULL << 0x2c);
604 ignorable |= (1ULL << 0x2d);
605
606 /* WinSize. */
607 ignorable |= (1ULL << 0x30);
608 ignorable |= (1ULL << 0x31);
609
610 /* Checksum. */
611 ignorable |= (1ULL << 0x32);
612 ignorable |= (1ULL << 0x33);
613
614 for (i = 0; i < len; i++, ignorable >>= 1) {
615
616 if ((ignorable & 1) || (s1[i] == s2[i]))
617 continue;
618
619#ifdef TILE_NET_DEBUG
620 /* HACK: Mention non-timestamp diffs. */
621 if (i < 0x38 && i != 0x2f &&
622 net_ratelimit())
623 pr_info("Diff at 0x%x\n", i);
624#endif
625
626 return false;
627 }
628
629#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
630 /* HACK: Do not suppress truly duplicate ACKs. */
631 /* ISSUE: Is this actually necessary or helpful? */
632 if (s1[0x2a] == s2[0x2a] &&
633 s1[0x2b] == s2[0x2b] &&
634 s1[0x2c] == s2[0x2c] &&
635 s1[0x2d] == s2[0x2d]) {
636 return false;
637 }
638#endif
639
640 return true;
641}
642
643#endif
644
645
646
647/*
648 * Like "tile_net_handle_packets()", but just discard packets.
649 */
650static void tile_net_discard_packets(struct net_device *dev)
651{
652 struct tile_net_priv *priv = netdev_priv(dev);
653 int my_cpu = smp_processor_id();
654 struct tile_net_cpu *info = priv->cpu[my_cpu];
655 struct tile_netio_queue *queue = &info->queue;
656 netio_queue_impl_t *qsp = queue->__system_part;
657 netio_queue_user_impl_t *qup = &queue->__user_part;
658
659 while (qup->__packet_receive_read !=
660 qsp->__packet_receive_queue.__packet_write) {
661
662 int index = qup->__packet_receive_read;
663
664 int index2_aux = index + sizeof(netio_pkt_t);
665 int index2 =
666 ((index2_aux ==
667 qsp->__packet_receive_queue.__last_packet_plus_one) ?
668 0 : index2_aux);
669
670 netio_pkt_t *pkt = (netio_pkt_t *)
671 ((unsigned long) &qsp[1] + index);
672
673 /* Extract the "linux_buffer_t". */
674 unsigned int buffer = pkt->__packet.word;
675
676 /* Convert "linux_buffer_t" to "va". */
677 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
678
679 /* Acquire the associated "skb". */
680 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
681 struct sk_buff *skb = *skb_ptr;
682
683 kfree_skb(skb);
684
685 /* Consume this packet. */
686 qup->__packet_receive_read = index2;
687 }
688}
689
690
691/*
692 * Handle the next packet. Return true if "processed", false if "filtered".
693 */
694static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
695{
696 struct net_device *dev = info->napi.dev;
697
698 struct tile_netio_queue *queue = &info->queue;
699 netio_queue_impl_t *qsp = queue->__system_part;
700 netio_queue_user_impl_t *qup = &queue->__user_part;
701 struct tile_net_stats_t *stats = &info->stats;
702
703 int filter;
704
705 int index2_aux = index + sizeof(netio_pkt_t);
706 int index2 =
707 ((index2_aux ==
708 qsp->__packet_receive_queue.__last_packet_plus_one) ?
709 0 : index2_aux);
710
711 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
712
713 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
714
715 /* Extract the packet size. */
716 unsigned long len =
717 (NETIO_PKT_CUSTOM_LENGTH(pkt) +
718 NET_IP_ALIGN - NETIO_PACKET_PADDING);
719
720 /* Extract the "linux_buffer_t". */
721 unsigned int buffer = pkt->__packet.word;
722
723 /* Extract "small" (vs "large"). */
724 bool small = ((buffer & 1) != 0);
725
726 /* Convert "linux_buffer_t" to "va". */
727 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
728
729 /* Extract the packet data pointer. */
730 /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
731 unsigned char *buf = va + NET_IP_ALIGN;
732
733#ifdef IGNORE_DUP_ACKS
734
735 static int other;
736 static int final;
737 static int keep;
738 static int skip;
739
740#endif
741
742 /* Invalidate the packet buffer. */
743 if (!hash_default)
744 __inv_buffer(buf, len);
745
746 /* ISSUE: Is this needed? */
747 dev->last_rx = jiffies;
748
749#ifdef TILE_NET_DUMP_PACKETS
750 dump_packet(buf, len, "rx");
751#endif /* TILE_NET_DUMP_PACKETS */
752
753#ifdef TILE_NET_VERIFY_INGRESS
754 if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
755 NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
756 /*
757 * FIXME: This complains about UDP packets
758 * with a "zero" checksum (bug 6624).
759 */
760#ifdef TILE_NET_PANIC_ON_BAD
761 dump_packet(buf, len, "rx");
762 panic("Bad L4 checksum.");
763#else
764 pr_warning("Bad L4 checksum on %d byte packet.\n", len);
765#endif
766 }
767 if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
768 NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
769 dump_packet(buf, len, "rx");
770 panic("Bad L3 checksum.");
771 }
772 switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
773 case NETIO_PKT_STATUS_OVERSIZE:
774 if (len >= 64) {
775 dump_packet(buf, len, "rx");
776 panic("Unexpected OVERSIZE.");
777 }
778 break;
779 case NETIO_PKT_STATUS_BAD:
780#ifdef TILE_NET_PANIC_ON_BAD
781 dump_packet(buf, len, "rx");
782 panic("Unexpected BAD packet.");
783#else
784 pr_warning("Unexpected BAD %d byte packet.\n", len);
785#endif
786 }
787#endif
788
789 filter = 0;
790
791 if (!(dev->flags & IFF_UP)) {
792 /* Filter packets received before we're up. */
793 filter = 1;
794 } else if (!(dev->flags & IFF_PROMISC)) {
795 /*
796 * FIXME: Implement HW multicast filter.
797 */
798 if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
799 /* Filter packets not for our address. */
800 const u8 *mine = dev->dev_addr;
801 filter = compare_ether_addr(mine, buf);
802 }
803 }
804
805#ifdef IGNORE_DUP_ACKS
806
807 if (len != 66) {
808 /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */
809
810 other++;
811
812 } else if (index2 ==
813 qsp->__packet_receive_queue.__packet_write) {
814
815 final++;
816
817 } else {
818
819 netio_pkt_t *pkt2 = (netio_pkt_t *)
820 ((unsigned long) &qsp[1] + index2);
821
822 netio_pkt_metadata_t *metadata2 =
823 NETIO_PKT_METADATA(pkt2);
824
825 /* Extract the packet size. */
826 unsigned long len2 =
827 (NETIO_PKT_CUSTOM_LENGTH(pkt2) +
828 NET_IP_ALIGN - NETIO_PACKET_PADDING);
829
830 if (len2 == 66 &&
831 NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
832 NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {
833
834 /* Extract the "linux_buffer_t". */
835 unsigned int buffer2 = pkt2->__packet.word;
836
837 /* Convert "linux_buffer_t" to "va". */
838 void *va2 =
839 __va((phys_addr_t)(buffer2 >> 1) << 7);
840
841 /* Extract the packet data pointer. */
842 /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
843 unsigned char *buf2 = va2 + NET_IP_ALIGN;
844
845 /* Invalidate the packet buffer. */
846 if (!hash_default)
847 __inv_buffer(buf2, len2);
848
849 if (is_dup_ack(buf, buf2, len)) {
850 skip++;
851 filter = 1;
852 } else {
853 keep++;
854 }
855 }
856 }
857
858 if (net_ratelimit())
859 pr_info("Other %d Final %d Keep %d Skip %d.\n",
860 other, final, keep, skip);
861
862#endif
863
864 if (filter) {
865
866 /* ISSUE: Update "drop" statistics? */
867
868 tile_net_provide_linux_buffer(info, va, small);
869
870 } else {
871
872 /* Acquire the associated "skb". */
873 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
874 struct sk_buff *skb = *skb_ptr;
875
876 /* Paranoia. */
877 if (skb->data != buf)
878 panic("Corrupt linux buffer from LIPP! "
879 "VA=%p, skb=%p, skb->data=%p\n",
880 va, skb, skb->data);
881
882 /* Encode the actual packet length. */
883 skb_put(skb, len);
884
885 /* NOTE: This call also sets "skb->dev = dev". */
886 skb->protocol = eth_type_trans(skb, dev);
887
888 /* ISSUE: Discard corrupt packets? */
889 /* ISSUE: Discard packets with bad checksums? */
890
891 /* Avoid recomputing TCP/UDP checksums. */
892 if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
893 skb->ip_summed = CHECKSUM_UNNECESSARY;
894
895 netif_receive_skb(skb);
896
897 stats->rx_packets++;
898 stats->rx_bytes += len;
899
900 if (small)
901 info->num_needed_small_buffers++;
902 else
903 info->num_needed_large_buffers++;
904 }
905
906 /* Return four credits after every fourth packet. */
907 if (--qup->__receive_credit_remaining == 0) {
908 u32 interval = qup->__receive_credit_interval;
909 qup->__receive_credit_remaining = interval;
910 __netio_fastio_return_credits(qup->__fastio_index, interval);
911 }
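	/*
	 * Credits appear to bound how many packets the IPP may deliver
	 * to this queue before the driver acknowledges them; registration
	 * grants "network_cpus_credits" per queue, and they are handed
	 * back in batches of "__receive_credit_interval" (currently four,
	 * per "tile_net_register()"), so that only one fastio call is
	 * made per batch rather than one per packet.
	 */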
912
913 /* Consume this packet. */
914 qup->__packet_receive_read = index2;
915
916 return !filter;
917}
918
919
920/*
921 * Handle some packets for the given device on the current CPU.
922 *
923 * ISSUE: The "rotting packet" race condition occurs if a packet
924 * arrives after the queue appears to be empty, and before the
925 * hypervisor interrupt is re-enabled.
926 */
927static int tile_net_poll(struct napi_struct *napi, int budget)
928{
929 struct net_device *dev = napi->dev;
930 struct tile_net_priv *priv = netdev_priv(dev);
931 int my_cpu = smp_processor_id();
932 struct tile_net_cpu *info = priv->cpu[my_cpu];
933 struct tile_netio_queue *queue = &info->queue;
934 netio_queue_impl_t *qsp = queue->__system_part;
935 netio_queue_user_impl_t *qup = &queue->__user_part;
936
937 unsigned int work = 0;
938
939 while (1) {
940 int index = qup->__packet_receive_read;
941 if (index == qsp->__packet_receive_queue.__packet_write)
942 break;
943
944 if (tile_net_poll_aux(info, index)) {
945 if (++work >= budget)
946 goto done;
947 }
948 }
949
950 napi_complete(&info->napi);
951
952 /* Re-enable hypervisor interrupts. */
953 enable_percpu_irq(priv->intr_id);
954
955 /* HACK: Avoid the "rotting packet" problem. */
956 if (qup->__packet_receive_read !=
957 qsp->__packet_receive_queue.__packet_write)
958 napi_schedule(&info->napi);
959
960 /* ISSUE: Handle completions? */
961
962done:
963
964 tile_net_provide_needed_buffers(info);
965
966 return work;
967}
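/*
 * Note that the "HACK" above addresses the race described before this
 * function: a packet that arrives after the loop sees an empty queue
 * but before enable_percpu_irq() may never raise a fresh interrupt, so
 * the queue is re-checked once interrupts are back on, and NAPI is
 * rescheduled if anything slipped in.
 */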
968
969
970/*
971 * Handle an ingress interrupt for the given device on the current cpu.
972 */
973static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
974{
975 struct net_device *dev = (struct net_device *)dev_ptr;
976 struct tile_net_priv *priv = netdev_priv(dev);
977 int my_cpu = smp_processor_id();
978 struct tile_net_cpu *info = priv->cpu[my_cpu];
979
980 /* Disable hypervisor interrupt. */
981 disable_percpu_irq(priv->intr_id);
982
983 napi_schedule(&info->napi);
984
985 return IRQ_HANDLED;
986}
987
988
989/*
990 * One time initialization per interface.
991 */
992static int tile_net_open_aux(struct net_device *dev)
993{
994 struct tile_net_priv *priv = netdev_priv(dev);
995
996 int ret;
997 int dummy;
998 unsigned int epp_lotar;
999
1000 /*
1001 * Find out where EPP memory should be homed.
1002 */
1003 ret = hv_dev_pread(priv->hv_devhdl, 0,
1004 (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
1005 NETIO_EPP_SHM_OFF);
1006 if (ret < 0) {
1007 pr_err("could not read epp_shm_queue lotar.\n");
1008 return -EIO;
1009 }
1010
1011 /*
1012 * Home the page on the EPP.
1013 */
1014 {
1015 int epp_home = hv_lotar_to_cpu(epp_lotar);
1016 struct page *page = virt_to_page(priv->epp_queue);
1017 homecache_change_page_home(page, 0, epp_home);
1018 }
1019
1020 /*
1021 * Register the EPP shared memory queue.
1022 */
1023 {
1024 netio_ipp_address_t ea = {
1025 .va = 0,
1026 .pa = __pa(priv->epp_queue),
1027 .pte = hv_pte(0),
1028 .size = PAGE_SIZE,
1029 };
1030 ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
1031 ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
1032 ret = hv_dev_pwrite(priv->hv_devhdl, 0,
1033 (HV_VirtAddr)&ea,
1034 sizeof(ea),
1035 NETIO_EPP_SHM_OFF);
1036 if (ret < 0)
1037 return -EIO;
1038 }
1039
1040 /*
1041 * Start LIPP/LEPP.
1042 */
1043 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1044 sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
1045 pr_warning("Failed to start LIPP/LEPP.\n");
1046 return -EIO;
1047 }
1048
1049 return 0;
1050}
1051
1052
1053/*
1054 * Register with hypervisor on each CPU.
1055 *
1056 * Strangely, this function does important things even if it "fails",
1057 * which is especially common if the link is not up yet. Hopefully
1058 * these things are all "harmless" if done twice!
1059 */
1060static void tile_net_register(void *dev_ptr)
1061{
1062 struct net_device *dev = (struct net_device *)dev_ptr;
1063 struct tile_net_priv *priv = netdev_priv(dev);
1064 int my_cpu = smp_processor_id();
1065 struct tile_net_cpu *info;
1066
1067 struct tile_netio_queue *queue;
1068
1069 /* Only network cpus can receive packets. */
1070 int queue_id =
1071 cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;
1072
1073 netio_input_config_t config = {
1074 .flags = 0,
1075 .num_receive_packets = priv->network_cpus_credits,
1076 .queue_id = queue_id
1077 };
1078
1079 int ret = 0;
1080 netio_queue_impl_t *queuep;
1081
1082 PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
1083
1084 if (!strcmp(dev->name, "xgbe0"))
1085 info = &__get_cpu_var(hv_xgbe0);
1086 else if (!strcmp(dev->name, "xgbe1"))
1087 info = &__get_cpu_var(hv_xgbe1);
1088 else if (!strcmp(dev->name, "gbe0"))
1089 info = &__get_cpu_var(hv_gbe0);
1090 else if (!strcmp(dev->name, "gbe1"))
1091 info = &__get_cpu_var(hv_gbe1);
1092 else
1093 BUG();
1094
1095 /* Initialize the egress timer. */
1096 init_timer(&info->egress_timer);
1097 info->egress_timer.data = (long)info;
1098 info->egress_timer.function = tile_net_handle_egress_timer;
1099
1100 priv->cpu[my_cpu] = info;
1101
1102 /*
1103 * Register ourselves with the IPP.
1104 */
1105 ret = hv_dev_pwrite(priv->hv_devhdl, 0,
1106 (HV_VirtAddr)&config,
1107 sizeof(netio_input_config_t),
1108 NETIO_IPP_INPUT_REGISTER_OFF);
1109 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
1110 ret);
1111 if (ret < 0) {
1112 printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF"
1113 " failure %d\n", ret);
1114 info->link_down = (ret == NETIO_LINK_DOWN);
1115 return;
1116 }
1117
1118 /*
1119 * Get the pointer to our queue's system part.
1120 */
1121
1122 ret = hv_dev_pread(priv->hv_devhdl, 0,
1123 (HV_VirtAddr)&queuep,
1124 sizeof(netio_queue_impl_t *),
1125 NETIO_IPP_INPUT_REGISTER_OFF);
1126 PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
1127 ret);
1128 PDEBUG("queuep %p\n", queuep);
1129 if (ret <= 0) {
1130 /* ISSUE: Shouldn't this be a fatal error? */
1131 pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
1132 return;
1133 }
1134
1135 queue = &info->queue;
1136
1137 queue->__system_part = queuep;
1138
1139 memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));
1140
1141 /* This is traditionally "config.num_receive_packets / 2". */
1142 queue->__user_part.__receive_credit_interval = 4;
1143 queue->__user_part.__receive_credit_remaining =
1144 queue->__user_part.__receive_credit_interval;
1145
1146 /*
1147 * Get a fastio index from the hypervisor.
1148 * ISSUE: Shouldn't this check the result?
1149 */
1150 ret = hv_dev_pread(priv->hv_devhdl, 0,
1151 (HV_VirtAddr)&queue->__user_part.__fastio_index,
1152 sizeof(queue->__user_part.__fastio_index),
1153 NETIO_IPP_GET_FASTIO_OFF);
1154 PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
1155
1156 netif_napi_add(dev, &info->napi, tile_net_poll, 64);
1157
1158 /* Now we are registered. */
1159 info->registered = true;
1160}
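/*
 * Each cpu thus ends up with its own "tile_net_cpu" state, NetIO queue
 * and NAPI context for the device; "tile_net_open_inner()" runs this
 * function on the local cpu first, and then on every other online cpu
 * via smp_call_function().
 */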
1161
1162
1163/*
1164 * Unregister with hypervisor on each CPU.
1165 */
1166static void tile_net_unregister(void *dev_ptr)
1167{
1168 struct net_device *dev = (struct net_device *)dev_ptr;
1169 struct tile_net_priv *priv = netdev_priv(dev);
1170 int my_cpu = smp_processor_id();
1171 struct tile_net_cpu *info = priv->cpu[my_cpu];
1172
1173 int ret = 0;
1174 int dummy = 0;
1175
1176 /* Do nothing if never registered. */
1177 if (info == NULL)
1178 return;
1179
1180 /* Do nothing if already unregistered. */
1181 if (!info->registered)
1182 return;
1183
1184 /*
1185 * Unregister ourselves with LIPP.
1186 */
1187 ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1188 sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
1189 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n",
1190 ret);
1191 if (ret < 0) {
1192 /* FIXME: Just panic? */
1193 pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
1194 " failure %d\n", ret);
1195 }
1196
1197 /*
1198 * Discard all packets still in our NetIO queue. Hopefully,
1199 * once the unregister call is complete, there will be no
1200 * packets still in flight on the IDN.
1201 */
1202 tile_net_discard_packets(dev);
1203
1204 /* Reset state. */
1205 info->num_needed_small_buffers = 0;
1206 info->num_needed_large_buffers = 0;
1207
1208 /* Cancel egress timer. */
1209 del_timer(&info->egress_timer);
1210 info->egress_timer_scheduled = false;
1211
1212 netif_napi_del(&info->napi);
1213
1214 /* Now we are unregistered. */
1215 info->registered = false;
1216}
1217
1218
1219/*
1220 * Helper function for "tile_net_stop()".
1221 *
1222 * Also used to handle registration failure in "tile_net_open_inner()",
1223 * when "fully_opened" is known to be false, and the various extra
1224 * steps in "tile_net_stop()" are not necessary. ISSUE: It might be
1225 * simpler if we could just call "tile_net_stop()" anyway.
1226 */
1227static void tile_net_stop_aux(struct net_device *dev)
1228{
1229 struct tile_net_priv *priv = netdev_priv(dev);
1230
1231 int dummy = 0;
1232
1233 /* Unregister all tiles, so LIPP will stop delivering packets. */
1234 on_each_cpu(tile_net_unregister, (void *)dev, 1);
1235
1236 /* Stop LIPP/LEPP. */
1237 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1238 sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
1239 panic("Failed to stop LIPP/LEPP!\n");
1240
1241 priv->partly_opened = 0;
1242}
1243
1244
1245/*
1246 * Disable ingress interrupts for the given device on the current cpu.
1247 */
1248static void tile_net_disable_intr(void *dev_ptr)
1249{
1250 struct net_device *dev = (struct net_device *)dev_ptr;
1251 struct tile_net_priv *priv = netdev_priv(dev);
1252 int my_cpu = smp_processor_id();
1253 struct tile_net_cpu *info = priv->cpu[my_cpu];
1254
1255 /* Disable hypervisor interrupt. */
1256 disable_percpu_irq(priv->intr_id);
1257
1258 /* Disable NAPI if needed. */
1259 if (info != NULL && info->napi_enabled) {
1260 napi_disable(&info->napi);
1261 info->napi_enabled = false;
1262 }
1263}
1264
1265
1266/*
1267 * Enable ingress interrupts for the given device on the current cpu.
1268 */
1269static void tile_net_enable_intr(void *dev_ptr)
1270{
1271 struct net_device *dev = (struct net_device *)dev_ptr;
1272 struct tile_net_priv *priv = netdev_priv(dev);
1273 int my_cpu = smp_processor_id();
1274 struct tile_net_cpu *info = priv->cpu[my_cpu];
1275
1276 /* Enable hypervisor interrupt. */
1277 enable_percpu_irq(priv->intr_id);
1278
1279 /* Enable NAPI. */
1280 napi_enable(&info->napi);
1281 info->napi_enabled = true;
1282}
1283
1284
1285/*
1286 * tile_net_open_inner does most of the work of bringing up the interface.
1287 * It's called from tile_net_open(), and also from tile_net_retry_open().
1288 * The return value is 0 if the interface was brought up, < 0 if
1289 * tile_net_open() should return the return value as an error, and > 0 if
1290 * tile_net_open() should return success and schedule a work item to
1291 * periodically retry the bringup.
1292 */
1293static int tile_net_open_inner(struct net_device *dev)
1294{
1295 struct tile_net_priv *priv = netdev_priv(dev);
1296 int my_cpu = smp_processor_id();
1297 struct tile_net_cpu *info;
1298 struct tile_netio_queue *queue;
1299 unsigned int irq;
1300 int i;
1301
1302 /*
1303 * First try to register just on the local CPU, and handle any
1304 * semi-expected "link down" failure specially. Note that we
1305 * do NOT call "tile_net_stop_aux()", unlike below.
1306 */
1307 tile_net_register(dev);
1308 info = priv->cpu[my_cpu];
1309 if (!info->registered) {
1310 if (info->link_down)
1311 return 1;
1312 return -EAGAIN;
1313 }
1314
1315 /*
1316 * Now register everywhere else. If any registration fails,
1317 * even for "link down" (which might not be possible), we
1318 * clean up using "tile_net_stop_aux()".
1319 */
1320 smp_call_function(tile_net_register, (void *)dev, 1);
1321 for_each_online_cpu(i) {
1322 if (!priv->cpu[i]->registered) {
1323 tile_net_stop_aux(dev);
1324 return -EAGAIN;
1325 }
1326 }
1327
1328 queue = &info->queue;
1329
1330 /*
1331 * Set the device intr bit mask.
1332 * The tile_net_register above sets per tile __intr_id.
1333 */
1334 priv->intr_id = queue->__system_part->__intr_id;
1335 BUG_ON(!priv->intr_id);
1336
1337 /*
1338 * Register the device interrupt handler.
1339 * The __ffs() function returns the index into the interrupt handler
1340 * table from the interrupt bit mask, which should have exactly
1341 * one bit set.
1342 */
1343 irq = __ffs(priv->intr_id);
1344 tile_irq_activate(irq, TILE_IRQ_PERCPU);
1345 BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
1346 0, dev->name, (void *)dev) != 0);
1347
1348 /* ISSUE: How could "priv->fully_opened" ever be "true" here? */
1349
1350 if (!priv->fully_opened) {
1351
1352 int dummy = 0;
1353
1354 /* Allocate initial buffers. */
1355
1356 int max_buffers =
1357 priv->network_cpus_count * priv->network_cpus_credits;
1358
1359 info->num_needed_small_buffers =
1360 min(LIPP_SMALL_BUFFERS, max_buffers);
1361
1362 info->num_needed_large_buffers =
1363 min(LIPP_LARGE_BUFFERS, max_buffers);
1364
1365 tile_net_provide_needed_buffers(info);
1366
1367 if (info->num_needed_small_buffers != 0 ||
1368 info->num_needed_large_buffers != 0)
1369 panic("Insufficient memory for buffer stack!");
1370
1371 /* Start LIPP/LEPP and activate "ingress" at the shim. */
1372 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1373 sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
1374 panic("Failed to activate the LIPP Shim!\n");
1375
1376 priv->fully_opened = 1;
1377 }
1378
1379 /* On each tile, enable the hypervisor to trigger interrupts. */
1380 /* ISSUE: Do this before starting LIPP/LEPP? */
1381 on_each_cpu(tile_net_enable_intr, (void *)dev, 1);
1382
1383 /* Start our transmit queue. */
1384 netif_start_queue(dev);
1385
1386 return 0;
1387}
1388
1389
1390/*
1391 * Called periodically to retry bringing up the NetIO interface,
1392 * if it doesn't come up cleanly during tile_net_open().
1393 */
1394static void tile_net_open_retry(struct work_struct *w)
1395{
1396 struct delayed_work *dw =
1397 container_of(w, struct delayed_work, work);
1398
1399 struct tile_net_priv *priv =
1400 container_of(dw, struct tile_net_priv, retry_work);
1401
1402 /*
1403 * Try to bring the NetIO interface up. If it fails, reschedule
1404 * ourselves to try again later; otherwise, tell Linux we now have
1405 * a working link. ISSUE: What if the return value is negative?
1406 */
1407 if (tile_net_open_inner(priv->dev))
1408 schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
1409 TILE_NET_RETRY_INTERVAL);
1410 else
1411 netif_carrier_on(priv->dev);
1412}
1413
1414
1415/*
1416 * Called when a network interface is made active.
1417 *
1418 * Returns 0 on success, negative value on failure.
1419 *
1420 * The open entry point is called when a network interface is made
1421 * active by the system (IFF_UP). At this point all resources needed
1422 * for transmit and receive operations are allocated, the interrupt
1423 * handler is registered with the OS, the watchdog timer is started,
1424 * and the stack is notified that the interface is ready.
1425 *
1426 * If the actual link is not available yet, then we tell Linux that
1427 * we have no carrier, and we keep checking until the link comes up.
1428 */
1429static int tile_net_open(struct net_device *dev)
1430{
1431 int ret = 0;
1432 struct tile_net_priv *priv = netdev_priv(dev);
1433
1434 /*
1435 * We rely on priv->partly_opened to tell us if this is the
1436 * first time this interface is being brought up. If it is
1437 * set, the IPP was already initialized and should not be
1438 * initialized again.
1439 */
1440 if (!priv->partly_opened) {
1441
1442 int count;
1443 int credits;
1444
1445 /* Initialize LIPP/LEPP, and start the Shim. */
1446 ret = tile_net_open_aux(dev);
1447 if (ret < 0) {
1448 pr_err("tile_net_open_aux failed: %d\n", ret);
1449 return ret;
1450 }
1451
1452 /* Analyze the network cpus. */
1453
1454 if (network_cpus_used)
1455 cpumask_copy(&priv->network_cpus_map,
1456 &network_cpus_map);
1457 else
1458 cpumask_copy(&priv->network_cpus_map, cpu_online_mask);
1459
1460
1461 count = cpumask_weight(&priv->network_cpus_map);
1462
1463 /* Divide the large buffers among the cpus (even count, minimum 16). */
1464 credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);
1465
1466 /* Apply "GBE" max limit. */
1467 /* ISSUE: Use higher limit for XGBE? */
1468 credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
1469
1470 priv->network_cpus_count = count;
1471 priv->network_cpus_credits = credits;
1472
1473#ifdef TILE_NET_DEBUG
1474 pr_info("Using %d network cpus, with %d credits each\n",
1475 priv->network_cpus_count, priv->network_cpus_credits);
1476#endif
1477
1478 priv->partly_opened = 1;
1479 }
1480
1481 /*
1482 * Attempt to bring up the link.
1483 */
1484 ret = tile_net_open_inner(dev);
1485 if (ret <= 0) {
1486 if (ret == 0)
1487 netif_carrier_on(dev);
1488 return ret;
1489 }
1490
1491 /*
1492 * We were unable to bring up the NetIO interface, but we want to
1493 * try again in a little bit. Tell Linux that we have no carrier
1494 * so it doesn't try to use the interface before the link comes up
1495 * and then remember to try again later.
1496 */
1497 netif_carrier_off(dev);
1498 schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
1499 TILE_NET_RETRY_INTERVAL);
1500
1501 return 0;
1502}
1503
1504
1505/*
1506 * Disables a network interface.
1507 *
1508 * Returns 0, this is not allowed to fail.
1509 *
1510 * The close entry point is called when an interface is de-activated
1511 * by the OS. The hardware is still under the drivers control, but
1512 * needs to be disabled. A global MAC reset is issued to stop the
1513 * hardware, and all transmit and receive resources are freed.
1514 *
1515 * ISSUE: Can this be called while "tile_net_poll()" is running?
1516 */
1517static int tile_net_stop(struct net_device *dev)
1518{
1519 struct tile_net_priv *priv = netdev_priv(dev);
1520
1521 bool pending = true;
1522
1523 PDEBUG("tile_net_stop()\n");
1524
1525 /* ISSUE: Only needed if not yet fully open. */
1526 cancel_delayed_work_sync(&priv->retry_work);
1527
1528 /* Can't transmit any more. */
1529 netif_stop_queue(dev);
1530
1531 /*
1532 * Disable hypervisor interrupts on each tile.
1533 */
1534 on_each_cpu(tile_net_disable_intr, (void *)dev, 1);
1535
1536 /*
1537 * Unregister the interrupt handler.
1538 * The __ffs() function returns the index into the interrupt handler
1539 * table from the interrupt bit mask, which should have exactly
1540 * one bit set.
1541 */
1542 if (priv->intr_id)
1543 free_irq(__ffs(priv->intr_id), dev);
1544
1545 /*
1546 * Drain all the LIPP buffers.
1547 */
1548
1549 while (true) {
1550 int buffer;
1551
1552 /* NOTE: This should never fail. */
1553 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
1554 sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
1555 break;
1556
1557 /* Stop when done. */
1558 if (buffer == 0)
1559 break;
1560
1561 {
1562 /* Convert "linux_buffer_t" to "va". */
1563 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
1564
1565 /* Acquire the associated "skb". */
1566 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
1567 struct sk_buff *skb = *skb_ptr;
1568
1569 kfree_skb(skb);
1570 }
1571 }
1572
1573 /* Stop LIPP/LEPP. */
1574 tile_net_stop_aux(dev);
1575
1576
1577 priv->fully_opened = 0;
1578
1579
1580 /*
1581 * XXX: ISSUE: It appears that, in practice anyway, by the
1582 * time we get here, there are no pending completions.
1583 */
1584 while (pending) {
1585
1586 struct sk_buff *olds[32];
1587 unsigned int wanted = 32;
1588 unsigned int i, nolds = 0;
1589
1590 nolds = tile_net_lepp_grab_comps(dev, olds,
1591 wanted, &pending);
1592
1593 /* ISSUE: We have never actually seen this debug spew. */
1594 if (nolds != 0)
1595 pr_info("During tile_net_stop(), grabbed %d comps.\n",
1596 nolds);
1597
1598 for (i = 0; i < nolds; i++)
1599 kfree_skb(olds[i]);
1600 }
1601
1602
1603 /* Wipe the EPP queue. */
1604 memset(priv->epp_queue, 0, sizeof(lepp_queue_t));
1605
1606 /* Evict the EPP queue. */
1607 finv_buffer(priv->epp_queue, PAGE_SIZE);
1608
1609 return 0;
1610}
1611
1612
1613/*
1614 * Prepare the "frags" info for the resulting LEPP command.
1615 *
1616 * If needed, flush the memory used by the frags.
1617 */
1618static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1619 struct sk_buff *skb,
1620 void *b_data, unsigned int b_len)
1621{
1622 unsigned int i, n = 0;
1623
1624 struct skb_shared_info *sh = skb_shinfo(skb);
1625
1626 phys_addr_t cpa;
1627
1628 if (b_len != 0) {
1629
1630 if (!hash_default)
1631 finv_buffer_remote(b_data, b_len);
1632
1633 cpa = __pa(b_data);
1634 frags[n].cpa_lo = cpa;
1635 frags[n].cpa_hi = cpa >> 32;
1636 frags[n].length = b_len;
1637 frags[n].hash_for_home = hash_default;
1638 n++;
1639 }
1640
1641 for (i = 0; i < sh->nr_frags; i++) {
1642
1643 skb_frag_t *f = &sh->frags[i];
1644 unsigned long pfn = page_to_pfn(f->page);
1645
1646 /* FIXME: Compute "hash_for_home" properly. */
1647 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
1648 int hash_for_home = hash_default;
1649
1650 /* FIXME: Hmmm. */
1651 if (!hash_default) {
1652 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1653 BUG_ON(PageHighMem(f->page));
1654 finv_buffer_remote(va, f->size);
1655 }
1656
1657 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
1658 frags[n].cpa_lo = cpa;
1659 frags[n].cpa_hi = cpa >> 32;
1660 frags[n].length = f->size;
1661 frags[n].hash_for_home = hash_for_home;
1662 n++;
1663 }
1664
1665 return n;
1666}
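/*
 * Each "lepp_frag_t" built above is simply a 64-bit physical address
 * split into "cpa_lo"/"cpa_hi", a length, and a "hash_for_home" flag.
 * When hash-for-home is not in use, the fragment is flushed and
 * invalidated first ("finv_buffer_remote()"), presumably so that the
 * shim sees the bytes the CPU actually wrote.
 */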
1667
1668
1669/*
1670 * This function takes "skb", consisting of a header template and a
1671 * payload, and hands it to LEPP, to emit as one or more segments,
1672 * each consisting of a possibly modified header, plus a piece of the
1673 * payload, via a process known as "tcp segmentation offload".
1674 *
1675 * Usually, "data" will contain the header template, of size "sh_len",
1676 * and "sh->frags" will contain "skb->data_len" bytes of payload, and
1677 * there will be "sh->gso_segs" segments.
1678 *
1679 * Sometimes, if "sendfile()" requires copying, we will be called with
1680 * "data" containing the header and payload, with "frags" being empty.
1681 *
1682 * In theory, "sh->nr_frags" could be 3, but in practice, it seems
1683 * that this will never actually happen.
1684 *
1685 * See "emulate_large_send_offload()" for some reference code, which
1686 * does not handle checksumming.
1687 *
1688 * ISSUE: How do we make sure that high memory DMA does not migrate?
1689 */
1690static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1691{
1692 struct tile_net_priv *priv = netdev_priv(dev);
1693 int my_cpu = smp_processor_id();
1694 struct tile_net_cpu *info = priv->cpu[my_cpu];
1695 struct tile_net_stats_t *stats = &info->stats;
1696
1697 struct skb_shared_info *sh = skb_shinfo(skb);
1698
1699 unsigned char *data = skb->data;
1700
1701 /* The ip header follows the ethernet header. */
1702 struct iphdr *ih = ip_hdr(skb);
1703 unsigned int ih_len = ih->ihl * 4;
1704
1705 /* Note that "nh == ih", by definition. */
1706 unsigned char *nh = skb_network_header(skb);
1707 unsigned int eh_len = nh - data;
1708
1709 /* The tcp header follows the ip header. */
1710 struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
1711 unsigned int th_len = th->doff * 4;
1712
1713 /* The total number of header bytes. */
1714 /* NOTE: This may be less than skb_headlen(skb). */
1715 unsigned int sh_len = eh_len + ih_len + th_len;
1716
1717 /* The number of payload bytes at "skb->data + sh_len". */
1718 /* This is non-zero for sendfile() without HIGHDMA. */
1719 unsigned int b_len = skb_headlen(skb) - sh_len;
1720
1721 /* The total number of payload bytes. */
1722 unsigned int d_len = b_len + skb->data_len;
1723
1724 /* The maximum payload size. */
1725 unsigned int p_len = sh->gso_size;
1726
1727 /* The total number of segments. */
1728 unsigned int num_segs = sh->gso_segs;
1729
1730 /* The temporary copy of the command. */
1731 u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
1732 lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;
1733
1734 /* Analyze the "frags". */
1735 unsigned int num_frags =
1736 tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);
1737
1738 /* The size of the command, including frags and header. */
1739 size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);
1740
1741 /* The command header. */
1742 lepp_tso_cmd_t cmd_init = {
1743 .tso = true,
1744 .header_size = sh_len,
1745 .ip_offset = eh_len,
1746 .tcp_offset = eh_len + ih_len,
1747 .payload_size = p_len,
1748 .num_frags = num_frags,
1749 };
1750
1751 unsigned long irqflags;
1752
1753 lepp_queue_t *eq = priv->epp_queue;
1754
1755 struct sk_buff *olds[4];
1756 unsigned int wanted = 4;
1757 unsigned int i, nolds = 0;
1758
1759 unsigned int cmd_head, cmd_tail, cmd_next;
1760 unsigned int comp_tail;
1761
1762 unsigned int free_slots;
1763
1764
1765 /* Paranoia. */
1766 BUG_ON(skb->protocol != htons(ETH_P_IP));
1767 BUG_ON(ih->protocol != IPPROTO_TCP);
1768 BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
1769 BUG_ON(num_frags > LEPP_MAX_FRAGS);
1770 /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
1771 BUG_ON(num_segs <= 1);
1772
1773
1774 /* Finish preparing the command. */
1775
1776 /* Copy the command header. */
1777 *cmd = cmd_init;
1778
1779 /* Copy the "header". */
1780 memcpy(&cmd->frags[num_frags], data, sh_len);
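	/*
	 * The marshalled command thus appears to consist of the fixed
	 * "lepp_tso_cmd_t" fields, then "num_frags" fragment descriptors,
	 * then the "sh_len" bytes of header template, for a total of
	 * LEPP_TSO_CMD_SIZE(num_frags, sh_len) bytes, which is what is
	 * copied into the command ring below.
	 */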
1781
1782
1783 /* Prefetch and wait, to minimize time spent holding the spinlock. */
1784 prefetch_L1(&eq->comp_tail);
1785 prefetch_L1(&eq->cmd_tail);
1786 mb();
1787
1788
1789 /* Enqueue the command. */
1790
1791 spin_lock_irqsave(&priv->cmd_lock, irqflags);
1792
1793 /*
1794 * Handle completions if needed to make room.
1795 * HACK: Spin until there is sufficient room.
1796 */
1797 free_slots = lepp_num_free_comp_slots(eq);
1798 if (free_slots < 1) {
1799spin:
1800 nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
1801 wanted - nolds, NULL);
1802 if (lepp_num_free_comp_slots(eq) < 1)
1803 goto spin;
1804 }
1805
1806 cmd_head = eq->cmd_head;
1807 cmd_tail = eq->cmd_tail;
1808
1809 /* NOTE: The "gotos" below are untested. */
1810
1811 /* Prepare to advance, detecting full queue. */
1812 cmd_next = cmd_tail + cmd_size;
1813 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1814 goto spin;
1815 if (cmd_next > LEPP_CMD_LIMIT) {
1816 cmd_next = 0;
1817 if (cmd_next == cmd_head)
1818 goto spin;
1819 }
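	/*
	 * The command queue is treated as a byte ring: it is "full" when
	 * advancing the tail by "cmd_size" would cross the head, and the
	 * tail wraps to zero once it moves past LEPP_CMD_LIMIT.  A full
	 * ring sends us back to "spin:", which reaps completions and
	 * re-reads the head until LEPP has made room.
	 */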
1820
1821 /* Copy the command. */
1822 memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);
1823
1824 /* Advance. */
1825 cmd_tail = cmd_next;
1826
1827 /* Record "skb" for eventual freeing. */
1828 comp_tail = eq->comp_tail;
1829 eq->comps[comp_tail] = skb;
1830 LEPP_QINC(comp_tail);
1831 eq->comp_tail = comp_tail;
1832
1833 /* Flush before allowing LEPP to handle the command. */
1834 __insn_mf();
1835
1836 eq->cmd_tail = cmd_tail;
1837
1838 spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
1839
1840 if (nolds == 0)
1841 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
1842
1843 /* Handle completions. */
1844 for (i = 0; i < nolds; i++)
1845 kfree_skb(olds[i]);
1846
1847 /* Update stats. */
1848 stats->tx_packets += num_segs;
1849 stats->tx_bytes += (num_segs * sh_len) + d_len;
1850
1851 /* Make sure the egress timer is scheduled. */
1852 tile_net_schedule_egress_timer(info);
1853
1854 return NETDEV_TX_OK;
1855}
1856
1857
1858/*
1859 * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
1860 */
1861static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1862{
1863 struct tile_net_priv *priv = netdev_priv(dev);
1864 int my_cpu = smp_processor_id();
1865 struct tile_net_cpu *info = priv->cpu[my_cpu];
1866 struct tile_net_stats_t *stats = &info->stats;
1867
1868 unsigned long irqflags;
1869
1870 struct skb_shared_info *sh = skb_shinfo(skb);
1871
1872 unsigned int len = skb->len;
1873 unsigned char *data = skb->data;
1874
1875 unsigned int csum_start = skb->csum_start - skb_headroom(skb);
1876
1877 lepp_frag_t frags[LEPP_MAX_FRAGS];
1878
1879 unsigned int num_frags;
1880
1881 lepp_queue_t *eq = priv->epp_queue;
1882
1883 struct sk_buff *olds[4];
1884 unsigned int wanted = 4;
1885 unsigned int i, nolds = 0;
1886
1887 unsigned int cmd_size = sizeof(lepp_cmd_t);
1888
1889 unsigned int cmd_head, cmd_tail, cmd_next;
1890 unsigned int comp_tail;
1891
1892 lepp_cmd_t cmds[LEPP_MAX_FRAGS];
1893
1894 unsigned int free_slots;
1895
1896
1897 /*
1898 * This is paranoia, since we think that if the link doesn't come
1899 * up, telling Linux we have no carrier will keep it from trying
1900 * to transmit. If it does, though, we can't execute this routine,
1901 * since data structures we depend on aren't set up yet.
1902 */
1903 if (!info->registered)
1904 return NETDEV_TX_BUSY;
1905
1906
1907 /* Save the timestamp. */
1908 dev->trans_start = jiffies;
1909
1910
1911#ifdef TILE_NET_PARANOIA
1912#if CHIP_HAS_CBOX_HOME_MAP()
1913 if (hash_default) {
1914 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
1915 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
1916 panic("Non-coherent egress buffer!");
1917 }
1918#endif
1919#endif
1920
1921
1922#ifdef TILE_NET_DUMP_PACKETS
1923 /* ISSUE: Does not dump the "frags". */
1924 dump_packet(data, skb_headlen(skb), "tx");
1925#endif /* TILE_NET_DUMP_PACKETS */
1926
1927
1928 if (sh->gso_size != 0)
1929 return tile_net_tx_tso(skb, dev);
1930
1931
1932 /* Prepare the commands. */
1933
1934 num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
1935
1936 for (i = 0; i < num_frags; i++) {
1937
1938 bool final = (i == num_frags - 1);
1939
1940 lepp_cmd_t cmd = {
1941 .cpa_lo = frags[i].cpa_lo,
1942 .cpa_hi = frags[i].cpa_hi,
1943 .length = frags[i].length,
1944 .hash_for_home = frags[i].hash_for_home,
1945 .send_completion = final,
1946 .end_of_packet = final
1947 };
1948
1949 if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
1950 cmd.compute_checksum = 1;
1951 cmd.checksum_data.bits.start_byte = csum_start;
1952 cmd.checksum_data.bits.count = len - csum_start;
1953 cmd.checksum_data.bits.destination_byte =
1954 csum_start + skb->csum_offset;
1955 }
1956
1957 cmds[i] = cmd;
1958 }
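	/*
	 * Only the first fragment's command carries the checksum request
	 * (and only for CHECKSUM_PARTIAL skbs): LEPP is asked to checksum
	 * "len - csum_start" bytes starting at "csum_start" and to store
	 * the result at "csum_start + skb->csum_offset", mirroring how a
	 * partial checksum is normally completed in software.
	 */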
1959
1960
1961 /* Prefetch and wait, to minimize time spent holding the spinlock. */
1962 prefetch_L1(&eq->comp_tail);
1963 prefetch_L1(&eq->cmd_tail);
1964 mb();
1965
1966
1967 /* Enqueue the commands. */
1968
1969 spin_lock_irqsave(&priv->cmd_lock, irqflags);
1970
1971 /*
1972 * Handle completions if needed to make room.
1973 * HACK: Spin until there is sufficient room.
1974 */
1975 free_slots = lepp_num_free_comp_slots(eq);
1976 if (free_slots < 1) {
1977spin:
1978 nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
1979 wanted - nolds, NULL);
1980 if (lepp_num_free_comp_slots(eq) < 1)
1981 goto spin;
1982 }
1983
1984 cmd_head = eq->cmd_head;
1985 cmd_tail = eq->cmd_tail;
1986
1987 /* NOTE: The "gotos" below are untested. */
1988
1989 /* Copy the commands, or fail. */
1990 for (i = 0; i < num_frags; i++) {
1991
1992 /* Prepare to advance, detecting full queue. */
1993 cmd_next = cmd_tail + cmd_size;
1994 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1995 goto spin;
1996 if (cmd_next > LEPP_CMD_LIMIT) {
1997 cmd_next = 0;
1998 if (cmd_next == cmd_head)
1999 goto spin;
2000 }
2001
2002 /* Copy the command. */
2003 *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];
2004
2005 /* Advance. */
2006 cmd_tail = cmd_next;
2007 }
2008
2009 /* Record "skb" for eventual freeing. */
2010 comp_tail = eq->comp_tail;
2011 eq->comps[comp_tail] = skb;
2012 LEPP_QINC(comp_tail);
2013 eq->comp_tail = comp_tail;
2014
2015 /* Flush before allowing LEPP to handle the command. */
2016 __insn_mf();
2017
2018 eq->cmd_tail = cmd_tail;
2019
2020 spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
2021
2022 if (nolds == 0)
2023 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
2024
2025 /* Handle completions. */
2026 for (i = 0; i < nolds; i++)
2027 kfree_skb(olds[i]);
2028
2029 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
2030 stats->tx_packets++;
2031 stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
2032
2033 /* Make sure the egress timer is scheduled. */
2034 tile_net_schedule_egress_timer(info);
2035
2036 return NETDEV_TX_OK;
2037}
2038
2039
2040/*
2041 * Deal with a transmit timeout.
2042 */
2043static void tile_net_tx_timeout(struct net_device *dev)
2044{
2045 PDEBUG("tile_net_tx_timeout()\n");
2046 PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
2047 jiffies - dev->trans_start);
2048
2049 /* XXX: ISSUE: This doesn't seem useful for us. */
2050 netif_wake_queue(dev);
2051}
2052
2053
2054/*
2055 * Ioctl commands.
2056 */
2057static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2058{
2059 return -EOPNOTSUPP;
2060}
2061
2062
2063/*
2064 * Get System Network Statistics.
2065 *
2066 * Returns the address of the device statistics structure.
2067 */
2068static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
2069{
2070 struct tile_net_priv *priv = netdev_priv(dev);
2071 u32 rx_packets = 0;
2072 u32 tx_packets = 0;
2073 u32 rx_bytes = 0;
2074 u32 tx_bytes = 0;
2075 int i;
2076
2077 for_each_online_cpu(i) {
2078 if (priv->cpu[i]) {
2079 rx_packets += priv->cpu[i]->stats.rx_packets;
2080 rx_bytes += priv->cpu[i]->stats.rx_bytes;
2081 tx_packets += priv->cpu[i]->stats.tx_packets;
2082 tx_bytes += priv->cpu[i]->stats.tx_bytes;
2083 }
2084 }
2085
2086 priv->stats.rx_packets = rx_packets;
2087 priv->stats.rx_bytes = rx_bytes;
2088 priv->stats.tx_packets = tx_packets;
2089 priv->stats.tx_bytes = tx_bytes;
2090
2091 return &priv->stats;
2092}
2093
2094
2095/*
2096 * Change the "mtu".
2097 *
2098 * The "change_mtu" method is usually not needed.
2099 * If you need it, it must be like this.
2100 */
2101static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
2102{
2103 PDEBUG("tile_net_change_mtu()\n");
2104
2105 /* Check ranges. */
2106 if ((new_mtu < 68) || (new_mtu > 1500))
2107 return -EINVAL;
2108
2109 /* Accept the value. */
2110 dev->mtu = new_mtu;
2111
2112 return 0;
2113}
2114
2115
2116/*
2117 * Change the Ethernet Address of the NIC.
2118 *
2119 * The hypervisor driver does not support changing MAC address. However,
2120 * the IPP does not do anything with the MAC address, so the address which
2121 * gets used on outgoing packets, and which is accepted on incoming packets,
2122 * is completely up to the NetIO program or kernel driver which is actually
2123 * handling them.
2124 *
2125 * Returns 0 on success, negative on failure.
2126 */
2127static int tile_net_set_mac_address(struct net_device *dev, void *p)
2128{
2129 struct sockaddr *addr = p;
2130
2131 if (!is_valid_ether_addr(addr->sa_data))
2132 return -EINVAL;
2133
2134 /* ISSUE: Note that "dev_addr" is now a pointer. */
2135 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2136
2137 return 0;
2138}
2139
2140
2141/*
2142 * Obtain the MAC address from the hypervisor.
2143 * This must be done before opening the device.
2144 */
2145static int tile_net_get_mac(struct net_device *dev)
2146{
2147 struct tile_net_priv *priv = netdev_priv(dev);
2148
2149 char hv_dev_name[32];
2150 int len;
2151
2152 __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };
2153
2154 int ret;
2155
2156 /* For example, "xgbe0". */
2157 strcpy(hv_dev_name, dev->name);
2158 len = strlen(hv_dev_name);
2159
2160 /* For example, "xgbe/0". */
2161 hv_dev_name[len] = hv_dev_name[len - 1];
2162 hv_dev_name[len - 1] = '/';
2163 len++;
2164
2165 /* For example, "xgbe/0/native_hash". */
2166 strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");
2167
2168 /* Get the hypervisor handle for this device. */
2169 priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
2170 PDEBUG("hv_dev_open(%s) returned %d %p\n",
2171 hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
2172 if (priv->hv_devhdl < 0) {
2173 if (priv->hv_devhdl == HV_ENODEV)
2174 printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
2175 hv_dev_name);
2176 else
2177 printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
2178 hv_dev_name, priv->hv_devhdl);
2179 return -1;
2180 }
2181
2182 /*
2183 * Read the hardware address from the hypervisor.
2184 * ISSUE: Note that "dev_addr" is now a pointer.
2185 */
2186 offset.bits.class = NETIO_PARAM;
2187 offset.bits.addr = NETIO_PARAM_MAC;
2188 ret = hv_dev_pread(priv->hv_devhdl, 0,
2189 (HV_VirtAddr)dev->dev_addr, dev->addr_len,
2190 offset.word);
2191 PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
2192 if (ret <= 0) {
2193 printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
2194 dev->name);
2195 /*
2196 * Since the device is configured by the hypervisor but we
2197 * can't get its MAC address, we are most likely running
2198 * the simulator, so let's generate a random MAC address.
2199 */
2200 random_ether_addr(dev->dev_addr);
2201 }
2202
2203 return 0;
2204}
2205
2206
2207static struct net_device_ops tile_net_ops = {
2208 .ndo_open = tile_net_open,
2209 .ndo_stop = tile_net_stop,
2210 .ndo_start_xmit = tile_net_tx,
2211 .ndo_do_ioctl = tile_net_ioctl,
2212 .ndo_get_stats = tile_net_get_stats,
2213 .ndo_change_mtu = tile_net_change_mtu,
2214 .ndo_tx_timeout = tile_net_tx_timeout,
2215 .ndo_set_mac_address = tile_net_set_mac_address
2216};
2217
2218
2219/*
2220 * The setup function.
2221 *
2222 * This uses ether_setup() to assign various fields in dev, including
2223 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
2224 */
2225static void tile_net_setup(struct net_device *dev)
2226{
2227 PDEBUG("tile_net_setup()\n");
2228
2229 ether_setup(dev);
2230
2231 dev->netdev_ops = &tile_net_ops;
2232
2233 dev->watchdog_timeo = TILE_NET_TIMEOUT;
2234
2235 /* We want lockless xmit. */
2236 dev->features |= NETIF_F_LLTX;
2237
2238 /* We support hardware tx checksums. */
2239 dev->features |= NETIF_F_HW_CSUM;
2240
2241 /* We support scatter/gather. */
2242 dev->features |= NETIF_F_SG;
2243
2244 /* We support TSO. */
2245 dev->features |= NETIF_F_TSO;
2246
2247#ifdef TILE_NET_GSO
2248 /* We support GSO. */
2249 dev->features |= NETIF_F_GSO;
2250#endif
2251
2252 if (hash_default)
2253 dev->features |= NETIF_F_HIGHDMA;
2254
2255 /* ISSUE: We should support NETIF_F_UFO. */
2256
2257 dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
2258
2259 dev->mtu = TILE_NET_MTU;
2260}
2261
2262
2263/*
2264 * Allocate the device structure, register the device, and obtain the
2265 * MAC address from the hypervisor.
2266 */
2267static struct net_device *tile_net_dev_init(const char *name)
2268{
2269 int ret;
2270 struct net_device *dev;
2271 struct tile_net_priv *priv;
2272 struct page *page;
2273
2274 /*
2275 * Allocate the device structure. This allocates "priv", calls
2276 * tile_net_setup(), and saves "name". Normally, "name" is a
2277 * template, instantiated by register_netdev(), but not for us.
2278 */
2279 dev = alloc_netdev(sizeof(*priv), name, tile_net_setup);
2280 if (!dev) {
2281 pr_err("alloc_netdev(%s) failed\n", name);
2282 return NULL;
2283 }
2284
2285 priv = netdev_priv(dev);
2286
2287 /* Initialize "priv". */
2288
2289 memset(priv, 0, sizeof(*priv));
2290
2291 /* Save "dev" for "tile_net_open_retry()". */
2292 priv->dev = dev;
2293
2294 INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
2295
2296 spin_lock_init(&priv->cmd_lock);
2297 spin_lock_init(&priv->comp_lock);
2298
2299 /* Allocate "epp_queue". */
2300 BUG_ON(get_order(sizeof(lepp_queue_t)) != 0);
2301 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
2302 if (!page) {
2303 free_netdev(dev);
2304 return NULL;
2305 }
2306 priv->epp_queue = page_address(page);
2307
2308 /* Register the network device. */
2309 ret = register_netdev(dev);
2310 if (ret) {
2311 pr_err("register_netdev %s failed %d\n", dev->name, ret);
2312 free_page((unsigned long)priv->epp_queue);
2313 free_netdev(dev);
2314 return NULL;
2315 }
2316
2317 /* Get the MAC address. */
2318 ret = tile_net_get_mac(dev);
2319 if (ret < 0) {
2320 unregister_netdev(dev);
2321 free_page((unsigned long)priv->epp_queue);
2322 free_netdev(dev);
2323 return NULL;
2324 }
2325
2326 return dev;
2327}
2328
2329
2330/*
2331 * Module cleanup.
2332 */
2333static void tile_net_cleanup(void)
2334{
2335 int i;
2336
2337 for (i = 0; i < TILE_NET_DEVS; i++) {
2338 if (tile_net_devs[i]) {
2339 struct net_device *dev = tile_net_devs[i];
2340 struct tile_net_priv *priv = netdev_priv(dev);
2341 unregister_netdev(dev);
2342 finv_buffer(priv->epp_queue, PAGE_SIZE);
2343 free_page((unsigned long)priv->epp_queue);
2344 free_netdev(dev);
2345 }
2346 }
2347}
2348
2349
2350/*
2351 * Module initialization.
2352 */
2353static int tile_net_init_module(void)
2354{
2355 pr_info("Tilera IPP Net Driver\n");
2356
2357 tile_net_devs[0] = tile_net_dev_init("xgbe0");
2358 tile_net_devs[1] = tile_net_dev_init("xgbe1");
2359 tile_net_devs[2] = tile_net_dev_init("gbe0");
2360 tile_net_devs[3] = tile_net_dev_init("gbe1");
2361
2362 return 0;
2363}
2364
2365
2366#ifndef MODULE
2367/*
2368 * The "network_cpus" boot argument specifies the cpus that are dedicated
2369 * to handle ingress packets.
2370 *
2371 * The parameter should be in the form "network_cpus=m-n[,x-y]", where
2372 * m, n, x, y are integers giving (inclusive) ranges of cpus to
2373 * dedicate to ingress packet handling.
2374 */
2375static int __init network_cpus_setup(char *str)
2376{
2377 int rc = cpulist_parse_crop(str, &network_cpus_map);
2378 if (rc != 0) {
2379 pr_warning("network_cpus=%s: malformed cpu list\n",
2380 str);
2381 } else {
2382
2383 /* Remove dedicated cpus. */
2384 cpumask_and(&network_cpus_map, &network_cpus_map,
2385 cpu_possible_mask);
2386
2387
2388 if (cpumask_empty(&network_cpus_map)) {
2389 pr_warning("Ignoring network_cpus='%s'.\n",
2390 str);
2391 } else {
2392 char buf[1024];
2393 cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
2394 pr_info("Linux network CPUs: %s\n", buf);
2395 network_cpus_used = true;
2396 }
2397 }
2398
2399 return 0;
2400}
2401__setup("network_cpus=", network_cpus_setup);
2402#endif
2403
2404
2405module_init(tile_net_init_module);
2406module_exit(tile_net_cleanup);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a9f7d5d1a269..7064e035757a 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
688 688
689 DMFE_DBUG(0, "dmfe_start_xmit", 0); 689 DMFE_DBUG(0, "dmfe_start_xmit", 0);
690 690
691 /* Resource flag check */
692 netif_stop_queue(dev);
693
694 /* Too large packet check */ 691 /* Too large packet check */
695 if (skb->len > MAX_PACKET_SIZE) { 692 if (skb->len > MAX_PACKET_SIZE) {
696 pr_err("big packet = %d\n", (u16)skb->len); 693 pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
698 return NETDEV_TX_OK; 695 return NETDEV_TX_OK;
699 } 696 }
700 697
698 /* Resource flag check */
699 netif_stop_queue(dev);
700
701 spin_lock_irqsave(&db->lock, flags); 701 spin_lock_irqsave(&db->lock, flags);
702 702
703 /* No Tx resource check, it never happen nromally */ 703 /* No Tx resource check, it never happen nromally */
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a95586f3c5..055b87ab4f07 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
899#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size 899#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
900 */ 900 */
901#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ 901#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
902#define UCC_GETH_UTFTT_INIT 512 902#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
903 due to errata */
903/* Gigabit Ethernet (1000 Mbps) */ 904/* Gigabit Ethernet (1000 Mbps) */
904#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual 905#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
905 FIFO size */ 906 FIFO size */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94de03e..812edf85d6d3 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -958,10 +958,6 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
958 /* Packet is complete. Inject into stack. */ 958 /* Packet is complete. Inject into stack. */
959 /* We have IP packet here */ 959 /* We have IP packet here */
960 odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP); 960 odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
961 /* don't check it */
962 odev->skb_rx_buf->ip_summed =
963 CHECKSUM_UNNECESSARY;
964
965 skb_reset_mac_header(odev->skb_rx_buf); 961 skb_reset_mac_header(odev->skb_rx_buf);
966 962
967 /* Ship it off to the kernel */ 963 /* Ship it off to the kernel */
@@ -2994,12 +2990,14 @@ static int hso_probe(struct usb_interface *interface,
2994 2990
2995 case HSO_INTF_BULK: 2991 case HSO_INTF_BULK:
2996 /* It's a regular bulk interface */ 2992 /* It's a regular bulk interface */
2997 if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) && 2993 if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
2998 !disable_net) 2994 if (!disable_net)
2999 hso_dev = hso_create_net_device(interface, port_spec); 2995 hso_dev =
3000 else 2996 hso_create_net_device(interface, port_spec);
2997 } else {
3001 hso_dev = 2998 hso_dev =
3002 hso_create_bulk_serial_device(interface, port_spec); 2999 hso_create_bulk_serial_device(interface, port_spec);
3000 }
3003 if (!hso_dev) 3001 if (!hso_dev)
3004 goto exit; 3002 goto exit;
3005 break; 3003 break;
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index ea476cbd38b5..e305274f83fb 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -293,6 +293,7 @@ static inline void sca_tx_done(port_t *port)
293 struct net_device *dev = port->netdev; 293 struct net_device *dev = port->netdev;
294 card_t* card = port->card; 294 card_t* card = port->card;
295 u8 stat; 295 u8 stat;
296 unsigned count = 0;
296 297
297 spin_lock(&port->lock); 298 spin_lock(&port->lock);
298 299
@@ -316,10 +317,12 @@ static inline void sca_tx_done(port_t *port)
316 dev->stats.tx_bytes += readw(&desc->len); 317 dev->stats.tx_bytes += readw(&desc->len);
317 } 318 }
318 writeb(0, &desc->stat); /* Free descriptor */ 319 writeb(0, &desc->stat); /* Free descriptor */
320 count++;
319 port->txlast = (port->txlast + 1) % card->tx_ring_buffers; 321 port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
320 } 322 }
321 323
322 netif_wake_queue(dev); 324 if (count)
325 netif_wake_queue(dev);
323 spin_unlock(&port->lock); 326 spin_unlock(&port->lock);
324} 327}
325 328
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad8397885..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@ norbuff:
498static int x25_asy_close(struct net_device *dev) 498static int x25_asy_close(struct net_device *dev)
499{ 499{
500 struct x25_asy *sl = netdev_priv(dev); 500 struct x25_asy *sl = netdev_priv(dev);
501 int err;
502 501
503 spin_lock(&sl->lock); 502 spin_lock(&sl->lock);
504 if (sl->tty) 503 if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
507 netif_stop_queue(dev); 506 netif_stop_queue(dev);
508 sl->rcount = 0; 507 sl->rcount = 0;
509 sl->xleft = 0; 508 sl->xleft = 0;
510 err = lapb_unregister(dev);
511 if (err != LAPB_OK)
512 printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
513 err);
514 spin_unlock(&sl->lock); 509 spin_unlock(&sl->lock);
515 return 0; 510 return 0;
516} 511}
@@ -582,7 +577,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
582 if (err) 577 if (err)
583 return err; 578 return err;
584 /* Done. We have linked the TTY line to a channel. */ 579 /* Done. We have linked the TTY line to a channel. */
585 return sl->dev->base_addr; 580 return 0;
586} 581}
587 582
588 583
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
595static void x25_asy_close_tty(struct tty_struct *tty) 590static void x25_asy_close_tty(struct tty_struct *tty)
596{ 591{
597 struct x25_asy *sl = tty->disc_data; 592 struct x25_asy *sl = tty->disc_data;
593 int err;
598 594
599 /* First make sure we're connected. */ 595 /* First make sure we're connected. */
600 if (!sl || sl->magic != X25_ASY_MAGIC) 596 if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
605 dev_close(sl->dev); 601 dev_close(sl->dev);
606 rtnl_unlock(); 602 rtnl_unlock();
607 603
604 err = lapb_unregister(sl->dev);
605 if (err != LAPB_OK)
606 printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
607 err);
608
608 tty->disc_data = NULL; 609 tty->disc_data = NULL;
609 sl->tty = NULL; 610 sl->tty = NULL;
610 x25_asy_free(sl); 611 x25_asy_free(sl);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 8251946842e6..42ed923cdb1a 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1917,7 +1917,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1917 sc->bmisscount = 0; 1917 sc->bmisscount = 0;
1918 } 1918 }
1919 1919
1920 if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) { 1920 if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
1921 sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1921 u64 tsf = ath5k_hw_get_tsf64(ah); 1922 u64 tsf = ath5k_hw_get_tsf64(ah);
1922 u32 tsftu = TSF_TO_TU(tsf); 1923 u32 tsftu = TSF_TO_TU(tsf);
1923 int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval; 1924 int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
@@ -1949,8 +1950,9 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1949 /* NB: hw still stops DMA, so proceed */ 1950 /* NB: hw still stops DMA, so proceed */
1950 } 1951 }
1951 1952
1952 /* refresh the beacon for AP mode */ 1953 /* refresh the beacon for AP or MESH mode */
1953 if (sc->opmode == NL80211_IFTYPE_AP) 1954 if (sc->opmode == NL80211_IFTYPE_AP ||
1955 sc->opmode == NL80211_IFTYPE_MESH_POINT)
1954 ath5k_beacon_update(sc->hw, vif); 1956 ath5k_beacon_update(sc->hw, vif);
1955 1957
1956 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); 1958 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
@@ -2851,7 +2853,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2851 2853
2852 /* Assign the vap/adhoc to a beacon xmit slot. */ 2854 /* Assign the vap/adhoc to a beacon xmit slot. */
2853 if ((avf->opmode == NL80211_IFTYPE_AP) || 2855 if ((avf->opmode == NL80211_IFTYPE_AP) ||
2854 (avf->opmode == NL80211_IFTYPE_ADHOC)) { 2856 (avf->opmode == NL80211_IFTYPE_ADHOC) ||
2857 (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
2855 int slot; 2858 int slot;
2856 2859
2857 WARN_ON(list_empty(&sc->bcbuf)); 2860 WARN_ON(list_empty(&sc->bcbuf));
@@ -2870,7 +2873,7 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2870 sc->bslot[avf->bslot] = vif; 2873 sc->bslot[avf->bslot] = vif;
2871 if (avf->opmode == NL80211_IFTYPE_AP) 2874 if (avf->opmode == NL80211_IFTYPE_AP)
2872 sc->num_ap_vifs++; 2875 sc->num_ap_vifs++;
2873 else 2876 else if (avf->opmode == NL80211_IFTYPE_ADHOC)
2874 sc->num_adhoc_vifs++; 2877 sc->num_adhoc_vifs++;
2875 } 2878 }
2876 2879
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index c4182359bee4..a7b82f0085d2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -55,6 +55,8 @@
55#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ 55#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
56#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ 56#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
57 57
58#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
59
58static const struct ar9300_eeprom ar9300_default = { 60static const struct ar9300_eeprom ar9300_default = {
59 .eepromVersion = 2, 61 .eepromVersion = 2,
60 .templateVersion = 2, 62 .templateVersion = 2,
@@ -290,20 +292,21 @@ static const struct ar9300_eeprom ar9300_default = {
290 } 292 }
291 }, 293 },
292 .ctlPowerData_2G = { 294 .ctlPowerData_2G = {
293 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 295 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
294 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 296 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
295 { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } }, 297 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
296 298
297 { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } }, 299 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
298 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 300 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
299 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 301 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
300 302
301 { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } }, 303 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
302 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 304 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
303 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 305 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
304 306
305 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, 307 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
306 { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } }, 308 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
309 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
307 }, 310 },
308 .modalHeader5G = { 311 .modalHeader5G = {
309 /* 4 idle,t1,t2,b (4 bits per setting) */ 312 /* 4 idle,t1,t2,b (4 bits per setting) */
@@ -568,56 +571,56 @@ static const struct ar9300_eeprom ar9300_default = {
568 .ctlPowerData_5G = { 571 .ctlPowerData_5G = {
569 { 572 {
570 { 573 {
571 {60, 1}, {60, 1}, {60, 1}, {60, 1}, 574 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
572 {60, 1}, {60, 1}, {60, 1}, {60, 0}, 575 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
573 } 576 }
574 }, 577 },
575 { 578 {
576 { 579 {
577 {60, 1}, {60, 1}, {60, 1}, {60, 1}, 580 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
578 {60, 1}, {60, 1}, {60, 1}, {60, 0}, 581 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
579 } 582 }
580 }, 583 },
581 { 584 {
582 { 585 {
583 {60, 0}, {60, 1}, {60, 0}, {60, 1}, 586 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
584 {60, 1}, {60, 1}, {60, 1}, {60, 1}, 587 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
585 } 588 }
586 }, 589 },
587 { 590 {
588 { 591 {
589 {60, 0}, {60, 1}, {60, 1}, {60, 0}, 592 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
590 {60, 1}, {60, 0}, {60, 0}, {60, 0}, 593 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
591 } 594 }
592 }, 595 },
593 { 596 {
594 { 597 {
595 {60, 1}, {60, 1}, {60, 1}, {60, 0}, 598 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
596 {60, 0}, {60, 0}, {60, 0}, {60, 0}, 599 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
597 } 600 }
598 }, 601 },
599 { 602 {
600 { 603 {
601 {60, 1}, {60, 1}, {60, 1}, {60, 1}, 604 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
602 {60, 1}, {60, 0}, {60, 0}, {60, 0}, 605 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
603 } 606 }
604 }, 607 },
605 { 608 {
606 { 609 {
607 {60, 1}, {60, 1}, {60, 1}, {60, 1}, 610 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
608 {60, 1}, {60, 1}, {60, 1}, {60, 1}, 611 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
609 } 612 }
610 }, 613 },
611 { 614 {
612 { 615 {
613 {60, 1}, {60, 1}, {60, 0}, {60, 1}, 616 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
614 {60, 1}, {60, 1}, {60, 1}, {60, 0}, 617 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
615 } 618 }
616 }, 619 },
617 { 620 {
618 { 621 {
619 {60, 1}, {60, 0}, {60, 1}, {60, 1}, 622 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
620 {60, 1}, {60, 1}, {60, 0}, {60, 1}, 623 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
621 } 624 }
622 }, 625 },
623 } 626 }
@@ -1827,9 +1830,9 @@ static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
1827 struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; 1830 struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
1828 1831
1829 if (is2GHz) 1832 if (is2GHz)
1830 return ctl_2g[idx].ctlEdges[edge].tPower; 1833 return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
1831 else 1834 else
1832 return ctl_5g[idx].ctlEdges[edge].tPower; 1835 return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
1833} 1836}
1834 1837
1835static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, 1838static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
@@ -1847,12 +1850,12 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
1847 1850
1848 if (is2GHz) { 1851 if (is2GHz) {
1849 if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq && 1852 if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
1850 ctl_2g[idx].ctlEdges[edge - 1].flag) 1853 CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
1851 return ctl_2g[idx].ctlEdges[edge - 1].tPower; 1854 return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
1852 } else { 1855 } else {
1853 if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq && 1856 if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
1854 ctl_5g[idx].ctlEdges[edge - 1].flag) 1857 CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
1855 return ctl_5g[idx].ctlEdges[edge - 1].tPower; 1858 return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
1856 } 1859 }
1857 1860
1858 return AR9300_MAX_RATE_POWER; 1861 return AR9300_MAX_RATE_POWER;
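
The hunks above replace each per-edge {tPower, flag} pair with a single byte built by CTL(); CTL_EDGE_TPOWER() and CTL_EDGE_FLAGS() (added to eeprom.h later in this patch) unpack it again in the edge-power helpers. A minimal standalone sketch of the same arithmetic, using 60/1 only because that is the value the default table uses:

/* Illustration of the pack/unpack macros introduced by this patch;
 * the values are examples, not real calibration data. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CTL(_tpower, _flag)   ((_tpower) | ((_flag) << 6))
#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
#define CTL_EDGE_FLAGS(_ctl)  (((_ctl) >> 6) & 0x03)

int main(void)
{
        uint8_t ctl = CTL(60, 1);       /* 60 | (1 << 6) = 0x7c */

        assert(CTL_EDGE_TPOWER(ctl) == 60);
        assert(CTL_EDGE_FLAGS(ctl) == 1);
        printf("ctl=0x%02x tPower=%u flag=%u\n",
               ctl, CTL_EDGE_TPOWER(ctl), CTL_EDGE_FLAGS(ctl));
        return 0;
}

The macros keep the power in the low six bits and the flag in the top two, the same layout the removed little-endian bitfield produced, so the table contents stay byte-for-byte identical while the code no longer depends on compiler bitfield ordering.
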
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 3c533bb983c7..655b3033396c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -261,17 +261,12 @@ struct cal_tgt_pow_ht {
261 u8 tPow2x[14]; 261 u8 tPow2x[14];
262} __packed; 262} __packed;
263 263
264struct cal_ctl_edge_pwr {
265 u8 tPower:6,
266 flag:2;
267} __packed;
268
269struct cal_ctl_data_2g { 264struct cal_ctl_data_2g {
270 struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G]; 265 u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
271} __packed; 266} __packed;
272 267
273struct cal_ctl_data_5g { 268struct cal_ctl_data_5g {
274 struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G]; 269 u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
275} __packed; 270} __packed;
276 271
277struct ar9300_eeprom { 272struct ar9300_eeprom {
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 170d44a35ccb..0963071e8f90 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,6 +21,7 @@
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/leds.h> 22#include <linux/leds.h>
23#include <linux/completion.h> 23#include <linux/completion.h>
24#include <linux/pm_qos_params.h>
24 25
25#include "debug.h" 26#include "debug.h"
26#include "common.h" 27#include "common.h"
@@ -328,7 +329,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
328struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 329struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
329void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 330void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
330int ath_tx_setup(struct ath_softc *sc, int haltype); 331int ath_tx_setup(struct ath_softc *sc, int haltype);
331void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); 332bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
332void ath_draintxq(struct ath_softc *sc, 333void ath_draintxq(struct ath_softc *sc,
333 struct ath_txq *txq, bool retry_tx); 334 struct ath_txq *txq, bool retry_tx);
334void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); 335void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
@@ -646,6 +647,8 @@ struct ath_softc {
646 struct ath_descdma txsdma; 647 struct ath_descdma txsdma;
647 648
648 struct ath_ant_comb ant_comb; 649 struct ath_ant_comb ant_comb;
650
651 struct pm_qos_request_list pm_qos_req;
649}; 652};
650 653
651struct ath_wiphy { 654struct ath_wiphy {
@@ -675,7 +678,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
675} 678}
676 679
677extern struct ieee80211_ops ath9k_ops; 680extern struct ieee80211_ops ath9k_ops;
678extern struct pm_qos_request_list ath9k_pm_qos_req;
679extern int modparam_nohwcrypt; 681extern int modparam_nohwcrypt;
680extern int led_blink; 682extern int led_blink;
681 683
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 1266333f586d..2bbf94d0191e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -240,16 +240,16 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
240 for (i = 0; (i < num_band_edges) && 240 for (i = 0; (i < num_band_edges) &&
241 (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { 241 (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
242 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) { 242 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
243 twiceMaxEdgePower = pRdEdgesPower[i].tPower; 243 twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
244 break; 244 break;
245 } else if ((i > 0) && 245 } else if ((i > 0) &&
246 (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, 246 (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
247 is2GHz))) { 247 is2GHz))) {
248 if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel, 248 if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
249 is2GHz) < freq && 249 is2GHz) < freq &&
250 pRdEdgesPower[i - 1].flag) { 250 CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
251 twiceMaxEdgePower = 251 twiceMaxEdgePower =
252 pRdEdgesPower[i - 1].tPower; 252 CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
253 } 253 }
254 break; 254 break;
255 } 255 }
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index dacb45e1b906..dd59f09441a3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -233,6 +233,18 @@
233 233
234#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1) 234#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
235 235
236#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
237#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
238
239#define LNA_CTL_BUF_MODE BIT(0)
240#define LNA_CTL_ISEL_LO BIT(1)
241#define LNA_CTL_ISEL_HI BIT(2)
242#define LNA_CTL_BUF_IN BIT(3)
243#define LNA_CTL_FEM_BAND BIT(4)
244#define LNA_CTL_LOCAL_BIAS BIT(5)
245#define LNA_CTL_FORCE_XPA BIT(6)
246#define LNA_CTL_USE_ANT1 BIT(7)
247
236enum eeprom_param { 248enum eeprom_param {
237 EEP_NFTHRESH_5, 249 EEP_NFTHRESH_5,
238 EEP_NFTHRESH_2, 250 EEP_NFTHRESH_2,
@@ -378,10 +390,7 @@ struct modal_eep_header {
378 u8 xatten2Margin[AR5416_MAX_CHAINS]; 390 u8 xatten2Margin[AR5416_MAX_CHAINS];
379 u8 ob_ch1; 391 u8 ob_ch1;
380 u8 db_ch1; 392 u8 db_ch1;
381 u8 useAnt1:1, 393 u8 lna_ctl;
382 force_xpaon:1,
383 local_bias:1,
384 femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
385 u8 miscBits; 394 u8 miscBits;
386 u16 xpaBiasLvlFreq[3]; 395 u16 xpaBiasLvlFreq[3];
387 u8 futureModal[6]; 396 u8 futureModal[6];
@@ -535,18 +544,10 @@ struct cal_target_power_ht {
535 u8 tPow2x[8]; 544 u8 tPow2x[8];
536} __packed; 545} __packed;
537 546
538
539#ifdef __BIG_ENDIAN_BITFIELD
540struct cal_ctl_edges {
541 u8 bChannel;
542 u8 flag:2, tPower:6;
543} __packed;
544#else
545struct cal_ctl_edges { 547struct cal_ctl_edges {
546 u8 bChannel; 548 u8 bChannel;
547 u8 tPower:6, flag:2; 549 u8 ctl;
548} __packed; 550} __packed;
549#endif
550 551
551struct cal_data_op_loop_ar9287 { 552struct cal_data_op_loop_ar9287 {
552 u8 pwrPdg[2][5]; 553 u8 pwrPdg[2][5];
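
With the modal-header bitfield folded into a plain lna_ctl byte, consumers test the named BIT() flags explicitly (the eeprom_def.c hunks below do exactly that). A hedged sketch of the access pattern; the bit positions come from the definitions above, but the sample value is made up rather than read from a real EEPROM:

/* Illustrative only: lna_ctl normally comes from the parsed modal
 * header, not from a hand-written constant. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)              (1u << (n))
#define LNA_CTL_LOCAL_BIAS  BIT(5)
#define LNA_CTL_FORCE_XPA   BIT(6)
#define LNA_CTL_USE_ANT1    BIT(7)

int main(void)
{
        uint8_t lna_ctl = LNA_CTL_LOCAL_BIAS | LNA_CTL_USE_ANT1;

        /* !! normalizes the masked value to 0/1, matching the register
         * field writes in ath9k_hw_def_set_board_values() below. */
        printf("local_bias=%u force_xpa=%u use_ant1=%u\n",
               !!(lna_ctl & LNA_CTL_LOCAL_BIAS),
               !!(lna_ctl & LNA_CTL_FORCE_XPA),
               !!(lna_ctl & LNA_CTL_USE_ANT1));
        return 0;
}
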
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 966b9496a9dd..195406db3bd8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -37,7 +37,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
37 int addr, eep_start_loc; 37 int addr, eep_start_loc;
38 eep_data = (u16 *)eep; 38 eep_data = (u16 *)eep;
39 39
40 if (ah->hw_version.devid == 0x7015) 40 if (AR9287_HTC_DEVID(ah))
41 eep_start_loc = AR9287_HTC_EEP_START_LOC; 41 eep_start_loc = AR9287_HTC_EEP_START_LOC;
42 else 42 else
43 eep_start_loc = AR9287_EEP_START_LOC; 43 eep_start_loc = AR9287_EEP_START_LOC;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 76b4d65472dd..a3ccb1b9638d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -451,9 +451,10 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
451 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2, 451 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
452 AR_AN_TOP2_LOCALBIAS, 452 AR_AN_TOP2_LOCALBIAS,
453 AR_AN_TOP2_LOCALBIAS_S, 453 AR_AN_TOP2_LOCALBIAS_S,
454 pModal->local_bias); 454 !!(pModal->lna_ctl &
455 LNA_CTL_LOCAL_BIAS));
455 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, 456 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
456 pModal->force_xpaon); 457 !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA));
457 } 458 }
458 459
459 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, 460 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
@@ -1062,15 +1063,19 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1062 case 1: 1063 case 1:
1063 break; 1064 break;
1064 case 2: 1065 case 2:
1065 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 1066 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
1067 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
1068 else
1069 scaledPower = 0;
1066 break; 1070 break;
1067 case 3: 1071 case 3:
1068 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 1072 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
1073 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
1074 else
1075 scaledPower = 0;
1069 break; 1076 break;
1070 } 1077 }
1071 1078
1072 scaledPower = max((u16)0, scaledPower);
1073
1074 if (IS_CHAN_2GHZ(chan)) { 1079 if (IS_CHAN_2GHZ(chan)) {
1075 numCtlModes = ARRAY_SIZE(ctlModesFor11g) - 1080 numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
1076 SUB_NUM_CTL_MODES_AT_2G_40; 1081 SUB_NUM_CTL_MODES_AT_2G_40;
@@ -1428,9 +1433,9 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
1428 1433
1429 num_ant_config = 1; 1434 num_ant_config = 1;
1430 1435
1431 if (pBase->version >= 0x0E0D) 1436 if (pBase->version >= 0x0E0D &&
1432 if (pModal->useAnt1) 1437 (pModal->lna_ctl & LNA_CTL_USE_ANT1))
1433 num_ant_config += 1; 1438 num_ant_config += 1;
1434 1439
1435 return num_ant_config; 1440 return num_ant_config;
1436} 1441}
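
The scaledPower hunk fixes an unsigned-underflow hazard: scaledPower is a u16, so subtracting the multi-chain reduction first and clamping with max((u16)0, ...) afterwards can never help, because the subtraction has already wrapped around to a large positive value. The patch checks before subtracting. A small standalone demonstration; the reduction constant is a placeholder, since the value of REDUCE_SCALED_POWER_BY_TWO_CHAIN is not shown in this hunk:

/* Why clamping after an unsigned subtraction is a no-op. */
#include <stdint.h>
#include <stdio.h>

#define REDUCTION 6 /* stand-in for REDUCE_SCALED_POWER_BY_TWO_CHAIN */

static uint16_t old_way(uint16_t power)
{
        power -= REDUCTION;             /* wraps when power < REDUCTION */
        /* mirrors the removed max((u16)0, scaledPower): an unsigned
         * value never goes negative, so the wrap cannot be undone here */
        return power > 0 ? power : 0;
}

static uint16_t new_way(uint16_t power)
{
        if (power > REDUCTION)
                power -= REDUCTION;
        else
                power = 0;
        return power;
}

int main(void)
{
        printf("old_way(3)=%u new_way(3)=%u\n", old_way(3), new_way(3));
        /* prints old_way(3)=65533 new_way(3)=0 */
        return 0;
}
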
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f7ec31b4ddd3..0de3c3d3c245 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -36,8 +36,13 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ 36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ 37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
38 { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */ 38 { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
39 { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
40 { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
41 { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
39 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 42 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
40 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ 43 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
44 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
45 { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */
41 { }, 46 { },
42}; 47};
43 48
@@ -806,6 +811,8 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
806 case 0x7010: 811 case 0x7010:
807 case 0x7015: 812 case 0x7015:
808 case 0x9018: 813 case 0x9018:
814 case 0xA704:
815 case 0x1200:
809 firm_offset = AR7010_FIRMWARE_TEXT; 816 firm_offset = AR7010_FIRMWARE_TEXT;
810 break; 817 break;
811 default: 818 default:
@@ -928,6 +935,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
928 case 0x7010: 935 case 0x7010:
929 case 0x7015: 936 case 0x7015:
930 case 0x9018: 937 case 0x9018:
938 case 0xA704:
939 case 0x1200:
931 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) 940 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
932 hif_dev->fw_name = FIRMWARE_AR7010_1_1; 941 hif_dev->fw_name = FIRMWARE_AR7010_1_1;
933 else 942 else
@@ -1015,6 +1024,13 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
1015 struct hif_device_usb *hif_dev = 1024 struct hif_device_usb *hif_dev =
1016 (struct hif_device_usb *) usb_get_intfdata(interface); 1025 (struct hif_device_usb *) usb_get_intfdata(interface);
1017 1026
1027 /*
1028 * The device has to be set to FULLSLEEP mode in case no
1029 * interface is up.
1030 */
1031 if (!(hif_dev->flags & HIF_USB_START))
1032 ath9k_htc_suspend(hif_dev->htc_handle);
1033
1018 ath9k_hif_usb_dealloc_urbs(hif_dev); 1034 ath9k_hif_usb_dealloc_urbs(hif_dev);
1019 1035
1020 return 0; 1036 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 75ecf6a30d25..c3b561daa6c1 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -455,6 +455,8 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
455void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv); 455void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
456void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv); 456void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
457void ath9k_ps_work(struct work_struct *work); 457void ath9k_ps_work(struct work_struct *work);
458bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
459 enum ath9k_power_mode mode);
458 460
459void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 461void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
460void ath9k_init_leds(struct ath9k_htc_priv *priv); 462void ath9k_init_leds(struct ath9k_htc_priv *priv);
@@ -464,6 +466,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
464 u16 devid, char *product); 466 u16 devid, char *product);
465void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug); 467void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
466#ifdef CONFIG_PM 468#ifdef CONFIG_PM
469void ath9k_htc_suspend(struct htc_target *htc_handle);
467int ath9k_htc_resume(struct htc_target *htc_handle); 470int ath9k_htc_resume(struct htc_target *htc_handle);
468#endif 471#endif
469#ifdef CONFIG_ATH9K_HTC_DEBUGFS 472#ifdef CONFIG_ATH9K_HTC_DEBUGFS
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 3d7b97f1b3ae..8776f49ffd41 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -249,6 +249,8 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
249 case 0x7010: 249 case 0x7010:
250 case 0x7015: 250 case 0x7015:
251 case 0x9018: 251 case 0x9018:
252 case 0xA704:
253 case 0x1200:
252 priv->htc->credits = 45; 254 priv->htc->credits = 45;
253 break; 255 break;
254 default: 256 default:
@@ -889,6 +891,12 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
889} 891}
890 892
891#ifdef CONFIG_PM 893#ifdef CONFIG_PM
894
895void ath9k_htc_suspend(struct htc_target *htc_handle)
896{
897 ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP);
898}
899
892int ath9k_htc_resume(struct htc_target *htc_handle) 900int ath9k_htc_resume(struct htc_target *htc_handle)
893{ 901{
894 int ret; 902 int ret;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a3be8da755d..51977caca47f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -63,8 +63,8 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
63 return mode; 63 return mode;
64} 64}
65 65
66static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 66bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
67 enum ath9k_power_mode mode) 67 enum ath9k_power_mode mode)
68{ 68{
69 bool ret; 69 bool ret;
70 70
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3d19b5bc937f..29d80ca78393 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -121,7 +121,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
121 tx_hdr.data_type = ATH9K_HTC_NORMAL; 121 tx_hdr.data_type = ATH9K_HTC_NORMAL;
122 } 122 }
123 123
124 if (ieee80211_is_data(fc)) { 124 if (ieee80211_is_data_qos(fc)) {
125 qc = ieee80211_get_qos_ctl(hdr); 125 qc = ieee80211_get_qos_ctl(hdr);
126 tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 126 tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
127 } 127 }
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6ebc68bca91f..c7fbe25cc128 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2044,7 +2044,8 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
2044 val = REG_READ(ah, AR7010_GPIO_IN); 2044 val = REG_READ(ah, AR7010_GPIO_IN);
2045 return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; 2045 return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
2046 } else if (AR_SREV_9300_20_OR_LATER(ah)) 2046 } else if (AR_SREV_9300_20_OR_LATER(ah))
2047 return MS_REG_READ(AR9300, gpio) != 0; 2047 return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
2048 AR_GPIO_BIT(gpio)) != 0;
2048 else if (AR_SREV_9271(ah)) 2049 else if (AR_SREV_9271(ah))
2049 return MS_REG_READ(AR9271, gpio) != 0; 2050 return MS_REG_READ(AR9271, gpio) != 0;
2050 else if (AR_SREV_9287_11_OR_LATER(ah)) 2051 else if (AR_SREV_9287_11_OR_LATER(ah))
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 6a0d99eff404..14b8ab386daf 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/pm_qos_params.h>
19 18
20#include "ath9k.h" 19#include "ath9k.h"
21 20
@@ -180,8 +179,6 @@ static const struct ath_ops ath9k_common_ops = {
180 .write = ath9k_iowrite32, 179 .write = ath9k_iowrite32,
181}; 180};
182 181
183struct pm_qos_request_list ath9k_pm_qos_req;
184
185/**************************/ 182/**************************/
186/* Initialization */ 183/* Initialization */
187/**************************/ 184/**************************/
@@ -664,6 +661,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
664 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 661 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
665 662
666 hw->wiphy->interface_modes = 663 hw->wiphy->interface_modes =
664 BIT(NL80211_IFTYPE_P2P_GO) |
665 BIT(NL80211_IFTYPE_P2P_CLIENT) |
667 BIT(NL80211_IFTYPE_AP) | 666 BIT(NL80211_IFTYPE_AP) |
668 BIT(NL80211_IFTYPE_WDS) | 667 BIT(NL80211_IFTYPE_WDS) |
669 BIT(NL80211_IFTYPE_STATION) | 668 BIT(NL80211_IFTYPE_STATION) |
@@ -759,7 +758,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
759 ath_init_leds(sc); 758 ath_init_leds(sc);
760 ath_start_rfkill_poll(sc); 759 ath_start_rfkill_poll(sc);
761 760
762 pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 761 pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
763 PM_QOS_DEFAULT_VALUE); 762 PM_QOS_DEFAULT_VALUE);
764 763
765 return 0; 764 return 0;
@@ -817,8 +816,6 @@ void ath9k_deinit_device(struct ath_softc *sc)
817 816
818 ath9k_ps_wakeup(sc); 817 ath9k_ps_wakeup(sc);
819 818
820 pm_qos_remove_request(&ath9k_pm_qos_req);
821
822 wiphy_rfkill_stop_polling(sc->hw->wiphy); 819 wiphy_rfkill_stop_polling(sc->hw->wiphy);
823 ath_deinit_leds(sc); 820 ath_deinit_leds(sc);
824 821
@@ -832,6 +829,7 @@ void ath9k_deinit_device(struct ath_softc *sc)
832 } 829 }
833 830
834 ieee80211_unregister_hw(hw); 831 ieee80211_unregister_hw(hw);
832 pm_qos_remove_request(&sc->pm_qos_req);
835 ath_rx_cleanup(sc); 833 ath_rx_cleanup(sc);
836 ath_tx_cleanup(sc); 834 ath_tx_cleanup(sc);
837 ath9k_deinit_softc(sc); 835 ath9k_deinit_softc(sc);
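
The init.c and main.c hunks move the PM QoS handle from the driver-global ath9k_pm_qos_req into struct ath_softc, so each device carries its own request and removal now happens only after ieee80211_unregister_hw(). A condensed, hedged sketch of the lifecycle as it looks after this patch — just the four calls visible in the diff, collected into one function for readability:

#include "ath9k.h"      /* now pulls in <linux/pm_qos_params.h> */

/* Not driver code: the calls below live in ath9k_init_device(),
 * ath9k_start(), ath9k_stop() and ath9k_deinit_device() respectively. */
static void ath9k_pm_qos_lifecycle_sketch(struct ath_softc *sc)
{
        /* register a per-device request at probe time */
        pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);

        /* cap CPU DMA latency at 55 us while the interface is up */
        pm_qos_update_request(&sc->pm_qos_req, 55);

        /* relax the constraint again when the interface stops */
        pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);

        /* drop the request after the hw has been unregistered */
        pm_qos_remove_request(&sc->pm_qos_req);
}
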
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 8c13479b17cd..c996963ab339 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -703,8 +703,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
703 rs->rs_phyerr = phyerr; 703 rs->rs_phyerr = phyerr;
704 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 704 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
705 rs->rs_status |= ATH9K_RXERR_DECRYPT; 705 rs->rs_status |= ATH9K_RXERR_DECRYPT;
706 else if ((ads.ds_rxstatus8 & AR_MichaelErr) && 706 else if (ads.ds_rxstatus8 & AR_MichaelErr)
707 rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
708 rs->rs_status |= ATH9K_RXERR_MIC; 707 rs->rs_status |= ATH9K_RXERR_MIC;
709 else if (ads.ds_rxstatus8 & AR_KeyMiss) 708 else if (ads.ds_rxstatus8 & AR_KeyMiss)
710 rs->rs_status |= ATH9K_RXERR_DECRYPT; 709 rs->rs_status |= ATH9K_RXERR_DECRYPT;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 25d3ef4c338e..c0c3464d3a86 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/pm_qos_params.h>
19#include "ath9k.h" 18#include "ath9k.h"
20#include "btcoex.h" 19#include "btcoex.h"
21 20
@@ -245,11 +244,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
245 * the relevant bits of the h/w. 244 * the relevant bits of the h/w.
246 */ 245 */
247 ath9k_hw_set_interrupts(ah, 0); 246 ath9k_hw_set_interrupts(ah, 0);
248 ath_drain_all_txq(sc, false); 247 stopped = ath_drain_all_txq(sc, false);
249 248
250 spin_lock_bh(&sc->rx.pcu_lock); 249 spin_lock_bh(&sc->rx.pcu_lock);
251 250
252 stopped = ath_stoprecv(sc); 251 if (!ath_stoprecv(sc))
252 stopped = false;
253 253
254 /* XXX: do not flush receive queue here. We don't want 254 /* XXX: do not flush receive queue here. We don't want
255 * to flush data frames already in queue because of 255 * to flush data frames already in queue because of
@@ -1244,7 +1244,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1244 ath9k_btcoex_timer_resume(sc); 1244 ath9k_btcoex_timer_resume(sc);
1245 } 1245 }
1246 1246
1247 pm_qos_update_request(&ath9k_pm_qos_req, 55); 1247 pm_qos_update_request(&sc->pm_qos_req, 55);
1248 1248
1249mutex_unlock: 1249mutex_unlock:
1250 mutex_unlock(&sc->mutex); 1250 mutex_unlock(&sc->mutex);
@@ -1423,7 +1423,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1423 1423
1424 sc->sc_flags |= SC_OP_INVALID; 1424 sc->sc_flags |= SC_OP_INVALID;
1425 1425
1426 pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1426 pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
1427 1427
1428 mutex_unlock(&sc->mutex); 1428 mutex_unlock(&sc->mutex);
1429 1429
@@ -1520,7 +1520,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1520 struct ath_softc *sc = aphy->sc; 1520 struct ath_softc *sc = aphy->sc;
1521 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1521 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1522 struct ath_vif *avp = (void *)vif->drv_priv; 1522 struct ath_vif *avp = (void *)vif->drv_priv;
1523 int i;
1524 1523
1525 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1524 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
1526 1525
@@ -1534,21 +1533,24 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1534 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 1533 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
1535 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 1534 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
1536 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { 1535 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
1536 /* Disable SWBA interrupt */
1537 sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
1537 ath9k_ps_wakeup(sc); 1538 ath9k_ps_wakeup(sc);
1539 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1538 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1540 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1539 ath9k_ps_restore(sc); 1541 ath9k_ps_restore(sc);
1542 tasklet_kill(&sc->bcon_tasklet);
1540 } 1543 }
1541 1544
1542 ath_beacon_return(sc, avp); 1545 ath_beacon_return(sc, avp);
1543 sc->sc_flags &= ~SC_OP_BEACONS; 1546 sc->sc_flags &= ~SC_OP_BEACONS;
1544 1547
1545 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 1548 if (sc->nbcnvifs) {
1546 if (sc->beacon.bslot[i] == vif) { 1549 /* Re-enable SWBA interrupt */
1547 printk(KERN_DEBUG "%s: vif had allocated beacon " 1550 sc->sc_ah->imask |= ATH9K_INT_SWBA;
1548 "slot\n", __func__); 1551 ath9k_ps_wakeup(sc);
1549 sc->beacon.bslot[i] = NULL; 1552 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1550 sc->beacon.bslot_aphy[i] = NULL; 1553 ath9k_ps_restore(sc);
1551 }
1552 } 1554 }
1553 1555
1554 sc->nvifs--; 1556 sc->nvifs--;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index c76ea53c20ce..fdc2ec52b42f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -518,7 +518,7 @@ bool ath_stoprecv(struct ath_softc *sc)
518 bool stopped; 518 bool stopped;
519 519
520 spin_lock_bh(&sc->rx.rxbuflock); 520 spin_lock_bh(&sc->rx.rxbuflock);
521 ath9k_hw_stoppcurecv(ah); 521 ath9k_hw_abortpcurecv(ah);
522 ath9k_hw_setrxfilter(ah, 0); 522 ath9k_hw_setrxfilter(ah, 0);
523 stopped = ath9k_hw_stopdmarecv(ah); 523 stopped = ath9k_hw_stopdmarecv(ah);
524 524
@@ -838,6 +838,10 @@ static bool ath9k_rx_accept(struct ath_common *common,
838 struct ath_rx_status *rx_stats, 838 struct ath_rx_status *rx_stats,
839 bool *decrypt_error) 839 bool *decrypt_error)
840{ 840{
841#define is_mc_or_valid_tkip_keyix ((is_mc || \
842 (rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
843 test_bit(rx_stats->rs_keyix, common->tkip_keymap))))
844
841 struct ath_hw *ah = common->ah; 845 struct ath_hw *ah = common->ah;
842 __le16 fc; 846 __le16 fc;
843 u8 rx_status_len = ah->caps.rx_status_len; 847 u8 rx_status_len = ah->caps.rx_status_len;
@@ -879,15 +883,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
879 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { 883 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
880 *decrypt_error = true; 884 *decrypt_error = true;
881 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { 885 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
886 bool is_mc;
882 /* 887 /*
883 * The MIC error bit is only valid if the frame 888 * The MIC error bit is only valid if the frame
884 * is not a control frame or fragment, and it was 889 * is not a control frame or fragment, and it was
885 * decrypted using a valid TKIP key. 890 * decrypted using a valid TKIP key.
886 */ 891 */
892 is_mc = !!is_multicast_ether_addr(hdr->addr1);
893
887 if (!ieee80211_is_ctl(fc) && 894 if (!ieee80211_is_ctl(fc) &&
888 !ieee80211_has_morefrags(fc) && 895 !ieee80211_has_morefrags(fc) &&
889 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 896 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
890 test_bit(rx_stats->rs_keyix, common->tkip_keymap)) 897 is_mc_or_valid_tkip_keyix)
891 rxs->flag |= RX_FLAG_MMIC_ERROR; 898 rxs->flag |= RX_FLAG_MMIC_ERROR;
892 else 899 else
893 rx_stats->rs_status &= ~ATH9K_RXERR_MIC; 900 rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
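
The rx_accept change relaxes the MIC-failure filter: with is_mc_or_valid_tkip_keyix, a multicast frame keeps its RX_FLAG_MMIC_ERROR report even when the hardware gives no valid key index, while unicast frames still require a key index that maps to an installed TKIP key, presumably so group-key MIC failures can still drive TKIP countermeasures. The same condition written as a helper instead of a macro, a hedged paraphrase using only the fields that appear in the hunk above:

/* Equivalent rewrite of is_mc_or_valid_tkip_keyix for readability;
 * ath9k itself keeps the macro form shown above. */
static bool mic_error_reportable(struct ath_common *common,
                                 struct ath_rx_status *rx_stats,
                                 bool is_mc)
{
        /* multicast: report the MMIC failure even without a key index */
        if (is_mc)
                return true;

        /* unicast: only trust the MIC bit if the frame was decrypted
         * with a TKIP key we actually installed */
        return rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
               test_bit(rx_stats->rs_keyix, common->tkip_keymap);
}
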
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index fa05b711e5cd..2c6a22fbb0f0 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -866,7 +866,13 @@
866#define AR_DEVID_7010(_ah) \ 866#define AR_DEVID_7010(_ah) \
867 (((_ah)->hw_version.devid == 0x7010) || \ 867 (((_ah)->hw_version.devid == 0x7010) || \
868 ((_ah)->hw_version.devid == 0x7015) || \ 868 ((_ah)->hw_version.devid == 0x7015) || \
869 ((_ah)->hw_version.devid == 0x9018)) 869 ((_ah)->hw_version.devid == 0x9018) || \
870 ((_ah)->hw_version.devid == 0xA704) || \
871 ((_ah)->hw_version.devid == 0x1200))
872
873#define AR9287_HTC_DEVID(_ah) \
874 (((_ah)->hw_version.devid == 0x7015) || \
875 ((_ah)->hw_version.devid == 0x1200))
870 876
871#define AR_RADIO_SREV_MAJOR 0xf0 877#define AR_RADIO_SREV_MAJOR 0xf0
872#define AR_RAD5133_SREV_MAJOR 0xc0 878#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -978,11 +984,13 @@ enum {
978#define AR9287_GPIO_IN_VAL_S 11 984#define AR9287_GPIO_IN_VAL_S 11
979#define AR9271_GPIO_IN_VAL 0xFFFF0000 985#define AR9271_GPIO_IN_VAL 0xFFFF0000
980#define AR9271_GPIO_IN_VAL_S 16 986#define AR9271_GPIO_IN_VAL_S 16
981#define AR9300_GPIO_IN_VAL 0x0001FFFF
982#define AR9300_GPIO_IN_VAL_S 0
983#define AR7010_GPIO_IN_VAL 0x0000FFFF 987#define AR7010_GPIO_IN_VAL 0x0000FFFF
984#define AR7010_GPIO_IN_VAL_S 0 988#define AR7010_GPIO_IN_VAL_S 0
985 989
990#define AR_GPIO_IN 0x404c
991#define AR9300_GPIO_IN_VAL 0x0001FFFF
992#define AR9300_GPIO_IN_VAL_S 0
993
986#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c) 994#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
987#define AR_GPIO_OE_OUT_DRV 0x3 995#define AR_GPIO_OE_OUT_DRV 0x3
988#define AR_GPIO_OE_OUT_DRV_NO 0x0 996#define AR_GPIO_OE_OUT_DRV_NO 0x0
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f2ade2402ce2..aff04789f794 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1120,7 +1120,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1120 } 1120 }
1121} 1121}
1122 1122
1123void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1123bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1124{ 1124{
1125 struct ath_hw *ah = sc->sc_ah; 1125 struct ath_hw *ah = sc->sc_ah;
1126 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1126 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1128,7 +1128,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1128 int i, npend = 0; 1128 int i, npend = 0;
1129 1129
1130 if (sc->sc_flags & SC_OP_INVALID) 1130 if (sc->sc_flags & SC_OP_INVALID)
1131 return; 1131 return true;
1132 1132
1133 /* Stop beacon queue */ 1133 /* Stop beacon queue */
1134 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1134 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -1142,25 +1142,15 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1142 } 1142 }
1143 } 1143 }
1144 1144
1145 if (npend) { 1145 if (npend)
1146 int r; 1146 ath_print(common, ATH_DBG_FATAL, "Failed to stop TX DMA!\n");
1147
1148 ath_print(common, ATH_DBG_FATAL,
1149 "Failed to stop TX DMA. Resetting hardware!\n");
1150
1151 spin_lock_bh(&sc->sc_resetlock);
1152 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
1153 if (r)
1154 ath_print(common, ATH_DBG_FATAL,
1155 "Unable to reset hardware; reset status %d\n",
1156 r);
1157 spin_unlock_bh(&sc->sc_resetlock);
1158 }
1159 1147
1160 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1148 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1161 if (ATH_TXQ_SETUP(sc, i)) 1149 if (ATH_TXQ_SETUP(sc, i))
1162 ath_draintxq(sc, &sc->tx.txq[i], retry_tx); 1150 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1163 } 1151 }
1152
1153 return !npend;
1164} 1154}
1165 1155
1166void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 1156void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
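
ath_drain_all_txq() no longer resets the chip when TX DMA refuses to stop; it just reports the outcome and leaves the decision to the caller. Together with the ath_set_channel() and ath_stoprecv() hunks earlier, the stop sequence on a channel change now reads roughly as below. This is a hedged paraphrase of those hunks, not the full function; locking details and the code that consumes 'stopped' further down are omitted:

/* Sketch of the updated stop sequence in ath_set_channel(); the
 * rx.pcu_lock taken here is released later in the real function. */
static bool channel_change_stop_sketch(struct ath_softc *sc,
                                       struct ath_hw *ah)
{
        bool stopped;

        ath9k_hw_set_interrupts(ah, 0);

        /* false if any TX queue still had pending DMA */
        stopped = ath_drain_all_txq(sc, false);

        spin_lock_bh(&sc->rx.pcu_lock);

        /* RX DMA must stop cleanly too */
        if (!ath_stoprecv(sc))
                stopped = false;

        return stopped;
}
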
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index ae6c006bbc56..546b4e4ec5ea 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -291,7 +291,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
291 291
292 if (SUPP(CARL9170FW_WLANTX_CAB)) { 292 if (SUPP(CARL9170FW_WLANTX_CAB)) {
293 ar->hw->wiphy->interface_modes |= 293 ar->hw->wiphy->interface_modes |=
294 BIT(NL80211_IFTYPE_AP); 294 BIT(NL80211_IFTYPE_AP) |
295 BIT(NL80211_IFTYPE_P2P_GO);
295 } 296 }
296 } 297 }
297 298
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 980ae70ea424..dc7b30b170d0 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -647,7 +647,7 @@ init:
647 } 647 }
648 648
649unlock: 649unlock:
650 if (err && (vif_id != -1)) { 650 if (err && (vif_id >= 0)) {
651 vif_priv->active = false; 651 vif_priv->active = false;
652 bitmap_release_region(&ar->vif_bitmap, vif_id, 0); 652 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
653 ar->vifs--; 653 ar->vifs--;
@@ -1631,7 +1631,8 @@ void *carl9170_alloc(size_t priv_size)
1631 * supports these modes. The code which will add the 1631 * supports these modes. The code which will add the
1632 * additional interface_modes is in fw.c. 1632 * additional interface_modes is in fw.c.
1633 */ 1633 */
1634 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1634 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1635 BIT(NL80211_IFTYPE_P2P_CLIENT);
1635 1636
1636 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 1637 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1637 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 1638 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index b575c865142d..7e6506a77bbb 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -810,7 +810,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
810 810
811 mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION | 811 mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
812 AR9170_TX_MAC_BACKOFF); 812 AR9170_TX_MAC_BACKOFF);
813 mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) && 813 mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
814 AR9170_TX_MAC_QOS); 814 AR9170_TX_MAC_QOS);
815 815
816 no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); 816 no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
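
The one-character tx.c fix swaps a logical AND for the intended bitwise AND: '(hw_queue << AR9170_TX_MAC_QOS_S) && AR9170_TX_MAC_QOS' collapses to 0 or 1, so the queue bits never reached the QoS field of the MAC control word; '&' masks the shifted queue number into place. A standalone demonstration of the difference; the shift and mask values are stand-ins, not the real AR9170 register layout:

/* Logical vs. bitwise AND on a shifted field value. */
#include <stdio.h>

#define QOS_S 10        /* stand-in for AR9170_TX_MAC_QOS_S */
#define QOS   0x0c00    /* stand-in for AR9170_TX_MAC_QOS   */

int main(void)
{
        unsigned int hw_queue = 2;

        unsigned int wrong = (hw_queue << QOS_S) && QOS; /* logical: 1   */
        unsigned int right = (hw_queue << QOS_S) & QOS;  /* field: 0x800 */

        printf("wrong=0x%x right=0x%x\n", wrong, right);
        return 0;
}
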
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 3317039cd28f..7504ed14c725 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -553,12 +553,12 @@ static int carl9170_usb_flush(struct ar9170 *ar)
553 usb_free_urb(urb); 553 usb_free_urb(urb);
554 } 554 }
555 555
556 ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ); 556 ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000);
557 if (ret == 0) 557 if (ret == 0)
558 err = -ETIMEDOUT; 558 err = -ETIMEDOUT;
559 559
560 /* lets wait a while until the tx - queues are dried out */ 560 /* lets wait a while until the tx - queues are dried out */
561 ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ); 561 ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000);
562 if (ret == 0) 562 if (ret == 0)
563 err = -ETIMEDOUT; 563 err = -ETIMEDOUT;
564 564
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 9a55338d957f..09e2dfd7b175 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func,
163err_free_ssb: 163err_free_ssb:
164 kfree(sdio); 164 kfree(sdio);
165err_disable_func: 165err_disable_func:
166 sdio_claim_host(func);
166 sdio_disable_func(func); 167 sdio_disable_func(func);
167err_release_host: 168err_release_host:
168 sdio_release_host(func); 169 sdio_release_host(func);
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index e5685dc317a8..b4de0ca10feb 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1170,7 +1170,6 @@ static void if_sdio_remove(struct sdio_func *func)
1170 lbs_deb_sdio("call remove card\n"); 1170 lbs_deb_sdio("call remove card\n");
1171 lbs_stop_card(card->priv); 1171 lbs_stop_card(card->priv);
1172 lbs_remove_card(card->priv); 1172 lbs_remove_card(card->priv);
1173 card->priv->surpriseremoved = 1;
1174 1173
1175 flush_workqueue(card->workqueue); 1174 flush_workqueue(card->workqueue);
1176 destroy_workqueue(card->workqueue); 1175 destroy_workqueue(card->workqueue);
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 79bcb4e5d2ca..ecd4d04b2c3c 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1055,7 +1055,6 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1055 lbs_stop_card(priv); 1055 lbs_stop_card(priv);
1056 lbs_remove_card(priv); /* will call free_netdev */ 1056 lbs_remove_card(priv); /* will call free_netdev */
1057 1057
1058 priv->surpriseremoved = 1;
1059 free_irq(spi->irq, card); 1058 free_irq(spi->irq, card);
1060 if_spi_terminate_spi_thread(card); 1059 if_spi_terminate_spi_thread(card);
1061 if (card->pdata->teardown) 1060 if (card->pdata->teardown)
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 46b88b118c99..fcd1bbfc632d 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -915,8 +915,6 @@ void lbs_remove_card(struct lbs_private *priv)
915 915
916 lbs_free_adapter(priv); 916 lbs_free_adapter(priv);
917 lbs_cfg_free(priv); 917 lbs_cfg_free(priv);
918
919 priv->dev = NULL;
920 free_netdev(dev); 918 free_netdev(dev);
921 919
922 lbs_deb_leave(LBS_DEB_MAIN); 920 lbs_deb_leave(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index e8e2d0f4763d..f3d396e7544b 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1392,10 +1392,9 @@ static void orinoco_process_scan_results(struct work_struct *work)
1392 orinoco_add_hostscan_results(priv, buf, len); 1392 orinoco_add_hostscan_results(priv, buf, len);
1393 1393
1394 kfree(buf); 1394 kfree(buf);
1395 } else if (priv->scan_request) { 1395 } else {
1396 /* Either abort or complete the scan */ 1396 /* Either abort or complete the scan */
1397 cfg80211_scan_done(priv->scan_request, (len < 0)); 1397 orinoco_scan_done(priv, (len < 0));
1398 priv->scan_request = NULL;
1399 } 1398 }
1400 1399
1401 spin_lock_irqsave(&priv->scan_lock, flags); 1400 spin_lock_irqsave(&priv->scan_lock, flags);
@@ -1684,6 +1683,8 @@ static int __orinoco_down(struct orinoco_private *priv)
1684 hermes_write_regn(hw, EVACK, 0xffff); 1683 hermes_write_regn(hw, EVACK, 0xffff);
1685 } 1684 }
1686 1685
1686 orinoco_scan_done(priv, true);
1687
1687 /* firmware will have to reassociate */ 1688 /* firmware will have to reassociate */
1688 netif_carrier_off(dev); 1689 netif_carrier_off(dev);
1689 priv->last_linkstatus = 0xffff; 1690 priv->last_linkstatus = 0xffff;
@@ -1762,10 +1763,7 @@ void orinoco_reset(struct work_struct *work)
1762 orinoco_unlock(priv, &flags); 1763 orinoco_unlock(priv, &flags);
1763 1764
1764 /* Scanning support: Notify scan cancellation */ 1765 /* Scanning support: Notify scan cancellation */
1765 if (priv->scan_request) { 1766 orinoco_scan_done(priv, true);
1766 cfg80211_scan_done(priv->scan_request, 1);
1767 priv->scan_request = NULL;
1768 }
1769 1767
1770 if (priv->hard_reset) { 1768 if (priv->hard_reset) {
1771 err = (*priv->hard_reset)(priv); 1769 err = (*priv->hard_reset)(priv);
@@ -1813,6 +1811,12 @@ static int __orinoco_commit(struct orinoco_private *priv)
1813 struct net_device *dev = priv->ndev; 1811 struct net_device *dev = priv->ndev;
1814 int err = 0; 1812 int err = 0;
1815 1813
1814 /* If we've called commit, we are reconfiguring or bringing the
1815 * interface up. Maintaining countermeasures across this would
1816 * be confusing, so note that we've disabled them. The port will
1817 * be enabled later in orinoco_commit or __orinoco_up. */
1818 priv->tkip_cm_active = 0;
1819
1816 err = orinoco_hw_program_rids(priv); 1820 err = orinoco_hw_program_rids(priv);
1817 1821
1818 /* FIXME: what about netif_tx_lock */ 1822 /* FIXME: what about netif_tx_lock */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 71b3d68b9403..32954c4b243a 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -151,20 +151,20 @@ orinoco_cs_config(struct pcmcia_device *link)
151 goto failed; 151 goto failed;
152 } 152 }
153 153
154 ret = pcmcia_request_irq(link, orinoco_interrupt);
155 if (ret)
156 goto failed;
157
158 /* We initialize the hermes structure before completing PCMCIA
159 * configuration just in case the interrupt handler gets
160 * called. */
161 mem = ioport_map(link->resource[0]->start, 154 mem = ioport_map(link->resource[0]->start,
162 resource_size(link->resource[0])); 155 resource_size(link->resource[0]));
163 if (!mem) 156 if (!mem)
164 goto failed; 157 goto failed;
165 158
159 /* We initialize the hermes structure before completing PCMCIA
160 * configuration just in case the interrupt handler gets
161 * called. */
166 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 162 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
167 163
164 ret = pcmcia_request_irq(link, orinoco_interrupt);
165 if (ret)
166 goto failed;
167
168 ret = pcmcia_enable_device(link); 168 ret = pcmcia_enable_device(link);
169 if (ret) 169 if (ret)
170 goto failed; 170 goto failed;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index a38a7bd25f19..b9aedf18a046 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -57,7 +57,6 @@
57#include <linux/fcntl.h> 57#include <linux/fcntl.h>
58#include <linux/spinlock.h> 58#include <linux/spinlock.h>
59#include <linux/list.h> 59#include <linux/list.h>
60#include <linux/smp_lock.h>
61#include <linux/usb.h> 60#include <linux/usb.h>
62#include <linux/timer.h> 61#include <linux/timer.h>
63 62
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 4300d9db7d8c..86cb54c842e7 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -229,3 +229,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv,
229 priv->scan_request = NULL; 229 priv->scan_request = NULL;
230 } 230 }
231} 231}
232
233void orinoco_scan_done(struct orinoco_private *priv, bool abort)
234{
235 if (priv->scan_request) {
236 cfg80211_scan_done(priv->scan_request, abort);
237 priv->scan_request = NULL;
238 }
239}
diff --git a/drivers/net/wireless/orinoco/scan.h b/drivers/net/wireless/orinoco/scan.h
index 2dc4e046dbdb..27281fb0a6dc 100644
--- a/drivers/net/wireless/orinoco/scan.h
+++ b/drivers/net/wireless/orinoco/scan.h
@@ -16,5 +16,6 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
16void orinoco_add_hostscan_results(struct orinoco_private *dev, 16void orinoco_add_hostscan_results(struct orinoco_private *dev,
17 unsigned char *buf, 17 unsigned char *buf,
18 size_t len); 18 size_t len);
19void orinoco_scan_done(struct orinoco_private *priv, bool abort);
19 20
20#endif /* _ORINOCO_SCAN_H_ */ 21#endif /* _ORINOCO_SCAN_H_ */
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index fb859a5ad2eb..db34c282e59b 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -214,21 +214,21 @@ spectrum_cs_config(struct pcmcia_device *link)
214 goto failed; 214 goto failed;
215 } 215 }
216 216
217 ret = pcmcia_request_irq(link, orinoco_interrupt);
218 if (ret)
219 goto failed;
220
221 /* We initialize the hermes structure before completing PCMCIA
222 * configuration just in case the interrupt handler gets
223 * called. */
224 mem = ioport_map(link->resource[0]->start, 217 mem = ioport_map(link->resource[0]->start,
225 resource_size(link->resource[0])); 218 resource_size(link->resource[0]));
226 if (!mem) 219 if (!mem)
227 goto failed; 220 goto failed;
228 221
222 /* We initialize the hermes structure before completing PCMCIA
223 * configuration just in case the interrupt handler gets
224 * called. */
229 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 225 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
230 hw->eeprom_pda = true; 226 hw->eeprom_pda = true;
231 227
228 ret = pcmcia_request_irq(link, orinoco_interrupt);
229 if (ret)
230 goto failed;
231
232 ret = pcmcia_enable_device(link); 232 ret = pcmcia_enable_device(link);
233 if (ret) 233 if (ret)
234 goto failed; 234 goto failed;
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 93505f93bf97..e5afabee60d1 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -911,10 +911,10 @@ static int orinoco_ioctl_set_auth(struct net_device *dev,
911 */ 911 */
912 if (param->value) { 912 if (param->value) {
913 priv->tkip_cm_active = 1; 913 priv->tkip_cm_active = 1;
914 ret = hermes_enable_port(hw, 0); 914 ret = hermes_disable_port(hw, 0);
915 } else { 915 } else {
916 priv->tkip_cm_active = 0; 916 priv->tkip_cm_active = 0;
917 ret = hermes_disable_port(hw, 0); 917 ret = hermes_enable_port(hw, 0);
918 } 918 }
919 break; 919 break;
920 920
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 458bb57914a3..cdbeec9f83ea 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,8 +66,8 @@ struct netfront_cb {
66 66
67#define GRANT_INVALID_REF 0 67#define GRANT_INVALID_REF 0
68 68
69#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) 69#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
70#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) 70#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
71#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) 71#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
72 72
73struct netfront_info { 73struct netfront_info {