author     Mauro Carvalho Chehab <mchehab@redhat.com>   2012-07-29 20:09:39 -0400
committer  Mauro Carvalho Chehab <mchehab@redhat.com>   2012-07-29 20:09:39 -0400
commit     73bcc49959e4e40911dd0dd634bf1b353827df66 (patch)
tree       6b0c1d440c490a65c51ab5cf5aee7095cb4089d3 /drivers/net
parent     8447c4d15e357a458c9051ddc84aa6c8b9c27000 (diff)
parent     28a33cbc24e4256c143dce96c7d93bf423229f92 (diff)
Merge tag 'v3.5'
Linux 3.5

* tag 'v3.5': (1242 commits)
  Linux 3.5
  Remove SYSTEM_SUSPEND_DISK system state
  kdb: Switch to nolock variants of kmsg_dump functions
  printk: Implement some unlocked kmsg_dump functions
  printk: Remove kdb_syslog_data
  kdb: Revive dmesg command
  dm raid1: set discard_zeroes_data_unsupported
  dm thin: do not send discards to shared blocks
  dm raid1: fix crash with mirror recovery and discard
  pnfs-obj: Fix __r4w_get_page when offset is beyond i_size
  pnfs-obj: don't leak objio_state if ore_write/read fails
  ore: Unlock r4w pages in exact reverse order of locking
  ore: Remove support of partial IO request (NFS crash)
  ore: Fix NFS crash by supporting any unaligned RAID IO
  UBIFS: fix a bug in empty space fix-up
  cx25821: Remove bad strcpy to read-only char*
  HID: hid-multitouch: add support for Zytronic panels
  MIPS: PCI: Move fixups from __init to __devinit.
  MIPS: Fix bug.h MIPS build regression
  MIPS: sync-r4k: remove redundant irq operation
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_debugfs.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 18
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 15
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 8
-rw-r--r--  drivers/net/caif/caif_hsi.c | 5
-rw-r--r--  drivers/net/can/c_can/c_can.c | 20
-rw-r--r--  drivers/net/can/c_can/c_can.h | 1
-rw-r--r--  drivers/net/can/cc770/cc770_platform.c | 2
-rw-r--r--  drivers/net/can/flexcan.c | 4
-rw-r--r--  drivers/net/dummy.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 54
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 17
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 42
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 79
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 43
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 7
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 63
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 38
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 4
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 12
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 1
-rw-r--r--  drivers/net/ethernet/tile/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/tile/Makefile | 4
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 1898
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 12
-rw-r--r--  drivers/net/phy/icplus.c | 7
-rw-r--r--  drivers/net/phy/mdio-mux.c | 10
-rw-r--r--  drivers/net/phy/mdio_bus.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 8
-rw-r--r--  drivers/net/usb/ipheth.c | 5
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 105
-rw-r--r--  drivers/net/usb/sierra_net.c | 14
-rw-r--r--  drivers/net/usb/usbnet.c | 53
-rw-r--r--  drivers/net/virtio_net.c | 19
-rw-r--r--  drivers/net/wireless/airo.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 27
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 31
-rw-r--r--  drivers/net/wireless/ath/key.c | 4
-rw-r--r--  drivers/net/wireless/b43/b43.h | 4
-rw-r--r--  drivers/net/wireless/b43/main.c | 21
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 2
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 4
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 20
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-sta.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-mac80211.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 5
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 22
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.c | 5
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.h | 7
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 39
-rw-r--r--  drivers/net/wireless/mwifiex/fw.h | 6
-rw-r--r--  drivers/net/wireless/mwifiex/ie.c | 1
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 9
-rw-r--r--  drivers/net/wireless/mwifiex/txrx.c | 10
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 21
-rw-r--r--  drivers/net/wireless/mwifiex/usb.c | 28
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 3
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 13
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/leds.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 3
-rw-r--r--  drivers/net/wireless/ti/wl1251/acx.c | 2
-rw-r--r--  drivers/net/wireless/ti/wl1251/event.c | 3
-rw-r--r--  drivers/net/wireless/ti/wl1251/spi.c | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/Kconfig | 1
-rw-r--r--  drivers/net/xen-netfront.c | 8
118 files changed, 2767 insertions, 560 deletions
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa251dea..2cf084eb9d52 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
6#include "bonding.h" 6#include "bonding.h"
7#include "bond_alb.h" 7#include "bond_alb.h"
8 8
9#ifdef CONFIG_DEBUG_FS 9#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
10 10
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9e8a3b..2ee76993f052 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/pkt_sched.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
     return next;
 }
 
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
     skb->dev = slave_dev;
 
-    skb->queue_mapping = bond_queue_mapping(skb);
+    BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+                 sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+    skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
 
     if (unlikely(netpoll_tx_running(slave_dev)))
         bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -3226,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
     switch (event) {
     case NETDEV_CHANGENAME:
         return bond_event_changename(event_bond);
+    case NETDEV_UNREGISTER:
+        bond_remove_proc_entry(event_bond);
+        break;
+    case NETDEV_REGISTER:
+        bond_create_proc_entry(event_bond);
+        break;
     default:
         break;
     }
@@ -4171,7 +4178,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
     /*
      * Save the original txq to restore before passing to the driver
      */
-    bond_queue_mapping(skb) = skb->queue_mapping;
+    qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
 
     if (unlikely(txq >= dev->real_num_tx_queues)) {
         do {
@@ -4410,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
     bond_work_cancel_all(bond);
 
-    bond_remove_proc_entry(bond);
-
     bond_debug_unregister(bond);
 
     __hw_addr_flush(&bond->mc_list);
@@ -4813,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
 
     bond_set_lockdep_class(bond_dev);
 
-    bond_create_proc_entry(bond);
     list_add_tail(&bond->bond_list, &bn->dev_list);
 
     bond_prepare_sysfs_group(bond);
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index ad284baafe87..3cea38d37344 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -150,14 +150,25 @@ static void bond_info_show_master(struct seq_file *seq)
     }
 }
 
+static const char *bond_slave_link_status(s8 link)
+{
+    static const char * const status[] = {
+        [BOND_LINK_UP] = "up",
+        [BOND_LINK_FAIL] = "going down",
+        [BOND_LINK_DOWN] = "down",
+        [BOND_LINK_BACK] = "going back",
+    };
+
+    return status[link];
+}
+
 static void bond_info_show_slave(struct seq_file *seq,
                                  const struct slave *slave)
 {
     struct bonding *bond = seq->private;
 
     seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
-    seq_printf(seq, "MII Status: %s\n",
-               (slave->link == BOND_LINK_UP) ? "up" : "down");
+    seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
     if (slave->speed == SPEED_UNKNOWN)
         seq_printf(seq, "Speed: %s\n", "Unknown");
     else
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f045320..485bedb8278c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
         }
     }
 
-    pr_info("%s: Unable to set %.*s as primary slave.\n",
-            bond->dev->name, (int)strlen(buf) - 1, buf);
+    strncpy(bond->params.primary, ifname, IFNAMSIZ);
+    bond->params.primary[IFNAMSIZ - 1] = 0;
+
+    pr_info("%s: Recording %s as primary, "
+            "but it has not been enslaved to %s yet.\n",
+            bond->dev->name, ifname, bond->dev->name);
 out:
     write_unlock_bh(&bond->curr_slave_lock);
     read_unlock(&bond->lock);
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 1520814c77c7..4a27adb7ae67 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -693,8 +693,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
          */
         memcpy(rx_buf, (u8 *)piggy_desc,
                CFHSI_DESC_SHORT_SZ);
-        /* Mark no embedded frame here */
-        piggy_desc->offset = 0;
         if (desc_pld_len == -EPROTO)
             goto out_of_sync;
     }
@@ -737,6 +735,8 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
         /* Extract any payload in piggyback descriptor. */
         if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
             goto out_of_sync;
+        /* Mark no embedded frame after extracting it */
+        piggy_desc->offset = 0;
     }
 }
 
@@ -1178,6 +1178,7 @@ int cfhsi_probe(struct platform_device *pdev)
         dev_err(&ndev->dev, "%s: Registration error: %d.\n",
                 __func__, res);
         free_netdev(ndev);
+        return -ENODEV;
     }
     /* Add CAIF HSI device to list. */
     spin_lock(&cfhsi_list_lock);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda072a16..86cd532c78f9 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
     priv->write_reg(priv, &priv->regs->control,
                     CONTROL_ENABLE_AR);
 
-    if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
-                              CAN_CTRLMODE_LOOPBACK)) {
+    if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+        (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
         /* loopback + silent mode : useful for hot self-test */
         priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
                         CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
  *
  * We iterate from priv->tx_echo to priv->tx_next and check if the
  * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
  */
 static void c_can_do_tx(struct net_device *dev)
 {
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
     for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
         msg_obj_no = get_tx_echo_msg_obj(priv);
         val = c_can_read_reg32(priv, &priv->regs->txrqst1);
-        if (!(val & (1 << msg_obj_no))) {
+        if (!(val & (1 << (msg_obj_no - 1)))) {
             can_get_echo_skb(dev,
                              msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
             stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
                                               & IF_MCONT_DLC_MASK;
             stats->tx_packets++;
             c_can_inval_msg_object(dev, 0, msg_obj_no);
+        } else {
+            break;
         }
     }
 
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
     struct net_device *dev = napi->dev;
     struct c_can_priv *priv = netdev_priv(dev);
 
-    irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+    irqstatus = priv->irqstatus;
     if (!irqstatus)
         goto end;
 
@@ -1028,12 +1030,11 @@ end:
 
 static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
-    u16 irqstatus;
     struct net_device *dev = (struct net_device *)dev_id;
     struct c_can_priv *priv = netdev_priv(dev);
 
-    irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
-    if (!irqstatus)
+    priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+    if (!priv->irqstatus)
         return IRQ_NONE;
 
     /* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
         goto exit_irq_fail;
     }
 
+    napi_enable(&priv->napi);
+
     /* start the c_can controller */
     c_can_start(dev);
 
-    napi_enable(&priv->napi);
     netif_start_queue(dev);
 
     return 0;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef3d09a..5f32d34af507 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -76,6 +76,7 @@ struct c_can_priv {
     unsigned int tx_next;
     unsigned int tx_echo;
     void *priv;		/* for board-specific data */
+    u16 irqstatus;
 };
 
 struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115eee8075..688371cda37a 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
     struct cc770_platform_data *pdata = pdev->dev.platform_data;
 
     priv->can.clock.freq = pdata->osc_freq;
-    if (priv->cpu_interface | CPUIF_DSC)
+    if (priv->cpu_interface & CPUIF_DSC)
         priv->can.clock.freq /= 2;
     priv->clkout = pdata->cor;
     priv->bus_config = pdata->bcr;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 38c0690df5c8..81d474102378 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -939,12 +939,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
         return PTR_ERR(pinctrl);
 
     if (pdev->dev.of_node) {
-        const u32 *clock_freq_p;
+        const __be32 *clock_freq_p;
 
         clock_freq_p = of_get_property(pdev->dev.of_node,
                                        "clock-frequency", NULL);
         if (clock_freq_p)
-            clock_freq = *clock_freq_p;
+            clock_freq = be32_to_cpup(clock_freq_p);
     }
 
     if (!clock_freq) {
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a2747b..bab0158f1cc3 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
     rtnl_lock();
     err = __rtnl_link_register(&dummy_link_ops);
 
-    for (i = 0; i < numdummies && !err; i++)
+    for (i = 0; i < numdummies && !err; i++) {
         err = dummy_init_one();
+        cond_resched();
+    }
     if (err < 0)
         __rtnl_link_unregister(&dummy_link_ops);
     rtnl_unlock();
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 9cc15701101b..1f78b63d5efe 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
     if ((phy_data & BMSR_LSTATUS) == 0) {
         /* link down */
         netif_carrier_off(netdev);
-        netif_stop_queue(netdev);
         hw->hibernate = true;
         if (atl1c_reset_mac(hw) != 0)
             if (netif_msg_hw(adapter))
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d81633..d09c6b583d17 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
         dma_unmap_single(bp->sdev->dma_dev, mapping,
                          RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
         dev_kfree_skb_any(skb);
-        skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+        skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
         if (skb == NULL)
             return -ENOMEM;
         mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
         dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                          DMA_TO_DEVICE);
 
-        bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+        bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
         if (!bounce_skb)
             goto err_out;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ac7b74488531..1fa4927a45b1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
         int k, last;
 
         if (skb == NULL) {
-            j++;
+            j = NEXT_TX_BD(j);
             continue;
         }
 
@@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
         tx_buf->skb = NULL;
 
         last = tx_buf->nr_frags;
-        j++;
-        for (k = 0; k < last; k++, j++) {
+        j = NEXT_TX_BD(j);
+        for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
             tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
             dma_unmap_page(&bp->pdev->dev,
                            dma_unmap_addr(tx_buf, mapping),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2f354c..7de824184979 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
 
 #define ETH_RX_ERROR_FALGS	ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
-#define BNX2X_IP_CSUM_ERR(cqe) \
-            (!((cqe)->fast_path_cqe.status_flags & \
-               ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
-             ((cqe)->fast_path_cqe.type_error_flags & \
-              ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
-            (!((cqe)->fast_path_cqe.status_flags & \
-               ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
-             ((cqe)->fast_path_cqe.type_error_flags & \
-              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
-            (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
             (((le16_to_cpu(flags) & \
                PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743bf4bde..8098eea9704d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -190,7 +190,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 
     if ((netif_tx_queue_stopped(txq)) &&
         (bp->state == BNX2X_STATE_OPEN) &&
-        (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+        (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
         netif_tx_wake_queue(txq);
 
     __netif_tx_unlock(txq);
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
     return 0;
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+                                struct bnx2x_fastpath *fp)
+{
+    /* Do nothing if no IP/L4 csum validation was done */
+
+    if (cqe->fast_path_cqe.status_flags &
+        (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+         ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+        return;
+
+    /* If both IP/L4 validation were done, check if an error was found. */
+
+    if (cqe->fast_path_cqe.type_error_flags &
+        (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+         ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+        fp->eth_q_stats.hw_csum_err++;
+    else
+        skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -806,13 +825,9 @@ reuse_rx:
 
         skb_checksum_none_assert(skb);
 
-        if (bp->dev->features & NETIF_F_RXCSUM) {
+        if (bp->dev->features & NETIF_F_RXCSUM)
+            bnx2x_csum_validate(skb, cqe, fp);
 
-            if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                skb->ip_summed = CHECKSUM_UNNECESSARY;
-            else
-                fp->eth_q_stats.hw_csum_err++;
-        }
 
         skb_record_rx_queue(skb, fp->rx_queue);
 
@@ -2501,8 +2516,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                    struct bnx2x_fp_txdata *txdata,
@@ -3156,7 +3169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
     txdata->tx_bd_prod += nbd;
 
-    if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+    if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
         netif_tx_stop_queue(txq);
 
         /* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3165,7 +3178,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
         smp_mb();
 
         fp->eth_q_stats.driver_xoff++;
-        if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+        if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
             netif_tx_wake_queue(txq);
     }
     txdata->tx_pkt++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a3fb7215cd89..6e7d5c0843b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -40,6 +40,7 @@
 #define I2C_BSC0 0
 #define I2C_BSC1 1
 #define I2C_WA_RETRY_CNT 3
+#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
 #define MCPR_IMC_COMMAND_READ_OP 1
 #define MCPR_IMC_COMMAND_WRITE_OP 2
 
@@ -7659,6 +7660,28 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
     return -EINVAL;
 }
 
+static void bnx2x_warpcore_power_module(struct link_params *params,
+                                        struct bnx2x_phy *phy,
+                                        u8 power)
+{
+    u32 pin_cfg;
+    struct bnx2x *bp = params->bp;
+
+    pin_cfg = (REG_RD(bp, params->shmem_base +
+                      offsetof(struct shmem_region,
+                               dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+               PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+              PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+    if (pin_cfg == PIN_CFG_NA)
+        return;
+    DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+       power, pin_cfg);
+    /* Low ==> corresponding SFP+ module is powered
+     * high ==> the SFP+ module is powered down
+     */
+    bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                                  struct link_params *params,
                                                  u16 addr, u8 byte_cnt,
@@ -7678,6 +7701,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
     /* 4 byte aligned address */
     addr32 = addr & (~0x3);
     do {
+        if (cnt == I2C_WA_PWR_ITER) {
+            bnx2x_warpcore_power_module(params, phy, 0);
+            /* Note that 100us are not enough here */
+            usleep_range(1000,1000);
+            bnx2x_warpcore_power_module(params, phy, 1);
+        }
         rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
                             data_array);
     } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8200,29 +8229,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
     bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
 }
 
-static void bnx2x_warpcore_power_module(struct link_params *params,
-                                        struct bnx2x_phy *phy,
-                                        u8 power)
-{
-    u32 pin_cfg;
-    struct bnx2x *bp = params->bp;
-
-    pin_cfg = (REG_RD(bp, params->shmem_base +
-                      offsetof(struct shmem_region,
-                               dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
-               PORT_HW_CFG_E3_PWR_DIS_MASK) >>
-              PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
-    if (pin_cfg == PIN_CFG_NA)
-        return;
-    DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
-       power, pin_cfg);
-    /* Low ==> corresponding SFP+ module is powered
-     * high ==> the SFP+ module is powered down
-     */
-    bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
                                     struct link_params *params)
 {
@@ -9748,7 +9754,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
     msleep(1);
 
-    if (!(CHIP_IS_E1(bp)))
+    if (!(CHIP_IS_E1x(bp)))
         port = BP_PATH(bp);
     else
         port = params->port;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c95e7b5e2b85..2c89d17cbb29 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
     }
 
     if (atomic_read(&ulp_ops->ref_count) != 0)
-        netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+        pr_warn("%s: Failed waiting for ref count to go to zero\n",
+                __func__);
     return 0;
 
 out_unlock:
@@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
     uinfo = &udev->cnic_uinfo;
 
-    uinfo->mem[0].addr = dev->netdev->base_addr;
+    uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
     uinfo->mem[0].internal_addr = dev->regview;
-    uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
     uinfo->mem[0].memtype = UIO_MEM_PHYS;
 
     if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+        uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+                                             TX_MAX_TSS_RINGS + 1);
         uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
                              PAGE_MASK;
         if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
         uinfo->name = "bnx2_cnic";
     } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+        uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
         uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
                              PAGE_MASK;
         uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb516807a..e47ff8be1d7b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         }
     }
 
-    if (tg3_flag(tp, 5755_PLUS))
+    if (tg3_flag(tp, 5755_PLUS) ||
+        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
         tg3_flag_set(tp, SHORT_DMA_BUG);
 
     if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8d06ea381741..921c2082af4c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -122,15 +122,15 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
         goto done;
 
     if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
-        dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
-                 "permitted to execute this cmd (opcode %d)\n",
-                 opcode);
+        dev_warn(&adapter->pdev->dev,
+                 "opcode %d-%d is not permitted\n",
+                 opcode, subsystem);
     } else {
         extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                       CQE_STATUS_EXTD_MASK;
-        dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
-                "status %d, extd-status %d\n",
-                opcode, compl_status, extd_status);
+        dev_err(&adapter->pdev->dev,
+                "opcode %d-%d failed:status %d-%d\n",
+                opcode, subsystem, compl_status, extd_status);
     }
 }
done:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 9625bf420c16..b3f3fc3d1323 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1566,7 +1566,7 @@ struct be_hw_stats_v1 {
     u32 rsvd0[BE_TXP_SW_SZ];
     struct be_erx_stats_v1 erx;
     struct be_pmem_stats pmem;
-    u32 rsvd1[3];
+    u32 rsvd1[18];
 };
 
 struct be_cmd_req_get_stats_v1 {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd308d78a..501dfa9c88ec 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
     copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
     if (copied) {
+        int gso_segs = skb_shinfo(skb)->gso_segs;
+
         /* record the sent skb in the sent_skb table */
         BUG_ON(txo->sent_skb_list[start]);
         txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
         be_txq_notify(adapter, txq->id, wrb_cnt);
 
-        be_tx_stats_update(txo, wrb_cnt, copied,
-                           skb_shinfo(skb)->gso_segs, stopped);
+        be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
     } else {
         txq->head = start;
         dev_kfree_skb_any(skb);
@@ -3236,7 +3237,7 @@ static void be_netdev_init(struct net_device *netdev)
 
     netdev->flags |= IFF_MULTICAST;
 
-    netif_set_gso_max_size(netdev, 65535);
+    netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
 
     netdev->netdev_ops = &be_netdev_ops;
 
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 0741aded9eb0..ab1d80ff0791 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
     if (priv->mode == MQ_MG_MODE) {
         baddr = &regs->txic0;
         for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-            if (likely(priv->tx_queue[i]->txcoalescing)) {
-                gfar_write(baddr + i, 0);
-                gfar_write(baddr + i, priv->tx_queue[i]->txic);
-            }
+            gfar_write(baddr + i, 0);
+            if (likely(priv->tx_queue[i]->txcoalescing))
+                gfar_write(baddr + i, priv->tx_queue[i]->txic);
         }
 
         baddr = &regs->rxic0;
         for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-            if (likely(priv->rx_queue[i]->rxcoalescing)) {
-                gfar_write(baddr + i, 0);
-                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-            }
+            gfar_write(baddr + i, 0);
+            if (likely(priv->rx_queue[i]->rxcoalescing))
+                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
         }
     }
 }
@@ -2065,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
             return NETDEV_TX_OK;
         }
 
-        /* Steal sock reference for processing TX time stamps */
-        swap(skb_new->sk, skb->sk);
-        swap(skb_new->destructor, skb->destructor);
-        kfree_skb(skb);
+        if (skb->sk)
+            skb_set_owner_w(skb_new, skb->sk);
+        consume_skb(skb);
         skb = skb_new;
     }
 
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 79b07ec6726f..0cafe4fe9406 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -122,8 +122,10 @@ config IGB_DCA
 
 config IGB_PTP
 	bool "PTP Hardware Clock (PHC)"
-	default y
-	depends on IGB && PTP_1588_CLOCK
+	default n
+	depends on IGB && EXPERIMENTAL
+	select PPS
+	select PTP_1588_CLOCK
 	---help---
 	  Say Y here if you want to use PTP Hardware Clock (PHC) in the
 	  driver. Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
 config IXGBE_PTP
 	bool "PTP Clock Support"
 	default n
-	depends on IXGBE && PTP_1588_CLOCK
+	depends on IXGBE && EXPERIMENTAL
+	select PPS
+	select PTP_1588_CLOCK
 	---help---
 	  Say Y here if you want support for 1588 Timestamping with a
 	  PHC device, using the PTP 1588 Clock support. This is
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 36db4df09aed..1f063dcd8f85 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
     ctrl = er32(CTRL);
     status = er32(STATUS);
     rxcw = er32(RXCW);
+    /* SYNCH bit and IV bit are sticky */
+    udelay(10);
+    rxcw = er32(RXCW);
 
     if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
 
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351a4097b2ba..76edbc1be33b 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -103,6 +103,7 @@
 #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
 #define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075df7a4..905e2147d918 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
      * When SoL/IDER sessions are active, autoneg/speed/duplex
      * cannot be changed
      */
-    if (hw->phy.ops.check_reset_block(hw)) {
+    if (hw->phy.ops.check_reset_block &&
+        hw->phy.ops.check_reset_block(hw)) {
         e_err("Cannot change link characteristics when SoL/IDER is active.\n");
         return -EINVAL;
     }
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
          * PHY loopback cannot be performed if SoL/IDER
          * sessions are active
          */
-        if (hw->phy.ops.check_reset_block(hw)) {
+        if (hw->phy.ops.check_reset_block &&
+            hw->phy.ops.check_reset_block(hw)) {
             e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
             *data = 0;
             goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 238ab2f8a5e7..e3a7b07df629 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
 **/
 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
-    u16 phy_reg;
-    u32 phy_id;
+    u16 phy_reg = 0;
+    u32 phy_id = 0;
+    s32 ret_val;
+    u16 retry_count;
+
+    for (retry_count = 0; retry_count < 2; retry_count++) {
+        ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+        if (ret_val || (phy_reg == 0xFFFF))
+            continue;
+        phy_id = (u32)(phy_reg << 16);
 
-    e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
-    phy_id = (u32)(phy_reg << 16);
-    e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
-    phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+        ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+        if (ret_val || (phy_reg == 0xFFFF)) {
+            phy_id = 0;
+            continue;
+        }
+        phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+        break;
+    }
 
     if (hw->phy.id) {
         if (hw->phy.id == phy_id)
             return true;
-    } else {
-        if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
-            hw->phy.id = phy_id;
+    } else if (phy_id) {
+        hw->phy.id = phy_id;
+        hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
         return true;
     }
 
-    return false;
+    /*
+     * In case the PHY needs to be in mdio slow mode,
+     * set slow mode and try to get the PHY id again.
+     */
+    hw->phy.ops.release(hw);
+    ret_val = e1000_set_mdio_slow_mode_hv(hw);
+    if (!ret_val)
+        ret_val = e1000e_get_phy_id(hw);
+    hw->phy.ops.acquire(hw);
+
+    return !ret_val;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3ab52e..a13439928488 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
      * In the case of the phy reset being blocked, we already have a link.
      * We do not need to set it up again.
      */
-    if (hw->phy.ops.check_reset_block(hw))
+    if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
         return 0;
 
     /*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435b00dc..623e30b9964d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                              __le16 csum, struct sk_buff *skb)
+                              struct sk_buff *skb)
 {
     u16 status = (u16)status_err;
     u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
     if (status & E1000_RXD_STAT_IXSM)
         return;
 
-    /* TCP/UDP checksum error bit is set */
-    if (errors & E1000_RXD_ERR_TCPE) {
+    /* TCP/UDP checksum error bit or IP checksum error bit is set */
+    if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
         /* let the stack verify checksum errors */
         adapter->hw_csum_err++;
         return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
         return;
 
     /* It must be a TCP or UDP packet with a valid checksum */
-    if (status & E1000_RXD_STAT_TCPCS) {
-        /* TCP checksum is good */
-        skb->ip_summed = CHECKSUM_UNNECESSARY;
-    } else {
-        /*
-         * IP fragment with UDP payload
-         * Hardware complements the payload checksum, so we undo it
-         * and then put the value in host order for further stack use.
-         */
-        __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-        skb->csum = csum_unfold(~sum);
-        skb->ip_summed = CHECKSUM_COMPLETE;
-    }
+    skb->ip_summed = CHECKSUM_UNNECESSARY;
     adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
         skb_put(skb, length);
 
         /* Receive Checksum Offload */
-        e1000_rx_checksum(adapter, staterr,
-                          rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+        e1000_rx_checksum(adapter, staterr, skb);
 
         e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
     total_rx_bytes += skb->len;
     total_rx_packets++;
 
-    e1000_rx_checksum(adapter, staterr,
-                      rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+    e1000_rx_checksum(adapter, staterr, skb);
 
     e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
         }
     }
 
-    /* Receive Checksum Offload XXX recompute due to CRC strip? */
-    e1000_rx_checksum(adapter, staterr,
-                      rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+    /* Receive Checksum Offload */
+    e1000_rx_checksum(adapter, staterr, skb);
 
     e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
     /* Enable Receive Checksum Offload for TCP and UDP */
     rxcsum = er32(RXCSUM);
-    if (adapter->netdev->features & NETIF_F_RXCSUM) {
+    if (adapter->netdev->features & NETIF_F_RXCSUM)
         rxcsum |= E1000_RXCSUM_TUOFL;
-
-        /*
-         * IPv4 payload checksum for UDP fragments must be
-         * used in conjunction with packet-split.
-         */
-        if (adapter->rx_ps_pages)
-            rxcsum |= E1000_RXCSUM_IPPCSE;
-    } else {
+    else
         rxcsum &= ~E1000_RXCSUM_TUOFL;
-        /* no need to clear IPPCSE as it defaults to 0 */
-    }
     ew32(RXCSUM, rxcsum);
 
     if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
     int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
     /* Jumbo frame support */
-    if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-        if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-            e_err("Jumbo Frames not supported.\n");
-            return -EINVAL;
-        }
-
-        /*
-         * IP payload checksum (enabled with jumbos/packet-split when
-         * Rx checksum is enabled) and generation of RSS hash is
-         * mutually exclusive in the hardware.
-         */
-        if ((netdev->features & NETIF_F_RXCSUM) &&
-            (netdev->features & NETIF_F_RXHASH)) {
-            e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
-            return -EINVAL;
-        }
+    if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+        !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+        e_err("Jumbo Frames not supported.\n");
+        return -EINVAL;
     }
 
     /* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
                      NETIF_F_RXALL)))
         return 0;
 
-    /*
-     * IP payload checksum (enabled with jumbos/packet-split when Rx
-     * checksum is enabled) and generation of RSS hash is mutually
-     * exclusive in the hardware.
-     */
-    if (adapter->rx_ps_pages &&
-        (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-        e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
-        return -EINVAL;
-    }
-
     if (changed & NETIF_F_RXFCS) {
         if (features & NETIF_F_RXFCS) {
             adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -6237,7 +6190,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
         adapter->hw.phy.ms_type = e1000_ms_hw_default;
     }
 
-    if (hw->phy.ops.check_reset_block(hw))
+    if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
         e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
     /* Set initial default active device features */
@@ -6404,7 +6357,7 @@ err_register:
6404 if (!(adapter->flags & FLAG_HAS_AMT)) 6357 if (!(adapter->flags & FLAG_HAS_AMT))
6405 e1000e_release_hw_control(adapter); 6358 e1000e_release_hw_control(adapter);
6406err_eeprom: 6359err_eeprom:
6407 if (!hw->phy.ops.check_reset_block(hw)) 6360 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6408 e1000_phy_hw_reset(&adapter->hw); 6361 e1000_phy_hw_reset(&adapter->hw);
6409err_hw_init: 6362err_hw_init:
6410 kfree(adapter->tx_ring); 6363 kfree(adapter->tx_ring);
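Both netdev.c hunks make check_reset_block an optional callback: the call site first tests the ops pointer, so MAC variants that do not implement it no longer dereference NULL. A minimal standalone sketch of the same pattern, using hypothetical names rather than the real e1000e types:

    #include <stddef.h>
    #include <stdio.h>

    struct phy_ops {
        /* Optional: may be NULL when the MAC has no reset-block logic. */
        int (*check_reset_block)(void *hw);
    };

    /* Returns non-zero when a management session blocks the PHY reset.
     * A missing callback is treated as "not blocked".
     */
    static int phy_reset_blocked(const struct phy_ops *ops, void *hw)
    {
        return ops->check_reset_block ? ops->check_reset_block(hw) : 0;
    }

    int main(void)
    {
        struct phy_ops ops = { .check_reset_block = NULL };

        if (!phy_reset_blocked(&ops, NULL))
            printf("PHY reset allowed\n");
        return 0;
    }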
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d013bc3c..b860d4f7ea2a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
2155 s32 ret_val; 2155 s32 ret_val;
2156 u32 ctrl; 2156 u32 ctrl;
2157 2157
2158 ret_val = phy->ops.check_reset_block(hw); 2158 if (phy->ops.check_reset_block) {
2159 if (ret_val) 2159 ret_val = phy->ops.check_reset_block(hw);
2160 return 0; 2160 if (ret_val)
2161 return 0;
2162 }
2161 2163
2162 ret_val = phy->ops.acquire(hw); 2164 ret_val = phy->ops.acquire(hw);
2163 if (ret_val) 2165 if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index e65083958421..5e84eaac48c1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
206 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 206 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
207 break; 207 break;
208 case e1000_i350: 208 case e1000_i350:
209 case e1000_i210:
210 case e1000_i211:
211 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 209 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
212 break; 210 break;
213 default: 211 default:
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8ce67064b9c5..90eef07943f4 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
357 struct igbvf_adapter *adapter = netdev_priv(netdev); 357 struct igbvf_adapter *adapter = netdev_priv(netdev);
358 struct e1000_hw *hw = &adapter->hw; 358 struct e1000_hw *hw = &adapter->hw;
359 359
360 if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) || 360 if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
361 ((ec->rx_coalesce_usecs > 3) && 361 (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
362 (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) || 362 adapter->current_itr = ec->rx_coalesce_usecs << 2;
363 (ec->rx_coalesce_usecs == 2)) 363 adapter->requested_itr = 1000000000 /
364 return -EINVAL; 364 (adapter->current_itr * 256);
365 365 } else if ((ec->rx_coalesce_usecs == 3) ||
366 /* convert to rate of irq's per second */ 366 (ec->rx_coalesce_usecs == 2)) {
367 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
368 adapter->current_itr = IGBVF_START_ITR; 367 adapter->current_itr = IGBVF_START_ITR;
369 adapter->requested_itr = ec->rx_coalesce_usecs; 368 adapter->requested_itr = ec->rx_coalesce_usecs;
370 } else { 369 } else if (ec->rx_coalesce_usecs == 0) {
371 adapter->current_itr = ec->rx_coalesce_usecs << 2; 370 /*
371 * The user's desire is to turn off interrupt throttling
372 * altogether, but due to HW limitations, we can't do that.
373 * Instead we set a very small value in EITR, which would
374 * allow ~967k interrupts per second, but allow the adapter's
375 * internal clocking to still function properly.
376 */
377 adapter->current_itr = 4;
372 adapter->requested_itr = 1000000000 / 378 adapter->requested_itr = 1000000000 /
373 (adapter->current_itr * 256); 379 (adapter->current_itr * 256);
374 } 380 } else
381 return -EINVAL;
375 382
376 writel(adapter->current_itr, 383 writel(adapter->current_itr,
377 hw->hw_addr + adapter->rx_ring->itr_register); 384 hw->hw_addr + adapter->rx_ring->itr_register);
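The rewritten igbvf_set_coalesce accepts any value inside the supported microsecond window and derives the interrupt rate from it. The shift by two appears to express microseconds in the roughly 256 ns register units the ITR uses (4 x 256 ns is about 1 us); a small standalone sketch of the arithmetic, with hypothetical helper names:

    #include <stdio.h>

    /* Hypothetical helpers mirroring the arithmetic in the hunk above:
     * usecs is the requested rx-coalescing interval, the register value is
     * expressed in ~256 ns units (usecs << 2), and the resulting rate is
     * 1,000,000,000 / (units * 256) interrupts per second.
     */
    static unsigned int itr_units_from_usecs(unsigned int usecs)
    {
        return usecs << 2;
    }

    static unsigned int irqs_per_sec(unsigned int itr_units)
    {
        return 1000000000u / (itr_units * 256u);
    }

    int main(void)
    {
        unsigned int usecs = 100;                       /* example request */
        unsigned int units = itr_units_from_usecs(usecs);

        printf("%u usecs -> ITR %u -> ~%u irqs/s\n",
               usecs, units, irqs_per_sec(units));      /* ~9765 irqs/s */
        return 0;
    }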
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 3ef3c5284e52..7af291e236bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
196 __IXGBE_HANG_CHECK_ARMED, 196 __IXGBE_HANG_CHECK_ARMED,
197 __IXGBE_RX_RSC_ENABLED, 197 __IXGBE_RX_RSC_ENABLED,
198 __IXGBE_RX_CSUM_UDP_ZERO_ERR, 198 __IXGBE_RX_CSUM_UDP_ZERO_ERR,
199 __IXGBE_RX_FCOE_BUFSZ, 199 __IXGBE_RX_FCOE,
200}; 200};
201 201
202#define check_for_tx_hang(ring) \ 202#define check_for_tx_hang(ring) \
@@ -290,7 +290,7 @@ struct ixgbe_ring_feature {
290#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) 290#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
291static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) 291static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
292{ 292{
293 return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0; 293 return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
294} 294}
295#else 295#else
296#define ixgbe_rx_pg_order(_ring) 0 296#define ixgbe_rx_pg_order(_ring) 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index af1a5314b494..c377706e81a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -634,7 +634,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
634 f = &adapter->ring_feature[RING_F_FCOE]; 634 f = &adapter->ring_feature[RING_F_FCOE];
635 if ((rxr_idx >= f->mask) && 635 if ((rxr_idx >= f->mask) &&
636 (rxr_idx < f->mask + f->indices)) 636 (rxr_idx < f->mask + f->indices))
637 set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state); 637 set_bit(__IXGBE_RX_FCOE, &ring->state);
638 } 638 }
639 639
640#endif /* IXGBE_FCOE */ 640#endif /* IXGBE_FCOE */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457ea23a..e242104ab471 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1058,17 +1058,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1058#ifdef IXGBE_FCOE 1058#ifdef IXGBE_FCOE
1059/** 1059/**
1060 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type 1060 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1061 * @adapter: address of board private structure 1061 * @ring: structure containing ring specific data
1062 * @rx_desc: advanced rx descriptor 1062 * @rx_desc: advanced rx descriptor
1063 * 1063 *
1064 * Returns : true if it is FCoE pkt 1064 * Returns : true if it is FCoE pkt
1065 */ 1065 */
1066static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, 1066static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1067 union ixgbe_adv_rx_desc *rx_desc) 1067 union ixgbe_adv_rx_desc *rx_desc)
1068{ 1068{
1069 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1069 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1070 1070
1071 return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 1071 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1072 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == 1072 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1073 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << 1073 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1074 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); 1074 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1148 1148
1149 /* alloc new page for storage */ 1149 /* alloc new page for storage */
1150 if (likely(!page)) { 1150 if (likely(!page)) {
1151 page = alloc_pages(GFP_ATOMIC | __GFP_COLD, 1151 page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
1152 ixgbe_rx_pg_order(rx_ring)); 1152 ixgbe_rx_pg_order(rx_ring));
1153 if (unlikely(!page)) { 1153 if (unlikely(!page)) {
1154 rx_ring->rx_stats.alloc_rx_page_failed++; 1154 rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1390 union ixgbe_adv_rx_desc *rx_desc, 1390 union ixgbe_adv_rx_desc *rx_desc,
1391 struct sk_buff *skb) 1391 struct sk_buff *skb)
1392{ 1392{
1393 struct net_device *dev = rx_ring->netdev;
1394
1393 ixgbe_update_rsc_stats(rx_ring, skb); 1395 ixgbe_update_rsc_stats(rx_ring, skb);
1394 1396
1395 ixgbe_rx_hash(rx_ring, rx_desc, skb); 1397 ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1401 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); 1403 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
1402#endif 1404#endif
1403 1405
1404 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1406 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
1407 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1405 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1408 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1406 __vlan_hwaccel_put_tag(skb, vid); 1409 __vlan_hwaccel_put_tag(skb, vid);
1407 } 1410 }
1408 1411
1409 skb_record_rx_queue(skb, rx_ring->queue_index); 1412 skb_record_rx_queue(skb, rx_ring->queue_index);
1410 1413
1411 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1414 skb->protocol = eth_type_trans(skb, dev);
1412} 1415}
1413 1416
1414static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, 1417static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1546,6 +1549,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1546 skb->truesize -= ixgbe_rx_bufsz(rx_ring); 1549 skb->truesize -= ixgbe_rx_bufsz(rx_ring);
1547 } 1550 }
1548 1551
1552#ifdef IXGBE_FCOE
1553 /* do not attempt to pad FCoE Frames as this will disrupt DDP */
1554 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1555 return false;
1556
1557#endif
1549 /* if skb_pad returns an error the skb was freed */ 1558 /* if skb_pad returns an error the skb was freed */
1550 if (unlikely(skb->len < 60)) { 1559 if (unlikely(skb->len < 60)) {
1551 int pad_len = 60 - skb->len; 1560 int pad_len = 60 - skb->len;
@@ -1772,7 +1781,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1772 1781
1773#ifdef IXGBE_FCOE 1782#ifdef IXGBE_FCOE
1774 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1783 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1775 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { 1784 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
1776 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); 1785 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1777 if (!ddp_bytes) { 1786 if (!ddp_bytes) {
1778 dev_kfree_skb_any(skb); 1787 dev_kfree_skb_any(skb);
@@ -3607,10 +3616,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3607 if (hw->mac.type == ixgbe_mac_82598EB) 3616 if (hw->mac.type == ixgbe_mac_82598EB)
3608 netif_set_gso_max_size(adapter->netdev, 32768); 3617 netif_set_gso_max_size(adapter->netdev, 32768);
3609 3618
3610
3611 /* Enable VLAN tag insert/strip */
3612 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3613
3614 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3619 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3615 3620
3616#ifdef IXGBE_FCOE 3621#ifdef IXGBE_FCOE
@@ -6642,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6642 return -EINVAL; 6647 return -EINVAL;
6643 } 6648 }
6644 6649
6650 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6651 e_err(drv, "Enable failed, SR-IOV enabled\n");
6652 return -EINVAL;
6653 }
6654
6645 /* Hardware supports up to 8 traffic classes */ 6655 /* Hardware supports up to 8 traffic classes */
6646 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || 6656 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
6647 (hw->mac.type == ixgbe_mac_82598EB && 6657 (hw->mac.type == ixgbe_mac_82598EB &&
@@ -6701,11 +6711,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6701{ 6711{
6702 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6712 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6703 6713
6704#ifdef CONFIG_DCB
6705 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6706 features &= ~NETIF_F_HW_VLAN_RX;
6707#endif
6708
6709 /* return error if RXHASH is being enabled when RSS is not supported */ 6714 /* return error if RXHASH is being enabled when RSS is not supported */
6710 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 6715 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
6711 features &= ~NETIF_F_RXHASH; 6716 features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6723,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6718 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 6723 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
6719 features &= ~NETIF_F_LRO; 6724 features &= ~NETIF_F_LRO;
6720 6725
6721
6722 return features; 6726 return features;
6723} 6727}
6724 6728
@@ -6766,6 +6770,11 @@ static int ixgbe_set_features(struct net_device *netdev,
6766 need_reset = true; 6770 need_reset = true;
6767 } 6771 }
6768 6772
6773 if (features & NETIF_F_HW_VLAN_RX)
6774 ixgbe_vlan_strip_enable(adapter);
6775 else
6776 ixgbe_vlan_strip_disable(adapter);
6777
6769 if (changed & NETIF_F_RXALL) 6778 if (changed & NETIF_F_RXALL)
6770 need_reset = true; 6779 need_reset = true;
6771 6780
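With the DCB code no longer forcing NETIF_F_HW_VLAN_RX, hardware VLAN stripping now simply follows the feature flag each time set_features runs. A toy version of that shape, where the flag value and the enable/disable helpers are stand-ins for the real kernel definitions:

    #include <stdio.h>

    #define NETIF_F_HW_VLAN_RX (1u << 0)    /* stand-in for the kernel flag */

    static void vlan_strip_enable(void)  { puts("hw vlan stripping on");  }
    static void vlan_strip_disable(void) { puts("hw vlan stripping off"); }

    /* Sketch of a set_features handler: the hardware setting tracks the
     * requested feature bit, instead of being forced by an unrelated mode
     * such as DCB.
     */
    static int set_features(unsigned int features)
    {
        if (features & NETIF_F_HW_VLAN_RX)
            vlan_strip_enable();
        else
            vlan_strip_disable();
        return 0;
    }

    int main(void)
    {
        set_features(NETIF_F_HW_VLAN_RX);
        set_features(0);
        return 0;
    }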
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ddc6a4d19302..dcebd128becf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -708,6 +708,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
708{ 708{
709 struct ixgbe_hw *hw = &adapter->hw; 709 struct ixgbe_hw *hw = &adapter->hw;
710 u32 incval = 0; 710 u32 incval = 0;
711 u32 timinca = 0;
711 u32 shift = 0; 712 u32 shift = 0;
712 u32 cycle_speed; 713 u32 cycle_speed;
713 unsigned long flags; 714 unsigned long flags;
@@ -730,8 +731,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
730 break; 731 break;
731 } 732 }
732 733
733 /* Bail if the cycle speed didn't change */ 734 /*
734 if (adapter->cycle_speed == cycle_speed) 735 * grab the current TIMINCA value from the register so that it can be
736 * double checked. If the register value has been cleared, it must be
737 * reset to the correct value for generating a cyclecounter. If
738 * TIMINCA is zero, the SYSTIME registers do not increment at all.
739 */
740 timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
741
742 /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
743 if (adapter->cycle_speed == cycle_speed && timinca)
735 return; 744 return;
736 745
737 /* disable the SDP clock out */ 746 /* disable the SDP clock out */
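The early return now also requires a non-zero TIMINCA, because a cleared increment register leaves SYSTIME frozen even when the link speed is unchanged. A compact model of the guard, with placeholder register accessors:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_timinca(void)       { return 0; }  /* pretend it was cleared */
    static void reprogram_cyclecounter(void) { puts("reprogramming TIMINCA/SYSTIME"); }

    static uint32_t cached_cycle_speed;

    /* Skip the reconfiguration only when nothing changed AND the increment
     * register still holds a non-zero value; a zero TIMINCA means SYSTIME is
     * not advancing and must be reprogrammed regardless.
     */
    static void start_cyclecounter(uint32_t cycle_speed)
    {
        uint32_t timinca = read_timinca();

        if (cached_cycle_speed == cycle_speed && timinca)
            return;

        cached_cycle_speed = cycle_speed;
        reprogram_cyclecounter();
    }

    int main(void)
    {
        start_cyclecounter(10000);
        return 0;
    }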
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec4288b10..41e32257a4e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
201 unsigned int i, eop, count = 0; 201 unsigned int i, eop, count = 0;
202 unsigned int total_bytes = 0, total_packets = 0; 202 unsigned int total_bytes = 0, total_packets = 0;
203 203
204 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
205 return true;
206
204 i = tx_ring->next_to_clean; 207 i = tx_ring->next_to_clean;
205 eop = tx_ring->tx_buffer_info[i].next_to_watch; 208 eop = tx_ring->tx_buffer_info[i].next_to_watch;
206 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 209 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
969 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 972 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
970 for (i = 0; i < q_vector->txr_count; i++) { 973 for (i = 0; i < q_vector->txr_count; i++) {
971 tx_ring = &(adapter->tx_ring[r_idx]); 974 tx_ring = &(adapter->tx_ring[r_idx]);
972 tx_ring->total_bytes = 0;
973 tx_ring->total_packets = 0;
974 ixgbevf_clean_tx_irq(adapter, tx_ring); 975 ixgbevf_clean_tx_irq(adapter, tx_ring);
975 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 976 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
976 r_idx + 1); 977 r_idx + 1);
@@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
994 struct ixgbe_hw *hw = &adapter->hw; 995 struct ixgbe_hw *hw = &adapter->hw;
995 struct ixgbevf_ring *rx_ring; 996 struct ixgbevf_ring *rx_ring;
996 int r_idx; 997 int r_idx;
997 int i;
998
999 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1000 for (i = 0; i < q_vector->rxr_count; i++) {
1001 rx_ring = &(adapter->rx_ring[r_idx]);
1002 rx_ring->total_bytes = 0;
1003 rx_ring->total_packets = 0;
1004 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1005 r_idx + 1);
1006 }
1007 998
1008 if (!q_vector->rxr_count) 999 if (!q_vector->rxr_count)
1009 return IRQ_HANDLED; 1000 return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 04d901d0ff63..f0f06b2bc28b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
436 /* 436 /*
437 * Hardware-specific parameters. 437 * Hardware-specific parameters.
438 */ 438 */
439#if defined(CONFIG_HAVE_CLK)
439 struct clk *clk; 440 struct clk *clk;
441#endif
440 unsigned int t_clk; 442 unsigned int t_clk;
441}; 443};
442 444
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2895 mp->dev = dev; 2897 mp->dev = dev;
2896 2898
2897 /* 2899 /*
2898 * Get the clk rate, if there is one, otherwise use the default. 2900 * Start with a default rate, and if there is a clock, allow
2901 * it to override the default.
2899 */ 2902 */
2903 mp->t_clk = 133000000;
2904#if defined(CONFIG_HAVE_CLK)
2900 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); 2905 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
2901 if (!IS_ERR(mp->clk)) { 2906 if (!IS_ERR(mp->clk)) {
2902 clk_prepare_enable(mp->clk); 2907 clk_prepare_enable(mp->clk);
2903 mp->t_clk = clk_get_rate(mp->clk); 2908 mp->t_clk = clk_get_rate(mp->clk);
2904 } else {
2905 mp->t_clk = 133000000;
2906 printk(KERN_WARNING "Unable to get clock");
2907 } 2909 }
2908 2910#endif
2909 set_params(mp, pd); 2911 set_params(mp, pd);
2910 netif_set_real_num_tx_queues(dev, mp->txq_count); 2912 netif_set_real_num_tx_queues(dev, mp->txq_count);
2911 netif_set_real_num_rx_queues(dev, mp->rxq_count); 2913 netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2995 phy_detach(mp->phy); 2997 phy_detach(mp->phy);
2996 cancel_work_sync(&mp->tx_timeout_task); 2998 cancel_work_sync(&mp->tx_timeout_task);
2997 2999
3000#if defined(CONFIG_HAVE_CLK)
2998 if (!IS_ERR(mp->clk)) { 3001 if (!IS_ERR(mp->clk)) {
2999 clk_disable_unprepare(mp->clk); 3002 clk_disable_unprepare(mp->clk);
3000 clk_put(mp->clk); 3003 clk_put(mp->clk);
3001 } 3004 }
3005#endif
3006
3002 free_netdev(mp->dev); 3007 free_netdev(mp->dev);
3003 3008
3004 platform_set_drvdata(pdev, NULL); 3009 platform_set_drvdata(pdev, NULL);
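The probe path now always starts from the 133 MHz default and lets a clock override it only when the platform provides the clk API. A compressed sketch of that pattern, with a stand-in configuration macro and a pretend clock rate:

    #include <stdio.h>

    /* Stand-in for the kernel's CONFIG_HAVE_CLK: remove the define to model
     * a platform without the common clock framework.
     */
    #define HAVE_CLK 1

    static unsigned long t_clk;

    static void probe_clock(void)
    {
        t_clk = 133000000;                       /* safe default first */
    #ifdef HAVE_CLK
        {
            unsigned long rate = 125000000;      /* pretend clk_get_rate() result */
            if (rate)                            /* clock present: override default */
                t_clk = rate;
        }
    #endif
    }

    int main(void)
    {
        probe_clock();
        printf("t_clk = %lu Hz\n", t_clk);
        return 0;
    }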
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f2ab92..28a54451a3e5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
4381 struct sky2_port *sky2 = netdev_priv(dev); 4381 struct sky2_port *sky2 = netdev_priv(dev);
4382 netdev_features_t changed = dev->features ^ features; 4382 netdev_features_t changed = dev->features ^ features;
4383 4383
4384 if (changed & NETIF_F_RXCSUM) { 4384 if ((changed & NETIF_F_RXCSUM) &&
4385 bool on = features & NETIF_F_RXCSUM; 4385 !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
4386 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), 4386 sky2_write32(sky2->hw,
4387 on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 4387 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
4388 (features & NETIF_F_RXCSUM)
4389 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
4388 } 4390 }
4389 4391
4390 if (changed & NETIF_F_RXHASH) 4392 if (changed & NETIF_F_RXHASH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 926d8aac941c..073b85b45fc5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -929,15 +929,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
929 if (priv->rx_cq[i].buf) 929 if (priv->rx_cq[i].buf)
930 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 930 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
931 } 931 }
932
933 if (priv->base_tx_qpn) {
934 mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
935 priv->base_tx_qpn = 0;
936 }
932} 937}
933 938
934int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 939int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
935{ 940{
936 struct mlx4_en_port_profile *prof = priv->prof; 941 struct mlx4_en_port_profile *prof = priv->prof;
937 int i; 942 int i;
938 int base_tx_qpn, err; 943 int err;
939 944
940 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn); 945 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
941 if (err) { 946 if (err) {
942 en_err(priv, "failed reserving range for TX rings\n"); 947 en_err(priv, "failed reserving range for TX rings\n");
943 return err; 948 return err;
@@ -949,7 +954,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
949 prof->tx_ring_size, i, TX)) 954 prof->tx_ring_size, i, TX))
950 goto err; 955 goto err;
951 956
952 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i, 957 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
953 prof->tx_ring_size, TXBB_SIZE)) 958 prof->tx_ring_size, TXBB_SIZE))
954 goto err; 959 goto err;
955 } 960 }
@@ -969,7 +974,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
969 974
970err: 975err:
971 en_err(priv, "Failed to allocate NIC resources\n"); 976 en_err(priv, "Failed to allocate NIC resources\n");
972 mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
973 return -ENOMEM; 977 return -ENOMEM;
974} 978}
975 979
@@ -1204,9 +1208,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1204 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 1208 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
1205 1209
1206 /* Configure port */ 1210 /* Configure port */
1211 mlx4_en_calc_rx_buf(dev);
1207 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1212 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1208 MLX4_EN_MIN_MTU, 1213 priv->rx_skb_size + ETH_FCS_LEN,
1209 0, 0, 0, 0); 1214 prof->tx_pause, prof->tx_ppp,
1215 prof->rx_pause, prof->rx_ppp);
1210 if (err) { 1216 if (err) {
1211 en_err(priv, "Failed setting port general configurations " 1217 en_err(priv, "Failed setting port general configurations "
1212 "for port %d, with error %d\n", priv->port, err); 1218 "for port %d, with error %d\n", priv->port, err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ee6f4fe00837..a0313de122de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1975,6 +1975,8 @@ slave_start:
1975 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 1975 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
1976 !mlx4_is_mfunc(dev)) { 1976 !mlx4_is_mfunc(dev)) {
1977 dev->flags &= ~MLX4_FLAG_MSI_X; 1977 dev->flags &= ~MLX4_FLAG_MSI_X;
1978 dev->caps.num_comp_vectors = 1;
1979 dev->caps.comp_pool = 0;
1978 pci_disable_msix(pdev); 1980 pci_disable_msix(pdev);
1979 err = mlx4_setup_hca(dev); 1981 err = mlx4_setup_hca(dev);
1980 } 1982 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6ae350921b1a..225c20d47900 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -495,6 +495,7 @@ struct mlx4_en_priv {
495 int vids[128]; 495 int vids[128];
496 bool wol; 496 bool wol;
497 struct device *ddev; 497 struct device *ddev;
498 int base_tx_qpn;
498 499
499#ifdef CONFIG_MLX4_EN_DCB 500#ifdef CONFIG_MLX4_EN_DCB
500 struct ieee_ets ets; 501 struct ieee_ets ets;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666fcffd7..083d6715335c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
946 /* Update stats */ 946 /* Update stats */
947 ndev->stats.tx_packets++; 947 ndev->stats.tx_packets++;
948 ndev->stats.tx_bytes += skb->len; 948 ndev->stats.tx_bytes += skb->len;
949
950 /* Free buffer */
951 dev_kfree_skb_irq(skb);
952 } 949 }
950 dev_kfree_skb_irq(skb);
953 951
954 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 952 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
955 } 953 }
956 954
957 if (netif_queue_stopped(ndev)) 955 if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
958 netif_wake_queue(ndev); 956 if (netif_queue_stopped(ndev))
957 netif_wake_queue(ndev);
958 }
959} 959}
960 960
961static int __lpc_handle_recv(struct net_device *ndev, int budget) 961static int __lpc_handle_recv(struct net_device *ndev, int budget)
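Rather than waking the queue after every completion pass, the handler now waits until at least half of the TX descriptors are free, which avoids rapid stop/wake flapping under load. A toy model of the hysteresis check (ring size and counters are made up):

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_DESC 16                  /* hypothetical ring size */

    static bool queue_stopped = true;
    static int used_tx_buffs = 5;

    /* Wake the transmit queue only once at least half the descriptors are
     * free again, rather than on every reclaimed buffer.
     */
    static void maybe_wake_queue(void)
    {
        if (used_tx_buffs <= TX_DESC / 2 && queue_stopped) {
            queue_stopped = false;
            puts("waking TX queue");
        }
    }

    int main(void)
    {
        maybe_wake_queue();
        return 0;
    }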
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
1320 .ndo_set_rx_mode = lpc_eth_set_multicast_list, 1320 .ndo_set_rx_mode = lpc_eth_set_multicast_list,
1321 .ndo_do_ioctl = lpc_eth_ioctl, 1321 .ndo_do_ioctl = lpc_eth_ioctl,
1322 .ndo_set_mac_address = lpc_set_mac_address, 1322 .ndo_set_mac_address = lpc_set_mac_address,
1323 .ndo_change_mtu = eth_change_mtu,
1323}; 1324};
1324 1325
1325static int lpc_eth_drv_probe(struct platform_device *pdev) 1326static int lpc_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 46e77a2c5121..ad98f4d7919d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
479 479
480 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 480 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
481 pfn = pci_info[i].id; 481 pfn = pci_info[i].id;
482 if (pfn > QLCNIC_MAX_PCI_FUNC) { 482 if (pfn >= QLCNIC_MAX_PCI_FUNC) {
483 ret = QL_STATUS_INVALID_PARAM; 483 ret = QL_STATUS_INVALID_PARAM;
484 goto err_eswitch; 484 goto err_eswitch;
485 } 485 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 9757ce3543a0..d7a04e091101 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3894,6 +3894,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
3894 case RTL_GIGA_MAC_VER_22: 3894 case RTL_GIGA_MAC_VER_22:
3895 case RTL_GIGA_MAC_VER_23: 3895 case RTL_GIGA_MAC_VER_23:
3896 case RTL_GIGA_MAC_VER_24: 3896 case RTL_GIGA_MAC_VER_24:
3897 case RTL_GIGA_MAC_VER_34:
3897 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 3898 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
3898 break; 3899 break;
3899 default: 3900 default:
@@ -5889,11 +5890,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
5889 if (status & LinkChg) 5890 if (status & LinkChg)
5890 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); 5891 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
5891 5892
5892 napi_disable(&tp->napi); 5893 rtl_irq_enable_all(tp);
5893 rtl_irq_disable(tp);
5894
5895 napi_enable(&tp->napi);
5896 napi_schedule(&tp->napi);
5897} 5894}
5898 5895
5899static void rtl_task(struct work_struct *work) 5896static void rtl_task(struct work_struct *work)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 667169b82526..79bf09b41971 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1011,7 +1011,7 @@ static int sh_eth_txfree(struct net_device *ndev)
1011} 1011}
1012 1012
1013/* Packet receive function */ 1013/* Packet receive function */
1014static int sh_eth_rx(struct net_device *ndev) 1014static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1015{ 1015{
1016 struct sh_eth_private *mdp = netdev_priv(ndev); 1016 struct sh_eth_private *mdp = netdev_priv(ndev);
1017 struct sh_eth_rxdesc *rxdesc; 1017 struct sh_eth_rxdesc *rxdesc;
@@ -1102,9 +1102,11 @@ static int sh_eth_rx(struct net_device *ndev)
1102 /* Restart Rx engine if stopped. */ 1102 /* Restart Rx engine if stopped. */
1103 /* If we don't need to check status, don't. -KDU */ 1103 /* If we don't need to check status, don't. -KDU */
1104 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1104 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1105 /* fix the values for the next receiving */ 1105 /* fix the values for the next receiving if RDE is set */
1106 mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) - 1106 if (intr_status & EESR_RDE)
1107 sh_eth_read(ndev, RDLAR)) >> 4; 1107 mdp->cur_rx = mdp->dirty_rx =
1108 (sh_eth_read(ndev, RDFAR) -
1109 sh_eth_read(ndev, RDLAR)) >> 4;
1108 sh_eth_write(ndev, EDRRR_R, EDRRR); 1110 sh_eth_write(ndev, EDRRR_R, EDRRR);
1109 } 1111 }
1110 1112
@@ -1273,7 +1275,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1273 EESR_RTSF | /* short frame recv */ 1275 EESR_RTSF | /* short frame recv */
1274 EESR_PRE | /* PHY-LSI recv error */ 1276 EESR_PRE | /* PHY-LSI recv error */
1275 EESR_CERF)){ /* recv frame CRC error */ 1277 EESR_CERF)){ /* recv frame CRC error */
1276 sh_eth_rx(ndev); 1278 sh_eth_rx(ndev, intr_status);
1277 } 1279 }
1278 1280
1279 /* Tx Check */ 1281 /* Tx Check */
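Resynchronising cur_rx/dirty_rx from RDFAR/RDLAR is only meaningful when the controller actually reported Receive Descriptor Empty, so the interrupt status is now passed into sh_eth_rx and tested before the pointers are rewritten. A stripped-down model of the new control flow; the bit positions and helpers below are placeholders, not the real sh_eth definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define EESR_RDE (1u << 17)   /* placeholder bit positions for the sketch */
    #define EDRRR_R  (1u << 0)

    static unsigned int cur_rx, dirty_rx;

    static uint32_t read_edrrr(void)             { return 0; }  /* Rx engine stopped */
    static unsigned int ring_index_from_hw(void) { return 3; }  /* pretend RDFAR/RDLAR math */
    static void restart_rx(void)                 { puts("EDRRR_R written, Rx restarted"); }

    static void rx_restart_if_stopped(uint32_t intr_status)
    {
        if (!(read_edrrr() & EDRRR_R)) {
            /* Only trust the hardware pointers when RDE was signalled. */
            if (intr_status & EESR_RDE)
                cur_rx = dirty_rx = ring_index_from_hw();
            restart_rx();
        }
    }

    int main(void)
    {
        rx_restart_if_stopped(EESR_RDE);
        printf("cur_rx=%u dirty_rx=%u\n", cur_rx, dirty_rx);
        return 0;
    }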
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 036428348faa..9f448279e12a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@ config STMMAC_ETH
13if STMMAC_ETH 13if STMMAC_ETH
14 14
15config STMMAC_PLATFORM 15config STMMAC_PLATFORM
16 tristate "STMMAC platform bus support" 16 bool "STMMAC Platform bus support"
17 depends on STMMAC_ETH 17 depends on STMMAC_ETH
18 default y 18 default y
19 ---help--- 19 ---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
26 If unsure, say N. 26 If unsure, say N.
27 27
28config STMMAC_PCI 28config STMMAC_PCI
29 tristate "STMMAC support on PCI bus (EXPERIMENTAL)" 29 bool "STMMAC PCI bus support (EXPERIMENTAL)"
30 depends on STMMAC_ETH && PCI && EXPERIMENTAL 30 depends on STMMAC_ETH && PCI && EXPERIMENTAL
31 ---help--- 31 ---help---
32 This is to select the Synopsys DWMAC available on PCI devices, 32 This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index fb8377da1687..4b785e10f2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
51 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 51 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
52 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, 52 priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
53 csum); 53 csum);
54 54 wmb();
55 entry = (++priv->cur_tx) % txsize; 55 entry = (++priv->cur_tx) % txsize;
56 desc = priv->dma_tx + entry; 56 desc = priv->dma_tx + entry;
57 57
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
59 len, DMA_TO_DEVICE); 59 len, DMA_TO_DEVICE);
60 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 60 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
61 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum); 61 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
62 wmb();
62 priv->hw->desc->set_tx_owner(desc); 63 priv->hw->desc->set_tx_owner(desc);
63 priv->tx_skbuff[entry] = NULL; 64 priv->tx_skbuff[entry] = NULL;
64 } else { 65 } else {
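The added wmb() calls ensure the descriptor fields are globally visible before the OWN bit hands the descriptor to the DMA engine, so the device cannot fetch a half-written descriptor. A simplified illustration of the ordering pattern (the barrier macro and field layout are illustrative only; the real driver uses the kernel's wmb()):

    #include <stdint.h>
    #include <stdio.h>

    struct tx_desc {
        uint32_t addr;
        uint32_t len;
        volatile uint32_t ctrl;   /* bit 31 = OWN (device owns descriptor) */
    };

    #define DESC_OWN (1u << 31)

    /* Stand-in for wmb(): real hardware needs a write memory barrier here;
     * a compiler barrier is enough for this sketch.
     */
    #define write_barrier() __asm__ __volatile__("" ::: "memory")

    static void queue_tx(struct tx_desc *d, uint32_t addr, uint32_t len)
    {
        d->addr = addr;
        d->len  = len;
        write_barrier();          /* publish payload fields first ...       */
        d->ctrl |= DESC_OWN;      /* ... then transfer ownership to the DMA */
    }

    int main(void)
    {
        struct tx_desc d = { 0, 0, 0 };

        queue_tx(&d, 0x1000, 64);
        printf("ctrl=0x%x\n", (unsigned)d.ctrl);
        return 0;
    }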
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060ee9de..dc20c56efc9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/stmmac.h> 27#include <linux/stmmac.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/pci.h>
29#include "common.h" 30#include "common.h"
30#ifdef CONFIG_STMMAC_TIMER 31#ifdef CONFIG_STMMAC_TIMER
31#include "stmmac_timer.h" 32#include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
95extern void stmmac_set_ethtool_ops(struct net_device *netdev); 96extern void stmmac_set_ethtool_ops(struct net_device *netdev);
96extern const struct stmmac_desc_ops enh_desc_ops; 97extern const struct stmmac_desc_ops enh_desc_ops;
97extern const struct stmmac_desc_ops ndesc_ops; 98extern const struct stmmac_desc_ops ndesc_ops;
98
99int stmmac_freeze(struct net_device *ndev); 99int stmmac_freeze(struct net_device *ndev);
100int stmmac_restore(struct net_device *ndev); 100int stmmac_restore(struct net_device *ndev);
101int stmmac_resume(struct net_device *ndev); 101int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
109static inline int stmmac_clk_enable(struct stmmac_priv *priv) 109static inline int stmmac_clk_enable(struct stmmac_priv *priv)
110{ 110{
111 if (!IS_ERR(priv->stmmac_clk)) 111 if (!IS_ERR(priv->stmmac_clk))
112 return clk_enable(priv->stmmac_clk); 112 return clk_prepare_enable(priv->stmmac_clk);
113 113
114 return 0; 114 return 0;
115} 115}
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
119 if (IS_ERR(priv->stmmac_clk)) 119 if (IS_ERR(priv->stmmac_clk))
120 return; 120 return;
121 121
122 clk_disable(priv->stmmac_clk); 122 clk_disable_unprepare(priv->stmmac_clk);
123} 123}
124static inline int stmmac_clk_get(struct stmmac_priv *priv) 124static inline int stmmac_clk_get(struct stmmac_priv *priv)
125{ 125{
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
143 return 0; 143 return 0;
144} 144}
145#endif /* CONFIG_HAVE_CLK */ 145#endif /* CONFIG_HAVE_CLK */
146
147
148#ifdef CONFIG_STMMAC_PLATFORM
149extern struct platform_driver stmmac_pltfr_driver;
150static inline int stmmac_register_platform(void)
151{
152 int err;
153
154 err = platform_driver_register(&stmmac_pltfr_driver);
155 if (err)
156 pr_err("stmmac: failed to register the platform driver\n");
157
158 return err;
159}
160static inline void stmmac_unregister_platform(void)
161{
162 platform_driver_unregister(&stmmac_pltfr_driver);
163}
164#else
165static inline int stmmac_register_platform(void)
166{
167 pr_debug("stmmac: do not register the platf driver\n");
168
169 return -EINVAL;
170}
171static inline void stmmac_unregister_platform(void)
172{
173}
174#endif /* CONFIG_STMMAC_PLATFORM */
175
176#ifdef CONFIG_STMMAC_PCI
177extern struct pci_driver stmmac_pci_driver;
178static inline int stmmac_register_pci(void)
179{
180 int err;
181
182 err = pci_register_driver(&stmmac_pci_driver);
183 if (err)
184 pr_err("stmmac: failed to register the PCI driver\n");
185
186 return err;
187}
188static inline void stmmac_unregister_pci(void)
189{
190 pci_unregister_driver(&stmmac_pci_driver);
191}
192#else
193static inline int stmmac_register_pci(void)
194{
195 pr_debug("stmmac: do not register the PCI driver\n");
196
197 return -EINVAL;
198}
199static inline void stmmac_unregister_pci(void)
200{
201}
202#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 70966330f44e..ea3003edde18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
833 833
834/** 834/**
835 * stmmac_selec_desc_mode 835 * stmmac_selec_desc_mode
836 * @dev : device pointer 836 * @priv : private structure
837 * Description: select the Enhanced/Alternate or Normal descriptors */ 837 * Description: select the Enhanced/Alternate or Normal descriptors
838 */
838static void stmmac_selec_desc_mode(struct stmmac_priv *priv) 839static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
839{ 840{
840 if (priv->plat->enh_desc) { 841 if (priv->plat->enh_desc) {
@@ -1211,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1211 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion); 1212 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
1212 wmb(); 1213 wmb();
1213 priv->hw->desc->set_tx_owner(desc); 1214 priv->hw->desc->set_tx_owner(desc);
1215 wmb();
1214 } 1216 }
1215 1217
1216 /* Interrupt on completion only for the latest segment */ 1218
@@ -1226,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1226 1228
1228 /* To avoid a race condition */ 1230
1228 priv->hw->desc->set_tx_owner(first); 1230 priv->hw->desc->set_tx_owner(first);
1231 wmb();
1229 1232
1230 priv->cur_tx++; 1233 priv->cur_tx++;
1231 1234
@@ -1289,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1289 } 1292 }
1290 wmb(); 1293 wmb();
1291 priv->hw->desc->set_rx_owner(p + entry); 1294 priv->hw->desc->set_rx_owner(p + entry);
1295 wmb();
1292 } 1296 }
1293} 1297}
1294 1298
@@ -1861,6 +1865,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1861/** 1865/**
1862 * stmmac_dvr_probe 1866 * stmmac_dvr_probe
1863 * @device: device pointer 1867 * @device: device pointer
1868 * @plat_dat: platform data pointer
1869 * @addr: iobase memory address
1864 * Description: this is the main probe function used to 1870 * Description: this is the main probe function used to
1865 * call the alloc_etherdev, allocate the priv structure. 1871 * call the alloc_etherdev, allocate the priv structure.
1866 */ 1872 */
@@ -2090,6 +2096,34 @@ int stmmac_restore(struct net_device *ndev)
2090} 2096}
2091#endif /* CONFIG_PM */ 2097#endif /* CONFIG_PM */
2092 2098
2099/* Driver can be configured w/ and w/o both PCI and Platform drivers
2100 * depending on the configuration selected.
2101 */
2102static int __init stmmac_init(void)
2103{
2104 int err_plt = 0;
2105 int err_pci = 0;
2106
2107 err_plt = stmmac_register_platform();
2108 err_pci = stmmac_register_pci();
2109
2110 if ((err_pci) && (err_plt)) {
2111 pr_err("stmmac: driver registration failed\n");
2112 return -EINVAL;
2113 }
2114
2115 return 0;
2116}
2117
2118static void __exit stmmac_exit(void)
2119{
2120 stmmac_unregister_platform();
2121 stmmac_unregister_pci();
2122}
2123
2124module_init(stmmac_init);
2125module_exit(stmmac_exit);
2126
2093#ifndef MODULE 2127#ifndef MODULE
2094static int __init stmmac_cmdline_opt(char *str) 2128static int __init stmmac_cmdline_opt(char *str)
2095{ 2129{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab5303e9c..cf826e6b6aa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
179 179
180MODULE_DEVICE_TABLE(pci, stmmac_id_table); 180MODULE_DEVICE_TABLE(pci, stmmac_id_table);
181 181
182static struct pci_driver stmmac_driver = { 182struct pci_driver stmmac_pci_driver = {
183 .name = STMMAC_RESOURCE_NAME, 183 .name = STMMAC_RESOURCE_NAME,
184 .id_table = stmmac_id_table, 184 .id_table = stmmac_id_table,
185 .probe = stmmac_pci_probe, 185 .probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
190#endif 190#endif
191}; 191};
192 192
193/**
194 * stmmac_init_module - Entry point for the driver
195 * Description: This function is the entry point for the driver.
196 */
197static int __init stmmac_init_module(void)
198{
199 int ret;
200
201 ret = pci_register_driver(&stmmac_driver);
202 if (ret < 0)
203 pr_err("%s: ERROR: driver registration failed\n", __func__);
204
205 return ret;
206}
207
208/**
209 * stmmac_cleanup_module - Cleanup routine for the driver
210 * Description: This function is the cleanup routine for the driver.
211 */
212static void __exit stmmac_cleanup_module(void)
213{
214 pci_unregister_driver(&stmmac_driver);
215}
216
217module_init(stmmac_init_module);
218module_exit(stmmac_cleanup_module);
219
220MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); 193MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
221MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); 194MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
222MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 195MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f0803808..680d2b8dfe27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
255}; 255};
256MODULE_DEVICE_TABLE(of, stmmac_dt_ids); 256MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
257 257
258static struct platform_driver stmmac_driver = { 258struct platform_driver stmmac_pltfr_driver = {
259 .probe = stmmac_pltfr_probe, 259 .probe = stmmac_pltfr_probe,
260 .remove = stmmac_pltfr_remove, 260 .remove = stmmac_pltfr_remove,
261 .driver = { 261 .driver = {
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = {
266 }, 266 },
267}; 267};
268 268
269module_platform_driver(stmmac_driver);
270
271MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); 269MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
272MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 270MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
273MODULE_LICENSE("GPL"); 271MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cce2a2c..8c726b7004d3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3598static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3598static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3599{ 3599{
3600 struct netdev_queue *txq; 3600 struct netdev_queue *txq;
3601 unsigned int tx_bytes;
3602 u16 pkt_cnt, tmp; 3601 u16 pkt_cnt, tmp;
3603 int cons, index; 3602 int cons, index;
3604 u64 cs; 3603 u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3621 netif_printk(np, tx_done, KERN_DEBUG, np->dev, 3620 netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3622 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); 3621 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3623 3622
3624 tx_bytes = 0; 3623 while (pkt_cnt--)
3625 tmp = pkt_cnt;
3626 while (tmp--) {
3627 tx_bytes += rp->tx_buffs[cons].skb->len;
3628 cons = release_tx_packet(np, rp, cons); 3624 cons = release_tx_packet(np, rp, cons);
3629 }
3630 3625
3631 rp->cons = cons; 3626 rp->cons = cons;
3632 smp_mb(); 3627 smp_mb();
3633 3628
3634 netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
3635
3636out: 3629out:
3637 if (unlikely(netif_tx_queue_stopped(txq) && 3630 if (unlikely(netif_tx_queue_stopped(txq) &&
3638 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 3631 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
4333 struct tx_ring_info *rp = &np->tx_rings[i]; 4326 struct tx_ring_info *rp = &np->tx_rings[i];
4334 4327
4335 niu_free_tx_ring_info(np, rp); 4328 niu_free_tx_ring_info(np, rp);
4336 netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
4337 } 4329 }
4338 kfree(np->tx_rings); 4330 kfree(np->tx_rings);
4339 np->tx_rings = NULL; 4331 np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6739 prod = NEXT_TX(rp, prod); 6731 prod = NEXT_TX(rp, prod);
6740 } 6732 }
6741 6733
6742 netdev_tx_sent_queue(txq, skb->len);
6743
6744 if (prod < rp->prod) 6734 if (prod < rp->prod)
6745 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6735 rp->wrap_bit ^= TX_RING_KICK_WRAP;
6746 rp->prod = prod; 6736 rp->prod = prod;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d614c374ed9d..3b5c4571b55e 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/module.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
19#include <linux/err.h> 20#include <linux/err.h>
20#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f86bca..098b1c42b393 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@ config TILE_NET
7 depends on TILE 7 depends on TILE
8 default y 8 default y
9 select CRC32 9 select CRC32
10 select TILE_GXIO_MPIPE if TILEGX
11 select HIGH_RES_TIMERS if TILEGX
10 ---help--- 12 ---help---
11 This is a standard Linux network device driver for the 13 This is a standard Linux network device driver for the
12 on-chip Tilera Gigabit Ethernet and XAUI interfaces. 14 on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f142cab4..0ef9eefd3211 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_TILE_NET) += tile_net.o 5obj-$(CONFIG_TILE_NET) += tile_net.o
6ifdef CONFIG_TILEGX 6ifdef CONFIG_TILEGX
7tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o 7tile_net-y := tilegx.o
8else 8else
9tile_net-objs := tilepro.o 9tile_net-y := tilepro.o
10endif 10endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 000000000000..83b4b388ad49
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <linux/sched.h>
19#include <linux/kernel.h> /* printk() */
20#include <linux/slab.h> /* kmalloc() */
21#include <linux/errno.h> /* error codes */
22#include <linux/types.h> /* size_t */
23#include <linux/interrupt.h>
24#include <linux/in.h>
25#include <linux/irq.h>
26#include <linux/netdevice.h> /* struct device, and other headers */
27#include <linux/etherdevice.h> /* eth_type_trans */
28#include <linux/skbuff.h>
29#include <linux/ioctl.h>
30#include <linux/cdev.h>
31#include <linux/hugetlb.h>
32#include <linux/in6.h>
33#include <linux/timer.h>
34#include <linux/hrtimer.h>
35#include <linux/ktime.h>
36#include <linux/io.h>
37#include <linux/ctype.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40
41#include <asm/checksum.h>
42#include <asm/homecache.h>
43#include <gxio/mpipe.h>
44#include <arch/sim.h>
45
46/* Default transmit lockup timeout period, in jiffies. */
47#define TILE_NET_TIMEOUT (5 * HZ)
48
49/* The maximum number of distinct channels (idesc.channel is 5 bits). */
50#define TILE_NET_CHANNELS 32
51
52/* Maximum number of idescs to handle per "poll". */
53#define TILE_NET_BATCH 128
54
55/* Maximum number of packets to handle per "poll". */
56#define TILE_NET_WEIGHT 64
57
58/* Number of entries in each iqueue. */
59#define IQUEUE_ENTRIES 512
60
61/* Number of entries in each equeue. */
62#define EQUEUE_ENTRIES 2048
63
64/* Total header bytes per equeue slot. Must be big enough for 2 bytes
65 * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
66 * 60 bytes of actual TCP header. We round up to align to cache lines.
67 */
68#define HEADER_BYTES 128
69
70/* Maximum completions per cpu per device (must be a power of two).
71 * ISSUE: What is the right number here? If this is too small, then
72 * egress might block waiting for free space in a completions array.
73 * ISSUE: At the least, allocate these only for initialized echannels.
74 */
75#define TILE_NET_MAX_COMPS 64
76
77#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
78
79/* Size of completions data to allocate.
80 * ISSUE: Probably more than needed since we don't use all the channels.
81 */
82#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
83
84/* Size of NotifRing data to allocate. */
85#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
86
87/* Timeout to wake the per-device TX timer after we stop the queue.
88 * We don't want the timeout too short (adds overhead, and might end
89 * up causing stop/wake/stop/wake cycles) or too long (affects performance).
90 * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
91 */
92#define TX_TIMER_DELAY_USEC 30
93
94/* Timeout to wake the per-cpu egress timer to free completions. */
95#define EGRESS_TIMER_DELAY_USEC 1000
96
97MODULE_AUTHOR("Tilera Corporation");
98MODULE_LICENSE("GPL");
99
100/* A "packet fragment" (a chunk of memory). */
101struct frag {
102 void *buf;
103 size_t length;
104};
105
106/* A single completion. */
107struct tile_net_comp {
108 /* The "complete_count" when the completion will be complete. */
109 s64 when;
110 /* The buffer to be freed when the completion is complete. */
111 struct sk_buff *skb;
112};
113
114/* The completions for a given cpu and echannel. */
115struct tile_net_comps {
116 /* The completions. */
117 struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
118 /* The number of completions used. */
119 unsigned long comp_next;
120 /* The number of completions freed. */
121 unsigned long comp_last;
122};
123
124/* The transmit wake timer for a given cpu and echannel. */
125struct tile_net_tx_wake {
126 struct hrtimer timer;
127 struct net_device *dev;
128};
129
130/* Info for a specific cpu. */
131struct tile_net_info {
132 /* The NAPI struct. */
133 struct napi_struct napi;
134 /* Packet queue. */
135 gxio_mpipe_iqueue_t iqueue;
136 /* Our cpu. */
137 int my_cpu;
138 /* True if iqueue is valid. */
139 bool has_iqueue;
140 /* NAPI flags. */
141 bool napi_added;
142 bool napi_enabled;
143 /* Number of small sk_buffs which must still be provided. */
144 unsigned int num_needed_small_buffers;
145 /* Number of large sk_buffs which must still be provided. */
146 unsigned int num_needed_large_buffers;
147 /* A timer for handling egress completions. */
148 struct hrtimer egress_timer;
149 /* True if "egress_timer" is scheduled. */
150 bool egress_timer_scheduled;
151 /* Comps for each egress channel. */
152 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
153 /* Transmit wake timer for each egress channel. */
154 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
155};
156
157/* Info for egress on a particular egress channel. */
158struct tile_net_egress {
159 /* The "equeue". */
160 gxio_mpipe_equeue_t *equeue;
161 /* The headers for TSO. */
162 unsigned char *headers;
163};
164
165/* Info for a specific device. */
166struct tile_net_priv {
167 /* Our network device. */
168 struct net_device *dev;
169 /* The primary link. */
170 gxio_mpipe_link_t link;
171 /* The primary channel, if open, else -1. */
172 int channel;
173 /* The "loopify" egress link, if needed. */
174 gxio_mpipe_link_t loopify_link;
175 /* The "loopify" egress channel, if open, else -1. */
176 int loopify_channel;
177 /* The egress channel (channel or loopify_channel). */
178 int echannel;
179 /* Total stats. */
180 struct net_device_stats stats;
181};
182
183/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
184static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
185
186/* Devices currently associated with each channel.
187 * NOTE: The array entry can become NULL after ifconfig down, but
188 * we do not free the underlying net_device structures, so it is
189 * safe to use a pointer after reading it from this array.
190 */
191static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
192
193/* A mutex for "tile_net_devs_for_channel". */
194static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
195
196/* The per-cpu info. */
197static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
198
199/* The "context" for all devices. */
200static gxio_mpipe_context_t context;
201
202/* Buffer sizes and mpipe enum codes for buffer stacks.
203 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
204 */
205#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
206#define BUFFER_SIZE_SMALL 128
207#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
208#define BUFFER_SIZE_LARGE 1664
209
210/* The small/large "buffer stacks". */
211static int small_buffer_stack = -1;
212static int large_buffer_stack = -1;
213
214/* Amount of memory allocated for each buffer stack. */
215static size_t buffer_stack_size;
216
217/* The actual memory allocated for the buffer stacks. */
218static void *small_buffer_stack_va;
219static void *large_buffer_stack_va;
220
221/* The buckets. */
222static int first_bucket = -1;
223static int num_buckets = 1;
224
225/* The ingress irq. */
226static int ingress_irq = -1;
227
228/* Text value of tile_net.cpus if passed as a module parameter. */
229static char *network_cpus_string;
230
231/* The actual cpus in "network_cpus". */
232static struct cpumask network_cpus_map;
233
234/* If "loopify=LINK" was specified, this is "LINK". */
235static char *loopify_link_name;
236
237/* If "tile_net.custom" was specified, this is non-NULL. */
238static char *custom_str;
239
240/* The "tile_net.cpus" argument specifies the cpus that are dedicated
241 * to handle ingress packets.
242 *
243 * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
244 * m, n, x, y are integer numbers that represent the cpus that can be
245 * neither a dedicated cpu nor a dataplane cpu.
246 */
247static bool network_cpus_init(void)
248{
249 char buf[1024];
250 int rc;
251
252 if (network_cpus_string == NULL)
253 return false;
254
255 rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
256 if (rc != 0) {
257 pr_warn("tile_net.cpus=%s: malformed cpu list\n",
258 network_cpus_string);
259 return false;
260 }
261
262 /* Remove dedicated cpus. */
263 cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
264
265 if (cpumask_empty(&network_cpus_map)) {
266 pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
267 network_cpus_string);
268 return false;
269 }
270
271 cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
272 pr_info("Linux network CPUs: %s\n", buf);
273 return true;
274}
275
276module_param_named(cpus, network_cpus_string, charp, 0444);
277MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
278
279/* The "tile_net.loopify=LINK" argument causes the named device to
280 * actually use "loop0" for ingress, and "loop1" for egress. This
281 * allows an app to sit between the actual link and linux, passing
282 * (some) packets along to linux, and forwarding (some) packets sent
283 * out by linux.
284 */
285module_param_named(loopify, loopify_link_name, charp, 0444);
286MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
287
288/* The "tile_net.custom" argument causes us to ignore the "conventional"
289 * classifier metadata, in particular, the "l2_offset".
290 */
291module_param_named(custom, custom_str, charp, 0444);
292MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
293
294/* Atomically update a statistics field.
295 * Note that on TILE-Gx, this operation is fire-and-forget on the
296 * issuing core (single-cycle dispatch) and takes only a few cycles
297 * longer than a regular store when the request reaches the home cache.
298 * No expensive bus management overhead is required.
299 */
300static void tile_net_stats_add(unsigned long value, unsigned long *field)
301{
302 BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
303 atomic_long_add(value, (atomic_long_t *)field);
304}
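/* Illustrative usage of the helper above, mirroring the calls made from
 * the receive and transmit paths later in this file (sketch only):
 *
 *	tile_net_stats_add(1, &priv->stats.rx_packets);
 *	tile_net_stats_add(len, &priv->stats.rx_bytes);
 */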
305
306/* Allocate and push a buffer. */
307static bool tile_net_provide_buffer(bool small)
308{
309 int stack = small ? small_buffer_stack : large_buffer_stack;
310 const unsigned long buffer_alignment = 128;
311 struct sk_buff *skb;
312 int len;
313
314 len = sizeof(struct sk_buff **) + buffer_alignment;
315 len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
316 skb = dev_alloc_skb(len);
317 if (skb == NULL)
318 return false;
319
320 /* Make room for a back-pointer to 'skb' and guarantee alignment. */
321 skb_reserve(skb, sizeof(struct sk_buff **));
322 skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
323
324 /* Save a back-pointer to 'skb'. */
325 *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
326
327 /* Make sure "skb" and the back-pointer have been flushed. */
328 wmb();
329
330 gxio_mpipe_push_buffer(&context, stack,
331 (void *)va_to_tile_io_addr(skb->data));
332
333 return true;
334}
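/* Sketch of the resulting buffer layout (illustrative, not to scale):
 *
 *   skb->head ... [headroom][struct sk_buff *back-pointer][packet data]
 *                                                          ^
 *                                 128-byte-aligned VA pushed to mPIPE
 *
 * mpipe_buf_to_skb() below recovers the skb by reading the back-pointer
 * stored in the bytes immediately before the pushed VA.
 */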
335
336/* Convert a raw mpipe buffer to its matching skb pointer. */
337static struct sk_buff *mpipe_buf_to_skb(void *va)
338{
339 /* Acquire the associated "skb". */
340 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
341 struct sk_buff *skb = *skb_ptr;
342
343 /* Paranoia. */
344 if (skb->data != va) {
345 /* Panic here since there's a reasonable chance
346 * that a corrupt buffer means generic memory
347 * corruption, with unpredictable system effects.
348 */
349 panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
350 va, skb, skb->data);
351 }
352
353 return skb;
354}
355
356static void tile_net_pop_all_buffers(int stack)
357{
358 for (;;) {
359 tile_io_addr_t addr =
360 (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
361 if (addr == 0)
362 break;
363 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
364 }
365}
366
367/* Provide linux buffers to mPIPE. */
368static void tile_net_provide_needed_buffers(void)
369{
370 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
371
372 while (info->num_needed_small_buffers != 0) {
373 if (!tile_net_provide_buffer(true))
374 goto oops;
375 info->num_needed_small_buffers--;
376 }
377
378 while (info->num_needed_large_buffers != 0) {
379 if (!tile_net_provide_buffer(false))
380 goto oops;
381 info->num_needed_large_buffers--;
382 }
383
384 return;
385
386oops:
387 /* Add a description to the page allocation failure dump. */
388 pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
389}
390
391static inline bool filter_packet(struct net_device *dev, void *buf)
392{
393 /* Filter packets received before we're up. */
394 if (dev == NULL || !(dev->flags & IFF_UP))
395 return true;
396
397 /* Filter out packets that aren't for us. */
398 if (!(dev->flags & IFF_PROMISC) &&
399 !is_multicast_ether_addr(buf) &&
400 compare_ether_addr(dev->dev_addr, buf) != 0)
401 return true;
402
403 return false;
404}
405
406static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
407 gxio_mpipe_idesc_t *idesc, unsigned long len)
408{
409 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
410 struct tile_net_priv *priv = netdev_priv(dev);
411
412 /* Encode the actual packet length. */
413 skb_put(skb, len);
414
415 skb->protocol = eth_type_trans(skb, dev);
416
417 /* Acknowledge "good" hardware checksums. */
418 if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
419 skb->ip_summed = CHECKSUM_UNNECESSARY;
420
421 netif_receive_skb(skb);
422
423 /* Update stats. */
424 tile_net_stats_add(1, &priv->stats.rx_packets);
425 tile_net_stats_add(len, &priv->stats.rx_bytes);
426
427 /* Need a new buffer. */
428 if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
429 info->num_needed_small_buffers++;
430 else
431 info->num_needed_large_buffers++;
432}
433
434/* Handle a packet. Return true if "processed", false if "filtered". */
435static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
436{
437 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
438 struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
439 uint8_t l2_offset;
440 void *va;
441 void *buf;
442 unsigned long len;
443 bool filter;
444
445 /* Drop packets for which no buffer was available.
446 * NOTE: This happens under heavy load.
447 */
448 if (idesc->be) {
449 struct tile_net_priv *priv = netdev_priv(dev);
450 tile_net_stats_add(1, &priv->stats.rx_dropped);
451 gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
452 if (net_ratelimit())
453 pr_info("Dropping packet (insufficient buffers).\n");
454 return false;
455 }
456
457 /* Get the "l2_offset", if allowed. */
458 l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
459
460 /* Get the raw buffer VA (includes "headroom"). */
461 va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
462
463 /* Get the actual packet start/length. */
464 buf = va + l2_offset;
465 len = idesc->l2_size - l2_offset;
466
467 /* Point "va" at the raw buffer. */
468 va -= NET_IP_ALIGN;
469
470 filter = filter_packet(dev, buf);
471 if (filter) {
472 gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
473 } else {
474 struct sk_buff *skb = mpipe_buf_to_skb(va);
475
476 /* Skip headroom, and any custom header. */
477 skb_reserve(skb, NET_IP_ALIGN + l2_offset);
478
479 tile_net_receive_skb(dev, skb, idesc, len);
480 }
481
482 gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
483 return !filter;
484}
485
486/* Handle some packets for the current CPU.
487 *
488 * This function handles up to TILE_NET_BATCH idescs per call.
489 *
490 * ISSUE: Since we do not provide new buffers until this function is
491 * complete, we must initially provide enough buffers for each network
492 * cpu to fill its iqueue and also its batched idescs.
493 *
494 * ISSUE: The "rotting packet" race condition occurs if a packet
495 * arrives after the queue appears to be empty, and before the
496 * hypervisor interrupt is re-enabled.
497 */
498static int tile_net_poll(struct napi_struct *napi, int budget)
499{
500 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
501 unsigned int work = 0;
502 gxio_mpipe_idesc_t *idesc;
503 int i, n;
504
505 /* Process packets. */
506 while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
507 for (i = 0; i < n; i++) {
508 if (i == TILE_NET_BATCH)
509 goto done;
510 if (tile_net_handle_packet(idesc + i)) {
511 if (++work >= budget)
512 goto done;
513 }
514 }
515 }
516
517 /* There are no packets left. */
518 napi_complete(&info->napi);
519
520 /* Re-enable hypervisor interrupts. */
521 gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
522
523 /* HACK: Avoid the "rotting packet" problem. */
524 if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
525 napi_schedule(&info->napi);
526
527 /* ISSUE: Handle completions? */
528
529done:
530 tile_net_provide_needed_buffers();
531
532 return work;
533}
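/* Note on the buffer budget mentioned in the ISSUE above: each network
 * cpu can hold at most IQUEUE_ENTRIES descriptors in its iqueue plus
 * TILE_NET_BATCH batched idescs, which is exactly the per-cpu amount
 * that tile_net_init_mpipe() provides below via
 * "num_buffers = network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH)".
 */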
534
535/* Handle an ingress interrupt on the current cpu. */
536static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
537{
538 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
539 napi_schedule(&info->napi);
540 return IRQ_HANDLED;
541}
542
543/* Free some completions. This must be called with interrupts blocked. */
544static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
545 struct tile_net_comps *comps,
546 int limit, bool force_update)
547{
548 int n = 0;
549 while (comps->comp_last < comps->comp_next) {
550 unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
551 struct tile_net_comp *comp = &comps->comp_queue[cid];
552 if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
553 force_update || n == 0))
554 break;
555 dev_kfree_skb_irq(comp->skb);
556 comps->comp_last++;
557 if (++n == limit)
558 break;
559 }
560 return n;
561}
562
563/* Add a completion. This must be called with interrupts blocked.
564 * tile_net_equeue_try_reserve() will have ensured a free completion entry.
565 */
566static void add_comp(gxio_mpipe_equeue_t *equeue,
567 struct tile_net_comps *comps,
568 uint64_t when, struct sk_buff *skb)
569{
570 int cid = comps->comp_next % TILE_NET_MAX_COMPS;
571 comps->comp_queue[cid].when = when;
572 comps->comp_queue[cid].skb = skb;
573 comps->comp_next++;
574}
575
576static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
577{
578 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
579 struct tile_net_priv *priv = netdev_priv(dev);
580
581 hrtimer_start(&info->tx_wake[priv->echannel].timer,
582 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
583 HRTIMER_MODE_REL_PINNED);
584}
585
586static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
587{
588 struct tile_net_tx_wake *tx_wake =
589 container_of(t, struct tile_net_tx_wake, timer);
590 netif_wake_subqueue(tx_wake->dev, smp_processor_id());
591 return HRTIMER_NORESTART;
592}
593
594/* Make sure the egress timer is scheduled. */
595static void tile_net_schedule_egress_timer(void)
596{
597 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
598
599 if (!info->egress_timer_scheduled) {
600 hrtimer_start(&info->egress_timer,
601 ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
602 HRTIMER_MODE_REL_PINNED);
603 info->egress_timer_scheduled = true;
604 }
605}
606
607/* The "function" for "info->egress_timer".
608 *
609 * This timer will reschedule itself as long as there are any pending
610 * completions expected for this tile.
611 */
612static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
613{
614 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
615 unsigned long irqflags;
616 bool pending = false;
617 int i;
618
619 local_irq_save(irqflags);
620
621 /* The timer is no longer scheduled. */
622 info->egress_timer_scheduled = false;
623
624 /* Free all possible comps for this tile. */
625 for (i = 0; i < TILE_NET_CHANNELS; i++) {
626 struct tile_net_egress *egress = &egress_for_echannel[i];
627 struct tile_net_comps *comps = info->comps_for_echannel[i];
628 if (comps->comp_last >= comps->comp_next)
629 continue;
630 tile_net_free_comps(egress->equeue, comps, -1, true);
631 pending = pending || (comps->comp_last < comps->comp_next);
632 }
633
634 /* Reschedule timer if needed. */
635 if (pending)
636 tile_net_schedule_egress_timer();
637
638 local_irq_restore(irqflags);
639
640 return HRTIMER_NORESTART;
641}
642
643/* Helper function for "tile_net_update()".
644 * "dev" (i.e. arg) is the device being brought up or down,
645 * or NULL if all devices are now down.
646 */
647static void tile_net_update_cpu(void *arg)
648{
649 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
650 struct net_device *dev = arg;
651
652 if (!info->has_iqueue)
653 return;
654
655 if (dev != NULL) {
656 if (!info->napi_added) {
657 netif_napi_add(dev, &info->napi,
658 tile_net_poll, TILE_NET_WEIGHT);
659 info->napi_added = true;
660 }
661 if (!info->napi_enabled) {
662 napi_enable(&info->napi);
663 info->napi_enabled = true;
664 }
665 enable_percpu_irq(ingress_irq, 0);
666 } else {
667 disable_percpu_irq(ingress_irq);
668 if (info->napi_enabled) {
669 napi_disable(&info->napi);
670 info->napi_enabled = false;
671 }
672 /* FIXME: Drain the iqueue. */
673 }
674}
675
676/* Helper function for tile_net_open() and tile_net_stop().
677 * Always called under tile_net_devs_for_channel_mutex.
678 */
679static int tile_net_update(struct net_device *dev)
680{
681 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
682 bool saw_channel = false;
683 int channel;
684 int rc;
685 int cpu;
686
687 gxio_mpipe_rules_init(&rules, &context);
688
689 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
690 if (tile_net_devs_for_channel[channel] == NULL)
691 continue;
692 if (!saw_channel) {
693 saw_channel = true;
694 gxio_mpipe_rules_begin(&rules, first_bucket,
695 num_buckets, NULL);
696 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
697 }
698 gxio_mpipe_rules_add_channel(&rules, channel);
699 }
700
701 /* NOTE: This can fail if there is no classifier.
702 * ISSUE: Can anything else cause it to fail?
703 */
704 rc = gxio_mpipe_rules_commit(&rules);
705 if (rc != 0) {
706 netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
707 return -EIO;
708 }
709
710 /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
711 for_each_online_cpu(cpu)
712 smp_call_function_single(cpu, tile_net_update_cpu,
713 (saw_channel ? dev : NULL), 1);
714
715 /* HACK: Allow packets to flow in the simulator. */
716 if (saw_channel)
717 sim_enable_mpipe_links(0, -1);
718
719 return 0;
720}
721
722/* Allocate and initialize mpipe buffer stacks, and register them in
723 * the mPIPE TLBs, for both small and large packet sizes.
724 * This routine supports tile_net_init_mpipe(), below.
725 */
726static int init_buffer_stacks(struct net_device *dev, int num_buffers)
727{
728 pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
729 int rc;
730
731 /* Compute stack bytes; we round up to 64KB and then use
732 * alloc_pages_exact() so we get the required 64KB alignment as well.
733 */
734 buffer_stack_size =
735 ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
736 64 * 1024);
737
738 /* Allocate two buffer stack indices. */
739 rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
740 if (rc < 0) {
741 netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
742 rc);
743 return rc;
744 }
745 small_buffer_stack = rc;
746 large_buffer_stack = rc + 1;
747
748 /* Allocate the small buffer stack. */
749 small_buffer_stack_va =
750 alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
751 if (small_buffer_stack_va == NULL) {
752 netdev_err(dev,
753 "Could not alloc %zd bytes for buffer stacks\n",
754 buffer_stack_size);
755 return -ENOMEM;
756 }
757 rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
758 BUFFER_SIZE_SMALL_ENUM,
759 small_buffer_stack_va,
760 buffer_stack_size, 0);
761 if (rc != 0) {
762 netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
763 return rc;
764 }
765 rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
766 hash_pte, 0);
767 if (rc != 0) {
768 netdev_err(dev,
769 "gxio_mpipe_register_buffer_memory failed: %d\n",
770 rc);
771 return rc;
772 }
773
774 /* Allocate the large buffer stack. */
775 large_buffer_stack_va =
776 alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
777 if (large_buffer_stack_va == NULL) {
778 netdev_err(dev,
779 "Could not alloc %zd bytes for buffer stacks\n",
780 buffer_stack_size);
781 return -ENOMEM;
782 }
783 rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
784 BUFFER_SIZE_LARGE_ENUM,
785 large_buffer_stack_va,
786 buffer_stack_size, 0);
787 if (rc != 0) {
788 netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
789 rc);
790 return rc;
791 }
792 rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
793 hash_pte, 0);
794 if (rc != 0) {
795 netdev_err(dev,
796 "gxio_mpipe_register_buffer_memory failed: %d\n",
797 rc);
798 return rc;
799 }
800
801 return 0;
802}
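/* For reference only: the small/large sequences above are symmetric, so
 * they could be factored into a helper roughly like the untested sketch
 * below.  The helper name is made up; it reuses the file-scope "context"
 * and "buffer_stack_size" exactly as the code above does.
 *
 *	static int init_one_buffer_stack(struct net_device *dev, int stack,
 *					 int size_enum, void **va_out,
 *					 pte_t hash_pte)
 *	{
 *		void *va = alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
 *		int rc;
 *		if (va == NULL)
 *			return -ENOMEM;
 *		rc = gxio_mpipe_init_buffer_stack(&context, stack, size_enum,
 *						  va, buffer_stack_size, 0);
 *		if (rc != 0)
 *			return rc;
 *		rc = gxio_mpipe_register_client_memory(&context, stack,
 *						       hash_pte, 0);
 *		if (rc != 0)
 *			return rc;
 *		*va_out = va;
 *		return 0;
 *	}
 */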
803
804/* Allocate per-cpu resources (memory for completions and idescs).
805 * This routine supports tile_net_init_mpipe(), below.
806 */
807static int alloc_percpu_mpipe_resources(struct net_device *dev,
808 int cpu, int ring)
809{
810 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
811 int order, i, rc;
812 struct page *page;
813 void *addr;
814
815 /* Allocate the "comps". */
816 order = get_order(COMPS_SIZE);
817 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
818 if (page == NULL) {
819 netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
820 COMPS_SIZE);
821 return -ENOMEM;
822 }
823 addr = pfn_to_kaddr(page_to_pfn(page));
824 memset(addr, 0, COMPS_SIZE);
825 for (i = 0; i < TILE_NET_CHANNELS; i++)
826 info->comps_for_echannel[i] =
827 addr + i * sizeof(struct tile_net_comps);
828
829 /* If this is a network cpu, create an iqueue. */
830 if (cpu_isset(cpu, network_cpus_map)) {
831 order = get_order(NOTIF_RING_SIZE);
832 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
833 if (page == NULL) {
834 netdev_err(dev,
835 "Failed to alloc %zd bytes iqueue memory\n",
836 NOTIF_RING_SIZE);
837 return -ENOMEM;
838 }
839 addr = pfn_to_kaddr(page_to_pfn(page));
840 rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
841 addr, NOTIF_RING_SIZE, 0);
842 if (rc < 0) {
843 netdev_err(dev,
844 "gxio_mpipe_iqueue_init failed: %d\n", rc);
845 return rc;
846 }
847 info->has_iqueue = true;
848 }
849
850 return ring;
851}
852
853/* Initialize NotifGroup and buckets.
854 * This routine supports tile_net_init_mpipe(), below.
855 */
856static int init_notif_group_and_buckets(struct net_device *dev,
857 int ring, int network_cpus_count)
858{
859 int group, rc;
860
861 /* Allocate one NotifGroup. */
862 rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
863 if (rc < 0) {
864 netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
865 rc);
866 return rc;
867 }
868 group = rc;
869
870 /* Initialize global num_buckets value. */
871 if (network_cpus_count > 4)
872 num_buckets = 256;
873 else if (network_cpus_count > 1)
874 num_buckets = 16;
875
876 /* Allocate some buckets, and set global first_bucket value. */
877 rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
878 if (rc < 0) {
879 netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
880 return rc;
881 }
882 first_bucket = rc;
883
884 /* Init group and buckets. */
885 rc = gxio_mpipe_init_notif_group_and_buckets(
886 &context, group, ring, network_cpus_count,
887 first_bucket, num_buckets,
888 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
889 if (rc != 0) {
890 netdev_err(
891 dev,
892 "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
893 rc);
894 return rc;
895 }
896
897 return 0;
898}
899
900/* Create an irq and register it, then activate the irq and request
901 * interrupts on all cores. Note that "ingress_irq" being initialized
902 * is how we know not to call tile_net_init_mpipe() again.
903 * This routine supports tile_net_init_mpipe(), below.
904 */
905static int tile_net_setup_interrupts(struct net_device *dev)
906{
907 int cpu, rc;
908
909 rc = create_irq();
910 if (rc < 0) {
911 netdev_err(dev, "create_irq failed: %d\n", rc);
912 return rc;
913 }
914 ingress_irq = rc;
915 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
916 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
917 0, NULL, NULL);
918 if (rc != 0) {
919 netdev_err(dev, "request_irq failed: %d\n", rc);
920 destroy_irq(ingress_irq);
921 ingress_irq = -1;
922 return rc;
923 }
924
925 for_each_online_cpu(cpu) {
926 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
927 if (info->has_iqueue) {
928 gxio_mpipe_request_notif_ring_interrupt(
929 &context, cpu_x(cpu), cpu_y(cpu),
930 1, ingress_irq, info->iqueue.ring);
931 }
932 }
933
934 return 0;
935}
936
937/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
938static void tile_net_init_mpipe_fail(void)
939{
940 int cpu;
941
942 /* Do cleanups that require the mpipe context first. */
943 if (small_buffer_stack >= 0)
944 tile_net_pop_all_buffers(small_buffer_stack);
945 if (large_buffer_stack >= 0)
946 tile_net_pop_all_buffers(large_buffer_stack);
947
948 /* Destroy mpipe context so the hardware no longer owns any memory. */
949 gxio_mpipe_destroy(&context);
950
951 for_each_online_cpu(cpu) {
952 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
953 free_pages((unsigned long)(info->comps_for_echannel[0]),
954 get_order(COMPS_SIZE));
955 info->comps_for_echannel[0] = NULL;
956 free_pages((unsigned long)(info->iqueue.idescs),
957 get_order(NOTIF_RING_SIZE));
958 info->iqueue.idescs = NULL;
959 }
960
961 if (small_buffer_stack_va)
962 free_pages_exact(small_buffer_stack_va, buffer_stack_size);
963 if (large_buffer_stack_va)
964 free_pages_exact(large_buffer_stack_va, buffer_stack_size);
965
966 small_buffer_stack_va = NULL;
967 large_buffer_stack_va = NULL;
968 large_buffer_stack = -1;
969 small_buffer_stack = -1;
970 first_bucket = -1;
971}
972
973/* The first time any tilegx network device is opened, we initialize
974 * the global mpipe state. If this step fails, we fail to open the
975 * device, but if it succeeds, we never need to do it again, and since
976 * tile_net can't be unloaded, we never undo it.
977 *
978 * Note that some resources in this path (buffer stack indices,
979 * bindings from init_buffer_stack, etc.) are hypervisor resources
980 * that are freed implicitly by gxio_mpipe_destroy().
981 */
982static int tile_net_init_mpipe(struct net_device *dev)
983{
984 int i, num_buffers, rc;
985 int cpu;
986 int first_ring, ring;
987 int network_cpus_count = cpus_weight(network_cpus_map);
988
989 if (!hash_default) {
990 netdev_err(dev, "Networking requires hash_default!\n");
991 return -EIO;
992 }
993
994 rc = gxio_mpipe_init(&context, 0);
995 if (rc != 0) {
996 netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
997 return -EIO;
998 }
999
1000 /* Set up the buffer stacks. */
1001 num_buffers =
1002 network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
1003 rc = init_buffer_stacks(dev, num_buffers);
1004 if (rc != 0)
1005 goto fail;
1006
1007 /* Provide initial buffers. */
1008 rc = -ENOMEM;
1009 for (i = 0; i < num_buffers; i++) {
1010 if (!tile_net_provide_buffer(true)) {
1011 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1012 goto fail;
1013 }
1014 }
1015 for (i = 0; i < num_buffers; i++) {
1016 if (!tile_net_provide_buffer(false)) {
1017 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1018 goto fail;
1019 }
1020 }
1021
1022 /* Allocate one NotifRing for each network cpu. */
1023 rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
1024 if (rc < 0) {
1025 netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
1026 rc);
1027 goto fail;
1028 }
1029
1030 /* Init NotifRings per-cpu. */
1031 first_ring = rc;
1032 ring = first_ring;
1033 for_each_online_cpu(cpu) {
1034 rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
1035 if (rc < 0)
1036 goto fail;
1037 ring = rc;
1038 }
1039
1040 /* Initialize NotifGroup and buckets. */
1041 rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
1042 if (rc != 0)
1043 goto fail;
1044
1045 /* Create and enable interrupts. */
1046 rc = tile_net_setup_interrupts(dev);
1047 if (rc != 0)
1048 goto fail;
1049
1050 return 0;
1051
1052fail:
1053 tile_net_init_mpipe_fail();
1054 return rc;
1055}
1056
1057/* Create persistent egress info for a given egress channel.
1058 * Note that this may be shared between, say, "gbe0" and "xgbe0".
1059 * ISSUE: Defer header allocation until TSO is actually needed?
1060 */
1061static int tile_net_init_egress(struct net_device *dev, int echannel)
1062{
1063 struct page *headers_page, *edescs_page, *equeue_page;
1064 gxio_mpipe_edesc_t *edescs;
1065 gxio_mpipe_equeue_t *equeue;
1066 unsigned char *headers;
1067 int headers_order, edescs_order, equeue_order;
1068 size_t edescs_size;
1069 int edma;
1070 int rc = -ENOMEM;
1071
1072 /* Only initialize once. */
1073 if (egress_for_echannel[echannel].equeue != NULL)
1074 return 0;
1075
1076 /* Allocate memory for the "headers". */
1077 headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
1078 headers_page = alloc_pages(GFP_KERNEL, headers_order);
1079 if (headers_page == NULL) {
1080 netdev_warn(dev,
1081 "Could not alloc %zd bytes for TSO headers.\n",
1082 PAGE_SIZE << headers_order);
1083 goto fail;
1084 }
1085 headers = pfn_to_kaddr(page_to_pfn(headers_page));
1086
1087 /* Allocate memory for the "edescs". */
1088 edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
1089 edescs_order = get_order(edescs_size);
1090 edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
1091 if (edescs_page == NULL) {
1092 netdev_warn(dev,
1093 "Could not alloc %zd bytes for eDMA ring.\n",
1094 edescs_size);
1095 goto fail_headers;
1096 }
1097 edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
1098
1099 /* Allocate memory for the "equeue". */
1100 equeue_order = get_order(sizeof(*equeue));
1101 equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
1102 if (equeue_page == NULL) {
1103 netdev_warn(dev,
1104 "Could not alloc %zd bytes for equeue info.\n",
1105 PAGE_SIZE << equeue_order);
1106 goto fail_edescs;
1107 }
1108 equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
1109
1110 /* Allocate an edma ring. Note that in practice this can't
1111 * fail, which is good, because we will leak an edma ring if so.
1112 */
1113 rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
1114 if (rc < 0) {
1115 netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
1116 rc);
1117 goto fail_equeue;
1118 }
1119 edma = rc;
1120
1121 /* Initialize the equeue. */
1122 rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
1123 edescs, edescs_size, 0);
1124 if (rc != 0) {
1125 netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
1126 goto fail_equeue;
1127 }
1128
1129 /* Done. */
1130 egress_for_echannel[echannel].equeue = equeue;
1131 egress_for_echannel[echannel].headers = headers;
1132 return 0;
1133
1134fail_equeue:
1135 __free_pages(equeue_page, equeue_order);
1136
1137fail_edescs:
1138 __free_pages(edescs_page, edescs_order);
1139
1140fail_headers:
1141 __free_pages(headers_page, headers_order);
1142
1143fail:
1144 return rc;
1145}
1146
1147/* Return channel number for a newly-opened link. */
1148static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
1149 const char *link_name)
1150{
1151 int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
1152 if (rc < 0) {
1153 netdev_err(dev, "Failed to open '%s'\n", link_name);
1154 return rc;
1155 }
1156 rc = gxio_mpipe_link_channel(link);
1157 if (rc < 0 || rc >= TILE_NET_CHANNELS) {
1158 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
1159 gxio_mpipe_link_close(link);
1160 return -EINVAL;
1161 }
1162 return rc;
1163}
1164
1165/* Help the kernel activate the given network interface. */
1166static int tile_net_open(struct net_device *dev)
1167{
1168 struct tile_net_priv *priv = netdev_priv(dev);
1169 int cpu, rc;
1170
1171 mutex_lock(&tile_net_devs_for_channel_mutex);
1172
1173 /* Do one-time initialization the first time any device is opened. */
1174 if (ingress_irq < 0) {
1175 rc = tile_net_init_mpipe(dev);
1176 if (rc != 0)
1177 goto fail;
1178 }
1179
1180 /* Determine if this is the "loopify" device. */
1181 if (unlikely((loopify_link_name != NULL) &&
1182 !strcmp(dev->name, loopify_link_name))) {
1183 rc = tile_net_link_open(dev, &priv->link, "loop0");
1184 if (rc < 0)
1185 goto fail;
1186 priv->channel = rc;
1187 rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
1188 if (rc < 0)
1189 goto fail;
1190 priv->loopify_channel = rc;
1191 priv->echannel = rc;
1192 } else {
1193 rc = tile_net_link_open(dev, &priv->link, dev->name);
1194 if (rc < 0)
1195 goto fail;
1196 priv->channel = rc;
1197 priv->echannel = rc;
1198 }
1199
1200 /* Initialize egress info (if needed). Once ever, per echannel. */
1201 rc = tile_net_init_egress(dev, priv->echannel);
1202 if (rc != 0)
1203 goto fail;
1204
1205 tile_net_devs_for_channel[priv->channel] = dev;
1206
1207 rc = tile_net_update(dev);
1208 if (rc != 0)
1209 goto fail;
1210
1211 mutex_unlock(&tile_net_devs_for_channel_mutex);
1212
1213 /* Initialize the transmit wake timer for this device for each cpu. */
1214 for_each_online_cpu(cpu) {
1215 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1216 struct tile_net_tx_wake *tx_wake =
1217 &info->tx_wake[priv->echannel];
1218
1219 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
1220 HRTIMER_MODE_REL);
1221 tx_wake->timer.function = tile_net_handle_tx_wake_timer;
1222 tx_wake->dev = dev;
1223 }
1224
1225 for_each_online_cpu(cpu)
1226 netif_start_subqueue(dev, cpu);
1227 netif_carrier_on(dev);
1228 return 0;
1229
1230fail:
1231 if (priv->loopify_channel >= 0) {
1232 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1233 netdev_warn(dev, "Failed to close loopify link!\n");
1234 priv->loopify_channel = -1;
1235 }
1236 if (priv->channel >= 0) {
1237 if (gxio_mpipe_link_close(&priv->link) != 0)
1238 netdev_warn(dev, "Failed to close link!\n");
1239 tile_net_devs_for_channel[priv->channel] = NULL;
1240 priv->channel = -1;
1241 }
1242 priv->echannel = -1;
1243 mutex_unlock(&tile_net_devs_for_channel_mutex);
1244
1245 /* Don't return raw gxio error codes to generic Linux. */
1246 return (rc > -512) ? rc : -EIO;
1247}
1248
1249/* Help the kernel deactivate the given network interface. */
1250static int tile_net_stop(struct net_device *dev)
1251{
1252 struct tile_net_priv *priv = netdev_priv(dev);
1253 int cpu;
1254
1255 for_each_online_cpu(cpu) {
1256 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1257 struct tile_net_tx_wake *tx_wake =
1258 &info->tx_wake[priv->echannel];
1259
1260 hrtimer_cancel(&tx_wake->timer);
1261 netif_stop_subqueue(dev, cpu);
1262 }
1263
1264 mutex_lock(&tile_net_devs_for_channel_mutex);
1265 tile_net_devs_for_channel[priv->channel] = NULL;
1266 (void)tile_net_update(dev);
1267 if (priv->loopify_channel >= 0) {
1268 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1269 netdev_warn(dev, "Failed to close loopify link!\n");
1270 priv->loopify_channel = -1;
1271 }
1272 if (priv->channel >= 0) {
1273 if (gxio_mpipe_link_close(&priv->link) != 0)
1274 netdev_warn(dev, "Failed to close link!\n");
1275 priv->channel = -1;
1276 }
1277 priv->echannel = -1;
1278 mutex_unlock(&tile_net_devs_for_channel_mutex);
1279
1280 return 0;
1281}
1282
1283/* Determine the VA for a fragment. */
1284static inline void *tile_net_frag_buf(skb_frag_t *f)
1285{
1286 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1287 return pfn_to_kaddr(pfn) + f->page_offset;
1288}
1289
1290/* Acquire a completion entry and an egress slot, or if we can't,
1291 * stop the queue and schedule the tx_wake timer.
1292 */
1293static s64 tile_net_equeue_try_reserve(struct net_device *dev,
1294 struct tile_net_comps *comps,
1295 gxio_mpipe_equeue_t *equeue,
1296 int num_edescs)
1297{
1298 /* Try to acquire a completion entry. */
1299 if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
1300 tile_net_free_comps(equeue, comps, 32, false) != 0) {
1301
1302 /* Try to acquire an egress slot. */
1303 s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1304 if (slot >= 0)
1305 return slot;
1306
1307 /* Freeing some completions gives the equeue time to drain. */
1308 tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
1309
1310 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1311 if (slot >= 0)
1312 return slot;
1313 }
1314
1315 /* Still nothing; give up and stop the queue for a short while. */
1316 netif_stop_subqueue(dev, smp_processor_id());
1317 tile_net_schedule_tx_wake_timer(dev);
1318 return -1;
1319}
1320
1321/* Determine how many edesc's are needed for TSO.
1322 *
1323 * Sometimes, if "sendfile()" requires copying, we will be called with
1324 * "data" containing the header and payload, with "frags" being empty.
1325 * Sometimes, for example when using NFS over TCP, a single segment can
1326 * span 3 fragments. This requires special care.
1327 */
1328static int tso_count_edescs(struct sk_buff *skb)
1329{
1330 struct skb_shared_info *sh = skb_shinfo(skb);
1331 unsigned int data_len = skb->data_len;
1332 unsigned int p_len = sh->gso_size;
1333 long f_id = -1; /* id of the current fragment */
1334 long f_size = -1; /* size of the current fragment */
1335 long f_used = -1; /* bytes used from the current fragment */
1336 long n; /* size of the current piece of payload */
1337 int num_edescs = 0;
1338 int segment;
1339
1340 for (segment = 0; segment < sh->gso_segs; segment++) {
1341
1342 unsigned int p_used = 0;
1343
1344 /* One edesc for header and for each piece of the payload. */
1345 for (num_edescs++; p_used < p_len; num_edescs++) {
1346
1347 /* Advance as needed. */
1348 while (f_used >= f_size) {
1349 f_id++;
1350 f_size = sh->frags[f_id].size;
1351 f_used = 0;
1352 }
1353
1354 /* Use bytes from the current fragment. */
1355 n = p_len - p_used;
1356 if (n > f_size - f_used)
1357 n = f_size - f_used;
1358 f_used += n;
1359 p_used += n;
1360 }
1361
1362 /* The last segment may be less than gso_size. */
1363 data_len -= p_len;
1364 if (data_len < p_len)
1365 p_len = data_len;
1366 }
1367
1368 return num_edescs;
1369}
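/* Worked example with made-up numbers: for an skb with gso_size 1000 and
 * 2500 bytes of paged payload split into frags of 1200 and 1300 bytes,
 * the segments are 1000, 1000 and 500 bytes.  Segment 1 needs a header
 * edesc plus one payload edesc (2); segment 2 straddles the fragment
 * boundary, so it needs a header plus two payload edescs (3); segment 3
 * needs two (2); tso_count_edescs() therefore returns 7.
 */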
1370
1371/* Prepare modified copies of the skbuff headers.
1372 * FIXME: add support for IPv6.
1373 */
1374static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1375 s64 slot)
1376{
1377 struct skb_shared_info *sh = skb_shinfo(skb);
1378 struct iphdr *ih;
1379 struct tcphdr *th;
1380 unsigned int data_len = skb->data_len;
1381 unsigned char *data = skb->data;
1382 unsigned int ih_off, th_off, sh_len, p_len;
1383 unsigned int isum_seed, tsum_seed, id, seq;
1384 long f_id = -1; /* id of the current fragment */
1385 long f_size = -1; /* size of the current fragment */
1386 long f_used = -1; /* bytes used from the current fragment */
1387 long n; /* size of the current piece of payload */
1388 int segment;
1389
1390 /* Locate original headers and compute various lengths. */
1391 ih = ip_hdr(skb);
1392 th = tcp_hdr(skb);
1393 ih_off = skb_network_offset(skb);
1394 th_off = skb_transport_offset(skb);
1395 sh_len = th_off + tcp_hdrlen(skb);
1396 p_len = sh->gso_size;
1397
1398 /* Set up seed values for IP and TCP csum and initialize id and seq. */
1399 isum_seed = ((0xFFFF - ih->check) +
1400 (0xFFFF - ih->tot_len) +
1401 (0xFFFF - ih->id));
1402 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1403 id = ntohs(ih->id);
1404 seq = ntohl(th->seq);
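	/* Sketch of the arithmetic above: "0xFFFF - x" is the ones'
	 * complement negation, so isum_seed is the checksum sum of the
	 * original IP header with the old tot_len and id backed out;
	 * each copied header below re-adds its own tot_len and id and
	 * complements the result.  Likewise, tsum_seed removes the
	 * whole-packet length term from the stack-provided th->check so
	 * that each segment can add back its own sh_len + p_len; the
	 * payload portion of the TCP checksum is finished by hardware
	 * via the csum fields of the header edesc set up in tso_egress().
	 */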
1405
1406 /* Prepare all the headers. */
1407 for (segment = 0; segment < sh->gso_segs; segment++) {
1408 unsigned char *buf;
1409 unsigned int p_used = 0;
1410
1411 /* Copy to the header memory for this segment. */
1412 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1413 NET_IP_ALIGN;
1414 memcpy(buf, data, sh_len);
1415
1416 /* Update copied ip header. */
1417 ih = (struct iphdr *)(buf + ih_off);
1418 ih->tot_len = htons(sh_len + p_len - ih_off);
1419 ih->id = htons(id);
1420 ih->check = csum_long(isum_seed + ih->tot_len +
1421 ih->id) ^ 0xffff;
1422
1423 /* Update copied tcp header. */
1424 th = (struct tcphdr *)(buf + th_off);
1425 th->seq = htonl(seq);
1426 th->check = csum_long(tsum_seed + htons(sh_len + p_len));
1427 if (segment != sh->gso_segs - 1) {
1428 th->fin = 0;
1429 th->psh = 0;
1430 }
1431
1432 /* Skip past the header. */
1433 slot++;
1434
1435 /* Skip past the payload. */
1436 while (p_used < p_len) {
1437
1438 /* Advance as needed. */
1439 while (f_used >= f_size) {
1440 f_id++;
1441 f_size = sh->frags[f_id].size;
1442 f_used = 0;
1443 }
1444
1445 /* Use bytes from the current fragment. */
1446 n = p_len - p_used;
1447 if (n > f_size - f_used)
1448 n = f_size - f_used;
1449 f_used += n;
1450 p_used += n;
1451
1452 slot++;
1453 }
1454
1455 id++;
1456 seq += p_len;
1457
1458 /* The last segment may be less than gso_size. */
1459 data_len -= p_len;
1460 if (data_len < p_len)
1461 p_len = data_len;
1462 }
1463
1464 /* Flush the headers so they are ready for hardware DMA. */
1465 wmb();
1466}
1467
1468/* Pass all the data to mpipe for egress. */
1469static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1470 struct sk_buff *skb, unsigned char *headers, s64 slot)
1471{
1472 struct tile_net_priv *priv = netdev_priv(dev);
1473 struct skb_shared_info *sh = skb_shinfo(skb);
1474 unsigned int data_len = skb->data_len;
1475 unsigned int p_len = sh->gso_size;
1476 gxio_mpipe_edesc_t edesc_head = { { 0 } };
1477 gxio_mpipe_edesc_t edesc_body = { { 0 } };
1478 long f_id = -1; /* id of the current fragment */
1479 long f_size = -1; /* size of the current fragment */
1480 long f_used = -1; /* bytes used from the current fragment */
1481 long n; /* size of the current piece of payload */
1482 unsigned long tx_packets = 0, tx_bytes = 0;
1483 unsigned int csum_start, sh_len;
1484 int segment;
1485
1486 /* Prepare to egress the headers: set up header edesc. */
1487 csum_start = skb_checksum_start_offset(skb);
1488 sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1489 edesc_head.csum = 1;
1490 edesc_head.csum_start = csum_start;
1491 edesc_head.csum_dest = csum_start + skb->csum_offset;
1492 edesc_head.xfer_size = sh_len;
1493
1494 /* This is only used to specify the TLB. */
1495 edesc_head.stack_idx = large_buffer_stack;
1496 edesc_body.stack_idx = large_buffer_stack;
1497
1498 /* Egress all the edescs. */
1499 for (segment = 0; segment < sh->gso_segs; segment++) {
1500 void *va;
1501 unsigned char *buf;
1502 unsigned int p_used = 0;
1503
1504 /* Egress the header. */
1505 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1506 NET_IP_ALIGN;
1507 edesc_head.va = va_to_tile_io_addr(buf);
1508 gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
1509 slot++;
1510
1511 /* Egress the payload. */
1512 while (p_used < p_len) {
1513
1514 /* Advance as needed. */
1515 while (f_used >= f_size) {
1516 f_id++;
1517 f_size = sh->frags[f_id].size;
1518 f_used = 0;
1519 }
1520
1521 va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
1522
1523 /* Use bytes from the current fragment. */
1524 n = p_len - p_used;
1525 if (n > f_size - f_used)
1526 n = f_size - f_used;
1527 f_used += n;
1528 p_used += n;
1529
1530 /* Egress a piece of the payload. */
1531 edesc_body.va = va_to_tile_io_addr(va);
1532 edesc_body.xfer_size = n;
1533 edesc_body.bound = !(p_used < p_len);
1534 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
1535 slot++;
1536 }
1537
1538 tx_packets++;
1539 tx_bytes += sh_len + p_len;
1540
1541 /* The last segment may be less than gso_size. */
1542 data_len -= p_len;
1543 if (data_len < p_len)
1544 p_len = data_len;
1545 }
1546
1547 /* Update stats. */
1548 tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
1549 tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
1550}
1551
1552/* Do "TSO" handling for egress.
1553 *
1554 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
1555 * otherwise the stack uses scatter-gather to implement GSO in software.
1556 * In our testing, enabling GSO support (via NETIF_F_SG) drops network
1557 * performance down to around 7.5 Gbps on the 10G interfaces, although
1558 * also dropping cpu utilization way down, to under 8%. But
1559 * implementing "TSO" in the driver brings performance back up to line
1560 * rate, while dropping cpu usage even further, to less than 4%. In
1561 * practice, profiling of GSO shows that skb_segment() is what causes
1562 * the performance overheads; we benefit in the driver from using
1563 * preallocated memory to duplicate the TCP/IP headers.
1564 */
1565static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1566{
1567 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1568 struct tile_net_priv *priv = netdev_priv(dev);
1569 int channel = priv->echannel;
1570 struct tile_net_egress *egress = &egress_for_echannel[channel];
1571 struct tile_net_comps *comps = info->comps_for_echannel[channel];
1572 gxio_mpipe_equeue_t *equeue = egress->equeue;
1573 unsigned long irqflags;
1574 int num_edescs;
1575 s64 slot;
1576
1577 /* Determine how many mpipe edesc's are needed. */
1578 num_edescs = tso_count_edescs(skb);
1579
1580 local_irq_save(irqflags);
1581
1582 /* Try to acquire a completion entry and an egress slot. */
1583 slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
1584 if (slot < 0) {
1585 local_irq_restore(irqflags);
1586 return NETDEV_TX_BUSY;
1587 }
1588
1589 /* Set up copies of header data properly. */
1590 tso_headers_prepare(skb, egress->headers, slot);
1591
1592 /* Actually pass the data to the network hardware. */
1593 tso_egress(dev, equeue, skb, egress->headers, slot);
1594
1595 /* Add a completion record. */
1596 add_comp(equeue, comps, slot + num_edescs - 1, skb);
1597
1598 local_irq_restore(irqflags);
1599
1600 /* Make sure the egress timer is scheduled. */
1601 tile_net_schedule_egress_timer();
1602
1603 return NETDEV_TX_OK;
1604}
1605
1606/* Analyze the body and frags for a transmit request. */
1607static unsigned int tile_net_tx_frags(struct frag *frags,
1608 struct sk_buff *skb,
1609 void *b_data, unsigned int b_len)
1610{
1611 unsigned int i, n = 0;
1612
1613 struct skb_shared_info *sh = skb_shinfo(skb);
1614
1615 if (b_len != 0) {
1616 frags[n].buf = b_data;
1617 frags[n++].length = b_len;
1618 }
1619
1620 for (i = 0; i < sh->nr_frags; i++) {
1621 skb_frag_t *f = &sh->frags[i];
1622 frags[n].buf = tile_net_frag_buf(f);
1623 frags[n++].length = skb_frag_size(f);
1624 }
1625
1626 return n;
1627}
1628
1629/* Help the kernel transmit a packet. */
1630static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1631{
1632 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1633 struct tile_net_priv *priv = netdev_priv(dev);
1634 struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
1635 gxio_mpipe_equeue_t *equeue = egress->equeue;
1636 struct tile_net_comps *comps =
1637 info->comps_for_echannel[priv->echannel];
1638 unsigned int len = skb->len;
1639 unsigned char *data = skb->data;
1640 unsigned int num_edescs;
1641 struct frag frags[MAX_FRAGS];
1642 gxio_mpipe_edesc_t edescs[MAX_FRAGS];
1643 unsigned long irqflags;
1644 gxio_mpipe_edesc_t edesc = { { 0 } };
1645 unsigned int i;
1646 s64 slot;
1647
1648 if (skb_is_gso(skb))
1649 return tile_net_tx_tso(skb, dev);
1650
1651 num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
1652
1653 /* This is only used to specify the TLB. */
1654 edesc.stack_idx = large_buffer_stack;
1655
1656 /* Prepare the edescs. */
1657 for (i = 0; i < num_edescs; i++) {
1658 edesc.xfer_size = frags[i].length;
1659 edesc.va = va_to_tile_io_addr(frags[i].buf);
1660 edescs[i] = edesc;
1661 }
1662
1663 /* Mark the final edesc. */
1664 edescs[num_edescs - 1].bound = 1;
1665
1666 /* Add checksum info to the initial edesc, if needed. */
1667 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1668 unsigned int csum_start = skb_checksum_start_offset(skb);
1669 edescs[0].csum = 1;
1670 edescs[0].csum_start = csum_start;
1671 edescs[0].csum_dest = csum_start + skb->csum_offset;
1672 }
1673
1674 local_irq_save(irqflags);
1675
1676 /* Try to acquire a completion entry and an egress slot. */
1677 slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
1678 if (slot < 0) {
1679 local_irq_restore(irqflags);
1680 return NETDEV_TX_BUSY;
1681 }
1682
1683 for (i = 0; i < num_edescs; i++)
1684 gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
1685
1686 /* Add a completion record. */
1687 add_comp(equeue, comps, slot - 1, skb);
1688
1689 /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
1690 tile_net_stats_add(1, &priv->stats.tx_packets);
1691 tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
1692 &priv->stats.tx_bytes);
1693
1694 local_irq_restore(irqflags);
1695
1696 /* Make sure the egress timer is scheduled. */
1697 tile_net_schedule_egress_timer();
1698
1699 return NETDEV_TX_OK;
1700}
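/* For illustration (sizes made up): a non-GSO skb with 100 bytes of
 * linear data plus a single 1400-byte page fragment produces two edescs
 * in the loop above; only the last edesc has "bound" set to mark the end
 * of the packet, and only the first carries the checksum fields when
 * CHECKSUM_PARTIAL is in use.
 */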
1701
1702/* Return subqueue id on this core (one per core). */
1703static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
1704{
1705 return smp_processor_id();
1706}
1707
1708/* Deal with a transmit timeout. */
1709static void tile_net_tx_timeout(struct net_device *dev)
1710{
1711 int cpu;
1712
1713 for_each_online_cpu(cpu)
1714 netif_wake_subqueue(dev, cpu);
1715}
1716
1717/* Ioctl commands. */
1718static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1719{
1720 return -EOPNOTSUPP;
1721}
1722
1723/* Get system network statistics for device. */
1724static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
1725{
1726 struct tile_net_priv *priv = netdev_priv(dev);
1727 return &priv->stats;
1728}
1729
1730/* Change the MTU. */
1731static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
1732{
1733 if ((new_mtu < 68) || (new_mtu > 1500))
1734 return -EINVAL;
1735 dev->mtu = new_mtu;
1736 return 0;
1737}
1738
1739/* Change the Ethernet address of the NIC.
1740 *
1741 * The hypervisor driver does not support changing MAC address. However,
1742 * the hardware does not do anything with the MAC address, so the address
1743 * which gets used on outgoing packets, and which is accepted on incoming
1744 * packets, is completely up to us.
1745 *
1746 * Returns 0 on success, negative on failure.
1747 */
1748static int tile_net_set_mac_address(struct net_device *dev, void *p)
1749{
1750 struct sockaddr *addr = p;
1751
1752 if (!is_valid_ether_addr(addr->sa_data))
1753 return -EINVAL;
1754 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1755 return 0;
1756}
1757
1758#ifdef CONFIG_NET_POLL_CONTROLLER
1759/* Polling 'interrupt' - used by things like netconsole to send skbs
1760 * without having to re-enable interrupts. It's not called while
1761 * the interrupt routine is executing.
1762 */
1763static void tile_net_netpoll(struct net_device *dev)
1764{
1765 disable_percpu_irq(ingress_irq);
1766 tile_net_handle_ingress_irq(ingress_irq, NULL);
1767 enable_percpu_irq(ingress_irq, 0);
1768}
1769#endif
1770
1771static const struct net_device_ops tile_net_ops = {
1772 .ndo_open = tile_net_open,
1773 .ndo_stop = tile_net_stop,
1774 .ndo_start_xmit = tile_net_tx,
1775 .ndo_select_queue = tile_net_select_queue,
1776 .ndo_do_ioctl = tile_net_ioctl,
1777 .ndo_get_stats = tile_net_get_stats,
1778 .ndo_change_mtu = tile_net_change_mtu,
1779 .ndo_tx_timeout = tile_net_tx_timeout,
1780 .ndo_set_mac_address = tile_net_set_mac_address,
1781#ifdef CONFIG_NET_POLL_CONTROLLER
1782 .ndo_poll_controller = tile_net_netpoll,
1783#endif
1784};
1785
1786/* The setup function.
1787 *
1788 * This uses ether_setup() to assign various fields in dev, including
1789 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
1790 */
1791static void tile_net_setup(struct net_device *dev)
1792{
1793 ether_setup(dev);
1794 dev->netdev_ops = &tile_net_ops;
1795 dev->watchdog_timeo = TILE_NET_TIMEOUT;
1796 dev->features |= NETIF_F_LLTX;
1797 dev->features |= NETIF_F_HW_CSUM;
1798 dev->features |= NETIF_F_SG;
1799 dev->features |= NETIF_F_TSO;
1800 dev->mtu = 1500;
1801}
1802
1803/* Allocate the device structure, register the device, and obtain the
1804 * MAC address from the hypervisor.
1805 */
1806static void tile_net_dev_init(const char *name, const uint8_t *mac)
1807{
1808 int ret;
1809 int i;
1810 int nz_addr = 0;
1811 struct net_device *dev;
1812 struct tile_net_priv *priv;
1813
1814 /* HACK: Ignore "loop" links. */
1815 if (strncmp(name, "loop", 4) == 0)
1816 return;
1817
1818 /* Allocate the device structure. Normally, "name" is a
1819 * template, instantiated by register_netdev(), but not for us.
1820 */
1821 dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
1822 NR_CPUS, 1);
1823 if (!dev) {
1824 pr_err("alloc_netdev_mqs(%s) failed\n", name);
1825 return;
1826 }
1827
1828 /* Initialize "priv". */
1829 priv = netdev_priv(dev);
1830 memset(priv, 0, sizeof(*priv));
1831 priv->dev = dev;
1832 priv->channel = -1;
1833 priv->loopify_channel = -1;
1834 priv->echannel = -1;
1835
1836 /* Get the MAC address and set it in the device struct; this must
1837 * be done before the device is opened. If the MAC is all zeroes,
1838 * we use a random address, since we're probably on the simulator.
1839 */
1840 for (i = 0; i < 6; i++)
1841 nz_addr |= mac[i];
1842
1843 if (nz_addr) {
1844 memcpy(dev->dev_addr, mac, 6);
1845 dev->addr_len = 6;
1846 } else {
1847 random_ether_addr(dev->dev_addr);
1848 }
1849
1850 /* Register the network device. */
1851 ret = register_netdev(dev);
1852 if (ret) {
1853 netdev_err(dev, "register_netdev failed %d\n", ret);
1854 free_netdev(dev);
1855 return;
1856 }
1857}
1858
1859/* Per-cpu module initialization. */
1860static void tile_net_init_module_percpu(void *unused)
1861{
1862 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1863 int my_cpu = smp_processor_id();
1864
1865 info->has_iqueue = false;
1866
1867 info->my_cpu = my_cpu;
1868
1869 /* Initialize the egress timer. */
1870 hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1871 info->egress_timer.function = tile_net_handle_egress_timer;
1872}
1873
1874/* Module initialization. */
1875static int __init tile_net_init_module(void)
1876{
1877 int i;
1878 char name[GXIO_MPIPE_LINK_NAME_LEN];
1879 uint8_t mac[6];
1880
1881 pr_info("Tilera Network Driver\n");
1882
1883 mutex_init(&tile_net_devs_for_channel_mutex);
1884
1885 /* Initialize each CPU. */
1886 on_each_cpu(tile_net_init_module_percpu, NULL, 1);
1887
1888 /* Find out what devices we have, and initialize them. */
1889 for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
1890 tile_net_dev_init(name, mac);
1891
1892 if (!network_cpus_init())
1893 network_cpus_map = *cpu_online_mask;
1894
1895 return 0;
1896}
1897
1898module_init(tile_net_init_module);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4ffcd57b011b..2857ab078aac 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -478,6 +478,7 @@ struct netvsc_device {
 	u32 nvsp_version;
 
 	atomic_t num_outstanding_sends;
+	wait_queue_head_t wait_drain;
 	bool start_remove;
 	bool destroy;
 	/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8b919471472f..0c569831db5a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
 	if (!net_device)
 		return NULL;
 
+	init_waitqueue_head(&net_device->wait_drain);
 	net_device->start_remove = false;
 	net_device->destroy = false;
 	net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
 	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
 
 	/* Wait for all send completions */
-	while (atomic_read(&net_device->num_outstanding_sends)) {
-		dev_info(&device->device,
-			"waiting for %d requests to complete...\n",
-			atomic_read(&net_device->num_outstanding_sends));
-		udelay(100);
-	}
+	wait_event(net_device->wait_drain,
+		atomic_read(&net_device->num_outstanding_sends) == 0);
 
 	netvsc_disconnect_vsp(net_device);
 
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
 	num_outstanding_sends =
 		atomic_dec_return(&net_device->num_outstanding_sends);
 
+	if (net_device->destroy && num_outstanding_sends == 0)
+		wake_up(&net_device->wait_drain);
+
 	if (netif_queue_stopped(ndev) && !net_device->start_remove &&
 		(hv_ringbuf_avail_percent(&device->channel->outbound)
 		> RING_AVAIL_PERCENT_HIWATER ||
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 5ac46f5226f3..47f8e8939266 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
 #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
 #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
+#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
+#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
 	if (c < 0)
 		return c;
 
+	/* INTR pin used: speed/link/duplex will cause an interrupt */
+	c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+	if (c < 0)
+		return c;
+
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
 		/* Additional delay (2ns) used to adjust RX clock phase
 		 * at RGMII interface */
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 39ea0674dcde..5c120189ec86 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
 	struct mdio_mux_parent_bus *pb = cb->parent;
 	int r;
 
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	/* In theory multiple mdio_mux could be stacked, thus creating
+	 * more than a single level of nesting. But in practice,
+	 * SINGLE_DEPTH_NESTING will cover the vast majority of use
+	 * cases. We use it, instead of trying to handle the general
+	 * case.
+	 */
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
 	int r;
 
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 683ef1ce5519..5061608f408c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
 }
 /**
  * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
  *
  * Returns a pointer to the mii_bus, or NULL if none found.
  *
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 590f902deb6b..9d6c80c8a0cf 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -161,7 +161,7 @@ static struct phy_driver ks8051_driver = {
 static struct phy_driver ks8001_driver = {
 	.phy_id = PHY_ID_KS8001,
 	.name = "Micrel KS8001 or KS8721",
-	.phy_id_mask = 0x00fffff0,
+	.phy_id_mask = 0x00ffffff,
 	.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init = kszphy_config_init,
@@ -174,7 +174,7 @@ static struct phy_driver ks8001_driver = {
 
 static struct phy_driver ksz9021_driver = {
 	.phy_id = PHY_ID_KSZ9021,
-	.phy_id_mask = 0x000fff10,
+	.phy_id_mask = 0x000ffffe,
 	.name = "Micrel KSZ9021 Gigabit PHY",
 	.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
 				| SUPPORTED_Asym_Pause),
@@ -240,8 +240,8 @@ MODULE_AUTHOR("David J. Choi");
 MODULE_LICENSE("GPL");
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
-	{ PHY_ID_KSZ9021, 0x000fff10 },
-	{ PHY_ID_KS8001, 0x00fffff0 },
+	{ PHY_ID_KSZ9021, 0x000ffffe },
+	{ PHY_ID_KS8001, 0x00ffffff },
 	{ PHY_ID_KS8737, 0x00fffff0 },
 	{ PHY_ID_KS8041, 0x00fffff0 },
 	{ PHY_ID_KS8051, 0x00fffff0 },
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 964031e3da87..a28a983d465e 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
59#define USB_PRODUCT_IPHONE_3G 0x1292 59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294 60#define USB_PRODUCT_IPHONE_3GS 0x1294
61#define USB_PRODUCT_IPHONE_4 0x1297 61#define USB_PRODUCT_IPHONE_4 0x1297
62#define USB_PRODUCT_IPAD 0x129a
62#define USB_PRODUCT_IPHONE_4_VZW 0x129c 63#define USB_PRODUCT_IPHONE_4_VZW 0x129c
63#define USB_PRODUCT_IPHONE_4S 0x12a0 64#define USB_PRODUCT_IPHONE_4S 0x12a0
64 65
@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
101 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 102 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
102 IPHETH_USBINTF_PROTO) }, 103 IPHETH_USBINTF_PROTO) },
103 { USB_DEVICE_AND_INTERFACE_INFO( 104 { USB_DEVICE_AND_INTERFACE_INFO(
105 USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
106 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
107 IPHETH_USBINTF_PROTO) },
108 { USB_DEVICE_AND_INTERFACE_INFO(
104 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, 109 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
105 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 110 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
106 IPHETH_USBINTF_PROTO) }, 111 IPHETH_USBINTF_PROTO) },
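The ipheth.c hunk adds the iPad product ID (0x129a) and a matching USB_DEVICE_AND_INTERFACE_INFO() entry, so the existing driver simply binds to one more vendor/product/interface-class combination. A rough illustration of that kind of table match follows; the struct and the class/subclass/protocol values are simplified stand-ins, not the real struct usb_device_id or the driver's IPHETH_* constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for a usb_device_id entry matching on vendor,
     * product and interface class/subclass/protocol. */
    struct id_entry {
        uint16_t vendor, product;
        uint8_t  if_class, if_subclass, if_proto;
    };

    static const struct id_entry table[] = {
        { 0x05ac, 0x1297, 0xff, 0xfd, 1 },  /* iPhone 4 (placeholder class codes) */
        { 0x05ac, 0x129a, 0xff, 0xfd, 1 },  /* iPad: the newly added entry */
    };

    static int id_matches(const struct id_entry *e, uint16_t vendor, uint16_t product,
                          uint8_t cls, uint8_t sub, uint8_t proto)
    {
        return e->vendor == vendor && e->product == product &&
               e->if_class == cls && e->if_subclass == sub && e->if_proto == proto;
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            printf("entry %u matches iPad: %d\n", i,
                   id_matches(&table[i], 0x05ac, 0x129a, 0xff, 0xfd, 1));
        return 0;
    }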
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3b206786b5e7..a051cedd64bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -197,6 +197,10 @@ err:
197static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on) 197static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
198{ 198{
199 struct usbnet *dev = usb_get_intfdata(intf); 199 struct usbnet *dev = usb_get_intfdata(intf);
200
201 /* can be called while disconnecting */
202 if (!dev)
203 return 0;
200 return qmi_wwan_manage_power(dev, on); 204 return qmi_wwan_manage_power(dev, on);
201} 205}
202 206
@@ -257,29 +261,6 @@ err:
257 return rv; 261 return rv;
258} 262}
259 263
260/* Gobi devices uses identical class/protocol codes for all interfaces regardless
261 * of function. Some of these are CDC ACM like and have the exact same endpoints
262 * we are looking for. This leaves two possible strategies for identifying the
263 * correct interface:
264 * a) hardcoding interface number, or
265 * b) use the fact that the wwan interface is the only one lacking additional
266 * (CDC functional) descriptors
267 *
268 * Let's see if we can get away with the generic b) solution.
269 */
270static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
271{
272 int rv = -EINVAL;
273
274 /* ignore any interface with additional descriptors */
275 if (intf->cur_altsetting->extralen)
276 goto err;
277
278 rv = qmi_wwan_bind_shared(dev, intf);
279err:
280 return rv;
281}
282
283static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf) 264static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
284{ 265{
285 struct usb_driver *subdriver = (void *)dev->data[0]; 266 struct usb_driver *subdriver = (void *)dev->data[0];
@@ -347,15 +328,15 @@ static const struct driver_info qmi_wwan_shared = {
347 .manage_power = qmi_wwan_manage_power, 328 .manage_power = qmi_wwan_manage_power,
348}; 329};
349 330
350static const struct driver_info qmi_wwan_gobi = { 331static const struct driver_info qmi_wwan_force_int0 = {
351 .description = "Qualcomm Gobi wwan/QMI device", 332 .description = "Qualcomm WWAN/QMI device",
352 .flags = FLAG_WWAN, 333 .flags = FLAG_WWAN,
353 .bind = qmi_wwan_bind_gobi, 334 .bind = qmi_wwan_bind_shared,
354 .unbind = qmi_wwan_unbind_shared, 335 .unbind = qmi_wwan_unbind_shared,
355 .manage_power = qmi_wwan_manage_power, 336 .manage_power = qmi_wwan_manage_power,
337 .data = BIT(0), /* interface whitelist bitmap */
356}; 338};
357 339
358/* ZTE suck at making USB descriptors */
359static const struct driver_info qmi_wwan_force_int1 = { 340static const struct driver_info qmi_wwan_force_int1 = {
360 .description = "Qualcomm WWAN/QMI device", 341 .description = "Qualcomm WWAN/QMI device",
361 .flags = FLAG_WWAN, 342 .flags = FLAG_WWAN,
@@ -365,6 +346,24 @@ static const struct driver_info qmi_wwan_force_int1 = {
365 .data = BIT(1), /* interface whitelist bitmap */ 346 .data = BIT(1), /* interface whitelist bitmap */
366}; 347};
367 348
349static const struct driver_info qmi_wwan_force_int2 = {
350 .description = "Qualcomm WWAN/QMI device",
351 .flags = FLAG_WWAN,
352 .bind = qmi_wwan_bind_shared,
353 .unbind = qmi_wwan_unbind_shared,
354 .manage_power = qmi_wwan_manage_power,
355 .data = BIT(2), /* interface whitelist bitmap */
356};
357
358static const struct driver_info qmi_wwan_force_int3 = {
359 .description = "Qualcomm WWAN/QMI device",
360 .flags = FLAG_WWAN,
361 .bind = qmi_wwan_bind_shared,
362 .unbind = qmi_wwan_unbind_shared,
363 .manage_power = qmi_wwan_manage_power,
364 .data = BIT(3), /* interface whitelist bitmap */
365};
366
368static const struct driver_info qmi_wwan_force_int4 = { 367static const struct driver_info qmi_wwan_force_int4 = {
369 .description = "Qualcomm WWAN/QMI device", 368 .description = "Qualcomm WWAN/QMI device",
370 .flags = FLAG_WWAN, 369 .flags = FLAG_WWAN,
@@ -390,16 +389,23 @@ static const struct driver_info qmi_wwan_force_int4 = {
390static const struct driver_info qmi_wwan_sierra = { 389static const struct driver_info qmi_wwan_sierra = {
391 .description = "Sierra Wireless wwan/QMI device", 390 .description = "Sierra Wireless wwan/QMI device",
392 .flags = FLAG_WWAN, 391 .flags = FLAG_WWAN,
393 .bind = qmi_wwan_bind_gobi, 392 .bind = qmi_wwan_bind_shared,
394 .unbind = qmi_wwan_unbind_shared, 393 .unbind = qmi_wwan_unbind_shared,
395 .manage_power = qmi_wwan_manage_power, 394 .manage_power = qmi_wwan_manage_power,
396 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ 395 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
397}; 396};
398 397
399#define HUAWEI_VENDOR_ID 0x12D1 398#define HUAWEI_VENDOR_ID 0x12D1
399
400/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
401#define QMI_GOBI1K_DEVICE(vend, prod) \
402 USB_DEVICE(vend, prod), \
403 .driver_info = (unsigned long)&qmi_wwan_force_int3
404
405/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
400#define QMI_GOBI_DEVICE(vend, prod) \ 406#define QMI_GOBI_DEVICE(vend, prod) \
401 USB_DEVICE(vend, prod), \ 407 USB_DEVICE(vend, prod), \
402 .driver_info = (unsigned long)&qmi_wwan_gobi 408 .driver_info = (unsigned long)&qmi_wwan_force_int0
403 409
404static const struct usb_device_id products[] = { 410static const struct usb_device_id products[] = {
405 { /* Huawei E392, E398 and possibly others sharing both device id and more... */ 411 { /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -501,6 +507,15 @@ static const struct usb_device_id products[] = {
501 .bInterfaceProtocol = 0xff, 507 .bInterfaceProtocol = 0xff,
502 .driver_info = (unsigned long)&qmi_wwan_force_int4, 508 .driver_info = (unsigned long)&qmi_wwan_force_int4,
503 }, 509 },
510 { /* ZTE MF60 */
511 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
512 .idVendor = 0x19d2,
513 .idProduct = 0x1402,
514 .bInterfaceClass = 0xff,
515 .bInterfaceSubClass = 0xff,
516 .bInterfaceProtocol = 0xff,
517 .driver_info = (unsigned long)&qmi_wwan_force_int2,
518 },
504 { /* Sierra Wireless MC77xx in QMI mode */ 519 { /* Sierra Wireless MC77xx in QMI mode */
505 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 520 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
506 .idVendor = 0x1199, 521 .idVendor = 0x1199,
@@ -510,20 +525,24 @@ static const struct usb_device_id products[] = {
510 .bInterfaceProtocol = 0xff, 525 .bInterfaceProtocol = 0xff,
511 .driver_info = (unsigned long)&qmi_wwan_sierra, 526 .driver_info = (unsigned long)&qmi_wwan_sierra,
512 }, 527 },
513 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 528
514 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 529 /* Gobi 1000 devices */
515 {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ 530 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
516 {QMI_GOBI_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ 531 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
517 {QMI_GOBI_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ 532 {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
518 {QMI_GOBI_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ 533 {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
519 {QMI_GOBI_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ 534 {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
520 {QMI_GOBI_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ 535 {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
521 {QMI_GOBI_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ 536 {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
522 {QMI_GOBI_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ 537 {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
523 {QMI_GOBI_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ 538 {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
524 {QMI_GOBI_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ 539 {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
525 {QMI_GOBI_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ 540 {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
526 {QMI_GOBI_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ 541 {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
542 {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
543 {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
544
545 /* Gobi 2000 and 3000 devices */
527 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ 546 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
528 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ 547 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
529 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ 548 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
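The qmi_wwan.c changes drop the descriptor-sniffing qmi_wwan_bind_gobi() and instead route every Gobi device through qmi_wwan_bind_shared() with a driver_info whose .data field is an interface whitelist bitmap: BIT(3) for Gobi 1000 (QMI on interface 3, per the comment above) and BIT(0) for Gobi 2000/3000. Roughly, the shared bind path only accepts an interface whose number is set in that bitmap; here is a minimal userspace sketch of that check, with illustrative function and variable names:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* Roughly what a whitelist-based bind does: accept the probed
     * interface only if its number has a bit set in the driver_info
     * data word. */
    static int bind_allowed(unsigned long whitelist, unsigned int ifnum)
    {
        if (!whitelist)
            return 1;                 /* empty whitelist: accept anything */
        return (whitelist & BIT(ifnum)) != 0;
    }

    int main(void)
    {
        unsigned long gobi1k = BIT(3); /* Gobi 1000: QMI on interface 3 */
        unsigned long gobi2k = BIT(0); /* Gobi 2000/3000: QMI on interface 0 */

        printf("Gobi 1000, ifnum 3: %d\n", bind_allowed(gobi1k, 3));
        printf("Gobi 1000, ifnum 0: %d\n", bind_allowed(gobi1k, 0));
        printf("Gobi 2000, ifnum 0: %d\n", bind_allowed(gobi2k, 0));
        return 0;
    }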
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3faef5670d1f..d75d1f56becf 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
946} 946}
947 947
948static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; 948static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
949static const struct sierra_net_info_data sierra_net_info_data_68A3 = { 949static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
950 .rx_urb_size = 8 * 1024, 950 .rx_urb_size = 8 * 1024,
951 .whitelist = { 951 .whitelist = {
952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list), 952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
954 } 954 }
955}; 955};
956 956
957static const struct driver_info sierra_net_info_68A3 = { 957static const struct driver_info sierra_net_info_direct_ip = {
958 .description = "Sierra Wireless USB-to-WWAN Modem", 958 .description = "Sierra Wireless USB-to-WWAN Modem",
959 .flags = FLAG_WWAN | FLAG_SEND_ZLP, 959 .flags = FLAG_WWAN | FLAG_SEND_ZLP,
960 .bind = sierra_net_bind, 960 .bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
962 .status = sierra_net_status, 962 .status = sierra_net_status,
963 .rx_fixup = sierra_net_rx_fixup, 963 .rx_fixup = sierra_net_rx_fixup,
964 .tx_fixup = sierra_net_tx_fixup, 964 .tx_fixup = sierra_net_tx_fixup,
965 .data = (unsigned long)&sierra_net_info_data_68A3, 965 .data = (unsigned long)&sierra_net_info_data_direct_ip,
966}; 966};
967 967
968static const struct usb_device_id products[] = { 968static const struct usb_device_id products[] = {
969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ 969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
970 .driver_info = (unsigned long) &sierra_net_info_68A3}, 970 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
971 {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
972 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
973 {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
974 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
975 {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
976 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
971 977
972 {}, /* last item */ 978 {}, /* last item */
973}; 979};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9f58330f1312..aba769d77459 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -796,11 +796,13 @@ int usbnet_open (struct net_device *net)
796 if (info->manage_power) { 796 if (info->manage_power) {
797 retval = info->manage_power(dev, 1); 797 retval = info->manage_power(dev, 1);
798 if (retval < 0) 798 if (retval < 0)
799 goto done; 799 goto done_manage_power_error;
800 usb_autopm_put_interface(dev->intf); 800 usb_autopm_put_interface(dev->intf);
801 } 801 }
802 return retval; 802 return retval;
803 803
804done_manage_power_error:
805 clear_bit(EVENT_DEV_OPEN, &dev->flags);
804done: 806done:
805 usb_autopm_put_interface(dev->intf); 807 usb_autopm_put_interface(dev->intf);
806done_nopm: 808done_nopm:
@@ -876,9 +878,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
876{ 878{
877 struct usbnet *dev = netdev_priv(net); 879 struct usbnet *dev = netdev_priv(net);
878 880
879 strncpy (info->driver, dev->driver_name, sizeof info->driver); 881 strlcpy (info->driver, dev->driver_name, sizeof info->driver);
880 strncpy (info->version, DRIVER_VERSION, sizeof info->version); 882 strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
881 strncpy (info->fw_version, dev->driver_info->description, 883 strlcpy (info->fw_version, dev->driver_info->description,
882 sizeof info->fw_version); 884 sizeof info->fw_version);
883 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info); 885 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
884} 886}
@@ -1202,6 +1204,21 @@ deferred:
1202} 1204}
1203EXPORT_SYMBOL_GPL(usbnet_start_xmit); 1205EXPORT_SYMBOL_GPL(usbnet_start_xmit);
1204 1206
1207static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
1208{
1209 struct urb *urb;
1210 int i;
1211
1212 /* don't refill the queue all at once */
1213 for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
1214 urb = usb_alloc_urb(0, flags);
1215 if (urb != NULL) {
1216 if (rx_submit(dev, urb, flags) == -ENOLINK)
1217 return;
1218 }
1219 }
1220}
1221
1205/*-------------------------------------------------------------------------*/ 1222/*-------------------------------------------------------------------------*/
1206 1223
1207// tasklet (work deferred from completions, in_irq) or timer 1224// tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1258,14 @@ static void usbnet_bh (unsigned long param)
1241 !timer_pending (&dev->delay) && 1258 !timer_pending (&dev->delay) &&
1242 !test_bit (EVENT_RX_HALT, &dev->flags)) { 1259 !test_bit (EVENT_RX_HALT, &dev->flags)) {
1243 int temp = dev->rxq.qlen; 1260 int temp = dev->rxq.qlen;
1244 int qlen = RX_QLEN (dev); 1261
1245 1262 if (temp < RX_QLEN(dev)) {
1246 if (temp < qlen) { 1263 rx_alloc_submit(dev, GFP_ATOMIC);
1247 struct urb *urb;
1248 int i;
1249
1250 // don't refill the queue all at once
1251 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
1252 urb = usb_alloc_urb (0, GFP_ATOMIC);
1253 if (urb != NULL) {
1254 if (rx_submit (dev, urb, GFP_ATOMIC) ==
1255 -ENOLINK)
1256 return;
1257 }
1258 }
1259 if (temp != dev->rxq.qlen) 1264 if (temp != dev->rxq.qlen)
1260 netif_dbg(dev, link, dev->net, 1265 netif_dbg(dev, link, dev->net,
1261 "rxqlen %d --> %d\n", 1266 "rxqlen %d --> %d\n",
1262 temp, dev->rxq.qlen); 1267 temp, dev->rxq.qlen);
1263 if (dev->rxq.qlen < qlen) 1268 if (dev->rxq.qlen < RX_QLEN(dev))
1264 tasklet_schedule (&dev->bh); 1269 tasklet_schedule (&dev->bh);
1265 } 1270 }
1266 if (dev->txq.qlen < TX_QLEN (dev)) 1271 if (dev->txq.qlen < TX_QLEN (dev))
@@ -1513,6 +1518,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1513 spin_lock_irq(&dev->txq.lock); 1518 spin_lock_irq(&dev->txq.lock);
1514 /* don't autosuspend while transmitting */ 1519 /* don't autosuspend while transmitting */
1515 if (dev->txq.qlen && PMSG_IS_AUTO(message)) { 1520 if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
1521 dev->suspend_count--;
1516 spin_unlock_irq(&dev->txq.lock); 1522 spin_unlock_irq(&dev->txq.lock);
1517 return -EBUSY; 1523 return -EBUSY;
1518 } else { 1524 } else {
@@ -1569,6 +1575,13 @@ int usbnet_resume (struct usb_interface *intf)
1569 spin_unlock_irq(&dev->txq.lock); 1575 spin_unlock_irq(&dev->txq.lock);
1570 1576
1571 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { 1577 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1578 /* handle remote wakeup ASAP */
1579 if (!dev->wait &&
1580 netif_device_present(dev->net) &&
1581 !timer_pending(&dev->delay) &&
1582 !test_bit(EVENT_RX_HALT, &dev->flags))
1583 rx_alloc_submit(dev, GFP_KERNEL);
1584
1572 if (!(dev->txq.qlen >= TX_QLEN(dev))) 1585 if (!(dev->txq.qlen >= TX_QLEN(dev)))
1573 netif_tx_wake_all_queues(dev->net); 1586 netif_tx_wake_all_queues(dev->net);
1574 tasklet_schedule (&dev->bh); 1587 tasklet_schedule (&dev->bh);
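Among the usbnet.c changes, usbnet_get_drvinfo() now uses strlcpy() instead of strncpy(): strncpy() does not NUL-terminate when the source fills the destination, while strlcpy() always terminates and truncates. strlcpy() is a kernel/BSD function rather than standard C, so the sketch below defines a local equivalent purely to show the difference:

    #include <stdio.h>
    #include <string.h>

    /* Local stand-in for the kernel's strlcpy(): always NUL-terminates,
     * truncating the source if it does not fit. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t copy = (len >= size) ? size - 1 : len;
            memcpy(dst, src, copy);
            dst[copy] = '\0';
        }
        return len;
    }

    int main(void)
    {
        const char *name = "a-driver-name-longer-than-the-buffer";
        char a[8], b[8];

        strncpy(a, name, sizeof(a));     /* fills all 8 bytes, no terminator */
        my_strlcpy(b, name, sizeof(b));  /* 7 bytes copied plus '\0' */

        printf("strncpy terminated: %s\n", memchr(a, '\0', sizeof(a)) ? "yes" : "no");
        printf("strlcpy result: \"%s\"\n", b);
        return 0;
    }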
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5214b1eceb95..f18149ae2588 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
42#define VIRTNET_DRIVER_VERSION "1.0.0" 42#define VIRTNET_DRIVER_VERSION "1.0.0"
43 43
44struct virtnet_stats { 44struct virtnet_stats {
45 struct u64_stats_sync syncp; 45 struct u64_stats_sync tx_syncp;
46 struct u64_stats_sync rx_syncp;
46 u64 tx_bytes; 47 u64 tx_bytes;
47 u64 tx_packets; 48 u64 tx_packets;
48 49
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
300 301
301 hdr = skb_vnet_hdr(skb); 302 hdr = skb_vnet_hdr(skb);
302 303
303 u64_stats_update_begin(&stats->syncp); 304 u64_stats_update_begin(&stats->rx_syncp);
304 stats->rx_bytes += skb->len; 305 stats->rx_bytes += skb->len;
305 stats->rx_packets++; 306 stats->rx_packets++;
306 u64_stats_update_end(&stats->syncp); 307 u64_stats_update_end(&stats->rx_syncp);
307 308
308 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 309 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
309 pr_debug("Needs csum!\n"); 310 pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
565 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { 566 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
566 pr_debug("Sent skb %p\n", skb); 567 pr_debug("Sent skb %p\n", skb);
567 568
568 u64_stats_update_begin(&stats->syncp); 569 u64_stats_update_begin(&stats->tx_syncp);
569 stats->tx_bytes += skb->len; 570 stats->tx_bytes += skb->len;
570 stats->tx_packets++; 571 stats->tx_packets++;
571 u64_stats_update_end(&stats->syncp); 572 u64_stats_update_end(&stats->tx_syncp);
572 573
573 tot_sgs += skb_vnet_hdr(skb)->num_sg; 574 tot_sgs += skb_vnet_hdr(skb)->num_sg;
574 dev_kfree_skb_any(skb); 575 dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
703 u64 tpackets, tbytes, rpackets, rbytes; 704 u64 tpackets, tbytes, rpackets, rbytes;
704 705
705 do { 706 do {
706 start = u64_stats_fetch_begin(&stats->syncp); 707 start = u64_stats_fetch_begin(&stats->tx_syncp);
707 tpackets = stats->tx_packets; 708 tpackets = stats->tx_packets;
708 tbytes = stats->tx_bytes; 709 tbytes = stats->tx_bytes;
710 } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
711
712 do {
713 start = u64_stats_fetch_begin(&stats->rx_syncp);
709 rpackets = stats->rx_packets; 714 rpackets = stats->rx_packets;
710 rbytes = stats->rx_bytes; 715 rbytes = stats->rx_bytes;
711 } while (u64_stats_fetch_retry(&stats->syncp, start)); 716 } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
712 717
713 tot->rx_packets += rpackets; 718 tot->rx_packets += rpackets;
714 tot->tx_packets += tpackets; 719 tot->tx_packets += tpackets;
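The virtio_net.c hunks split the single u64_stats_sync into tx_syncp and rx_syncp: the transmit and receive paths can update the per-CPU counters concurrently, and this seqcount-style sync tolerates only one writer at a time, so each direction now gets its own counter and readers retry each direction independently. A single-threaded sketch of the reader-retry shape behind u64_stats_fetch_begin/retry (the sequence counter here is a plain integer; the real kernel helpers add the memory barriers this toy version lacks):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy per-direction stats block: one sequence counter per writer. */
    struct dir_stats {
        unsigned int seq;          /* even = stable, odd = writer in progress */
        uint64_t packets, bytes;
    };

    static void writer_update(struct dir_stats *s, uint64_t pkts, uint64_t len)
    {
        s->seq++;                  /* begin: make the count odd */
        s->packets += pkts;
        s->bytes += len;
        s->seq++;                  /* end: back to even */
    }

    static void reader_snapshot(const struct dir_stats *s, uint64_t *pkts, uint64_t *len)
    {
        unsigned int start;

        do {
            start = s->seq;        /* u64_stats_fetch_begin() analogue */
            *pkts = s->packets;
            *len  = s->bytes;
        } while ((start & 1) || start != s->seq);  /* retry if a writer ran */
    }

    int main(void)
    {
        struct dir_stats tx = { 0 }, rx = { 0 };
        uint64_t p, b;

        writer_update(&tx, 1, 1500);
        writer_update(&rx, 2, 3000);

        reader_snapshot(&tx, &p, &b);
        printf("tx: %llu pkts, %llu bytes\n", (unsigned long long)p, (unsigned long long)b);
        reader_snapshot(&rx, &p, &b);
        printf("rx: %llu pkts, %llu bytes\n", (unsigned long long)p, (unsigned long long)b);
        return 0;
    }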
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 520a4b2eb9cc..a747c632597a 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -7233,8 +7233,8 @@ static int airo_get_aplist(struct net_device *dev,
7233 } 7233 }
7234 } else { 7234 } else {
7235 dwrq->flags = 1; /* Should be define'd */ 7235 dwrq->flags = 1; /* Should be define'd */
7236 memcpy(extra + sizeof(struct sockaddr)*i, 7236 memcpy(extra + sizeof(struct sockaddr) * i, qual,
7237 &qual, sizeof(struct iw_quality)*i); 7237 sizeof(struct iw_quality) * i);
7238 } 7238 }
7239 dwrq->length = i; 7239 dwrq->length = i;
7240 7240
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index c54b7d37bff1..420d69b2674c 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -143,6 +143,7 @@ struct ath_common {
143 u32 keymax; 143 u32 keymax;
144 DECLARE_BITMAP(keymap, ATH_KEYMAX); 144 DECLARE_BITMAP(keymap, ATH_KEYMAX);
145 DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX); 145 DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
146 DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
146 enum ath_crypt_caps crypt_caps; 147 enum ath_crypt_caps crypt_caps;
147 148
148 unsigned int clockrate; 149 unsigned int clockrate;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index fbaa30930076..44ad6fe0278f 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1045,11 +1045,11 @@ ath5k_drain_tx_buffs(struct ath5k_hw *ah)
1045 1045
1046 ath5k_txbuf_free_skb(ah, bf); 1046 ath5k_txbuf_free_skb(ah, bf);
1047 1047
1048 spin_lock_bh(&ah->txbuflock); 1048 spin_lock(&ah->txbuflock);
1049 list_move_tail(&bf->list, &ah->txbuf); 1049 list_move_tail(&bf->list, &ah->txbuf);
1050 ah->txbuf_len++; 1050 ah->txbuf_len++;
1051 txq->txq_len--; 1051 txq->txq_len--;
1052 spin_unlock_bh(&ah->txbuflock); 1052 spin_unlock(&ah->txbuflock);
1053 } 1053 }
1054 txq->link = NULL; 1054 txq->link = NULL;
1055 txq->txq_poll_mark = false; 1055 txq->txq_poll_mark = false;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a277cf6f339d..4866550ddd96 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -214,6 +214,7 @@ struct ath_frame_info {
214 enum ath9k_key_type keytype; 214 enum ath9k_key_type keytype;
215 u8 keyix; 215 u8 keyix;
216 u8 retries; 216 u8 retries;
217 u8 rtscts_rate;
217}; 218};
218 219
219struct ath_buf_state { 220struct ath_buf_state {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 2b8f61c210e1..abbd6effd60d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1496,6 +1496,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1496 priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--; 1496 priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
1497 1497
1498 if (priv->ah->opmode == NL80211_IFTYPE_STATION) { 1498 if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
1499 ath9k_htc_choose_set_bssid(priv);
1499 if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1)) 1500 if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
1500 ath9k_htc_start_ani(priv); 1501 ath9k_htc_start_ani(priv);
1501 else if (priv->num_sta_assoc_vif == 0) 1502 else if (priv->num_sta_assoc_vif == 0)
@@ -1503,13 +1504,11 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1503 } 1504 }
1504 } 1505 }
1505 1506
1506 if (changed & BSS_CHANGED_BSSID) { 1507 if (changed & BSS_CHANGED_IBSS) {
1507 if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) { 1508 if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
1508 common->curaid = bss_conf->aid; 1509 common->curaid = bss_conf->aid;
1509 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1510 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1510 ath9k_htc_set_bssid(priv); 1511 ath9k_htc_set_bssid(priv);
1511 } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
1512 ath9k_htc_choose_set_bssid(priv);
1513 } 1512 }
1514 } 1513 }
1515 1514
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 7db1890448f2..995ca8e1302e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
622 622
623 if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) { 623 if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
624 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || 624 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
625 ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) && 625 ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
626 !ah->is_pciexpress)) { 626 !ah->is_pciexpress)) {
627 ah->config.serialize_regmode = 627 ah->config.serialize_regmode =
628 SER_REG_MODE_ON; 628 SER_REG_MODE_ON;
@@ -784,13 +784,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
784 784
785u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) 785u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
786{ 786{
787 struct ath_common *common = ath9k_hw_common(ah);
788 int i = 0;
789
787 REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); 790 REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
788 udelay(100); 791 udelay(100);
789 REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); 792 REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
790 793
791 while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) 794 while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
795
792 udelay(100); 796 udelay(100);
793 797
798 if (WARN_ON_ONCE(i >= 100)) {
 799 ath_err(common, "PLL4 measurement not done\n");
800 break;
801 }
802
803 i++;
804 }
805
794 return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3; 806 return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
795} 807}
796EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc); 808EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
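The ath9k hw.c hunk turns an unbounded "wait for PLL4_MEAS_DONE" loop into one that gives up after 100 iterations and warns once, so a wedged PLL measurement can no longer hang the caller forever. The same bounded-poll shape in a runnable sketch, with poll_done() standing in for the register read:

    #include <stdio.h>

    /* Stand-in for reading the PLL4 status register; make it always
     * return 0 to exercise the timeout path. */
    static int poll_done(void)
    {
        static int calls;
        return ++calls >= 5;       /* the "hardware" finishes on the 5th poll */
    }

    int main(void)
    {
        int i = 0;

        while (!poll_done()) {
            /* udelay(100) in the kernel version */
            if (i >= 100) {
                fprintf(stderr, "warning: measurement not done, giving up\n");
                break;
            }
            i++;
        }
        printf("polled %d times\n", i);
        return 0;
    }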
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 4de4473776ac..dac1a2709e3c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -971,6 +971,15 @@ void ath_hw_pll_work(struct work_struct *work)
971 hw_pll_work.work); 971 hw_pll_work.work);
972 u32 pll_sqsum; 972 u32 pll_sqsum;
973 973
974 /*
975 * ensure that the PLL WAR is executed only
976 * after the STA is associated (or) if the
977 * beaconing had started in interfaces that
978 * uses beacons.
979 */
980 if (!(sc->sc_flags & SC_OP_BEACONS))
981 return;
982
974 if (AR_SREV_9485(sc->sc_ah)) { 983 if (AR_SREV_9485(sc->sc_ah)) {
975 984
976 ath9k_ps_wakeup(sc); 985 ath9k_ps_wakeup(sc);
@@ -1443,15 +1452,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1443 } 1452 }
1444 } 1453 }
1445 1454
1446 if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
1447 ((vif->type == NL80211_IFTYPE_ADHOC) &&
1448 sc->nvifs > 0)) {
1449 ath_err(common, "Cannot create ADHOC interface when other"
1450 " interfaces already exist.\n");
1451 ret = -EINVAL;
1452 goto out;
1453 }
1454
1455 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 1455 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1456 1456
1457 sc->nvifs++; 1457 sc->nvifs++;
@@ -1476,15 +1476,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1476 mutex_lock(&sc->mutex); 1476 mutex_lock(&sc->mutex);
1477 ath9k_ps_wakeup(sc); 1477 ath9k_ps_wakeup(sc);
1478 1478
1479 /* See if new interface type is valid. */
1480 if ((new_type == NL80211_IFTYPE_ADHOC) &&
1481 (sc->nvifs > 1)) {
1482 ath_err(common, "When using ADHOC, it must be the only"
1483 " interface.\n");
1484 ret = -EINVAL;
1485 goto out;
1486 }
1487
1488 if (ath9k_uses_beacons(new_type) && 1479 if (ath9k_uses_beacons(new_type) &&
1489 !ath9k_uses_beacons(vif->type)) { 1480 !ath9k_uses_beacons(vif->type)) {
1490 if (sc->nbcnvifs >= ATH_BCBUF) { 1481 if (sc->nbcnvifs >= ATH_BCBUF) {
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index e1fcc68124dc..0735aeb3b26c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
695 __skb_unlink(skb, &rx_edma->rx_fifo); 695 __skb_unlink(skb, &rx_edma->rx_fifo);
696 list_add_tail(&bf->list, &sc->rx.rxbuf); 696 list_add_tail(&bf->list, &sc->rx.rxbuf);
697 ath_rx_edma_buf_link(sc, qtype); 697 ath_rx_edma_buf_link(sc, qtype);
698 } else {
699 bf = NULL;
700 } 698 }
699
700 bf = NULL;
701 } 701 }
702 702
703 *dest = bf; 703 *dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
822 * descriptor does contain a valid key index. This has been observed 822 * descriptor does contain a valid key index. This has been observed
823 * mostly with CCMP encryption. 823 * mostly with CCMP encryption.
824 */ 824 */
825 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID) 825 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
826 !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
826 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; 827 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
827 828
828 if (!rx_stats->rs_datalen) { 829 if (!rx_stats->rs_datalen) {
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d59dd01d6cde..4d571394c7a8 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -938,6 +938,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
938 struct ieee80211_tx_rate *rates; 938 struct ieee80211_tx_rate *rates;
939 const struct ieee80211_rate *rate; 939 const struct ieee80211_rate *rate;
940 struct ieee80211_hdr *hdr; 940 struct ieee80211_hdr *hdr;
941 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
941 int i; 942 int i;
942 u8 rix = 0; 943 u8 rix = 0;
943 944
@@ -948,18 +949,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
948 949
949 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 950 /* set dur_update_en for l-sig computation except for PS-Poll frames */
950 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); 951 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
951 952 info->rtscts_rate = fi->rtscts_rate;
952 /*
953 * We check if Short Preamble is needed for the CTS rate by
954 * checking the BSS's global flag.
955 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
956 */
957 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
958 info->rtscts_rate = rate->hw_value;
959
960 if (tx_info->control.vif &&
961 tx_info->control.vif->bss_conf.use_short_preamble)
962 info->rtscts_rate |= rate->hw_value_short;
963 953
964 for (i = 0; i < 4; i++) { 954 for (i = 0; i < 4; i++) {
965 bool is_40, is_sgi, is_sp; 955 bool is_40, is_sgi, is_sp;
@@ -1001,13 +991,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1001 } 991 }
1002 992
1003 /* legacy rates */ 993 /* legacy rates */
994 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1004 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 995 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1005 !(rate->flags & IEEE80211_RATE_ERP_G)) 996 !(rate->flags & IEEE80211_RATE_ERP_G))
1006 phy = WLAN_RC_PHY_CCK; 997 phy = WLAN_RC_PHY_CCK;
1007 else 998 else
1008 phy = WLAN_RC_PHY_OFDM; 999 phy = WLAN_RC_PHY_OFDM;
1009 1000
1010 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1011 info->rates[i].Rate = rate->hw_value; 1001 info->rates[i].Rate = rate->hw_value;
1012 if (rate->hw_value_short) { 1002 if (rate->hw_value_short) {
1013 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 1003 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
@@ -1776,10 +1766,22 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1776 struct ieee80211_sta *sta = tx_info->control.sta; 1766 struct ieee80211_sta *sta = tx_info->control.sta;
1777 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1767 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1778 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1768 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1769 const struct ieee80211_rate *rate;
1779 struct ath_frame_info *fi = get_frame_info(skb); 1770 struct ath_frame_info *fi = get_frame_info(skb);
1780 struct ath_node *an = NULL; 1771 struct ath_node *an = NULL;
1781 enum ath9k_key_type keytype; 1772 enum ath9k_key_type keytype;
1773 bool short_preamble = false;
1774
1775 /*
1776 * We check if Short Preamble is needed for the CTS rate by
1777 * checking the BSS's global flag.
1778 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1779 */
1780 if (tx_info->control.vif &&
1781 tx_info->control.vif->bss_conf.use_short_preamble)
1782 short_preamble = true;
1782 1783
1784 rate = ieee80211_get_rts_cts_rate(hw, tx_info);
1783 keytype = ath9k_cmn_get_hw_crypto_keytype(skb); 1785 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1784 1786
1785 if (sta) 1787 if (sta)
@@ -1794,6 +1796,9 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1794 fi->keyix = ATH9K_TXKEYIX_INVALID; 1796 fi->keyix = ATH9K_TXKEYIX_INVALID;
1795 fi->keytype = keytype; 1797 fi->keytype = keytype;
1796 fi->framelen = framelen; 1798 fi->framelen = framelen;
1799 fi->rtscts_rate = rate->hw_value;
1800 if (short_preamble)
1801 fi->rtscts_rate |= rate->hw_value_short;
1797} 1802}
1798 1803
1799u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) 1804u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 0e81904956cf..5c54aa43ca2d 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
556 return -EIO; 556 return -EIO;
557 557
558 set_bit(idx, common->keymap); 558 set_bit(idx, common->keymap);
559 if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
560 set_bit(idx, common->ccmp_keymap);
561
559 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { 562 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
560 set_bit(idx + 64, common->keymap); 563 set_bit(idx + 64, common->keymap);
561 set_bit(idx, common->tkip_keymap); 564 set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
582 return; 585 return;
583 586
584 clear_bit(key->hw_key_idx, common->keymap); 587 clear_bit(key->hw_key_idx, common->keymap);
588 clear_bit(key->hw_key_idx, common->ccmp_keymap);
585 if (key->cipher != WLAN_CIPHER_SUITE_TKIP) 589 if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
586 return; 590 return;
587 591
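The ath9k recv.c/key.c hunks add a ccmp_keymap bitmap alongside the existing keymap: slots are flagged at ath_key_config() time, and the receive path now discards the hardware KEYMISS status unless the reported key index actually holds a CCMP key. A small userspace sketch of that bookkeeping, using a plain unsigned long instead of DECLARE_BITMAP and made-up slot numbers:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    static unsigned long keymap;       /* any installed key */
    static unsigned long ccmp_keymap;  /* only CCMP keys */

    static void install_key(int idx, int is_ccmp)
    {
        keymap |= BIT(idx);
        if (is_ccmp)
            ccmp_keymap |= BIT(idx);
    }

    /* KEYMISS is trusted only for slots that hold a CCMP key; anything
     * else is treated as spurious and dropped, mirroring the check
     * added to ath9k_rx_accept(). */
    static int ignore_keymiss(int rx_keyix)
    {
        return rx_keyix < 0 || !(ccmp_keymap & BIT(rx_keyix));
    }

    int main(void)
    {
        install_key(2, 1);   /* CCMP key in slot 2 */
        install_key(5, 0);   /* e.g. a WEP/TKIP key in slot 5 */

        printf("keymiss on slot 2 ignored: %d\n", ignore_keymiss(2));
        printf("keymiss on slot 5 ignored: %d\n", ignore_keymiss(5));
        return 0;
    }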
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 67c13af6f206..c06b6cb5c91e 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -877,6 +877,10 @@ struct b43_wl {
877 * from the mac80211 subsystem. */ 877 * from the mac80211 subsystem. */
878 u16 mac80211_initially_registered_queues; 878 u16 mac80211_initially_registered_queues;
879 879
880 /* Set this if we call ieee80211_register_hw() and check if we call
881 * ieee80211_unregister_hw(). */
882 bool hw_registred;
883
880 /* We can only have one operating interface (802.11 core) 884 /* We can only have one operating interface (802.11 core)
881 * at a time. General information about this interface follows. 885 * at a time. General information about this interface follows.
882 */ 886 */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5a39b226b2e3..1b988f26bdf1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2437,6 +2437,7 @@ start_ieee80211:
2437 err = ieee80211_register_hw(wl->hw); 2437 err = ieee80211_register_hw(wl->hw);
2438 if (err) 2438 if (err)
2439 goto err_one_core_detach; 2439 goto err_one_core_detach;
2440 wl->hw_registred = true;
2440 b43_leds_register(wl->current_dev); 2441 b43_leds_register(wl->current_dev);
2441 goto out; 2442 goto out;
2442 2443
@@ -3766,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
3766 if (prev_status >= B43_STAT_STARTED) { 3767 if (prev_status >= B43_STAT_STARTED) {
3767 err = b43_wireless_core_start(up_dev); 3768 err = b43_wireless_core_start(up_dev);
3768 if (err) { 3769 if (err) {
3769 b43err(wl, "Fatal: Coult not start device for " 3770 b43err(wl, "Fatal: Could not start device for "
3770 "selected %s-GHz band\n", 3771 "selected %s-GHz band\n",
3771 band_to_string(chan->band)); 3772 band_to_string(chan->band));
3772 b43_wireless_core_exit(up_dev); 3773 b43_wireless_core_exit(up_dev);
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5299 5300
5300 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; 5301 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
5301 wl->mac80211_initially_registered_queues = hw->queues; 5302 wl->mac80211_initially_registered_queues = hw->queues;
5303 wl->hw_registred = false;
5302 hw->max_rates = 2; 5304 hw->max_rates = 2;
5303 SET_IEEE80211_DEV(hw, dev->dev); 5305 SET_IEEE80211_DEV(hw, dev->dev);
5304 if (is_valid_ether_addr(sprom->et1mac)) 5306 if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
5370 * as the ieee80211 unreg will destroy the workqueue. */ 5372 * as the ieee80211 unreg will destroy the workqueue. */
5371 cancel_work_sync(&wldev->restart_work); 5373 cancel_work_sync(&wldev->restart_work);
5372 5374
5373 /* Restore the queues count before unregistering, because firmware detect 5375 B43_WARN_ON(!wl);
5374 * might have modified it. Restoring is important, so the networking 5376 if (wl->current_dev == wldev && wl->hw_registred) {
5375 * stack can properly free resources. */ 5377 /* Restore the queues count before unregistering, because firmware detect
5376 wl->hw->queues = wl->mac80211_initially_registered_queues; 5378 * might have modified it. Restoring is important, so the networking
5377 b43_leds_stop(wldev); 5379 * stack can properly free resources. */
5378 ieee80211_unregister_hw(wl->hw); 5380 wl->hw->queues = wl->mac80211_initially_registered_queues;
5381 b43_leds_stop(wldev);
5382 ieee80211_unregister_hw(wl->hw);
5383 }
5379 5384
5380 b43_one_core_detach(wldev->dev); 5385 b43_one_core_detach(wldev->dev);
5381 5386
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5446 cancel_work_sync(&wldev->restart_work); 5451 cancel_work_sync(&wldev->restart_work);
5447 5452
5448 B43_WARN_ON(!wl); 5453 B43_WARN_ON(!wl);
5449 if (wl->current_dev == wldev) { 5454 if (wl->current_dev == wldev && wl->hw_registred) {
5450 /* Restore the queues count before unregistering, because firmware detect 5455 /* Restore the queues count before unregistering, because firmware detect
5451 * might have modified it. Restoring is important, so the networking 5456 * might have modified it. Restoring is important, so the networking
5452 * stack can properly free resources. */ 5457 * stack can properly free resources. */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index f1f8bd09bd87..c8baf020c20f 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1072 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1072 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1073 /* create a bounce buffer in zone_dma on mapping failure. */ 1073 /* create a bounce buffer in zone_dma on mapping failure. */
1074 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1074 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1075 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); 1075 bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
1076 if (!bounce_skb) { 1076 if (!bounce_skb) {
1077 ring->current_slot = old_top_slot; 1077 ring->current_slot = old_top_slot;
1078 ring->used_slots = old_used_slots; 1078 ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index cd9c9bc186d9..eae691e2f7dd 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
2633 if (prev_status >= B43legacy_STAT_STARTED) { 2633 if (prev_status >= B43legacy_STAT_STARTED) {
2634 err = b43legacy_wireless_core_start(up_dev); 2634 err = b43legacy_wireless_core_start(up_dev);
2635 if (err) { 2635 if (err) {
2636 b43legacyerr(wl, "Fatal: Coult not start device for " 2636 b43legacyerr(wl, "Fatal: Could not start device for "
2637 "newly selected %s-PHY mode\n", 2637 "newly selected %s-PHY mode\n",
2638 phymode_to_string(new_mode)); 2638 phymode_to_string(new_mode));
2639 b43legacy_wireless_core_exit(up_dev); 2639 b43legacy_wireless_core_exit(up_dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e2480d196276..8e7e6928c936 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; 89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); 90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
91 91
92 /* redirect, configure ane enable io for interrupt signal */ 92 /* redirect, configure and enable io for interrupt signal */
93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; 93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
94 if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) 94 if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
95 data |= SDIO_SEPINT_ACT_HI; 95 data |= SDIO_SEPINT_ACT_HI;
96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); 96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
97 97
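The brcmfmac hunk fixes a classic operator slip: "if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)" is true for any flag value because the OR result is always non-zero; the intended test is a bitwise AND. A short demonstration (the flag values are illustrative placeholders, not the real IRQF_* constants):

    #include <stdio.h>

    #define TRIGGER_HIGH 0x4
    #define TRIGGER_LOW  0x8

    int main(void)
    {
        unsigned long irq_flags = TRIGGER_LOW;  /* high trigger NOT requested */

        printf("buggy (|): %d\n", (irq_flags | TRIGGER_HIGH) != 0);  /* always 1 */
        printf("fixed (&): %d\n", (irq_flags & TRIGGER_HIGH) != 0);  /* 0 here */
        return 0;
    }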
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9cfae0c08707..95aa8e1683ec 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1903 netif_stop_queue(priv->net_dev); 1903 netif_stop_queue(priv->net_dev);
1904} 1904}
1905 1905
1906/* Called by register_netdev() */
1907static int ipw2100_net_init(struct net_device *dev)
1908{
1909 struct ipw2100_priv *priv = libipw_priv(dev);
1910
1911 return ipw2100_up(priv, 1);
1912}
1913
1914static int ipw2100_wdev_init(struct net_device *dev) 1906static int ipw2100_wdev_init(struct net_device *dev)
1915{ 1907{
1916 struct ipw2100_priv *priv = libipw_priv(dev); 1908 struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6087 .ndo_stop = ipw2100_close, 6079 .ndo_stop = ipw2100_close,
6088 .ndo_start_xmit = libipw_xmit, 6080 .ndo_start_xmit = libipw_xmit,
6089 .ndo_change_mtu = libipw_change_mtu, 6081 .ndo_change_mtu = libipw_change_mtu,
6090 .ndo_init = ipw2100_net_init,
6091 .ndo_tx_timeout = ipw2100_tx_timeout, 6082 .ndo_tx_timeout = ipw2100_tx_timeout,
6092 .ndo_set_mac_address = ipw2100_set_address, 6083 .ndo_set_mac_address = ipw2100_set_address,
6093 .ndo_validate_addr = eth_validate_addr, 6084 .ndo_validate_addr = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6329 printk(KERN_INFO DRV_NAME 6320 printk(KERN_INFO DRV_NAME
6330 ": Detected Intel PRO/Wireless 2100 Network Connection\n"); 6321 ": Detected Intel PRO/Wireless 2100 Network Connection\n");
6331 6322
6323 err = ipw2100_up(priv, 1);
6324 if (err)
6325 goto fail;
6326
6332 err = ipw2100_wdev_init(dev); 6327 err = ipw2100_wdev_init(dev);
6333 if (err) 6328 if (err)
6334 goto fail; 6329 goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6338 * network device we would call ipw2100_up. This introduced a race 6333 * network device we would call ipw2100_up. This introduced a race
6339 * condition with newer hotplug configurations (network was coming 6334 * condition with newer hotplug configurations (network was coming
6340 * up and making calls before the device was initialized). 6335 * up and making calls before the device was initialized).
6341 * 6336 */
6342 * If we called ipw2100_up before we registered the device, then the
6343 * device name wasn't registered. So, we instead use the net_dev->init
6344 * member to call a function that then just turns and calls ipw2100_up.
6345 * net_dev->init is called after name allocation but before the
6346 * notifier chain is called */
6347 err = register_netdev(dev); 6337 err = register_netdev(dev);
6348 if (err) { 6338 if (err) {
6349 printk(KERN_WARNING DRV_NAME 6339 printk(KERN_WARNING DRV_NAME
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 509301a5e7e2..ff5d689e13f3 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
3405 return 0; 3405 return 0;
3406 } 3406 }
3407 3407
3408 if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { 3408 if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
3409 IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx, 3409 IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
3410 key_flags); 3410 key_flags);
3411 spin_unlock_irqrestore(&il->sta_lock, flags); 3411 spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
3420 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); 3420 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
3421 il->stations[sta_id].sta.key.key_flags = 3421 il->stations[sta_id].sta.key.key_flags =
3422 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; 3422 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
3423 il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; 3423 il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
3424 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 3424 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3425 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 3425 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3426 3426
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index cbf2dc18341f..5d4807c2b56d 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
4767 return; 4767 return;
4768 4768
4769 /* monitor and check for other stuck queues */ 4769 /* monitor and check for other stuck queues */
4770 if (il_is_any_associated(il)) { 4770 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4771 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { 4771 /* skip as we already checked the command queue */
4772 /* skip as we already checked the command queue */ 4772 if (cnt == il->cmd_queue)
4773 if (cnt == il->cmd_queue) 4773 continue;
4774 continue; 4774 if (il_check_stuck_queue(il, cnt))
4775 if (il_check_stuck_queue(il, cnt)) 4775 return;
4776 return;
4777 }
4778 } 4776 }
4779 4777
4780 mod_timer(&il->watchdog, 4778 mod_timer(&il->watchdog,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 19f7ee84ae89..e5e8ada4aaf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -35,17 +35,20 @@
35#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
36#define IWL6050_UCODE_API_MAX 5 36#define IWL6050_UCODE_API_MAX 5
37#define IWL6000G2_UCODE_API_MAX 6 37#define IWL6000G2_UCODE_API_MAX 6
38#define IWL6035_UCODE_API_MAX 6
38 39
39/* Oldest version we won't warn about */ 40/* Oldest version we won't warn about */
40#define IWL6000_UCODE_API_OK 4 41#define IWL6000_UCODE_API_OK 4
41#define IWL6000G2_UCODE_API_OK 5 42#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5 43#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6 44#define IWL6000G2B_UCODE_API_OK 6
45#define IWL6035_UCODE_API_OK 6
44 46
45/* Lowest firmware API version supported */ 47/* Lowest firmware API version supported */
46#define IWL6000_UCODE_API_MIN 4 48#define IWL6000_UCODE_API_MIN 4
47#define IWL6050_UCODE_API_MIN 4 49#define IWL6050_UCODE_API_MIN 4
48#define IWL6000G2_UCODE_API_MIN 4 50#define IWL6000G2_UCODE_API_MIN 5
51#define IWL6035_UCODE_API_MIN 6
49 52
50/* EEPROM versions */ 53/* EEPROM versions */
51#define EEPROM_6000_TX_POWER_VERSION (4) 54#define EEPROM_6000_TX_POWER_VERSION (4)
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
227 IWL_DEVICE_6030, 230 IWL_DEVICE_6030,
228}; 231};
229 232
233#define IWL_DEVICE_6035 \
234 .fw_name_pre = IWL6030_FW_PRE, \
235 .ucode_api_max = IWL6035_UCODE_API_MAX, \
236 .ucode_api_ok = IWL6035_UCODE_API_OK, \
237 .ucode_api_min = IWL6035_UCODE_API_MIN, \
238 .device_family = IWL_DEVICE_FAMILY_6030, \
239 .max_inst_size = IWL60_RTC_INST_SIZE, \
240 .max_data_size = IWL60_RTC_DATA_SIZE, \
241 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
242 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
243 .base_params = &iwl6000_g2_base_params, \
244 .bt_params = &iwl6000_bt_params, \
245 .need_temp_offset_calib = true, \
246 .led_mode = IWL_LED_RF_STATE, \
247 .adv_pm = true
248
230const struct iwl_cfg iwl6035_2agn_cfg = { 249const struct iwl_cfg iwl6035_2agn_cfg = {
231 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 250 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
232 IWL_DEVICE_6030, 251 IWL_DEVICE_6035,
233 .ht_params = &iwl6000_ht_params, 252 .ht_params = &iwl6000_ht_params,
234}; 253};
235 254
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index aea07aab3c9e..eb6a8eaf42fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1267 key_flags |= STA_KEY_MULTICAST_MSK; 1267 key_flags |= STA_KEY_MULTICAST_MSK;
1268 1268
1269 sta_cmd.key.key_flags = key_flags; 1269 sta_cmd.key.key_flags = key_flags;
1270 sta_cmd.key.key_offset = WEP_INVALID_OFFSET; 1270 sta_cmd.key.key_offset = keyconf->hw_key_idx;
1271 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; 1271 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
1272 sta_cmd.mode = STA_CONTROL_MODIFY_MSK; 1272 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
1273 1273
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index e7c157e5ebeb..7f97dec8534d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2239,6 +2239,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
2239 return count; 2239 return count;
2240} 2240}
2241 2241
2242#ifdef CONFIG_IWLWIFI_DEBUG
2242static ssize_t iwl_dbgfs_log_event_read(struct file *file, 2243static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2243 char __user *user_buf, 2244 char __user *user_buf,
2244 size_t count, loff_t *ppos) 2245 size_t count, loff_t *ppos)
@@ -2276,6 +2277,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2276 2277
2277 return count; 2278 return count;
2278} 2279}
2280#endif
2279 2281
2280static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file, 2282static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
2281 char __user *user_buf, 2283 char __user *user_buf,
@@ -2345,7 +2347,9 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
2345DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); 2347DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
2346DEBUGFS_READ_FILE_OPS(reply_tx_error); 2348DEBUGFS_READ_FILE_OPS(reply_tx_error);
2347DEBUGFS_WRITE_FILE_OPS(echo_test); 2349DEBUGFS_WRITE_FILE_OPS(echo_test);
2350#ifdef CONFIG_IWLWIFI_DEBUG
2348DEBUGFS_READ_WRITE_FILE_OPS(log_event); 2351DEBUGFS_READ_WRITE_FILE_OPS(log_event);
2352#endif
2349DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled); 2353DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
2350 2354
2351/* 2355/*
@@ -2405,7 +2409,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2405 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 2409 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2406 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 2410 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2407 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); 2411 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
2412#ifdef CONFIG_IWLWIFI_DEBUG
2408 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); 2413 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
2414#endif
2409 2415
2410 if (iwl_advanced_bt_coexist(priv)) 2416 if (iwl_advanced_bt_coexist(priv))
2411 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 2417 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index d742900969ea..fac67a526a30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
861 861
862 /* We have our copies now, allow OS release its copies */ 862 /* We have our copies now, allow OS release its copies */
863 release_firmware(ucode_raw); 863 release_firmware(ucode_raw);
864 complete(&drv->request_firmware_complete);
865 864
866 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); 865 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
867 866
868 if (!drv->op_mode) 867 if (!drv->op_mode)
869 goto out_free_fw; 868 goto out_unbind;
870 869
870 /*
871 * Complete the firmware request last so that
872 * a driver unbind (stop) doesn't run while we
873 * are doing the start() above.
874 */
875 complete(&drv->request_firmware_complete);
871 return; 876 return;
872 877
873 try_again: 878 try_again:
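The iwl-drv.c hunk moves complete(&drv->request_firmware_complete) from before iwl_dvm_ops.start() to after it, so a concurrent unbind that waits on that completion cannot tear the driver down while start() is still running. A userspace sketch of the same ordering rule, with a POSIX semaphore standing in for the completion and illustrative names throughout:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <unistd.h>

    static sem_t fw_complete;        /* analogue of request_firmware_complete */
    static int op_mode_started;

    static void *firmware_callback(void *arg)
    {
        (void)arg;
        /* Start the op mode first ... */
        usleep(100 * 1000);          /* pretend start() takes a while */
        op_mode_started = 1;
        /* ... and only then let waiters proceed. */
        sem_post(&fw_complete);
        return NULL;
    }

    static void driver_unbind(void)
    {
        /* stop() must not run concurrently with start(), so wait here. */
        sem_wait(&fw_complete);
        printf("unbind sees op_mode_started = %d\n", op_mode_started);
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&fw_complete, 0, 0);
        pthread_create(&t, NULL, firmware_callback, NULL);
        driver_unbind();
        pthread_join(&t, NULL);
        sem_destroy(&fw_complete);
        return 0;
    }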
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 50c58911e718..b8e2b223ac36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
568 * iwl_get_max_txpower_avg - get the highest tx power from all chains. 568 * iwl_get_max_txpower_avg - get the highest tx power from all chains.
569 * find the highest tx power from all chains for the channel 569 * find the highest tx power from all chains for the channel
570 */ 570 */
571static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg, 571static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, 572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
573 int element, s8 *max_txpower_in_half_dbm) 573 int element, s8 *max_txpower_in_half_dbm)
574{ 574{
575 s8 max_txpower_avg = 0; /* (dBm) */ 575 s8 max_txpower_avg = 0; /* (dBm) */
576 576
577 /* Take the highest tx power from any valid chains */ 577 /* Take the highest tx power from any valid chains */
578 if ((cfg->valid_tx_ant & ANT_A) && 578 if ((priv->hw_params.valid_tx_ant & ANT_A) &&
579 (enhanced_txpower[element].chain_a_max > max_txpower_avg)) 579 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
580 max_txpower_avg = enhanced_txpower[element].chain_a_max; 580 max_txpower_avg = enhanced_txpower[element].chain_a_max;
581 if ((cfg->valid_tx_ant & ANT_B) && 581 if ((priv->hw_params.valid_tx_ant & ANT_B) &&
582 (enhanced_txpower[element].chain_b_max > max_txpower_avg)) 582 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
583 max_txpower_avg = enhanced_txpower[element].chain_b_max; 583 max_txpower_avg = enhanced_txpower[element].chain_b_max;
584 if ((cfg->valid_tx_ant & ANT_C) && 584 if ((priv->hw_params.valid_tx_ant & ANT_C) &&
585 (enhanced_txpower[element].chain_c_max > max_txpower_avg)) 585 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
586 max_txpower_avg = enhanced_txpower[element].chain_c_max; 586 max_txpower_avg = enhanced_txpower[element].chain_c_max;
587 if (((cfg->valid_tx_ant == ANT_AB) | 587 if (((priv->hw_params.valid_tx_ant == ANT_AB) |
588 (cfg->valid_tx_ant == ANT_BC) | 588 (priv->hw_params.valid_tx_ant == ANT_BC) |
589 (cfg->valid_tx_ant == ANT_AC)) && 589 (priv->hw_params.valid_tx_ant == ANT_AC)) &&
590 (enhanced_txpower[element].mimo2_max > max_txpower_avg)) 590 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
591 max_txpower_avg = enhanced_txpower[element].mimo2_max; 591 max_txpower_avg = enhanced_txpower[element].mimo2_max;
592 if ((cfg->valid_tx_ant == ANT_ABC) && 592 if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
593 (enhanced_txpower[element].mimo3_max > max_txpower_avg)) 593 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
594 max_txpower_avg = enhanced_txpower[element].mimo3_max; 594 max_txpower_avg = enhanced_txpower[element].mimo3_max;
595 595
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
691 ((txp->delta_20_in_40 & 0xf0) >> 4), 691 ((txp->delta_20_in_40 & 0xf0) >> 4),
692 (txp->delta_20_in_40 & 0x0f)); 692 (txp->delta_20_in_40 & 0x0f));
693 693
694 max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx, 694 max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
695 &max_txp_avg_halfdbm); 695 &max_txp_avg_halfdbm);
696 696
697 /* 697 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index ab2f4d7500a4..013680332f07 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
199 WIPHY_FLAG_DISABLE_BEACON_HINTS | 199 WIPHY_FLAG_DISABLE_BEACON_HINTS |
200 WIPHY_FLAG_IBSS_RSN; 200 WIPHY_FLAG_IBSS_RSN;
201 201
202#ifdef CONFIG_PM_SLEEP
202 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 203 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
203 priv->trans->ops->wowlan_suspend && 204 priv->trans->ops->wowlan_suspend &&
204 device_can_wakeup(priv->trans->dev)) { 205 device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
217 hw->wiphy->wowlan.pattern_max_len = 218 hw->wiphy->wowlan.pattern_max_len =
218 IWLAGN_WOWLAN_MAX_PATTERN_LEN; 219 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
219 } 220 }
221#endif
220 222
221 if (iwlwifi_mod_params.power_save) 223 if (iwlwifi_mod_params.power_save)
222 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 224 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
249 ret = ieee80211_register_hw(priv->hw); 251 ret = ieee80211_register_hw(priv->hw);
250 if (ret) { 252 if (ret) {
251 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 253 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
254 iwl_leds_exit(priv);
252 return ret; 255 return ret;
253 } 256 }
254 priv->mac80211_registered = 1; 257 priv->mac80211_registered = 1;
@@ -793,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
793 switch (op) { 796 switch (op) {
794 case ADD: 797 case ADD:
795 ret = iwlagn_mac_sta_add(hw, vif, sta); 798 ret = iwlagn_mac_sta_add(hw, vif, sta);
799 if (ret)
800 break;
801 /*
802 * Clear the in-progress flag, the AP station entry was added
803 * but we'll initialize LQ only when we've associated (which
804 * would also clear the in-progress flag). This is necessary
805 * in case we never initialize LQ because association fails.
806 */
807 spin_lock_bh(&priv->sta_lock);
808 priv->stations[iwl_sta_id(sta)].used &=
809 ~IWL_STA_UCODE_INPROGRESS;
810 spin_unlock_bh(&priv->sta_lock);
796 break; 811 break;
797 case REMOVE: 812 case REMOVE:
798 ret = iwlagn_mac_sta_remove(hw, vif, sta); 813 ret = iwlagn_mac_sta_remove(hw, vif, sta);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 3b1069290fa9..dfd54662e3e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -224,6 +224,7 @@
224#define SCD_TXFACT (SCD_BASE + 0x10) 224#define SCD_TXFACT (SCD_BASE + 0x10)
225#define SCD_ACTIVE (SCD_BASE + 0x14) 225#define SCD_ACTIVE (SCD_BASE + 0x14)
226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) 226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
227#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
227#define SCD_AGGR_SEL (SCD_BASE + 0x248) 228#define SCD_AGGR_SEL (SCD_BASE + 0x248)
228#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 229#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
229 230
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index ec6fb395b84d..79c6b91417f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1058,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
1058 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 1058 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
1059 trans_pcie->scd_bc_tbls.dma >> 10); 1059 trans_pcie->scd_bc_tbls.dma >> 10);
1060 1060
1061 /* The chain extension of the SCD doesn't work well. This feature is
1062 * enabled by default by the HW, so we need to disable it manually.
1063 */
1064 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1065
1061 /* Enable DMA channel */ 1066 /* Enable DMA channel */
1062 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1067 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1063 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 1068 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fb787df01666..a0b7cfd34685 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1555 hdr = (struct ieee80211_hdr *) skb->data; 1555 hdr = (struct ieee80211_hdr *) skb->data;
1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); 1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
1557 } 1557 }
1558 txi->flags |= IEEE80211_TX_STAT_ACK;
1558 } 1559 }
1559 ieee80211_tx_status_irqsafe(data2->hw, skb); 1560 ieee80211_tx_status_irqsafe(data2->hw, skb);
1560 return 0; 1561 return 0;
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void)
1721 "unregister family %i\n", ret); 1722 "unregister family %i\n", ret);
1722} 1723}
1723 1724
1725static const struct ieee80211_iface_limit hwsim_if_limits[] = {
1726 { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
1727 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
1728 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1729#ifdef CONFIG_MAC80211_MESH
1730 BIT(NL80211_IFTYPE_MESH_POINT) |
1731#endif
1732 BIT(NL80211_IFTYPE_AP) |
1733 BIT(NL80211_IFTYPE_P2P_GO) },
1734};
1735
1736static const struct ieee80211_iface_combination hwsim_if_comb = {
1737 .limits = hwsim_if_limits,
1738 .n_limits = ARRAY_SIZE(hwsim_if_limits),
1739 .max_interfaces = 2048,
1740 .num_different_channels = 1,
1741};
1742
1724static int __init init_mac80211_hwsim(void) 1743static int __init init_mac80211_hwsim(void)
1725{ 1744{
1726 int i, err = 0; 1745 int i, err = 0;
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void)
1782 hw->wiphy->n_addresses = 2; 1801 hw->wiphy->n_addresses = 2;
1783 hw->wiphy->addresses = data->addresses; 1802 hw->wiphy->addresses = data->addresses;
1784 1803
1804 hw->wiphy->iface_combinations = &hwsim_if_comb;
1805 hw->wiphy->n_iface_combinations = 1;
1806
1785 if (fake_hw_scan) { 1807 if (fake_hw_scan) {
1786 hw->wiphy->max_scan_ssids = 255; 1808 hw->wiphy->max_scan_ssids = 255;
1787 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; 1809 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
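The hwsim hunks above register an interface-combination table so cfg80211 can validate which interface types may coexist. A generic, self-contained sketch of the same pattern follows: a static limits table plus a descriptor sized with ARRAY_SIZE. The struct names and type bits are simplified stand-ins, not the mac80211 definitions.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct iface_limit { int max; unsigned int types; };

struct iface_combination {
        const struct iface_limit *limits;
        int n_limits;
        int max_interfaces;
        int num_different_channels;
};

static const struct iface_limit limits[] = {
        { .max = 1,    .types = 1u << 0 },   /* e.g. one ad-hoc interface */
        { .max = 2048, .types = 1u << 1 },   /* e.g. many station/AP interfaces */
};

static const struct iface_combination comb = {
        .limits = limits,
        .n_limits = ARRAY_SIZE(limits),
        .max_interfaces = 2048,
        .num_different_channels = 1,
};

int main(void)
{
        printf("n_limits=%d max_interfaces=%d\n", comb.n_limits, comb.max_interfaces);
        return 0;
}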
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9c44088054dd..900ee129e825 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
256 else 256 else
257 last_seq = priv->rx_seq[tid]; 257 last_seq = priv->rx_seq[tid];
258 258
259 if (last_seq >= new_node->start_win) 259 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
260 last_seq >= new_node->start_win)
260 new_node->start_win = last_seq + 1; 261 new_node->start_win = last_seq + 1;
261 262
262 new_node->win_size = win_size; 263 new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
596 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); 597 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
597 598
598 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 599 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
599 memset(priv->rx_seq, 0, sizeof(priv->rx_seq)); 600 mwifiex_reset_11n_rx_seq_num(priv);
600} 601}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index f1bffebabc60..6c9815a0f5d8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -37,6 +37,13 @@
37 37
38#define ADDBA_RSP_STATUS_ACCEPT 0 38#define ADDBA_RSP_STATUS_ACCEPT 0
39 39
40#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
41
42static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
43{
44 memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
45}
46
40int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *, 47int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
41 u16 seqNum, 48 u16 seqNum,
42 u16 tid, u8 *ta, 49 u16 tid, u8 *ta,
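The new mwifiex_reset_11n_rx_seq_num() helper fills the u16 rx_seq[] array with 0xff bytes, so every slot reads back as MWIFIEX_DEF_11N_RX_SEQ_NUM (0xffff) and the reorder code above only advances start_win once a real sequence number has been seen. A standalone sketch of that sentinel logic; the array size and sample values are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEF_11N_RX_SEQ_NUM 0xffff
#define MAX_TID 8

int main(void)
{
        uint16_t rx_seq[MAX_TID];
        uint16_t last_seq, start_win = 5;

        memset(rx_seq, 0xff, sizeof(rx_seq));   /* every byte 0xff -> each u16 is 0xffff */
        last_seq = rx_seq[0];

        /* Only bump start_win when a real sequence number was seen. */
        if (last_seq != DEF_11N_RX_SEQ_NUM && last_seq >= start_win)
                start_win = last_seq + 1;

        printf("start_win=%u\n", (unsigned)start_win);
        return 0;
}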
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 87671446e24b..5c7fd185373c 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -948,6 +948,20 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
948 bss_cfg->ssid.ssid_len = params->ssid_len; 948 bss_cfg->ssid.ssid_len = params->ssid_len;
949 } 949 }
950 950
951 switch (params->hidden_ssid) {
952 case NL80211_HIDDEN_SSID_NOT_IN_USE:
953 bss_cfg->bcast_ssid_ctl = 1;
954 break;
955 case NL80211_HIDDEN_SSID_ZERO_LEN:
956 bss_cfg->bcast_ssid_ctl = 0;
957 break;
958 case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
959 /* firmware doesn't support this type of hidden SSID */
960 default:
961 kfree(bss_cfg);
962 return -EINVAL;
963 }
964
951 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 965 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
952 kfree(bss_cfg); 966 kfree(bss_cfg);
953 wiphy_err(wiphy, "Failed to parse security parameters!\n"); 967 wiphy_err(wiphy, "Failed to parse security parameters!\n");
@@ -1471,7 +1485,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1471 struct wireless_dev *wdev; 1485 struct wireless_dev *wdev;
1472 1486
1473 if (!adapter) 1487 if (!adapter)
1474 return NULL; 1488 return ERR_PTR(-EFAULT);
1475 1489
1476 switch (type) { 1490 switch (type) {
1477 case NL80211_IFTYPE_UNSPECIFIED: 1491 case NL80211_IFTYPE_UNSPECIFIED:
@@ -1481,12 +1495,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1481 if (priv->bss_mode) { 1495 if (priv->bss_mode) {
1482 wiphy_err(wiphy, 1496 wiphy_err(wiphy,
1483 "cannot create multiple sta/adhoc ifaces\n"); 1497 "cannot create multiple sta/adhoc ifaces\n");
1484 return NULL; 1498 return ERR_PTR(-EINVAL);
1485 } 1499 }
1486 1500
1487 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 1501 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1488 if (!wdev) 1502 if (!wdev)
1489 return NULL; 1503 return ERR_PTR(-ENOMEM);
1490 1504
1491 wdev->wiphy = wiphy; 1505 wdev->wiphy = wiphy;
1492 priv->wdev = wdev; 1506 priv->wdev = wdev;
@@ -1509,12 +1523,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1509 1523
1510 if (priv->bss_mode) { 1524 if (priv->bss_mode) {
1511 wiphy_err(wiphy, "Can't create multiple AP interfaces"); 1525 wiphy_err(wiphy, "Can't create multiple AP interfaces");
1512 return NULL; 1526 return ERR_PTR(-EINVAL);
1513 } 1527 }
1514 1528
1515 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 1529 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1516 if (!wdev) 1530 if (!wdev)
1517 return NULL; 1531 return ERR_PTR(-ENOMEM);
1518 1532
1519 priv->wdev = wdev; 1533 priv->wdev = wdev;
1520 wdev->wiphy = wiphy; 1534 wdev->wiphy = wiphy;
@@ -1531,14 +1545,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1531 break; 1545 break;
1532 default: 1546 default:
1533 wiphy_err(wiphy, "type not supported\n"); 1547 wiphy_err(wiphy, "type not supported\n");
1534 return NULL; 1548 return ERR_PTR(-EINVAL);
1535 } 1549 }
1536 1550
1537 dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name, 1551 dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
1538 ether_setup, 1); 1552 ether_setup, 1);
1539 if (!dev) { 1553 if (!dev) {
1540 wiphy_err(wiphy, "no memory available for netdevice\n"); 1554 wiphy_err(wiphy, "no memory available for netdevice\n");
1541 goto error; 1555 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
1556 return ERR_PTR(-ENOMEM);
1542 } 1557 }
1543 1558
1544 mwifiex_init_priv_params(priv, dev); 1559 mwifiex_init_priv_params(priv, dev);
@@ -1569,7 +1584,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1569 /* Register network device */ 1584 /* Register network device */
1570 if (register_netdevice(dev)) { 1585 if (register_netdevice(dev)) {
1571 wiphy_err(wiphy, "cannot register virtual network device\n"); 1586 wiphy_err(wiphy, "cannot register virtual network device\n");
1572 goto error; 1587 free_netdev(dev);
1588 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
1589 return ERR_PTR(-EFAULT);
1573 } 1590 }
1574 1591
1575 sema_init(&priv->async_sem, 1); 1592 sema_init(&priv->async_sem, 1);
@@ -1581,12 +1598,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1581 mwifiex_dev_debugfs_init(priv); 1598 mwifiex_dev_debugfs_init(priv);
1582#endif 1599#endif
1583 return dev; 1600 return dev;
1584error:
1585 if (dev && (dev->reg_state == NETREG_UNREGISTERED))
1586 free_netdev(dev);
1587 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
1588
1589 return NULL;
1590} 1601}
1591EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); 1602EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
1592 1603
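mwifiex_add_virtual_intf() now fails with ERR_PTR() instead of NULL, so callers can recover the specific errno via IS_ERR()/PTR_ERR(). Below is a tiny userspace re-creation of that convention, with the helpers written out locally for illustration; they mirror, but are not, the kernel's err.h.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *make_intf(int fail)
{
        static int dummy;

        if (fail)
                return ERR_PTR(-EINVAL);   /* an error, but not NULL */
        return &dummy;
}

int main(void)
{
        void *p = make_intf(1);

        if (IS_ERR(p))
                printf("failed with errno %ld\n", -PTR_ERR(p));
        return 0;
}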
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 9f674bbebe65..561452a5c818 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
122#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) 122#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
123#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) 123#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
125#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
126#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 127#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
127#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) 128#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid {
1209 u8 ssid[0]; 1210 u8 ssid[0];
1210} __packed; 1211} __packed;
1211 1212
1213struct host_cmd_tlv_bcast_ssid {
1214 struct host_cmd_tlv tlv;
1215 u8 bcast_ctl;
1216} __packed;
1217
1212struct host_cmd_tlv_beacon_period { 1218struct host_cmd_tlv_beacon_period {
1213 struct host_cmd_tlv tlv; 1219 struct host_cmd_tlv tlv;
1214 __le16 period; 1220 __le16 period;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index ceb82cd749cc..383820a52beb 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
213 /* save assoc resp ie index after auto-indexing */ 213 /* save assoc resp ie index after auto-indexing */
214 *assoc_idx = *((u16 *)pos); 214 *assoc_idx = *((u16 *)pos);
215 215
216 kfree(ap_custom_ie);
216 return ret; 217 return ret;
217} 218}
218 219
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index e0377473282f..fc8a9bfa1248 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
978 dev_dbg(adapter->dev, "info: --- Rx: Event ---\n"); 978 dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
979 adapter->event_cause = *(u32 *) skb->data; 979 adapter->event_cause = *(u32 *) skb->data;
980 980
981 skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
982
983 if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE)) 981 if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
984 memcpy(adapter->event_body, skb->data, skb->len); 982 memcpy(adapter->event_body,
983 skb->data + MWIFIEX_EVENT_HEADER_LEN,
984 skb->len);
985 985
986 /* event cause has been saved to adapter->event_cause */ 986 /* event cause has been saved to adapter->event_cause */
987 adapter->event_received = true; 987 adapter->event_received = true;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 4ace5a3dcd23..11e731f3581c 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
406 break; 406 break;
407 407
408 case EVENT_UAP_STA_ASSOC: 408 case EVENT_UAP_STA_ASSOC:
409 skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
410 memset(&sinfo, 0, sizeof(sinfo)); 409 memset(&sinfo, 0, sizeof(sinfo));
411 event = (struct mwifiex_assoc_event *)adapter->event_skb->data; 410 event = (struct mwifiex_assoc_event *)
411 (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
412 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { 412 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
413 len = -1; 413 len = -1;
414 414
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
433 GFP_KERNEL); 433 GFP_KERNEL);
434 break; 434 break;
435 case EVENT_UAP_STA_DEAUTH: 435 case EVENT_UAP_STA_DEAUTH:
436 skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER); 436 cfg80211_del_sta(priv->netdev, adapter->event_body +
437 cfg80211_del_sta(priv->netdev, adapter->event_skb->data, 437 MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
438 GFP_KERNEL);
439 break; 438 break;
440 case EVENT_UAP_BSS_IDLE: 439 case EVENT_UAP_BSS_IDLE:
441 priv->media_connected = false; 440 priv->media_connected = false;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index e2faec4db108..cecb27283196 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -161,15 +161,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
161 goto done; 161 goto done;
162 162
163 for (i = 0; i < adapter->priv_num; i++) { 163 for (i = 0; i < adapter->priv_num; i++) {
164
165 tpriv = adapter->priv[i]; 164 tpriv = adapter->priv[i];
166 165
167 if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) && 166 if (tpriv->media_connected &&
168 (tpriv->media_connected)) { 167 netif_queue_stopped(tpriv->netdev))
169 if (netif_queue_stopped(tpriv->netdev)) 168 mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
170 mwifiex_wake_up_net_dev_queue(tpriv->netdev,
171 adapter);
172 }
173 } 169 }
174done: 170done:
175 dev_kfree_skb_any(skb); 171 dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 76dfbc42a732..89f9a2a45de3 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -27,6 +27,17 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
27 struct cfg80211_ap_settings *params) { 27 struct cfg80211_ap_settings *params) {
28 int i; 28 int i;
29 29
30 if (!params->privacy) {
31 bss_config->protocol = PROTOCOL_NO_SECURITY;
32 bss_config->key_mgmt = KEY_MGMT_NONE;
33 bss_config->wpa_cfg.length = 0;
34 priv->sec_info.wep_enabled = 0;
35 priv->sec_info.wpa_enabled = 0;
36 priv->sec_info.wpa2_enabled = 0;
37
38 return 0;
39 }
40
30 switch (params->auth_type) { 41 switch (params->auth_type) {
31 case NL80211_AUTHTYPE_OPEN_SYSTEM: 42 case NL80211_AUTHTYPE_OPEN_SYSTEM:
32 bss_config->auth_mode = WLAN_AUTH_OPEN; 43 bss_config->auth_mode = WLAN_AUTH_OPEN;
@@ -132,6 +143,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
132 struct host_cmd_tlv_dtim_period *dtim_period; 143 struct host_cmd_tlv_dtim_period *dtim_period;
133 struct host_cmd_tlv_beacon_period *beacon_period; 144 struct host_cmd_tlv_beacon_period *beacon_period;
134 struct host_cmd_tlv_ssid *ssid; 145 struct host_cmd_tlv_ssid *ssid;
146 struct host_cmd_tlv_bcast_ssid *bcast_ssid;
135 struct host_cmd_tlv_channel_band *chan_band; 147 struct host_cmd_tlv_channel_band *chan_band;
136 struct host_cmd_tlv_frag_threshold *frag_threshold; 148 struct host_cmd_tlv_frag_threshold *frag_threshold;
137 struct host_cmd_tlv_rts_threshold *rts_threshold; 149 struct host_cmd_tlv_rts_threshold *rts_threshold;
@@ -153,6 +165,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
153 cmd_size += sizeof(struct host_cmd_tlv) + 165 cmd_size += sizeof(struct host_cmd_tlv) +
154 bss_cfg->ssid.ssid_len; 166 bss_cfg->ssid.ssid_len;
155 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; 167 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
168
169 bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
170 bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
171 bcast_ssid->tlv.len =
172 cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
173 bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
174 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
175 tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
156 } 176 }
157 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { 177 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
158 chan_band = (struct host_cmd_tlv_channel_band *)tlv; 178 chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -416,6 +436,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
416 if (!bss_cfg) 436 if (!bss_cfg)
417 return -ENOMEM; 437 return -ENOMEM;
418 438
439 mwifiex_set_sys_config_invalid_data(bss_cfg);
419 bss_cfg->band_cfg = BAND_CONFIG_MANUAL; 440 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
420 bss_cfg->channel = channel; 441 bss_cfg->channel = channel;
421 442
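mwifiex_uap_bss_param_prepare() above appends a one-byte broadcast-SSID TLV and advances both the command size and the write cursor. A generic sketch of that type/length/value append pattern follows; the type number is hypothetical and the little-endian conversions of the real command are omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
        uint16_t type;
        uint16_t len;
} __attribute__((packed));

/* Append one TLV at buf and return how many bytes were written. */
static size_t tlv_append(uint8_t *buf, uint16_t type,
                         const void *val, uint16_t len)
{
        struct tlv_hdr hdr = { .type = type, .len = len };

        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), val, len);
        return sizeof(hdr) + len;
}

int main(void)
{
        uint8_t cmd[64];
        size_t cmd_size = 0;
        uint8_t bcast_ctl = 1;          /* 1 = broadcast the SSID */

        cmd_size += tlv_append(cmd + cmd_size, 48 /* hypothetical type */,
                               &bcast_ctl, sizeof(bcast_ctl));
        printf("cmd_size=%zu\n", cmd_size);
        return 0;
}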
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 49ebf20c56eb..22a5916564b8 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
49 struct device *dev = adapter->dev; 49 struct device *dev = adapter->dev;
50 u32 recv_type; 50 u32 recv_type;
51 __le32 tmp; 51 __le32 tmp;
52 int ret;
52 53
53 if (adapter->hs_activated) 54 if (adapter->hs_activated)
54 mwifiex_process_hs_config(adapter); 55 mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
69 case MWIFIEX_USB_TYPE_CMD: 70 case MWIFIEX_USB_TYPE_CMD:
70 if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) { 71 if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
71 dev_err(dev, "CMD: skb->len too large\n"); 72 dev_err(dev, "CMD: skb->len too large\n");
72 return -1; 73 ret = -1;
74 goto exit_restore_skb;
73 } else if (!adapter->curr_cmd) { 75 } else if (!adapter->curr_cmd) {
74 dev_dbg(dev, "CMD: no curr_cmd\n"); 76 dev_dbg(dev, "CMD: no curr_cmd\n");
75 if (adapter->ps_state == PS_STATE_SLEEP_CFM) { 77 if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
76 mwifiex_process_sleep_confirm_resp( 78 mwifiex_process_sleep_confirm_resp(
77 adapter, skb->data, 79 adapter, skb->data,
78 skb->len); 80 skb->len);
79 return 0; 81 ret = 0;
82 goto exit_restore_skb;
80 } 83 }
81 return -1; 84 ret = -1;
85 goto exit_restore_skb;
82 } 86 }
83 87
84 adapter->curr_cmd->resp_skb = skb; 88 adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
87 case MWIFIEX_USB_TYPE_EVENT: 91 case MWIFIEX_USB_TYPE_EVENT:
88 if (skb->len < sizeof(u32)) { 92 if (skb->len < sizeof(u32)) {
89 dev_err(dev, "EVENT: skb->len too small\n"); 93 dev_err(dev, "EVENT: skb->len too small\n");
90 return -1; 94 ret = -1;
95 goto exit_restore_skb;
91 } 96 }
92 skb_copy_from_linear_data(skb, &tmp, sizeof(u32)); 97 skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
93 adapter->event_cause = le32_to_cpu(tmp); 98 adapter->event_cause = le32_to_cpu(tmp);
94 skb_pull(skb, sizeof(u32));
95 dev_dbg(dev, "event_cause %#x\n", adapter->event_cause); 99 dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
96 100
97 if (skb->len > MAX_EVENT_SIZE) { 101 if (skb->len > MAX_EVENT_SIZE) {
98 dev_err(dev, "EVENT: event body too large\n"); 102 dev_err(dev, "EVENT: event body too large\n");
99 return -1; 103 ret = -1;
104 goto exit_restore_skb;
100 } 105 }
101 106
102 skb_copy_from_linear_data(skb, adapter->event_body, 107 memcpy(adapter->event_body, skb->data +
103 skb->len); 108 MWIFIEX_EVENT_HEADER_LEN, skb->len);
109
104 adapter->event_received = true; 110 adapter->event_received = true;
105 adapter->event_skb = skb; 111 adapter->event_skb = skb;
106 break; 112 break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
124 } 130 }
125 131
126 return -EINPROGRESS; 132 return -EINPROGRESS;
133
134exit_restore_skb:
135 /* The buffer will be reused for further cmds/events */
136 skb_push(skb, INTF_HEADER_LEN);
137
138 return ret;
127} 139}
128 140
129static void mwifiex_usb_rx_complete(struct urb *urb) 141static void mwifiex_usb_rx_complete(struct urb *urb)
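The new exit_restore_skb path calls skb_push() so the interface header stripped on entry is put back and the same receive buffer can be reused for the next command or event. A standalone sketch of that restore-on-error idea; struct buf and INTF_HDR are illustrative stand-ins for the driver's sk_buff handling.

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct buf {
        unsigned char storage[64];
        unsigned char *data;    /* current payload start */
        size_t len;
};

static void buf_pull(struct buf *b, size_t n) { b->data += n; b->len -= n; }
static void buf_push(struct buf *b, size_t n) { b->data -= n; b->len += n; }

int main(void)
{
        const size_t INTF_HDR = 4;      /* stand-in for INTF_HEADER_LEN */
        struct buf b;
        int bad = 1;                    /* pretend validation failed */

        memset(b.storage, 0, sizeof(b.storage));
        b.data = b.storage;
        b.len = 16;

        buf_pull(&b, INTF_HDR);         /* strip the bus header on entry */

        if (bad)
                buf_push(&b, INTF_HDR); /* restore before handing the buffer back */

        assert(b.data == b.storage && b.len == 16);
        printf("buffer restored for reuse\n");
        return 0;
}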
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index f3fc65515857..3fa4d4176993 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
404 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE; 404 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
405 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE; 405 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
406 406
407 mwifiex_reset_11n_rx_seq_num(priv);
408
407 atomic_set(&priv->wmm.tx_pkts_queued, 0); 409 atomic_set(&priv->wmm.tx_pkts_queued, 0);
408 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); 410 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
409 } 411 }
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1221 1223
1222 if (!ptr->is_11n_enabled || 1224 if (!ptr->is_11n_enabled ||
1223 mwifiex_is_ba_stream_setup(priv, ptr, tid) || 1225 mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
1226 priv->wps.session_enable ||
1224 ((priv->sec_info.wpa_enabled || 1227 ((priv->sec_info.wpa_enabled ||
1225 priv->sec_info.wpa2_enabled) && 1228 priv->sec_info.wpa2_enabled) &&
1226 !priv->wpa_is_gtk_set)) { 1229 !priv->wpa_is_gtk_set)) {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2e9e6af21362..dfcd02ab6cae 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2110,7 +2110,7 @@ resize_buf:
2110 while (check_bssid_list_item(bssid, bssid_len, buf, len)) { 2110 while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
2111 if (rndis_bss_info_update(usbdev, bssid) && match_bssid && 2111 if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
2112 matched) { 2112 matched) {
2113 if (!ether_addr_equal(bssid->mac, match_bssid)) 2113 if (ether_addr_equal(bssid->mac, match_bssid))
2114 *matched = true; 2114 *matched = true;
2115 } 2115 }
2116 2116
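The rndis_wlan change above drops a '!' because ether_addr_equal() returns true when the two MAC addresses match, unlike the older compare_ether_addr(), which returned 0 on a match. A short sketch of the corrected test; the local helper merely stands in for the kernel function.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in: true means the two 6-byte addresses are identical. */
static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
        return memcmp(a, b, 6) == 0;
}

int main(void)
{
        unsigned char bssid[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char match[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        bool matched = false;

        if (ether_addr_equal(bssid, match))     /* no negation: set on a match */
                matched = true;

        printf("matched=%d\n", matched);
        return 0;
}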
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ca36cccaba31..8f754025b06e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -396,8 +396,7 @@ struct rt2x00_intf {
396 * for hardware which doesn't support hardware 396 * for hardware which doesn't support hardware
397 * sequence counting. 397 * sequence counting.
398 */ 398 */
399 spinlock_t seqlock; 399 atomic_t seqno;
400 u16 seqno;
401}; 400};
402 401
403static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) 402static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index b49773ef72f2..dd24b2663b5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
277 else 277 else
278 rt2x00dev->intf_sta_count++; 278 rt2x00dev->intf_sta_count++;
279 279
280 spin_lock_init(&intf->seqlock);
281 mutex_init(&intf->beacon_skb_mutex); 280 mutex_init(&intf->beacon_skb_mutex);
282 intf->beacon = entry; 281 intf->beacon = entry;
283 282
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4c662eccf53c..2fd830103415 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
207 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 207 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
210 u16 seqno;
210 211
211 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) 212 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
212 return; 213 return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
238 * sequence counting per-frame, since those will override the 239 * sequence counting per-frame, since those will override the
239 * sequence counter given by mac80211. 240 * sequence counter given by mac80211.
240 */ 241 */
241 spin_lock(&intf->seqlock);
242
243 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 242 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
244 intf->seqno += 0x10; 243 seqno = atomic_add_return(0x10, &intf->seqno);
245 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 244 else
246 hdr->seq_ctrl |= cpu_to_le16(intf->seqno); 245 seqno = atomic_read(&intf->seqno);
247
248 spin_unlock(&intf->seqlock);
249 246
247 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
248 hdr->seq_ctrl |= cpu_to_le16(seqno);
250} 249}
251 250
252static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, 251static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
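Replacing the seqlock with an atomic_t above keeps the per-interface sequence counter consistent without taking a spinlock in the TX hot path. Below is a rough C11-atomics analogue of that counter; the mask and the 0x10 step mirror the 802.11 sequence field layout, and this is an illustration rather than rt2x00 code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SCTL_SEQ_MASK 0xfff0    /* sequence-number bits of seq_ctrl */

static atomic_uint seqno;

static uint16_t next_seq_ctrl(int first_fragment)
{
        unsigned int s;

        if (first_fragment)
                /* fetch_add returns the old value; +0x10 mimics atomic_add_return() */
                s = atomic_fetch_add(&seqno, 0x10) + 0x10;
        else
                s = atomic_load(&seqno);

        return (uint16_t)(s & SCTL_SEQ_MASK);
}

int main(void)
{
        printf("%#x %#x %#x\n",
               (unsigned)next_seq_ctrl(1),
               (unsigned)next_seq_ctrl(0),
               (unsigned)next_seq_ctrl(1));
        return 0;
}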
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index d357d1ed92f6..74ecc33fdd90 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
436 case QID_RX: 436 case QID_RX:
437 if (!rt2x00queue_full(queue)) 437 if (!rt2x00queue_full(queue))
438 rt2x00queue_for_each_entry(queue, 438 rt2x00queue_for_each_entry(queue,
439 Q_INDEX_DONE,
440 Q_INDEX, 439 Q_INDEX,
440 Q_INDEX_DONE,
441 NULL, 441 NULL,
442 rt2x00usb_kick_rx_entry); 442 rt2x00usb_kick_rx_entry);
443 break; 443 break;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 2e0de2f5f0f9..c2d5b495c179 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
117 radio_on = true; 117 radio_on = true;
118 } else if (radio_on) { 118 } else if (radio_on) {
119 radio_on = false; 119 radio_on = false;
120 cancel_delayed_work_sync(&priv->led_on); 120 cancel_delayed_work(&priv->led_on);
121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0); 121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
122 } 122 }
123 } else if (radio_on) { 123 } else if (radio_on) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index d228358e6a40..9970c2b1b199 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
301 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ 301 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
302 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ 302 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
303 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ 303 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
304 {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
304 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ 305 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
305 {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ 306 {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
306 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ 307 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
308 {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
307 /* HP - Lite-On ,8188CUS Slim Combo */ 309 /* HP - Lite-On ,8188CUS Slim Combo */
308 {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, 310 {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
309 {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */ 311 {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
346 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ 348 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
347 {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/ 349 {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
348 {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ 350 {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
351 {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
349 {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/ 352 {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
350 {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/ 353 {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
351 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ 354 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index ad87a1ac6462..db6430c1a084 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -869,7 +869,7 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
869 } 869 }
870 870
871 *mactime = tsf_info->current_tsf_lsb | 871 *mactime = tsf_info->current_tsf_lsb |
872 (tsf_info->current_tsf_msb << 31); 872 ((u64)tsf_info->current_tsf_msb << 32);
873 873
874out: 874out:
875 kfree(tsf_info); 875 kfree(tsf_info);
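The wl1251 TSF fix above works because the MSB word must be widened to 64 bits and shifted by a full 32 before being OR-ed with the LSB; a plain 32-bit shift (and by 31, at that) throws the high half away. A two-value demonstration:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint32_t lsb = 0x89abcdef, msb = 0x01234567;

        uint64_t wrong = lsb | (msb << 31);           /* 32-bit shift loses the MSB word */
        uint64_t right = lsb | ((uint64_t)msb << 32); /* widened first, weight 2^32 */

        printf("wrong=%#" PRIx64 " right=%#" PRIx64 "\n", wrong, right);
        return 0;
}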
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 9f15ccaf8f05..5ec50a476a69 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -76,8 +76,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
76 } 76 }
77 } 77 }
78 78
79 if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID && 79 if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
80 wl->station_mode != STATION_ACTIVE_MODE) {
81 wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT"); 80 wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
82 81
83 /* indicate to the stack, that beacons have been lost */ 82 /* indicate to the stack, that beacons have been lost */
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 87f6305bda2c..567660cd2fcd 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -73,6 +73,8 @@ static void wl1251_spi_reset(struct wl1251 *wl)
73 spi_sync(wl_to_spi(wl), &m); 73 spi_sync(wl_to_spi(wl), &m);
74 74
75 wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); 75 wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
76
77 kfree(cmd);
76} 78}
77 79
78static void wl1251_spi_wake(struct wl1251 *wl) 80static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@ static void wl1251_spi_wake(struct wl1251 *wl)
127 spi_sync(wl_to_spi(wl), &m); 129 spi_sync(wl_to_spi(wl), &m);
128 130
129 wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); 131 wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
132
133 kfree(cmd);
130} 134}
131 135
132static void wl1251_spi_reset_wake(struct wl1251 *wl) 136static void wl1251_spi_reset_wake(struct wl1251 *wl)
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index 54156b0b5c2d..d7b907e67170 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -1,7 +1,6 @@
1config WLCORE 1config WLCORE
2 tristate "TI wlcore support" 2 tristate "TI wlcore support"
3 depends on WL_TI && GENERIC_HARDIRQS && MAC80211 3 depends on WL_TI && GENERIC_HARDIRQS && MAC80211
4 depends on INET
5 select FW_LOADER 4 select FW_LOADER
6 ---help--- 5 ---help---
7 This module contains the main code for TI WLAN chips. It abstracts 6 This module contains the main code for TI WLAN chips. It abstracts
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2027afe405fe..30899901aef5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
1935 1935
1936 dev_dbg(&dev->dev, "%s\n", dev->nodename); 1936 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1937 1937
1938 unregister_netdev(info->netdev);
1939
1940 xennet_disconnect_backend(info); 1938 xennet_disconnect_backend(info);
1941 1939
1942 del_timer_sync(&info->rx_refill_timer);
1943
1944 xennet_sysfs_delif(info->netdev); 1940 xennet_sysfs_delif(info->netdev);
1945 1941
1942 unregister_netdev(info->netdev);
1943
1944 del_timer_sync(&info->rx_refill_timer);
1945
1946 free_percpu(info->stats); 1946 free_percpu(info->stats);
1947 1947
1948 free_netdev(info->netdev); 1948 free_netdev(info->netdev);