Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_sysfs.c  |   1
-rw-r--r--  drivers/net/can/c_can/c_can.c  |   6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h  |   8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c  |   2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h  |   9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h  |   2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c  |   2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h  |   1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c  |  11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  |  46
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c  |   8
-rw-r--r--  drivers/net/tun.c  |  38
-rw-r--r--  drivers/net/usb/cdc_ncm.c  |   3
-rw-r--r--  drivers/net/usb/qmi_wwan.c  |  13
-rw-r--r--  drivers/net/usb/usbnet.c  |  35
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  |   7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c  |  35
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h  |   3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c  |  40
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/pub.h  |   3
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c  |  24
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c  |   9
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c  |   7
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c  |   4
-rw-r--r--  drivers/net/xen-netback/common.h  |   3
-rw-r--r--  drivers/net/xen-netback/interface.c  |  23
-rw-r--r--  drivers/net/xen-netback/netback.c  | 115
27 files changed, 298 insertions, 160 deletions
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1877ed7ca086..1c9e09fbdff8 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
 		pr_info("%s: Setting primary slave to None.\n",
 			bond->dev->name);
 		bond->primary_slave = NULL;
+		memset(bond->params.primary, 0, sizeof(bond->params.primary));
 		bond_select_active_slave(bond);
 		goto out;
 	}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 58607f196c9e..2282b1ae9765 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
 	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
 			IFX_WRITE_LOW_16BIT(mask));
+
+	/* According to C_CAN documentation, the reserved bit
+	 * in IFx_MASK2 register is fixed 1
+	 */
 	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-			IFX_WRITE_HIGH_16BIT(mask));
+			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
 	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
 			IFX_WRITE_LOW_16BIT(id));
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4eba17b83ba8..f1b3df167ff2 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -36,13 +36,13 @@
 
 #define DRV_VER			"4.4.161.0u"
 #define DRV_NAME		"be2net"
-#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
+#define BE_NAME			"Emulex BladeEngine2"
+#define BE3_NAME		"Emulex BladeEngine3"
+#define OC_NAME			"Emulex OneConnect"
 #define OC_NAME_BE		OC_NAME	"(be3)"
 #define OC_NAME_LANCER		OC_NAME "(Lancer)"
 #define OC_NAME_SH		OC_NAME "(Skyhawk)"
-#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID		0x19a2
 #define EMULEX_VENDOR_ID	0x10df
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5c995700e534..4d6f3c54427a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 02a12b69555f..4dab6fc265a2 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -232,6 +232,7 @@
 #define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
 #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
 #define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE     0x00080000  /* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
 #define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
@@ -389,6 +390,12 @@
 
 #define E1000_PBS_16K E1000_PBA_16K
 
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK	0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK	0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT	8
+#define E1000_PBECCSTS_ECC_ENABLE		0x00010000
+
 #define IFS_MAX 80
 #define IFS_MIN 40
 #define IFS_RATIO 4
@@ -408,6 +415,7 @@
 #define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */
 #define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
@@ -443,6 +451,7 @@
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
 #define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6782a2eea1bc..7e95f221d60b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,8 @@ struct e1000_adapter {
 
 	struct napi_struct napi;
 
+	unsigned int uncorr_errors;	/* uncorrectable ECC errors */
+	unsigned int corr_errors;	/* correctable ECC errors */
 	unsigned int restart_queue;
 	u32 txd_cmd;
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f95bc6ee1c22..fd4772a2691c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	E1000_STAT("dropped_smbus", stats.mgpdc),
 	E1000_STAT("rx_dma_failed", rx_dma_failed),
 	E1000_STAT("tx_dma_failed", tx_dma_failed),
+	E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+	E1000_STAT("corr_ecc_errors", corr_errors),
 };
 
 #define E1000_GLOBAL_STATS_LEN	ARRAY_SIZE(e1000_gstrings_stats)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index cf217777586c..b88676ff3d86 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -77,6 +77,7 @@ enum e1e_registers {
 #define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
 	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
 	E1000_PBS      = 0x01008, /* Packet Buffer Size */
+	E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
 	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
 	E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
 	E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 976336547607..24d9f61956f0 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
 	if (hw->mac.type == e1000_ich8lan)
 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
 	ew32(RFCTL, reg);
+
+	/* Enable ECC on Lynxpoint */
+	if (hw->mac.type == e1000_pch_lpt) {
+		reg = er32(PBECCSTS);
+		reg |= E1000_PBECCSTS_ECC_ENABLE;
+		ew32(PBECCSTS, reg);
+
+		reg = er32(CTRL);
+		reg |= E1000_CTRL_MEHE;
+		ew32(CTRL, reg);
+	}
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fbf75fdca994..643c883dd795 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
+	/* Reset on uncorrectable ECC error */
+	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+		u32 pbeccsts = er32(PBECCSTS);
+
+		adapter->corr_errors +=
+		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+		adapter->uncorr_errors +=
+		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+		/* Do the reset outside of interrupt context */
+		schedule_work(&adapter->reset_task);
+
+		/* return immediately since reset is imminent */
+		return IRQ_HANDLED;
+	}
+
 	if (napi_schedule_prep(&adapter->napi)) {
 		adapter->total_tx_bytes = 0;
 		adapter->total_tx_packets = 0;
@@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
+	/* Reset on uncorrectable ECC error */
+	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+		u32 pbeccsts = er32(PBECCSTS);
+
+		adapter->corr_errors +=
+		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+		adapter->uncorr_errors +=
+		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+		/* Do the reset outside of interrupt context */
+		schedule_work(&adapter->reset_task);
+
+		/* return immediately since reset is imminent */
+		return IRQ_HANDLED;
+	}
+
 	if (napi_schedule_prep(&adapter->napi)) {
 		adapter->total_tx_bytes = 0;
 		adapter->total_tx_packets = 0;
@@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 	if (adapter->msix_entries) {
 		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
 		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+	} else if (hw->mac.type == e1000_pch_lpt) {
+		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
 	} else {
 		ew32(IMS, IMS_ENABLE_MASK);
 	}
@@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.mgptc += er32(MGTPTC);
 	adapter->stats.mgprc += er32(MGTPRC);
 	adapter->stats.mgpdc += er32(MGTPDC);
+
+	/* Correctable ECC Errors */
+	if (hw->mac.type == e1000_pch_lpt) {
+		u32 pbeccsts = er32(PBECCSTS);
+		adapter->corr_errors +=
+		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+		adapter->uncorr_errors +=
+		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+	}
 }
 
 /**
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 7992b3e05d3d..78ace59efd29 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
 					 rp->tx_skbuff[entry]->len,
 					 PCI_DMA_TODEVICE);
 		}
-		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+		dev_kfree_skb(rp->tx_skbuff[entry]);
 		rp->tx_skbuff[entry] = NULL;
 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
 	}
@@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
 	if (intr_status & IntrPCIErr)
 		netif_warn(rp, hw, dev, "PCI error\n");
 
-	napi_disable(&rp->napi);
-	rhine_irq_disable(rp);
-	/* Slow and safe. Consider __napi_schedule as a replacement ? */
-	napi_enable(&rp->napi);
-	napi_schedule(&rp->napi);
+	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
 	mutex_unlock(&rp->task_lock);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cc09b67c23bc..2917a86f4c43 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
 }
 
 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
-			    u16 queue_index)
+			    struct tun_file *tfile)
 {
 	struct hlist_head *head;
 	struct tun_flow_entry *e;
 	unsigned long delay = tun->ageing_time;
+	u16 queue_index = tfile->queue_index;
 
 	if (!rxhash)
 		return;
@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 
 	rcu_read_lock();
 
-	if (tun->numqueues == 1)
+	/* We may get a very small possibility of OOO during switching, not
+	 * worth to optimize.*/
+	if (tun->numqueues == 1 || tfile->detached)
 		goto unlock;
 
 	e = tun_flow_find(head, rxhash);
@@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
 	tun = rtnl_dereference(tfile->tun);
 
-	if (tun) {
+	if (tun && !tfile->detached) {
 		u16 index = tfile->queue_index;
 		BUG_ON(index >= tun->numqueues);
 		dev = tun->dev;
 
 		rcu_assign_pointer(tun->tfiles[index],
 				   tun->tfiles[tun->numqueues - 1]);
-		rcu_assign_pointer(tfile->tun, NULL);
 		ntfile = rtnl_dereference(tun->tfiles[index]);
 		ntfile->queue_index = index;
 
 		--tun->numqueues;
-		if (clean)
+		if (clean) {
+			rcu_assign_pointer(tfile->tun, NULL);
 			sock_put(&tfile->sk);
-		else
+		} else
 			tun_disable_queue(tun, tfile);
 
 		synchronize_net();
@@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 	}
 
 	if (clean) {
-		if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
-		    !(tun->flags & TUN_PERSIST))
-			if (tun->dev->reg_state == NETREG_REGISTERED)
+		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+			netif_carrier_off(tun->dev);
+
+			if (!(tun->flags & TUN_PERSIST) &&
+			    tun->dev->reg_state == NETREG_REGISTERED)
 				unregister_netdevice(tun->dev);
+		}
 
 		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
 				 &tfile->socket.flags));
@@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
 		rcu_assign_pointer(tfile->tun, NULL);
 		--tun->numqueues;
 	}
+	list_for_each_entry(tfile, &tun->disabled, next) {
+		wake_up_all(&tfile->wq.wait);
+		rcu_assign_pointer(tfile->tun, NULL);
+	}
 	BUG_ON(tun->numqueues != 0);
 
 	synchronize_net();
@@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
 		goto out;
 
 	err = -EINVAL;
-	if (rtnl_dereference(tfile->tun))
+	if (rtnl_dereference(tfile->tun) && !tfile->detached)
 		goto out;
 
 	err = -EBUSY;
@@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	tun->dev->stats.rx_packets++;
 	tun->dev->stats.rx_bytes += len;
 
-	tun_flow_update(tun, rxhash, tfile->queue_index);
+	tun_flow_update(tun, rxhash, tfile);
 	return total_len;
 }
 
@@ -1658,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
 		    device_create_file(&tun->dev->dev, &dev_attr_group))
 			pr_err("Failed to create tun sysfs files\n");
-
-		netif_carrier_on(tun->dev);
 	}
 
+	netif_carrier_on(tun->dev);
+
 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
 	if (ifr->ifr_flags & IFF_NO_PI)
@@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 		ret = tun_attach(tun, file);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
-		if (!tun || !(tun->flags & TUN_TAP_MQ))
+		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
 			ret = -EINVAL;
 		else
 			__tun_detach(tfile, false);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 9197b2c72ca3..00d3b2d37828 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
 	  .driver_info = (unsigned long)&wwan_info,
 	},
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+	  .driver_info = (unsigned long)&wwan_info,
+	},
 
 	/* Infineon(now Intel) HSPA Modem platform */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 575a5839ee34..c8e05e27f38c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
 		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
 
 	/* 2. Combined interface devices matching on class+protocol */
 	{	/* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
 		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
+	{	/* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
 	{	/* Pantech UML290, P4200 and more */
 		USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
@@ -461,6 +473,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index f34b2ebee815..5e33606c1366 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 	unsigned long		lockflags;
 	size_t			size = dev->rx_urb_size;
 
+	/* prevent rx skb allocation when error ratio is high */
+	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+		usb_free_urb(urb);
+		return -ENOLINK;
+	}
+
 	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
 	if (!skb) {
 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
 		break;
 	}
 
+	/* stop rx if packet error rate is high */
+	if (++dev->pkt_cnt > 30) {
+		dev->pkt_cnt = 0;
+		dev->pkt_err = 0;
+	} else {
+		if (state == rx_cleanup)
+			dev->pkt_err++;
+		if (dev->pkt_err > 20)
+			set_bit(EVENT_RX_KILL, &dev->flags);
+	}
+
 	state = defer_bh(dev, skb, &dev->rxq, state);
 
 	if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
 		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
 		   "simple");
 
+	/* reset rx error state */
+	dev->pkt_cnt = 0;
+	dev->pkt_err = 0;
+	clear_bit(EVENT_RX_KILL, &dev->flags);
+
 	// delay posting reads until we're fully open
 	tasklet_schedule (&dev->bh);
 	if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	if (info->tx_fixup) {
 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
 		if (!skb) {
-			if (netif_msg_tx_err(dev)) {
-				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-				goto drop;
-			} else {
-				/* cdc_ncm collected packet; waits for more */
+			/* packet collected; minidriver waiting for more */
+			if (info->flags & FLAG_MULTI_PACKET)
 				goto not_drop;
-			}
+			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+			goto drop;
 		}
 	}
 	length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
 		}
 	}
 
+	/* restart RX again after disabling due to high error rate */
+	clear_bit(EVENT_RX_KILL, &dev->flags);
+
 	// waiting for all pending urbs to complete?
 	if (dev->wait) {
 		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index dc8913c6238c..12c6440d1649 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 	if (ret & 1) { /* Link is up. */
 		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
 		       adapter->netdev->name, adapter->link_speed);
-		if (!netif_carrier_ok(adapter->netdev))
-			netif_carrier_on(adapter->netdev);
+		netif_carrier_on(adapter->netdev);
 
 		if (affectTxQueue) {
 			for (i = 0; i < adapter->num_tx_queues; i++)
@@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 	} else {
 		printk(KERN_INFO "%s: NIC Link is Down\n",
 		       adapter->netdev->name);
-		if (netif_carrier_ok(adapter->netdev))
-			netif_carrier_off(adapter->netdev);
+		netif_carrier_off(adapter->netdev);
 
 		if (affectTxQueue) {
 			for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
 	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
+	netif_carrier_off(netdev);
 	err = register_netdev(netdev);
 
 	if (err) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 0f71d1d4339d..e5fd20994bec 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -36,6 +36,7 @@
 #include "debug.h"
 
 #define N_TX_QUEUES	4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT	500 /* msec */
 
 /* Flags we support */
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
 	wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
 }
 
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+	bool result;
+
+	spin_lock_bh(&wl->lock);
+	result = brcms_c_tx_flush_completed(wl->wlc);
+	spin_unlock_bh(&wl->lock);
+	return result;
+}
+
 static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
 {
 	struct brcms_info *wl = hw->priv;
+	int ret;
 
 	no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
 
-	/* wait for packet queue and dma fifos to run empty */
-	spin_lock_bh(&wl->lock);
-	brcms_c_wait_for_tx_completion(wl->wlc, drop);
-	spin_unlock_bh(&wl->lock);
+	ret = wait_event_timeout(wl->tx_flush_wq,
+				 brcms_tx_flush_completed(wl),
+				 msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+	brcms_dbg_mac80211(wl->wlc->hw->d11core,
+			   "ret=%d\n", jiffies_to_msecs(ret));
 }
 
 static const struct ieee80211_ops brcms_ops = {
@@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)
 
  done:
 	spin_unlock_bh(&wl->lock);
+	wake_up(&wl->tx_flush_wq);
 }
 
 /*
@@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 
 	atomic_set(&wl->callbacks, 0);
 
+	init_waitqueue_head(&wl->tx_flush_wq);
+
 	/* setup the bottom half handler */
 	tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
 
@@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
 	spin_lock_bh(&wl->lock);
 	return blocked;
 }
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
-	spin_unlock_bh(&wl->lock);
-	msleep(ms);
-	spin_lock_bh(&wl->lock);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 9358bd5ebd35..947ccacf43e6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,6 +68,8 @@ struct brcms_info {
 	spinlock_t lock;	/* per-device perimeter lock */
 	spinlock_t isr_lock;	/* per-device ISR synchronization lock */
 
+	/* tx flush */
+	wait_queue_head_t tx_flush_wq;
 
 	/* timer related fields */
 	atomic_t callbacks;	/* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
 extern void brcms_free_timer(struct brcms_timer *timer);
 extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
 extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
 extern void brcms_dpc(unsigned long data);
 extern void brcms_timer(struct brcms_timer *t);
 extern void brcms_fatal_error(struct brcms_info *wl);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 17594de4199e..8b5839008af3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 static bool
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
-	bool morepending = false;
 	struct bcma_device *core;
 	struct tx_status txstatus, *txs;
 	u32 s1, s2;
@@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 	txs = &txstatus;
 	core = wlc_hw->d11core;
 	*fatal = false;
-	s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
-	while (!(*fatal)
-	       && (s1 & TXS_V)) {
-		/* !give others some time to run! */
-		if (n >= max_tx_num) {
-			morepending = true;
-			break;
-		}
 
+	while (n < max_tx_num) {
+		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
 		if (s1 == 0xffffffff) {
 			brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
 				  __func__);
 			*fatal = true;
 			return false;
 		}
-		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+		/* only process when valid */
+		if (!(s1 & TXS_V))
+			break;
 
+		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
 		txs->status = s1 & TXS_STATUS_MASK;
 		txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
 		txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 		txs->lasttxtime = 0;
 
 		*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
-		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+		if (*fatal == true)
+			return false;
 		n++;
 	}
 
-	if (*fatal)
-		return false;
-
-	return morepending;
+	return n >= max_tx_num;
 }
 
 static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
 	return wlc->band->bandunit;
 }
 
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
 {
-	int timeout = 20;
 	int i;
 
 	/* Kick DMA to send any pending AMPDU */
 	for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
 		if (wlc->hw->di[i])
-			dma_txflush(wlc->hw->di[i]);
-
-	/* wait for queue and DMA fifos to run dry */
-	while (brcms_txpktpendtot(wlc) > 0) {
-		brcms_msleep(wlc->wl, 1);
-
-		if (--timeout == 0)
-			break;
-	}
+			dma_kick_tx(wlc->hw->di[i]);
 
-	WARN_ON_ONCE(timeout == 0);
+	return !brcms_txpktpendtot(wlc);
 }
 
 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 4fb2834f4e64..b0f14b7b8616 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
 extern void brcms_c_scan_start(struct brcms_c_info *wlc);
 extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
 extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
-					   bool drop);
 extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
 extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
 extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
 extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
 
 #endif	/* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 31534f7c0548..279796419ea0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 			next_reclaimed = ssn;
 		}
 
+		if (tid != IWL_TID_NON_QOS) {
+			priv->tid_data[sta_id][tid].next_reclaimed =
+				next_reclaimed;
+			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+					   next_reclaimed);
+		}
+
 		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
 		iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 		if (!is_agg)
 			iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
-		/*
-		 * W/A for FW bug - the seq_ctl isn't updated when the
-		 * queues are flushed. Fetch it from the packet itself
-		 */
-		if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
-			next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
-			next_reclaimed =
-				SEQ_TO_SN(next_reclaimed + 0x10);
-		}
-
 		is_offchannel_skb =
 			(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
 		freed++;
 	}
 
-	if (tid != IWL_TID_NON_QOS) {
-		priv->tid_data[sta_id][tid].next_reclaimed =
-			next_reclaimed;
-		IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
-				   next_reclaimed);
-	}
-
 	WARN_ON(!is_agg && freed != 1);
 
 	/*
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9189a32b7844..973a9d90e9ea 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
 			scan_rsp->number_of_sets);
 		ret = -1;
-		goto done;
+		goto check_next_scan;
 	}
 
 	bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		if (!beacon_size || beacon_size > bytes_left) {
 			bss_info += bytes_left;
 			bytes_left = 0;
-			return -1;
+			ret = -1;
+			goto check_next_scan;
 		}
 
 		/* Initialize the current working beacon pointer for this BSS
@@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 				dev_err(priv->adapter->dev,
 					"%s: bytes left < IE length\n",
 					__func__);
-				goto done;
+				goto check_next_scan;
 			}
 			if (element_id == WLAN_EID_DS_PARAMS) {
 				channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		}
 	}
 
+check_next_scan:
 	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
 	if (list_empty(&adapter->scan_pending_q)) {
 		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		}
 	}
 
-done:
 	return ret;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 4494d130b37c..0f8b05185eda 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 				 is_tx ? "Tx" : "Rx");
 
 			if (is_tx) {
-				rtl_lps_leave(hw);
+				schedule_work(&rtlpriv->
+					      works.lps_leave_work);
 				ppsc->last_delaylps_stamp_jiffies =
 				    jiffies;
 			}
@@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 			}
 	} else if (ETH_P_ARP == ether_type) {
 		if (is_tx) {
-			rtl_lps_leave(hw);
+			schedule_work(&rtlpriv->works.lps_leave_work);
 			ppsc->last_delaylps_stamp_jiffies = jiffies;
 		}
 
@@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 			 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
 
 		if (is_tx) {
-			rtl_lps_leave(hw);
+			schedule_work(&rtlpriv->works.lps_leave_work);
 			ppsc->last_delaylps_stamp_jiffies = jiffies;
 		}
 
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index f2ecdeb3a90d..1535efda3d52 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
 	WARN_ON(skb_queue_empty(&rx_queue));
 	while (!skb_queue_empty(&rx_queue)) {
 		_skb = skb_dequeue(&rx_queue);
-		_rtl_usb_rx_process_agg(hw, skb);
-		ieee80211_rx_irqsafe(hw, skb);
+		_rtl_usb_rx_process_agg(hw, _skb);
+		ieee80211_rx_irqsafe(hw, _skb);
 	}
 }
 
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3338c4..9d7f1723dd8f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8c338a..b8c5193bd420 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
 	return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 	struct net_device *dev = vif->dev;
-	if (netif_carrier_ok(dev)) {
-		rtnl_lock();
-		netif_carrier_off(dev); /* discard queued packets */
-		if (netif_running(dev))
-			xenvif_down(vif);
-		rtnl_unlock();
-		xenvif_put(vif);
-	}
+
+	rtnl_lock();
+	netif_carrier_off(dev); /* discard queued packets */
+	if (netif_running(dev))
+		xenvif_down(vif);
+	rtnl_unlock();
+	xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+	if (netif_carrier_ok(vif->dev))
+		xenvif_carrier_off(vif);
 
 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78d901d..2b9520c46e97 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
 	atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status);
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8 st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
 	do {
 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		if (cons >= end)
+		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
 	} while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		pending_idx = netbk->pending_ring[index];
 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
 		if (!page)
-			return NULL;
+			goto err;
 
 		gop->source.u.ref = txp->gref;
 		gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	}
 
 	return gop;
+err:
+	/* Unwind, freeing all pages and sending error responses. */
+	while (i-- > start) {
+		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+				      XEN_NETIF_RSP_ERROR);
+	}
+	/* The head too, if necessary. */
+	if (start)
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+	return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
 	struct gnttab_copy *gop = *gopp;
 	u16 pending_idx = *((u16 *)skb->data);
-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
-	struct xen_netif_tx_request *txp;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
 
 	/* Check status of header. */
 	err = gop->status;
-	if (unlikely(err)) {
-		pending_ring_idx_t index;
-		index = pending_index(netbk->pending_prod++);
-		txp = &pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
-	}
+	if (unlikely(err))
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t index;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		if (likely(!newerr)) {
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xen_netbk_idx_release(netbk, pending_idx);
+				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		txp = &netbk->pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		index = pending_index(netbk->pending_prod++);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1045 1054
1046 /* First error: invalidate header and preceding fragments. */ 1055 /* First error: invalidate header and preceding fragments. */
1047 pending_idx = *((u16 *)skb->data); 1056 pending_idx = *((u16 *)skb->data);
1048 xen_netbk_idx_release(netbk, pending_idx); 1057 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1049 for (j = start; j < i; j++) { 1058 for (j = start; j < i; j++) {
1050 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1059 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1051 xen_netbk_idx_release(netbk, pending_idx); 1060 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1052 } 1061 }
1053 1062
1054 /* Remember the error: invalidate all subsequent fragments. */ 1063 /* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1082 1091
1083 /* Take an extra reference to offset xen_netbk_idx_release */ 1092 /* Take an extra reference to offset xen_netbk_idx_release */
1084 get_page(netbk->mmap_pages[pending_idx]); 1093 get_page(netbk->mmap_pages[pending_idx]);
1085 xen_netbk_idx_release(netbk, pending_idx); 1094 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1086 } 1095 }
1087} 1096}
1088 1097
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1095 1104
1096 do { 1105 do {
1097 if (unlikely(work_to_do-- <= 0)) { 1106 if (unlikely(work_to_do-- <= 0)) {
1098 netdev_dbg(vif->dev, "Missing extra info\n"); 1107 netdev_err(vif->dev, "Missing extra info\n");
1108 netbk_fatal_tx_err(vif);
1099 return -EBADR; 1109 return -EBADR;
1100 } 1110 }
1101 1111
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1104 if (unlikely(!extra.type || 1114 if (unlikely(!extra.type ||
1105 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1115 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1106 vif->tx.req_cons = ++cons; 1116 vif->tx.req_cons = ++cons;
1107 netdev_dbg(vif->dev, 1117 netdev_err(vif->dev,
1108 "Invalid extra type: %d\n", extra.type); 1118 "Invalid extra type: %d\n", extra.type);
1119 netbk_fatal_tx_err(vif);
1109 return -EINVAL; 1120 return -EINVAL;
1110 } 1121 }
1111 1122
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
1121 struct xen_netif_extra_info *gso) 1132 struct xen_netif_extra_info *gso)
1122{ 1133{
1123 if (!gso->u.gso.size) { 1134 if (!gso->u.gso.size) {
1124 netdev_dbg(vif->dev, "GSO size must not be zero.\n"); 1135 netdev_err(vif->dev, "GSO size must not be zero.\n");
1136 netbk_fatal_tx_err(vif);
1125 return -EINVAL; 1137 return -EINVAL;
1126 } 1138 }
1127 1139
1128 /* Currently only TCPv4 S.O. is supported. */ 1140 /* Currently only TCPv4 S.O. is supported. */
1129 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { 1141 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1130 netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); 1142 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1143 netbk_fatal_tx_err(vif);
1131 return -EINVAL; 1144 return -EINVAL;
1132 } 1145 }
1133 1146
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1264 1277
1265 /* Get a netif from the list with work to do. */ 1278 /* Get a netif from the list with work to do. */
1266 vif = poll_net_schedule_list(netbk); 1279 vif = poll_net_schedule_list(netbk);
1280 /* This can sometimes happen because the test of
1281 * list_empty(net_schedule_list) at the top of the
1282 * loop is unlocked. Just go back and have another
1283 * look.
1284 */
1267 if (!vif) 1285 if (!vif)
1268 continue; 1286 continue;
1269 1287
1288 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1289 XEN_NETIF_TX_RING_SIZE) {
1290 netdev_err(vif->dev,
1291 "Impossible number of requests. "
1292 "req_prod %d, req_cons %d, size %ld\n",
1293 vif->tx.sring->req_prod, vif->tx.req_cons,
1294 XEN_NETIF_TX_RING_SIZE);
1295 netbk_fatal_tx_err(vif);
1296 continue;
1297 }
1298
1270 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); 1299 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1271 if (!work_to_do) { 1300 if (!work_to_do) {
1272 xenvif_put(vif); 1301 xenvif_put(vif);
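[Editor's sketch] The sanity check added in the hunk above relies on the Xen shared-ring convention that req_prod and req_cons are free-running unsigned counters, so their difference is the number of outstanding requests even after wraparound, and a difference larger than the ring size can only come from a broken or hostile frontend. A minimal stand-alone sketch of that arithmetic follows; the function name and the 256-entry ring size are placeholders, not the driver's (the real bound is XEN_NETIF_TX_RING_SIZE).

#include <stdint.h>
#include <stdio.h>

/* Placeholder for XEN_NETIF_TX_RING_SIZE; the value is illustrative. */
#define RING_SIZE 256u

static int outstanding_is_sane(uint32_t req_prod, uint32_t req_cons)
{
	/* Free-running counters: unsigned subtraction still yields the
	 * number of outstanding requests after 32-bit wraparound, so
	 * anything above RING_SIZE indicates frontend garbage. */
	return (uint32_t)(req_prod - req_cons) <= RING_SIZE;
}

int main(void)
{
	printf("%d\n", outstanding_is_sane(110u, 100u));      /* 1: 10 outstanding */
	printf("%d\n", outstanding_is_sane(5u, 4294967290u)); /* 1: wrapped, 11 outstanding */
	printf("%d\n", outstanding_is_sane(100000u, 100u));   /* 0: impossible */
	return 0;
}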
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1294 work_to_do = xen_netbk_get_extras(vif, extras, 1323 work_to_do = xen_netbk_get_extras(vif, extras,
1295 work_to_do); 1324 work_to_do);
1296 idx = vif->tx.req_cons; 1325 idx = vif->tx.req_cons;
1297 if (unlikely(work_to_do < 0)) { 1326 if (unlikely(work_to_do < 0))
1298 netbk_tx_err(vif, &txreq, idx);
1299 continue; 1327 continue;
1300 }
1301 } 1328 }
1302 1329
1303 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); 1330 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
1304 if (unlikely(ret < 0)) { 1331 if (unlikely(ret < 0))
1305 netbk_tx_err(vif, &txreq, idx - ret);
1306 continue; 1332 continue;
1307 } 1333
1308 idx += ret; 1334 idx += ret;
1309 1335
1310 if (unlikely(txreq.size < ETH_HLEN)) { 1336 if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1316 1342
1317 /* No crossing a page as the payload mustn't fragment. */ 1343 /* No crossing a page as the payload mustn't fragment. */
1318 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { 1344 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1319 netdev_dbg(vif->dev, 1345 netdev_err(vif->dev,
1320 "txreq.offset: %x, size: %u, end: %lu\n", 1346 "txreq.offset: %x, size: %u, end: %lu\n",
1321 txreq.offset, txreq.size, 1347 txreq.offset, txreq.size,
1322 (txreq.offset&~PAGE_MASK) + txreq.size); 1348 (txreq.offset&~PAGE_MASK) + txreq.size);
1323 netbk_tx_err(vif, &txreq, idx); 1349 netbk_fatal_tx_err(vif);
1324 continue; 1350 continue;
1325 } 1351 }
1326 1352
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1348 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1374 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1349 1375
1350 if (netbk_set_skb_gso(vif, skb, gso)) { 1376 if (netbk_set_skb_gso(vif, skb, gso)) {
1377 /* Failure in netbk_set_skb_gso is fatal. */
1351 kfree_skb(skb); 1378 kfree_skb(skb);
1352 netbk_tx_err(vif, &txreq, idx);
1353 continue; 1379 continue;
1354 } 1380 }
1355 } 1381 }
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1448 txp->size -= data_len; 1474 txp->size -= data_len;
1449 } else { 1475 } else {
1450 /* Schedule a response immediately. */ 1476 /* Schedule a response immediately. */
1451 xen_netbk_idx_release(netbk, pending_idx); 1477 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1452 } 1478 }
1453 1479
1454 if (txp->flags & XEN_NETTXF_csum_blank) 1480 if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
1500 xen_netbk_tx_submit(netbk); 1526 xen_netbk_tx_submit(netbk);
1501} 1527}
1502 1528
1503static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) 1529static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1530 u8 status)
1504{ 1531{
1505 struct xenvif *vif; 1532 struct xenvif *vif;
1506 struct pending_tx_info *pending_tx_info; 1533 struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
1514 1541
1515 vif = pending_tx_info->vif; 1542 vif = pending_tx_info->vif;
1516 1543
1517 make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); 1544 make_tx_response(vif, &pending_tx_info->req, status);
1518 1545
1519 index = pending_index(netbk->pending_prod++); 1546 index = pending_index(netbk->pending_prod++);
1520 netbk->pending_ring[index] = pending_idx; 1547 netbk->pending_ring[index] = pending_idx;
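[Editor's sketch] With the status argument threaded through, success and error completions share one release helper instead of open-coding the response and pending-ring bookkeeping at each call site, which is the duplication the earlier hunks remove. The shape of that helper is sketched below with invented names and a trivial ring; it is not the driver's implementation.

#include <stdio.h>

/* Invented stand-ins for the response/slot-recycling bookkeeping that
 * the reworked release helper performs for both outcomes. */

enum rsp { RSP_OKAY, RSP_ERROR };

#define NR_SLOTS 16
static unsigned pending_prod;
static int pending_ring[NR_SLOTS];

static void make_response(int pending_idx, enum rsp status)
{
	printf("slot %d -> %s\n", pending_idx,
	       status == RSP_ERROR ? "ERROR" : "OKAY");
}

static void idx_release(int pending_idx, enum rsp status)
{
	/* One path does both halves of the cleanup: report how the
	 * request ended, then recycle the pending slot. */
	make_response(pending_idx, status);
	pending_ring[pending_prod++ % NR_SLOTS] = pending_idx;
}

int main(void)
{
	idx_release(3, RSP_OKAY);   /* normal completion */
	idx_release(4, RSP_ERROR);  /* failed or invalidated request */
	return 0;
}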