Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/slcan.c | 2
-rw-r--r--  drivers/net/can/vcan.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne3210.c | 2
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 10
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.h | 30
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 54
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 15
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 7
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 12
-rw-r--r--  drivers/net/ethernet/dec/tulip/eeprom.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 7
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 7
-rw-r--r--  drivers/net/ethernet/fealnx.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 27
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 29
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea.h | 1
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_phyp.c | 12
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 14
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7
-rw-r--r--  drivers/net/ethernet/jme.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 4
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 1
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 550
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 9
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 20
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 1
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 3
-rw-r--r--  drivers/net/hamradio/6pack.c | 6
-rw-r--r--  drivers/net/hamradio/bpqether.c | 2
-rw-r--r--  drivers/net/hamradio/mkiss.c | 6
-rw-r--r--  drivers/net/hamradio/scc.c | 2
-rw-r--r--  drivers/net/hamradio/yam.c | 2
-rw-r--r--  drivers/net/irda/irtty-sir.c | 4
-rw-r--r--  drivers/net/irda/mcs7780.c | 4
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 4
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 4
-rw-r--r--  drivers/net/irda/sh_irda.c | 4
-rw-r--r--  drivers/net/irda/sh_sir.c | 5
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/rionet.c | 141
-rw-r--r--  drivers/net/usb/cdc_eem.c | 4
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/usb/mcs7830.c | 30
-rw-r--r--  drivers/net/usb/usbnet.c | 17
-rw-r--r--  drivers/net/vxlan.c | 153
-rw-r--r--  drivers/net/wan/farsync.c | 2
-rw-r--r--  drivers/net/wan/z85230.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 164
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 10
-rw-r--r--  drivers/net/wireless/b43/main.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 70
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/devices.c | 39
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 7
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 13
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 52
-rw-r--r--  drivers/net/xen-netfront.c | 1
95 files changed, 1091 insertions(+), 640 deletions(-)
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 034c16b60e96..adc3708d8829 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -56,7 +56,7 @@
 #include <linux/kernel.h>
 #include <linux/can.h>
 
-static __initdata const char banner[] =
+static __initconst const char banner[] =
 	KERN_INFO "slcan: serial line CAN interface driver\n";
 
 MODULE_ALIAS_LDISC(N_SLCAN);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 4f93c0be0053..0a2a5ee79a17 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -49,7 +49,7 @@
 #include <linux/slab.h>
 #include <net/rtnetlink.h>
 
-static __initdata const char banner[] =
+static __initconst const char banner[] =
 	KERN_INFO "vcan: Virtual CAN interface driver\n";
 
 MODULE_DESCRIPTION("virtual CAN interface");
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index a2f8b2b8e27c..e3f57427d5c5 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -81,7 +81,7 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
 
 static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
 static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
-static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"};
 static int ifmap_val[] __initdata = {
 	IF_PORT_10BASET,
 	IF_PORT_UNKNOWN,
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d920a529ba22..5b65992c2a0a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -295,7 +295,7 @@ MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
 static const struct chip_info {
 	const char *name;
 	int drv_flags;
-} netdrv_tbl[] __devinitdata = {
+} netdrv_tbl[] __devinitconst = {
 	{ "Adaptec Starfire 6915", CanHaveMII },
 };
 
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 64d0d9c1afa2..3491d4312fc9 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1845,6 +1845,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
 	if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
 		printk(KERN_ERR "amd8111e: No Power Management capability, "
 			"exiting.\n");
+		err = -ENODEV;
 		goto err_free_reg;
 	}
 
@@ -1852,6 +1853,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
 		printk(KERN_ERR "amd8111e: DMA not supported,"
 			"exiting.\n");
+		err = -ENODEV;
 		goto err_free_reg;
 	}
 
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 397596b078d9..f195acfa2df7 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1174,8 +1174,10 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
 		pdev->name, aup->mac_id);
 	aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
-	if (aup->mii_bus->irq == NULL)
+	if (aup->mii_bus->irq == NULL) {
+		err = -ENOMEM;
 		goto err_out;
+	}
 
 	for (i = 0; i < PHY_MAX_ADDR; ++i)
 		aup->mii_bus->irq[i] = PHY_POLL;
@@ -1190,7 +1192,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 		goto err_mdiobus_reg;
 	}
 
-	if (au1000_mii_probe(dev) != 0)
+	err = au1000_mii_probe(dev);
+	if (err != 0)
 		goto err_out;
 
 	pDBfree = NULL;
@@ -1205,6 +1208,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	}
 	aup->pDBfree = pDBfree;
 
+	err = -ENODEV;
 	for (i = 0; i < NUM_RX_DMA; i++) {
 		pDB = au1000_GetFreeDB(aup);
 		if (!pDB)
@@ -1213,6 +1217,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
 		aup->rx_db_inuse[i] = pDB;
 	}
+
+	err = -ENODEV;
 	for (i = 0; i < NUM_TX_DMA; i++) {
 		pDB = au1000_GetFreeDB(aup);
 		if (!pDB)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 55a2e3795055..d19f82f7597a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -702,7 +702,7 @@ struct atl1c_platform_patch {
 	u32 patch_flag;
 #define ATL1C_LINK_PATCH	0x1
 };
-static const struct atl1c_platform_patch plats[] __devinitdata = {
+static const struct atl1c_platform_patch plats[] __devinitconst = {
 {0x2060, 0xC1, 0x1019, 0x8152, 0x1},
 {0x2060, 0xC1, 0x1019, 0x2060, 0x1},
 {0x2060, 0xC1, 0x1019, 0xE000, 0x1},
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 57d64b80fd72..623dd8635c46 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
 */
 
 #define ATL2_PARAM(X, desc) \
-	static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+	static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
 	MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
 	MODULE_PARM_DESC(X, desc);
 #else
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 0e3048b788c2..133d5857b9e2 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -10,6 +10,7 @@
 #include <bcm63xx_regs.h>
 #include <bcm63xx_irq.h>
 #include <bcm63xx_io.h>
+#include <bcm63xx_iudma.h>
 
 /* default number of descriptor */
 #define BCMENET_DEF_RX_DESC	64
@@ -31,35 +32,6 @@
 #define BCMENET_MAX_MTU		2046
 
 /*
- * rx/tx dma descriptor
- */
-struct bcm_enet_desc {
-	u32 len_stat;
-	u32 address;
-};
-
-#define DMADESC_LENGTH_SHIFT	16
-#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
-#define DMADESC_OWNER_MASK	(1 << 15)
-#define DMADESC_EOP_MASK	(1 << 14)
-#define DMADESC_SOP_MASK	(1 << 13)
-#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
-#define DMADESC_WRAP_MASK	(1 << 12)
-
-#define DMADESC_UNDER_MASK	(1 << 9)
-#define DMADESC_APPEND_CRC	(1 << 8)
-#define DMADESC_OVSIZE_MASK	(1 << 4)
-#define DMADESC_RXER_MASK	(1 << 2)
-#define DMADESC_CRC_MASK	(1 << 1)
-#define DMADESC_OV_MASK		(1 << 0)
-#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
-				DMADESC_OVSIZE_MASK | \
-				DMADESC_RXER_MASK | \
-				DMADESC_CRC_MASK | \
-				DMADESC_OV_MASK)
-
-
-/*
  * MIB Counters register definitions
 */
 #define ETH_MIB_TX_GD_OCTETS			0
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2b4b4f529ab4..16814b34d4b6 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -375,7 +375,6 @@ struct xgmac_priv {
 	unsigned int tx_tail;
 
 	void __iomem *base;
-	struct sk_buff_head rx_recycle;
 	unsigned int dma_buf_sz;
 	dma_addr_t dma_rx_phy;
 	dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		p = priv->dma_rx + entry;
 
 		if (priv->rx_skbuff[entry] == NULL) {
-			skb = __skb_dequeue(&priv->rx_recycle);
-			if (skb == NULL)
-				skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
 			if (unlikely(skb == NULL))
 				break;
 
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
 		}
 
-		/*
-		 * If there's room in the queue (limit it to size)
-		 * we add this skb back into the pool,
-		 * if it's the right size.
-		 */
-		if ((skb_queue_len(&priv->rx_recycle) <
-			DMA_RX_RING_SZ) &&
-		     skb_recycle_check(skb, priv->dma_buf_sz))
-			__skb_queue_head(&priv->rx_recycle, skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 	}
 
 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
 			dev->dev_addr);
 	}
 
-	skb_queue_head_init(&priv->rx_recycle);
 	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
 
 	/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
 	napi_disable(&priv->napi);
 
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
-	skb_queue_purge(&priv->rx_recycle);
 
 	/* Disable the MAC core */
 	xgmac_mac_disable(priv->base);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 31752b24434e..a4da893ac1e1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -696,6 +696,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
 int t4_check_fw_version(struct adapter *adapter);
 int t4_prep_adapter(struct adapter *adapter);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6b9f6bb2f7ed..604f4f87f550 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -443,7 +443,10 @@ int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
 module_param(dbfifo_int_thresh, int, 0644);
 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
 
-int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */
+/*
+ * usecs to sleep while draining the dbfifo
+ */
+static int dbfifo_drain_delay = 1000;
 module_param(dbfifo_drain_delay, int, 0644);
 MODULE_PARM_DESC(dbfifo_drain_delay,
 		 "usecs to sleep while draining the dbfifo");
@@ -636,7 +639,7 @@ static void name_msix_vecs(struct adapter *adap)
 static int request_msix_queue_irqs(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
 
 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
 			  adap->msix_info[1].desc, &s->fw_evtq);
@@ -644,56 +647,60 @@ static int request_msix_queue_irqs(struct adapter *adap)
 		return err;
 
 	for_each_ethrxq(s, ethqidx) {
-		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
-				  adap->msix_info[msi].desc,
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
 				  &s->ethrxq[ethqidx].rspq);
 		if (err)
 			goto unwind;
-		msi++;
+		msi_index++;
 	}
 	for_each_ofldrxq(s, ofldqidx) {
-		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
-				  adap->msix_info[msi].desc,
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
 				  &s->ofldrxq[ofldqidx].rspq);
 		if (err)
 			goto unwind;
-		msi++;
+		msi_index++;
 	}
 	for_each_rdmarxq(s, rdmaqidx) {
-		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
-				  adap->msix_info[msi].desc,
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
 				  &s->rdmarxq[rdmaqidx].rspq);
 		if (err)
 			goto unwind;
-		msi++;
+		msi_index++;
 	}
 	return 0;
 
 unwind:
 	while (--rdmaqidx >= 0)
-		free_irq(adap->msix_info[--msi].vec,
+		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->rdmarxq[rdmaqidx].rspq);
 	while (--ofldqidx >= 0)
-		free_irq(adap->msix_info[--msi].vec,
+		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->ofldrxq[ofldqidx].rspq);
 	while (--ethqidx >= 0)
-		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->ethrxq[ethqidx].rspq);
 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
 	return err;
 }
 
 static void free_msix_queue_irqs(struct adapter *adap)
 {
-	int i, msi = 2;
+	int i, msi_index = 2;
 	struct sge *s = &adap->sge;
 
 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
 	for_each_ethrxq(s, i)
-		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
 	for_each_ofldrxq(s, i)
-		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
 	for_each_rdmarxq(s, i)
-		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
 }
 
 /**
@@ -2535,9 +2542,8 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
 
 	ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
 	if (!ret) {
-		indices = be64_to_cpu(indices);
-		*cidx = (indices >> 25) & 0xffff;
-		*pidx = (indices >> 9) & 0xffff;
+		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
+		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
 	}
 	return ret;
 }
@@ -3634,10 +3640,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 	 * field selections will fit in the 36-bit budget.
 	 */
 	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
-		int i, bits = 0;
+		int j, bits = 0;
 
-		for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
-			switch (tp_vlan_pri_map & (1 << i)) {
+		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
+			switch (tp_vlan_pri_map & (1 << j)) {
 			case 0:
 				/* compressed filter field not enabled */
 				break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 137a24438d9c..32eec15fe4c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -380,9 +380,11 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
 	/* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
 	for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
 		if (dir)
-			*data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
+			*data++ = (__force __be32) t4_read_reg(adap,
+							(MEMWIN0_BASE + i));
 		else
-			t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
+			t4_write_reg(adap, (MEMWIN0_BASE + i),
+				     (__force u32) *data++);
 	}
 
 	return 0;
@@ -417,7 +419,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
 	if ((addr & 0x3) || (len & 0x3))
 		return -EINVAL;
 
-	data = vmalloc(MEMWIN0_APERTURE/sizeof(__be32));
+	data = vmalloc(MEMWIN0_APERTURE);
 	if (!data)
 		return -ENOMEM;
 
@@ -744,7 +746,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
 		if (ret)
 			return ret;
 		if (byte_oriented)
-			*data = htonl(*data);
+			*data = (__force __u32) (htonl(*data));
 	}
 	return 0;
 }
@@ -992,7 +994,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
 	int ret, addr;
 	unsigned int i;
 	u8 first_page[SF_PAGE_SIZE];
-	const u32 *p = (const u32 *)fw_data;
+	const __be32 *p = (const __be32 *)fw_data;
 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
 	unsigned int fw_img_start = adap->params.sf_fw_start;
@@ -2315,7 +2317,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
 		t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
 
 	for (i = 0; i < len; i += 4)
-		*data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
+		*data++ = (__force __be32) t4_read_reg(adap,
+						(MEMWIN0_BASE + off + i));
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 61cc09342865..77335853ac36 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -661,9 +661,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
    new frame, not around filling de->setup_frame. This is non-deterministic
    when re-entered but still correct. */
 
-#undef set_bit_le
-#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
-
 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
 {
 	struct de_private *de = netdev_priv(dev);
@@ -673,12 +670,12 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
 	u16 *eaddrs;
 
 	memset(hash_table, 0, sizeof(hash_table));
-	set_bit_le(255, hash_table);			/* Broadcast entry */
+	__set_bit_le(255, hash_table);			/* Broadcast entry */
 	/* This should work on big-endian machines as well. */
 	netdev_for_each_mc_addr(ha, dev) {
 		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
 
-		set_bit_le(index, hash_table);
+		__set_bit_le(index, hash_table);
 	}
 
 	for (i = 0; i < 32; i++) {
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 4d6fe604fa64..d23755ea9bc7 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -446,13 +446,17 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
 	/* Allocate Tx/Rx descriptor memory */
 	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
 			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
-	if (!db->desc_pool_ptr)
+	if (!db->desc_pool_ptr) {
+		err = -ENOMEM;
 		goto err_out_res;
+	}
 
 	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
 			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
-	if (!db->buf_pool_ptr)
+	if (!db->buf_pool_ptr) {
+		err = -ENOMEM;
 		goto err_out_free_desc;
+	}
 
 	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
 	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -462,8 +466,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
 	db->chip_id = ent->driver_data;
 	/* IO type range. */
 	db->ioaddr = pci_iomap(pdev, 0, 0);
-	if (!db->ioaddr)
+	if (!db->ioaddr) {
+		err = -ENOMEM;
 		goto err_out_free_buf;
+	}
 
 	db->chip_revision = pdev->revision;
 	db->wol_mode = 0;
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index ed7d1dcd9566..44f7e8e82d85 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
 	{NULL}};
 
 
-static const char *block_name[] __devinitdata = {
+static const char *const block_name[] __devinitconst = {
 	"21140 non-MII",
 	"21140 MII PHY",
 	"21142 Serial PHY",
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c4f37aca2269..885700a19978 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1010,9 +1010,6 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
    new frame, not around filling tp->setup_frame. This is non-deterministic
    when re-entered but still correct. */
 
-#undef set_bit_le
-#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
-
 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
 {
 	struct tulip_private *tp = netdev_priv(dev);
@@ -1022,12 +1019,12 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
 	u16 *eaddrs;
 
 	memset(hash_table, 0, sizeof(hash_table));
-	set_bit_le(255, hash_table);			/* Broadcast entry */
+	__set_bit_le(255, hash_table);			/* Broadcast entry */
 	/* This should work on big-endian machines as well. */
 	netdev_for_each_mc_addr(ha, dev) {
 		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
 
-		set_bit_le(index, hash_table);
+		__set_bit_le(index, hash_table);
 	}
 	for (i = 0; i < 32; i++) {
 		*setup_frm++ = hash_table[i];
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 4d1ffca83c82..7c1ec4d7920b 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -236,7 +236,7 @@ struct pci_id_info {
 	int drv_flags;		/* Driver use, intended as capability flags. */
 };
 
-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
 	{ 				/* Sometime a Level-One switch card. */
 	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
 	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d7bb52a7bda1..3b83588e51f6 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -218,7 +218,7 @@ enum {
 struct pci_id_info {
         const char *name;
 };
-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 	{"D-Link DFE-580TX 4 port Server Adapter"},
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb3f2cb3b93b..d1b6cc587639 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2129,8 +2129,11 @@ void be_detect_error(struct be_adapter *adapter)
 		ue_hi = (ue_hi & ~ue_hi_mask);
 	}
 
-	if (ue_lo || ue_hi ||
-		sliport_status & SLIPORT_STATUS_ERR_MASK) {
+	/* On certain platforms BE hardware can indicate spurious UEs.
+	 * Allow the h/w to stop working completely in case of a real UE.
+	 * Hence not setting the hw_error for UE detection.
+	 */
+	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
 		adapter->hw_error = true;
 		dev_err(&adapter->pdev->dev,
 			"Error detected in the card\n");
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 9d71c9cc300b..0e4a0ac86aa8 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -150,7 +150,7 @@ struct chip_info {
 	int flags;
 };
 
-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
 	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
 	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
 	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a1b52ec3b930..1d03dcdd5e56 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv)
 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
 			  priv->tx_queue[0]->tx_bd_base,
 			  priv->tx_queue[0]->tx_bd_dma_base);
-	skb_queue_purge(&priv->rx_recycle);
 }
 
 void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev)
 
 	enable_napi(priv);
 
-	skb_queue_head_init(&priv->rx_recycle);
-
 	/* Initialize a bunch of registers */
 	init_registers(dev);
 
@@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		bytes_sent += skb->len;
 
-		/* If there's room in the queue (limit it to rx_buffer_size)
-		 * we add this skb back into the pool, if it's the right size
-		 */
-		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
-		    skb_recycle_check(skb, priv->rx_buffer_size +
-				      RXBUF_ALIGNMENT)) {
-			gfar_align_skb(skb);
-			skb_queue_head(&priv->rx_recycle, skb);
-		} else
-			dev_kfree_skb_any(skb);
+		dev_kfree_skb_any(skb);
 
 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
@@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 	if (!skb)
@@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 
 struct sk_buff *gfar_new_skb(struct net_device *dev)
 {
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb = NULL;
-
-	skb = skb_dequeue(&priv->rx_recycle);
-	if (!skb)
-		skb = gfar_alloc_skb(dev);
-
-	return skb;
+	return gfar_alloc_skb(dev);
 }
 
 static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			if (unlikely(!newskb))
 				newskb = skb;
 			else if (skb)
-				skb_queue_head(&priv->rx_recycle, skb);
+				dev_kfree_skb(skb);
 		} else {
 			/* Increment the number of packets */
 			rx_queue->stats.rx_packets++;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4141ef2ddafc..22eabc13ca99 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1080,8 +1080,6 @@ struct gfar_private {
 
 	u32 cur_filer_idx;
 
-	struct sk_buff_head rx_recycle;
-
 	/* RX queue filer rule set*/
 	struct ethtool_rx_list rx_list;
 	struct mutex rx_queue_access;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 164288439220..0a70bb55d1b0 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh)
 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 				   u8 __iomem *bd)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
-	skb = __skb_dequeue(&ugeth->rx_recycle);
+	skb = netdev_alloc_skb(ugeth->ndev,
+			       ugeth->ug_info->uf_info.max_rx_buf_length +
+			       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
 	if (!skb)
-		skb = netdev_alloc_skb(ugeth->ndev,
-				      ugeth->ug_info->uf_info.max_rx_buf_length +
-				      UCC_GETH_RX_DATA_BUF_ALIGNMENT);
-	if (skb == NULL)
 		return NULL;
 
 	/* We need the data buffer to be aligned properly.  We will reserve
@@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		iounmap(ugeth->ug_regs);
 		ugeth->ug_regs = NULL;
 	}
-
-	skb_queue_purge(&ugeth->rx_recycle);
 }
 
 static void ucc_geth_set_multi(struct net_device *dev)
@@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 		return -ENOMEM;
 	}
 
-	skb_queue_head_init(&ugeth->rx_recycle);
-
 	return 0;
 }
 
@@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 			if (netif_msg_rx_err(ugeth))
 				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
 					   __func__, __LINE__, (u32) skb);
-			if (skb) {
-				skb->data = skb->head + NET_SKB_PAD;
-				skb->len = 0;
-				skb_reset_tail_pointer(skb);
-				__skb_queue_head(&ugeth->rx_recycle, skb);
-			}
+			dev_kfree_skb(skb);
 
 			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
 			dev->stats.rx_dropped++;
@@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 
 		dev->stats.tx_packets++;
 
-		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
-		    skb_recycle_check(skb,
-				    ugeth->ug_info->uf_info.max_rx_buf_length +
-				    UCC_GETH_RX_DATA_BUF_ALIGNMENT))
-			__skb_queue_head(&ugeth->rx_recycle, skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 
 		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
 		ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index f71b3e7b12de..75f337163ce3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
 	/* index of the first skb which hasn't been transmitted yet. */
 	u16 skb_dirtytx[NUM_TX_QUEUES];
 
-	struct sk_buff_head rx_recycle;
-
 	struct ugeth_mii_info *mii_info;
 	struct phy_device *phydev;
 	phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index b8e46cc31e53..6be7b9839f35 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -35,7 +35,6 @@
 #include <linux/if_vlan.h>
 
 #include <asm/ibmebus.h>
-#include <asm/abs_addr.h>
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
index 30f903332e92..d3a130ccdcc8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
@@ -141,7 +141,7 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
 				       qp_category,		/* R5 */
 				       qp_handle,		/* R6 */
 				       sel_mask,		/* R7 */
-				       virt_to_abs(cb_addr),	/* R8 */
+				       __pa(cb_addr),		/* R8 */
 				       0, 0);
 }
 
@@ -415,7 +415,7 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
 				 (u64) cat,		/* R5 */
 				 qp_handle,		/* R6 */
 				 sel_mask,		/* R7 */
-				 virt_to_abs(cb_addr),	/* R8 */
+				 __pa(cb_addr),		/* R8 */
 				 0, 0, 0, 0);		/* R9-R12 */
 
 	*inv_attr_id = outs[0];
@@ -528,7 +528,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
 {
 	u64 hret, cb_logaddr;
 
-	cb_logaddr = virt_to_abs(cb_addr);
+	cb_logaddr = __pa(cb_addr);
 
 	hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
 				       adapter_handle,		/* R4 */
@@ -545,7 +545,7 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
 			   void *cb_addr)
 {
 	u64 port_info;
-	u64 cb_logaddr = virt_to_abs(cb_addr);
+	u64 cb_logaddr = __pa(cb_addr);
 	u64 arr_index = 0;
 
 	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
@@ -567,7 +567,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
 	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 	u64 port_info;
 	u64 arr_index = 0;
-	u64 cb_logaddr = virt_to_abs(cb_addr);
+	u64 cb_logaddr = __pa(cb_addr);
 
 	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
 		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
@@ -621,6 +621,6 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
 	return ehea_plpar_hcall_norets(H_ERROR_DATA,
 				       adapter_handle,		/* R4 */
 				       ressource_handle,	/* R5 */
-				       virt_to_abs(rblock),	/* R6 */
+				       __pa(rblock),		/* R6 */
 				       0, 0, 0, 0);		/* R7-R12 */
 }
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index cb66f574dc97..27f881758d16 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -163,7 +163,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
 			goto out_kill_hwq;
 		}
 
-		rpage = virt_to_abs(vpage);
+		rpage = __pa(vpage);
 		hret = ehea_h_register_rpage(adapter->handle,
 					     0, EHEA_CQ_REGISTER_ORIG,
 					     cq->fw_handle, rpage, 1);
@@ -290,7 +290,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
 			goto out_kill_hwq;
 		}
 
-		rpage = virt_to_abs(vpage);
+		rpage = __pa(vpage);
 
 		hret = ehea_h_register_rpage(adapter->handle, 0,
 					     EHEA_EQ_REGISTER_ORIG,
@@ -395,7 +395,7 @@ static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
 			pr_err("hw_qpageit_get_inc failed\n");
 			goto out_kill_hwq;
 		}
-		rpage = virt_to_abs(vpage);
+		rpage = __pa(vpage);
 		hret = ehea_h_register_rpage(adapter->handle,
 					     0, h_call_q_selector,
 					     qp->fw_handle, rpage, 1);
@@ -790,7 +790,7 @@ u64 ehea_map_vaddr(void *caddr)
 	if (!ehea_bmap)
 		return EHEA_INVAL_ADDR;
 
-	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+	index = __pa(caddr) >> SECTION_SIZE_BITS;
 	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
 	if (!ehea_bmap->top[top])
 		return EHEA_INVAL_ADDR;
@@ -812,7 +812,7 @@ static inline void *ehea_calc_sectbase(int top, int dir, int idx)
 	unsigned long ret = idx;
 	ret |= dir << EHEA_DIR_INDEX_SHIFT;
 	ret |= top << EHEA_TOP_INDEX_SHIFT;
-	return abs_to_virt(ret << SECTION_SIZE_BITS);
+	return __va(ret << SECTION_SIZE_BITS);
 }
 
 static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
@@ -822,7 +822,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
 	void *pg;
 	u64 j, m, hret;
 	unsigned long k = 0;
-	u64 pt_abs = virt_to_abs(pt);
+	u64 pt_abs = __pa(pt);
 
 	void *sectbase = ehea_calc_sectbase(top, dir, idx);
 
@@ -830,7 +830,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
 
 	for (m = 0; m < EHEA_MAX_RPAGE; m++) {
 		pg = sectbase + ((k++) * EHEA_PAGESIZE);
-		pt[m] = virt_to_abs(pg);
+		pt[m] = __pa(pg);
 	}
 	hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
 					0, pt_abs, EHEA_MAX_RPAGE);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cb3356c9af80..04668b47a1df 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -175,13 +175,13 @@ struct e1000_info;
 /*
  * in the case of WTHRESH, it appears at least the 82571/2 hardware
  * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
- * WTHRESH=4, and since we want 64 bytes at a time written back, set
- * it to 5
+ * WTHRESH=4, so a setting of 5 gives the most efficient bus
+ * utilization but to avoid possible Tx stalls, set it to 1
  */
 #define E1000_TXDCTL_DMA_BURST_ENABLE \
 	(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
 	E1000_TXDCTL_COUNT_DESC | \
-	(5 << 16) | /* wthresh must be +1 more than desired */\
+	(1 << 16) | /* wthresh must be +1 more than desired */\
 	(1 << 8) | /* hthresh */ \
 	0x1f) /* pthresh */
 
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index ed5b40985edb..d37bfd96c987 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -412,6 +412,8 @@ enum e1e_registers {
 #define E1000_DEV_ID_PCH2_LV_V			0x1503
 #define E1000_DEV_ID_PCH_LPT_I217_LM		0x153A
 #define E1000_DEV_ID_PCH_LPT_I217_V		0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM		0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V		0x1559
 
 #define E1000_REVISION_4 4
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fb659dd8db03..f444eb0b76d8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 	 * set up some performance related parameters to encourage the
 	 * hardware to use the bus more efficiently in bursts, depends
 	 * on the tx_int_delay to be enabled,
-	 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
+	 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
 	 * hthresh = 1 ==> prefetch when one or more available
 	 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
 	 * BEWARE: this seems to work but should be considered first if
@@ -6558,6 +6558,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
 
 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5bd26763554c..30efc9f0f47a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -410,7 +410,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 #define IXGBE_TX_CTXTDESC(R, i)	    \
 	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
-#define IXGBE_MAX_JUMBO_FRAME_SIZE	16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
 #ifdef IXGBE_FCOE
 /* Use 3K as the baby jumbo frame size for FCoE */
 #define IXGBE_FCOE_JUMBO_FRAME_SIZE	3072
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 383b4e1cd175..4a9c9c285685 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -175,7 +175,7 @@ struct ixgbevf_q_vector {
 #define IXGBEVF_TX_CTXTDESC(R, i)	    \
 	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
-#define IXGBE_MAX_JUMBO_FRAME_SIZE	16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
 
 #define OTHER_VECTOR 1
 #define NON_Q_VECTORS (OTHER_VECTOR)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ee9bd4819f4..de1ad506665d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1747,6 +1747,7 @@ err_tx_ring_allocation:
  **/
 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
 	int err = 0;
 	int vector, v_budget;
 
@@ -1775,6 +1776,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
 
 	ixgbevf_acquire_msix_vectors(adapter, v_budget);
 
+	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	if (err)
+		goto out;
+
+	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+
 out:
 	return err;
 }
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index c911d883c27e..f8064df10cc4 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/pci-aspm.h>
30#include <linux/netdevice.h> 31#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
32#include <linux/ethtool.h> 33#include <linux/ethtool.h>
@@ -2973,6 +2974,9 @@ jme_init_one(struct pci_dev *pdev,
2973 /* 2974 /*
2974 * set up PCI device basics 2975 * set up PCI device basics
2975 */ 2976 */
2977 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2978 PCIE_LINK_STATE_CLKPM);
2979
2976 rc = pci_enable_device(pdev); 2980 rc = pci_enable_device(pdev);
2977 if (rc) { 2981 if (rc) {
2978 pr_err("Cannot enable PCI device\n"); 2982 pr_err("Cannot enable PCI device\n");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 087b9e0669f1..84c13263c514 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
412 u8 work_rx_refill; 412 u8 work_rx_refill;
413 413
414 int skb_size; 414 int skb_size;
415 struct sk_buff_head rx_recycle;
416 415
417 /* 416 /*
418 * RX state. 417 * RX state.
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
673 struct rx_desc *rx_desc; 672 struct rx_desc *rx_desc;
674 int size; 673 int size;
675 674
676 skb = __skb_dequeue(&mp->rx_recycle); 675 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
677 if (skb == NULL)
678 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
679 676
680 if (skb == NULL) { 677 if (skb == NULL) {
681 mp->oom = 1; 678 mp->oom = 1;
@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
989 desc->byte_cnt, DMA_TO_DEVICE); 986 desc->byte_cnt, DMA_TO_DEVICE);
990 } 987 }
991 988
992 if (skb != NULL) { 989 dev_kfree_skb(skb);
993 if (skb_queue_len(&mp->rx_recycle) <
994 mp->rx_ring_size &&
995 skb_recycle_check(skb, mp->skb_size))
996 __skb_queue_head(&mp->rx_recycle, skb);
997 else
998 dev_kfree_skb(skb);
999 }
1000 } 990 }
1001 991
1002 __netif_tx_unlock(nq); 992 __netif_tx_unlock(nq);
@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
2349 2339
2350 napi_enable(&mp->napi); 2340 napi_enable(&mp->napi);
2351 2341
2352 skb_queue_head_init(&mp->rx_recycle);
2353
2354 mp->int_mask = INT_EXT; 2342 mp->int_mask = INT_EXT;
2355 2343
2356 for (i = 0; i < mp->rxq_count; i++) { 2344 for (i = 0; i < mp->rxq_count; i++) {
@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
2445 mib_counters_update(mp); 2433 mib_counters_update(mp);
2446 del_timer_sync(&mp->mib_counters_timer); 2434 del_timer_sync(&mp->mib_counters_timer);
2447 2435
2448 skb_queue_purge(&mp->rx_recycle);
2449
2450 for (i = 0; i < mp->rxq_count; i++) 2436 for (i = 0; i < mp->rxq_count; i++)
2451 rxq_deinit(mp->rxq + i); 2437 rxq_deinit(mp->rxq + i);
2452 for (i = 0; i < mp->txq_count; i++) 2438 for (i = 0; i < mp->txq_count; i++)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5a30bf823099..9b9c2ac5c4c2 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3189,7 +3189,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3189 if (work_done < to_do) { 3189 if (work_done < to_do) {
3190 unsigned long flags; 3190 unsigned long flags;
3191 3191
3192 napi_gro_flush(napi); 3192 napi_gro_flush(napi, false);
3193 spin_lock_irqsave(&hw->hw_lock, flags); 3193 spin_lock_irqsave(&hw->hw_lock, flags);
3194 __napi_complete(napi); 3194 __napi_complete(napi);
3195 hw->intr_mask |= napimask[skge->port]; 3195 hw->intr_mask |= napimask[skge->port];
@@ -3945,8 +3945,10 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3945 skge_board_name(hw), hw->chip_rev); 3945 skge_board_name(hw), hw->chip_rev);
3946 3946
3947 dev = skge_devinit(hw, 0, using_dac); 3947 dev = skge_devinit(hw, 0, using_dac);
3948 if (!dev) 3948 if (!dev) {
3949 err = -ENOMEM;
3949 goto err_out_led_off; 3950 goto err_out_led_off;
3951 }
3950 3952
3951 /* Some motherboards are broken and has zero in ROM. */ 3953 /* Some motherboards are broken and has zero in ROM. */
3952 if (!is_valid_ether_addr(dev->dev_addr)) 3954 if (!is_valid_ether_addr(dev->dev_addr))
@@ -4153,6 +4155,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
4153 DMI_MATCH(DMI_BOARD_NAME, "nForce"), 4155 DMI_MATCH(DMI_BOARD_NAME, "nForce"),
4154 }, 4156 },
4155 }, 4157 },
4158 {
4159 .ident = "ASUS P5NSLI",
4160 .matches = {
4161 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
4162 DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
4163 },
4164 },
4156 {} 4165 {}
4157}; 4166};
4158 4167
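The new DMI entry above makes skge fall back to a 32-bit DMA mask on the ASUS P5NSLI board, the same way the existing nForce quirk does. A minimal user-space sketch of that table-driven matching; the substring semantics are patterned after dmi_check_system()/DMI_MATCH and the helper names are illustrative only, not driver code.

#include <stdio.h>
#include <string.h>

struct dmi_quirk {
	const char *ident;
	const char *board_vendor;   /* substring that must appear */
	const char *board_name;
};

static const struct dmi_quirk skge_32bit_dma_boards[] = {
	{ "ASUS P5NSLI", "ASUSTeK Computer INC.", "P5NSLI" },
	{ NULL, NULL, NULL }
};

static const struct dmi_quirk *dmi_check(const char *vendor, const char *name)
{
	for (const struct dmi_quirk *q = skge_32bit_dma_boards; q->ident; q++)
		if (strstr(vendor, q->board_vendor) && strstr(name, q->board_name))
			return q;
	return NULL;
}

int main(void)
{
	/* Values would normally come from /sys/class/dmi/id/board_{vendor,name}. */
	const struct dmi_quirk *q = dmi_check("ASUSTeK Computer INC.", "P5NSLI");
	printf("use 32-bit DMA mask: %s\n", q ? q->ident : "no");
	return 0;
}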
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 2b0748dba8b8..78946feab4a2 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4924,6 +4924,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4924 4924
4925 if (~reg == 0) { 4925 if (~reg == 0) {
4926 dev_err(&pdev->dev, "PCI configuration read error\n"); 4926 dev_err(&pdev->dev, "PCI configuration read error\n");
4927 err = -EIO;
4927 goto err_out; 4928 goto err_out;
4928 } 4929 }
4929 4930
@@ -4993,8 +4994,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4993 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); 4994 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
4994 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), 4995 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4995 &hw->st_dma); 4996 &hw->st_dma);
4996 if (!hw->st_le) 4997 if (!hw->st_le) {
4998 err = -ENOMEM;
4997 goto err_out_reset; 4999 goto err_out_reset;
5000 }
4998 5001
4999 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", 5002 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
5000 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); 5003 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index ba6506ff4abb..926c911c0ac4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3094,6 +3094,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3094 if (validate_eth_header_mac(slave, rule_header, rlist)) 3094 if (validate_eth_header_mac(slave, rule_header, rlist))
3095 return -EINVAL; 3095 return -EINVAL;
3096 break; 3096 break;
3097 case MLX4_NET_TRANS_RULE_ID_IB:
3098 break;
3097 case MLX4_NET_TRANS_RULE_ID_IPV4: 3099 case MLX4_NET_TRANS_RULE_ID_IPV4:
3098 case MLX4_NET_TRANS_RULE_ID_TCP: 3100 case MLX4_NET_TRANS_RULE_ID_TCP:
3099 case MLX4_NET_TRANS_RULE_ID_UDP: 3101 case MLX4_NET_TRANS_RULE_ID_UDP:
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 5b61d12f8b91..dbaaa99a0d43 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -947,8 +947,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
947 i = register_netdev(dev); 947 i = register_netdev(dev);
948 if (i) 948 if (i)
949 goto err_register_netdev; 949 goto err_register_netdev;
950 950 i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
951 if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround)) 951 if (i)
952 goto err_create_file; 952 goto err_create_file;
953 953
954 if (netif_msg_drv(np)) { 954 if (netif_msg_drv(np)) {
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e01c0a07a93a..7dfe88398d7d 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -205,6 +205,7 @@ static int __init sonic_probe1(struct net_device *dev)
205 if (lp->descriptors == NULL) { 205 if (lp->descriptors == NULL) {
206 printk(KERN_ERR "%s: couldn't alloc DMA memory for " 206 printk(KERN_ERR "%s: couldn't alloc DMA memory for "
207 " descriptors.\n", dev_name(lp->device)); 207 " descriptors.\n", dev_name(lp->device));
208 err = -ENOMEM;
208 goto out; 209 goto out;
209 } 210 }
210 211
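Several hunks in this series (xtsonic above, and sky2, niu, sis900, sh_eth, sungem, qlcnic below) fix the same class of bug: jumping to a cleanup label without first setting the error code, so probe can return 0 or a stale value after a failure. A tiny stand-alone sketch of the before/after shape; the function and helper names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

#define ENOMEM 12

static void *alloc_descriptors(int fail) { return fail ? NULL : malloc(64); }

/* Buggy shape: 'err' still holds the value of an earlier, successful step. */
static int probe_buggy(int fail)
{
	int err = 0;
	void *desc = alloc_descriptors(fail);
	if (desc == NULL)
		goto out;            /* returns 0 even though we failed */
out:
	free(desc);
	return err;
}

/* Fixed shape, as in the patches: set the code before the goto. */
static int probe_fixed(int fail)
{
	int err = 0;
	void *desc = alloc_descriptors(fail);
	if (desc == NULL) {
		err = -ENOMEM;
		goto out;
	}
out:
	free(desc);
	return err;
}

int main(void)
{
	printf("buggy: %d, fixed: %d\n", probe_buggy(1), probe_fixed(1));
	return 0;
}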
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index a688a2ddcfd6..f97719c48516 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -3,13 +3,14 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2009 Cavium Networks 6 * Copyright (C) 2009-2012 Cavium, Inc
7 */ 7 */
8 8
9#include <linux/platform_device.h> 9#include <linux/platform_device.h>
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/capability.h> 12#include <linux/capability.h>
13#include <linux/net_tstamp.h>
13#include <linux/interrupt.h> 14#include <linux/interrupt.h>
14#include <linux/netdevice.h> 15#include <linux/netdevice.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -33,8 +34,7 @@
33 34
34#define OCTEON_MGMT_NAPI_WEIGHT 16 35#define OCTEON_MGMT_NAPI_WEIGHT 16
35 36
36/* 37/* Ring sizes that are powers of two allow for more efficient modulo
37 * Ring sizes that are powers of two allow for more efficient modulo
 38 * operations. 38 * operations.
39 */ 39 */
40#define OCTEON_MGMT_RX_RING_SIZE 512 40#define OCTEON_MGMT_RX_RING_SIZE 512
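The comment above is why both ring sizes stay powers of two: the ring index can then wrap with a single bitwise AND instead of a division. A small stand-alone illustration (the constant mirrors the define above):

#include <assert.h>
#include <stdio.h>

#define RX_RING_SIZE 512   /* must be a power of two */

int main(void)
{
	for (unsigned next = 0; next < 2 * RX_RING_SIZE; next++) {
		unsigned slow = next % RX_RING_SIZE;          /* division */
		unsigned fast = next & (RX_RING_SIZE - 1);    /* single AND */
		assert(slow == fast);
	}
	printf("index wrap via AND matches %% for all tested values\n");
	return 0;
}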
@@ -93,6 +93,7 @@ union mgmt_port_ring_entry {
93#define AGL_GMX_RX_ADR_CAM4 0x1a0 93#define AGL_GMX_RX_ADR_CAM4 0x1a0
94#define AGL_GMX_RX_ADR_CAM5 0x1a8 94#define AGL_GMX_RX_ADR_CAM5 0x1a8
95 95
96#define AGL_GMX_TX_CLK 0x208
96#define AGL_GMX_TX_STATS_CTL 0x268 97#define AGL_GMX_TX_STATS_CTL 0x268
97#define AGL_GMX_TX_CTL 0x270 98#define AGL_GMX_TX_CTL 0x270
98#define AGL_GMX_TX_STAT0 0x280 99#define AGL_GMX_TX_STAT0 0x280
@@ -110,8 +111,10 @@ struct octeon_mgmt {
110 struct net_device *netdev; 111 struct net_device *netdev;
111 u64 mix; 112 u64 mix;
112 u64 agl; 113 u64 agl;
114 u64 agl_prt_ctl;
113 int port; 115 int port;
114 int irq; 116 int irq;
117 bool has_rx_tstamp;
115 u64 *tx_ring; 118 u64 *tx_ring;
116 dma_addr_t tx_ring_handle; 119 dma_addr_t tx_ring_handle;
117 unsigned int tx_next; 120 unsigned int tx_next;
@@ -131,6 +134,7 @@ struct octeon_mgmt {
131 spinlock_t lock; 134 spinlock_t lock;
132 unsigned int last_duplex; 135 unsigned int last_duplex;
133 unsigned int last_link; 136 unsigned int last_link;
137 unsigned int last_speed;
134 struct device *dev; 138 struct device *dev;
135 struct napi_struct napi; 139 struct napi_struct napi;
136 struct tasklet_struct tx_clean_tasklet; 140 struct tasklet_struct tx_clean_tasklet;
@@ -140,6 +144,8 @@ struct octeon_mgmt {
140 resource_size_t mix_size; 144 resource_size_t mix_size;
141 resource_size_t agl_phys; 145 resource_size_t agl_phys;
142 resource_size_t agl_size; 146 resource_size_t agl_size;
147 resource_size_t agl_prt_ctl_phys;
148 resource_size_t agl_prt_ctl_size;
143}; 149};
144 150
145static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) 151static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
@@ -166,22 +172,22 @@ static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
166 spin_unlock_irqrestore(&p->lock, flags); 172 spin_unlock_irqrestore(&p->lock, flags);
167} 173}
168 174
169static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) 175static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
170{ 176{
171 octeon_mgmt_set_rx_irq(p, 1); 177 octeon_mgmt_set_rx_irq(p, 1);
172} 178}
173 179
174static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) 180static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
175{ 181{
176 octeon_mgmt_set_rx_irq(p, 0); 182 octeon_mgmt_set_rx_irq(p, 0);
177} 183}
178 184
179static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) 185static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
180{ 186{
181 octeon_mgmt_set_tx_irq(p, 1); 187 octeon_mgmt_set_tx_irq(p, 1);
182} 188}
183 189
184static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) 190static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
185{ 191{
186 octeon_mgmt_set_tx_irq(p, 0); 192 octeon_mgmt_set_tx_irq(p, 0);
187} 193}
@@ -233,6 +239,28 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
233 } 239 }
234} 240}
235 241
242static ktime_t ptp_to_ktime(u64 ptptime)
243{
244 ktime_t ktimebase;
245 u64 ptpbase;
246 unsigned long flags;
247
248 local_irq_save(flags);
249 /* Fill the icache with the code */
250 ktime_get_real();
251 /* Flush all pending operations */
252 mb();
253 /* Read the time and PTP clock as close together as
254 * possible. It is important that this sequence take the same
255 * amount of time to reduce jitter
256 */
257 ktimebase = ktime_get_real();
258 ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
259 local_irq_restore(flags);
260
261 return ktime_sub_ns(ktimebase, ptpbase - ptptime);
262}
263
236static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) 264static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
237{ 265{
238 union cvmx_mixx_orcnt mix_orcnt; 266 union cvmx_mixx_orcnt mix_orcnt;
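The new ptp_to_ktime() above pins a raw PTP counter value to system time by reading both clocks back-to-back with interrupts off, after warming the icache so the paired reads take a constant, minimal interval. Below is a user-space analogue of the same trick, offered only as a sketch: CLOCK_MONOTONIC_RAW stands in for the PTP counter, and user space cannot disable interrupts, so the jitter bound is much weaker than in the driver.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t ts_ns(struct timespec ts)
{
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	struct timespec real, raw;

	/* Warm up the code path so the paired reads below are fast. */
	clock_gettime(CLOCK_REALTIME, &real);

	/* Read the two clocks as close together as possible. */
	clock_gettime(CLOCK_REALTIME, &real);
	clock_gettime(CLOCK_MONOTONIC_RAW, &raw);

	/* The offset lets a later raw reading be mapped onto wall-clock time,
	 * just as ktime_sub_ns(ktimebase, ptpbase - ptptime) does above. */
	uint64_t offset = ts_ns(real) - ts_ns(raw);

	clock_gettime(CLOCK_MONOTONIC_RAW, &raw);
	printf("estimated wall clock for this raw reading: %llu ns\n",
	       (unsigned long long)(ts_ns(raw) + offset));
	return 0;
}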
@@ -272,6 +300,20 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
272 300
273 dma_unmap_single(p->dev, re.s.addr, re.s.len, 301 dma_unmap_single(p->dev, re.s.addr, re.s.len,
274 DMA_TO_DEVICE); 302 DMA_TO_DEVICE);
303
304 /* Read the hardware TX timestamp if one was recorded */
305 if (unlikely(re.s.tstamp)) {
306 struct skb_shared_hwtstamps ts;
307 /* Read the timestamp */
308 u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
309 /* Remove the timestamp from the FIFO */
310 cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
311 /* Tell the kernel about the timestamp */
312 ts.syststamp = ptp_to_ktime(ns);
313 ts.hwtstamp = ns_to_ktime(ns);
314 skb_tstamp_tx(skb, &ts);
315 }
316
275 dev_kfree_skb_any(skb); 317 dev_kfree_skb_any(skb);
276 cleaned++; 318 cleaned++;
277 319
@@ -372,14 +414,23 @@ static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
372 /* A good packet, send it up. */ 414 /* A good packet, send it up. */
373 skb_put(skb, re.s.len); 415 skb_put(skb, re.s.len);
374good: 416good:
417 /* Process the RX timestamp if it was recorded */
418 if (p->has_rx_tstamp) {
419 /* The first 8 bytes are the timestamp */
420 u64 ns = *(u64 *)skb->data;
421 struct skb_shared_hwtstamps *ts;
422 ts = skb_hwtstamps(skb);
423 ts->hwtstamp = ns_to_ktime(ns);
424 ts->syststamp = ptp_to_ktime(ns);
425 __skb_pull(skb, 8);
426 }
375 skb->protocol = eth_type_trans(skb, netdev); 427 skb->protocol = eth_type_trans(skb, netdev);
376 netdev->stats.rx_packets++; 428 netdev->stats.rx_packets++;
377 netdev->stats.rx_bytes += skb->len; 429 netdev->stats.rx_bytes += skb->len;
378 netif_receive_skb(skb); 430 netif_receive_skb(skb);
379 rc = 0; 431 rc = 0;
380 } else if (re.s.code == RING_ENTRY_CODE_MORE) { 432 } else if (re.s.code == RING_ENTRY_CODE_MORE) {
381 /* 433 /* Packet split across skbs. This can happen if we
382 * Packet split across skbs. This can happen if we
383 * increase the MTU. Buffers that are already in the 434 * increase the MTU. Buffers that are already in the
384 * rx ring can then end up being too small. As the rx 435 * rx ring can then end up being too small. As the rx
385 * ring is refilled, buffers sized for the new MTU 436 * ring is refilled, buffers sized for the new MTU
@@ -409,8 +460,7 @@ good:
409 } else { 460 } else {
410 /* Some other error, discard it. */ 461 /* Some other error, discard it. */
411 dev_kfree_skb_any(skb); 462 dev_kfree_skb_any(skb);
412 /* 463 /* Error statistics are accumulated in
413 * Error statistics are accumulated in
414 * octeon_mgmt_update_rx_stats. 464 * octeon_mgmt_update_rx_stats.
415 */ 465 */
416 } 466 }
@@ -488,7 +538,7 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
488 mix_ctl.s.reset = 1; 538 mix_ctl.s.reset = 1;
489 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); 539 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
490 cvmx_read_csr(p->mix + MIX_CTL); 540 cvmx_read_csr(p->mix + MIX_CTL);
491 cvmx_wait(64); 541 octeon_io_clk_delay(64);
492 542
493 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); 543 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
494 if (mix_bist.u64) 544 if (mix_bist.u64)
@@ -537,8 +587,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
537 cam_mode = 0; 587 cam_mode = 0;
538 available_cam_entries = 8; 588 available_cam_entries = 8;
539 } else { 589 } else {
540 /* 590 /* One CAM entry for the primary address, leaves seven
541 * One CAM entry for the primary address, leaves seven
542 * for the secondary addresses. 591 * for the secondary addresses.
543 */ 592 */
544 available_cam_entries = 7 - netdev->uc.count; 593 available_cam_entries = 7 - netdev->uc.count;
@@ -595,12 +644,10 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
595 644
596static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) 645static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
597{ 646{
598 struct sockaddr *sa = addr; 647 int r = eth_mac_addr(netdev, addr);
599 648
600 if (!is_valid_ether_addr(sa->sa_data)) 649 if (r)
601 return -EADDRNOTAVAIL; 650 return r;
602
603 memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
604 651
605 octeon_mgmt_set_rx_filtering(netdev); 652 octeon_mgmt_set_rx_filtering(netdev);
606 653
@@ -612,8 +659,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
612 struct octeon_mgmt *p = netdev_priv(netdev); 659 struct octeon_mgmt *p = netdev_priv(netdev);
613 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; 660 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
614 661
615 /* 662 /* Limit the MTU to make sure the ethernet packets are between
616 * Limit the MTU to make sure the ethernet packets are between
617 * 64 bytes and 16383 bytes. 663 * 64 bytes and 16383 bytes.
618 */ 664 */
619 if (size_without_fcs < 64 || size_without_fcs > 16383) { 665 if (size_without_fcs < 64 || size_without_fcs > 16383) {
@@ -656,53 +702,258 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
656 return IRQ_HANDLED; 702 return IRQ_HANDLED;
657} 703}
658 704
705static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
706 struct ifreq *rq, int cmd)
707{
708 struct octeon_mgmt *p = netdev_priv(netdev);
709 struct hwtstamp_config config;
710 union cvmx_mio_ptp_clock_cfg ptp;
711 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
712 bool have_hw_timestamps = false;
713
714 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
715 return -EFAULT;
716
717 if (config.flags) /* reserved for future extensions */
718 return -EINVAL;
719
 720 /* Check whether the hardware supports timestamps */
721 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
722 /* Get the current state of the PTP clock */
723 ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
724 if (!ptp.s.ext_clk_en) {
725 /* The clock has not been configured to use an
726 * external source. Program it to use the main clock
727 * reference.
728 */
729 u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
730 if (!ptp.s.ptp_en)
731 cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
732 pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
733 (NSEC_PER_SEC << 32) / clock_comp);
734 } else {
735 /* The clock is already programmed to use a GPIO */
736 u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
737 pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
738 ptp.s.ext_clk_in,
739 (NSEC_PER_SEC << 32) / clock_comp);
740 }
741
742 /* Enable the clock if it wasn't done already */
743 if (!ptp.s.ptp_en) {
744 ptp.s.ptp_en = 1;
745 cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
746 }
747 have_hw_timestamps = true;
748 }
749
750 if (!have_hw_timestamps)
751 return -EINVAL;
752
753 switch (config.tx_type) {
754 case HWTSTAMP_TX_OFF:
755 case HWTSTAMP_TX_ON:
756 break;
757 default:
758 return -ERANGE;
759 }
760
761 switch (config.rx_filter) {
762 case HWTSTAMP_FILTER_NONE:
763 p->has_rx_tstamp = false;
764 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
765 rxx_frm_ctl.s.ptp_mode = 0;
766 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
767 break;
768 case HWTSTAMP_FILTER_ALL:
769 case HWTSTAMP_FILTER_SOME:
770 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
771 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
772 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
773 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
774 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
775 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
776 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
777 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
778 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
779 case HWTSTAMP_FILTER_PTP_V2_EVENT:
780 case HWTSTAMP_FILTER_PTP_V2_SYNC:
781 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
782 p->has_rx_tstamp = have_hw_timestamps;
783 config.rx_filter = HWTSTAMP_FILTER_ALL;
784 if (p->has_rx_tstamp) {
785 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
786 rxx_frm_ctl.s.ptp_mode = 1;
787 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
788 }
789 break;
790 default:
791 return -ERANGE;
792 }
793
794 if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
795 return -EFAULT;
796
797 return 0;
798}
799
659static int octeon_mgmt_ioctl(struct net_device *netdev, 800static int octeon_mgmt_ioctl(struct net_device *netdev,
660 struct ifreq *rq, int cmd) 801 struct ifreq *rq, int cmd)
661{ 802{
662 struct octeon_mgmt *p = netdev_priv(netdev); 803 struct octeon_mgmt *p = netdev_priv(netdev);
663 804
664 if (!netif_running(netdev)) 805 switch (cmd) {
806 case SIOCSHWTSTAMP:
807 return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
808 default:
809 if (p->phydev)
810 return phy_mii_ioctl(p->phydev, rq, cmd);
665 return -EINVAL; 811 return -EINVAL;
812 }
813}
666 814
667 if (!p->phydev) 815static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
668 return -EINVAL; 816{
817 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
669 818
670 return phy_mii_ioctl(p->phydev, rq, cmd); 819 /* Disable GMX before we make any changes. */
820 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
821 prtx_cfg.s.en = 0;
822 prtx_cfg.s.tx_en = 0;
823 prtx_cfg.s.rx_en = 0;
824 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
825
826 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
827 int i;
828 for (i = 0; i < 10; i++) {
829 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
830 if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
831 break;
832 mdelay(1);
833 i++;
834 }
835 }
836}
837
838static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
839{
840 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
841
842 /* Restore the GMX enable state only if link is set */
843 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
844 prtx_cfg.s.tx_en = 1;
845 prtx_cfg.s.rx_en = 1;
846 prtx_cfg.s.en = 1;
847 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
848}
849
850static void octeon_mgmt_update_link(struct octeon_mgmt *p)
851{
852 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
853
854 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
855
856 if (!p->phydev->link)
857 prtx_cfg.s.duplex = 1;
858 else
859 prtx_cfg.s.duplex = p->phydev->duplex;
860
861 switch (p->phydev->speed) {
862 case 10:
863 prtx_cfg.s.speed = 0;
864 prtx_cfg.s.slottime = 0;
865
866 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
867 prtx_cfg.s.burst = 1;
868 prtx_cfg.s.speed_msb = 1;
869 }
870 break;
871 case 100:
872 prtx_cfg.s.speed = 0;
873 prtx_cfg.s.slottime = 0;
874
875 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
876 prtx_cfg.s.burst = 1;
877 prtx_cfg.s.speed_msb = 0;
878 }
879 break;
880 case 1000:
881 /* 1000 MBits is only supported on 6XXX chips */
882 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
883 prtx_cfg.s.speed = 1;
884 prtx_cfg.s.speed_msb = 0;
885 /* Only matters for half-duplex */
886 prtx_cfg.s.slottime = 1;
887 prtx_cfg.s.burst = p->phydev->duplex;
888 }
889 break;
890 case 0: /* No link */
891 default:
892 break;
893 }
894
895 /* Write the new GMX setting with the port still disabled. */
896 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
897
898 /* Read GMX CFG again to make sure the config is completed. */
899 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
900
901 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
902 union cvmx_agl_gmx_txx_clk agl_clk;
903 union cvmx_agl_prtx_ctl prtx_ctl;
904
905 prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
906 agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
907 /* MII (both speeds) and RGMII 1000 speed. */
908 agl_clk.s.clk_cnt = 1;
909 if (prtx_ctl.s.mode == 0) { /* RGMII mode */
910 if (p->phydev->speed == 10)
911 agl_clk.s.clk_cnt = 50;
912 else if (p->phydev->speed == 100)
913 agl_clk.s.clk_cnt = 5;
914 }
915 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
916 }
671} 917}
672 918
673static void octeon_mgmt_adjust_link(struct net_device *netdev) 919static void octeon_mgmt_adjust_link(struct net_device *netdev)
674{ 920{
675 struct octeon_mgmt *p = netdev_priv(netdev); 921 struct octeon_mgmt *p = netdev_priv(netdev);
676 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
677 unsigned long flags; 922 unsigned long flags;
678 int link_changed = 0; 923 int link_changed = 0;
679 924
925 if (!p->phydev)
926 return;
927
680 spin_lock_irqsave(&p->lock, flags); 928 spin_lock_irqsave(&p->lock, flags);
681 if (p->phydev->link) { 929
682 if (!p->last_link) 930
683 link_changed = 1; 931 if (!p->phydev->link && p->last_link)
684 if (p->last_duplex != p->phydev->duplex) { 932 link_changed = -1;
685 p->last_duplex = p->phydev->duplex; 933
686 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); 934 if (p->phydev->link
687 prtx_cfg.s.duplex = p->phydev->duplex; 935 && (p->last_duplex != p->phydev->duplex
688 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); 936 || p->last_link != p->phydev->link
689 } 937 || p->last_speed != p->phydev->speed)) {
690 } else { 938 octeon_mgmt_disable_link(p);
691 if (p->last_link) 939 link_changed = 1;
692 link_changed = -1; 940 octeon_mgmt_update_link(p);
941 octeon_mgmt_enable_link(p);
693 } 942 }
943
694 p->last_link = p->phydev->link; 944 p->last_link = p->phydev->link;
945 p->last_speed = p->phydev->speed;
946 p->last_duplex = p->phydev->duplex;
947
695 spin_unlock_irqrestore(&p->lock, flags); 948 spin_unlock_irqrestore(&p->lock, flags);
696 949
697 if (link_changed != 0) { 950 if (link_changed != 0) {
698 if (link_changed > 0) { 951 if (link_changed > 0) {
699 netif_carrier_on(netdev);
700 pr_info("%s: Link is up - %d/%s\n", netdev->name, 952 pr_info("%s: Link is up - %d/%s\n", netdev->name,
701 p->phydev->speed, 953 p->phydev->speed,
702 DUPLEX_FULL == p->phydev->duplex ? 954 DUPLEX_FULL == p->phydev->duplex ?
703 "Full" : "Half"); 955 "Full" : "Half");
704 } else { 956 } else {
705 netif_carrier_off(netdev);
706 pr_info("%s: Link is down\n", netdev->name); 957 pr_info("%s: Link is down\n", netdev->name);
707 } 958 }
708 } 959 }
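The new octeon_mgmt_ioctl_hwtstamp() handler above is what services the standard SIOCSHWTSTAMP request. For reference, this is roughly how an application would turn hardware timestamping on for an interface; the ioctl and structures are the generic Linux API, and the interface name is only an example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "mgmt0";  /* example name */
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;             /* timestamp transmitted frames */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;      /* the driver may adjust this */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}
	/* The driver writes back the filter it actually enabled. */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}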
@@ -723,9 +974,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
723 PHY_INTERFACE_MODE_MII); 974 PHY_INTERFACE_MODE_MII);
724 975
725 if (!p->phydev) 976 if (!p->phydev)
726 return -1; 977 return -ENODEV;
727
728 phy_start_aneg(p->phydev);
729 978
730 return 0; 979 return 0;
731} 980}
@@ -733,12 +982,10 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
733static int octeon_mgmt_open(struct net_device *netdev) 982static int octeon_mgmt_open(struct net_device *netdev)
734{ 983{
735 struct octeon_mgmt *p = netdev_priv(netdev); 984 struct octeon_mgmt *p = netdev_priv(netdev);
736 int port = p->port;
737 union cvmx_mixx_ctl mix_ctl; 985 union cvmx_mixx_ctl mix_ctl;
738 union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; 986 union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
739 union cvmx_mixx_oring1 oring1; 987 union cvmx_mixx_oring1 oring1;
740 union cvmx_mixx_iring1 iring1; 988 union cvmx_mixx_iring1 iring1;
741 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
742 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; 989 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
743 union cvmx_mixx_irhwm mix_irhwm; 990 union cvmx_mixx_irhwm mix_irhwm;
744 union cvmx_mixx_orhwm mix_orhwm; 991 union cvmx_mixx_orhwm mix_orhwm;
@@ -785,9 +1032,30 @@ static int octeon_mgmt_open(struct net_device *netdev)
785 } while (mix_ctl.s.reset); 1032 } while (mix_ctl.s.reset);
786 } 1033 }
787 1034
788 agl_gmx_inf_mode.u64 = 0; 1035 if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
789 agl_gmx_inf_mode.s.en = 1; 1036 agl_gmx_inf_mode.u64 = 0;
790 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); 1037 agl_gmx_inf_mode.s.en = 1;
1038 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1039 }
1040 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1041 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
1042 /* Force compensation values, as they are not
1043 * determined properly by HW
1044 */
1045 union cvmx_agl_gmx_drv_ctl drv_ctl;
1046
1047 drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
1048 if (p->port) {
1049 drv_ctl.s.byp_en1 = 1;
1050 drv_ctl.s.nctl1 = 6;
1051 drv_ctl.s.pctl1 = 6;
1052 } else {
1053 drv_ctl.s.byp_en = 1;
1054 drv_ctl.s.nctl = 6;
1055 drv_ctl.s.pctl = 6;
1056 }
1057 cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
1058 }
791 1059
792 oring1.u64 = 0; 1060 oring1.u64 = 0;
793 oring1.s.obase = p->tx_ring_handle >> 3; 1061 oring1.s.obase = p->tx_ring_handle >> 3;
@@ -799,18 +1067,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
799 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; 1067 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
800 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); 1068 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
801 1069
802 /* Disable packet I/O. */
803 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
804 prtx_cfg.s.en = 0;
805 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
806
807 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); 1070 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
808 octeon_mgmt_set_mac_address(netdev, &sa); 1071 octeon_mgmt_set_mac_address(netdev, &sa);
809 1072
810 octeon_mgmt_change_mtu(netdev, netdev->mtu); 1073 octeon_mgmt_change_mtu(netdev, netdev->mtu);
811 1074
812 /* 1075 /* Enable the port HW. Packets are not allowed until
813 * Enable the port HW. Packets are not allowed until
814 * cvmx_mgmt_port_enable() is called. 1076 * cvmx_mgmt_port_enable() is called.
815 */ 1077 */
816 mix_ctl.u64 = 0; 1078 mix_ctl.u64 = 0;
@@ -819,27 +1081,70 @@ static int octeon_mgmt_open(struct net_device *netdev)
819 mix_ctl.s.nbtarb = 0; /* Arbitration mode */ 1081 mix_ctl.s.nbtarb = 0; /* Arbitration mode */
820 /* MII CB-request FIFO programmable high watermark */ 1082 /* MII CB-request FIFO programmable high watermark */
821 mix_ctl.s.mrq_hwm = 1; 1083 mix_ctl.s.mrq_hwm = 1;
1084#ifdef __LITTLE_ENDIAN
1085 mix_ctl.s.lendian = 1;
1086#endif
822 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); 1087 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
823 1088
824 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) 1089 /* Read the PHY to find the mode of the interface. */
825 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { 1090 if (octeon_mgmt_init_phy(netdev)) {
826 /* 1091 dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
827 * Force compensation values, as they are not 1092 goto err_noirq;
828 * determined properly by HW 1093 }
829 */
830 union cvmx_agl_gmx_drv_ctl drv_ctl;
831 1094
832 drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); 1095 /* Set the mode of the interface, RGMII/MII. */
833 if (port) { 1096 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
834 drv_ctl.s.byp_en1 = 1; 1097 union cvmx_agl_prtx_ctl agl_prtx_ctl;
835 drv_ctl.s.nctl1 = 6; 1098 int rgmii_mode = (p->phydev->supported &
836 drv_ctl.s.pctl1 = 6; 1099 (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
837 } else { 1100
838 drv_ctl.s.byp_en = 1; 1101 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
839 drv_ctl.s.nctl = 6; 1102 agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
840 drv_ctl.s.pctl = 6; 1103 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1104
1105 /* MII clocks counts are based on the 125Mhz
1106 * reference, which has an 8nS period. So our delays
1107 * need to be multiplied by this factor.
1108 */
1109#define NS_PER_PHY_CLK 8
1110
1111 /* Take the DLL and clock tree out of reset */
1112 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1113 agl_prtx_ctl.s.clkrst = 0;
1114 if (rgmii_mode) {
1115 agl_prtx_ctl.s.dllrst = 0;
1116 agl_prtx_ctl.s.clktx_byp = 0;
841 } 1117 }
842 cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); 1118 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1119 cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
1120
1121 /* Wait for the DLL to lock. External 125 MHz
1122 * reference clock must be stable at this point.
1123 */
1124 ndelay(256 * NS_PER_PHY_CLK);
1125
1126 /* Enable the interface */
1127 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1128 agl_prtx_ctl.s.enable = 1;
1129 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1130
1131 /* Read the value back to force the previous write */
1132 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1133
1134 /* Enable the compensation controller */
1135 agl_prtx_ctl.s.comp = 1;
1136 agl_prtx_ctl.s.drv_byp = 0;
1137 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1138 /* Force write out before wait. */
1139 cvmx_read_csr(p->agl_prt_ctl);
1140
 1141 /* Wait for the compensation state to lock. */
1142 ndelay(1040 * NS_PER_PHY_CLK);
1143
1144 /* Some Ethernet switches cannot handle standard
1145 * Interframe Gap, increase to 16 bytes.
1146 */
1147 cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
843 } 1148 }
844 1149
845 octeon_mgmt_rx_fill_ring(netdev); 1150 octeon_mgmt_rx_fill_ring(netdev);
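Two of the constants in this hunk and the hwtstamp hunk above are easy to sanity-check by hand: CLOCK_COMP is a 32.32 fixed-point count of nanoseconds added per I/O clock tick, computed as (NSEC_PER_SEC << 32) / rate, and NS_PER_PHY_CLK is 8 because the 125 MHz RGMII reference has an 8 ns period. A small sketch of that arithmetic; the 800 MHz rate is only an example, not a value taken from the driver.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

int main(void)
{
	uint64_t io_clock_hz = 800000000ull;          /* example sclk rate */
	uint64_t clock_comp = (NSEC_PER_SEC << 32) / io_clock_hz;

	/* Integer ns per tick plus the fraction kept in the low 32 bits. */
	printf("clock_comp = 0x%" PRIx64 " (%" PRIu64 ".%09" PRIu64 " ns/tick)\n",
	       clock_comp, clock_comp >> 32,
	       ((clock_comp & 0xffffffffull) * NSEC_PER_SEC) >> 32);

	/* Recovered the same way the driver reports the reference rate. */
	printf("effective rate = %" PRIu64 " Hz\n", (NSEC_PER_SEC << 32) / clock_comp);

	/* RGMII reference: 125 MHz -> 8 ns per PHY clock, so the DLL settle
	 * delay of 256 clocks works out to 2048 ns. */
	printf("256 phy clocks = %d ns\n", 256 * 8);
	return 0;
}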
@@ -870,7 +1175,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
870 1175
871 /* Interrupt when we have 1 or more packets to clean. */ 1176 /* Interrupt when we have 1 or more packets to clean. */
872 mix_orhwm.u64 = 0; 1177 mix_orhwm.u64 = 0;
873 mix_orhwm.s.orhwm = 1; 1178 mix_orhwm.s.orhwm = 0;
874 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); 1179 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
875 1180
876 /* Enable receive and transmit interrupts */ 1181 /* Enable receive and transmit interrupts */
@@ -879,13 +1184,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
879 mix_intena.s.othena = 1; 1184 mix_intena.s.othena = 1;
880 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); 1185 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
881 1186
882
883 /* Enable packet I/O. */ 1187 /* Enable packet I/O. */
884 1188
885 rxx_frm_ctl.u64 = 0; 1189 rxx_frm_ctl.u64 = 0;
1190 rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
886 rxx_frm_ctl.s.pre_align = 1; 1191 rxx_frm_ctl.s.pre_align = 1;
887 /* 1192 /* When set, disables the length check for non-min sized pkts
888 * When set, disables the length check for non-min sized pkts
889 * with padding in the client data. 1193 * with padding in the client data.
890 */ 1194 */
891 rxx_frm_ctl.s.pad_len = 1; 1195 rxx_frm_ctl.s.pad_len = 1;
@@ -903,33 +1207,26 @@ static int octeon_mgmt_open(struct net_device *netdev)
903 rxx_frm_ctl.s.ctl_drp = 1; 1207 rxx_frm_ctl.s.ctl_drp = 1;
904 /* Strip off the preamble */ 1208 /* Strip off the preamble */
905 rxx_frm_ctl.s.pre_strp = 1; 1209 rxx_frm_ctl.s.pre_strp = 1;
906 /* 1210 /* This port is configured to send PREAMBLE+SFD to begin every
907 * This port is configured to send PREAMBLE+SFD to begin every
908 * frame. GMX checks that the PREAMBLE is sent correctly. 1211 * frame. GMX checks that the PREAMBLE is sent correctly.
909 */ 1212 */
910 rxx_frm_ctl.s.pre_chk = 1; 1213 rxx_frm_ctl.s.pre_chk = 1;
911 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); 1214 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
912 1215
913 /* Enable the AGL block */ 1216 /* Configure the port duplex, speed and enables */
914 agl_gmx_inf_mode.u64 = 0; 1217 octeon_mgmt_disable_link(p);
915 agl_gmx_inf_mode.s.en = 1; 1218 if (p->phydev)
916 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); 1219 octeon_mgmt_update_link(p);
917 1220 octeon_mgmt_enable_link(p);
918 /* Configure the port duplex and enables */
919 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
920 prtx_cfg.s.tx_en = 1;
921 prtx_cfg.s.rx_en = 1;
922 prtx_cfg.s.en = 1;
923 p->last_duplex = 1;
924 prtx_cfg.s.duplex = p->last_duplex;
925 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
926 1221
927 p->last_link = 0; 1222 p->last_link = 0;
928 netif_carrier_off(netdev); 1223 p->last_speed = 0;
929 1224 /* PHY is not present in simulator. The carrier is enabled
930 if (octeon_mgmt_init_phy(netdev)) { 1225 * while initializing the phy for simulator, leave it enabled.
931 dev_err(p->dev, "Cannot initialize PHY.\n"); 1226 */
932 goto err_noirq; 1227 if (p->phydev) {
1228 netif_carrier_off(netdev);
1229 phy_start_aneg(p->phydev);
933 } 1230 }
934 1231
935 netif_wake_queue(netdev); 1232 netif_wake_queue(netdev);
@@ -959,6 +1256,7 @@ static int octeon_mgmt_stop(struct net_device *netdev)
959 1256
960 if (p->phydev) 1257 if (p->phydev)
961 phy_disconnect(p->phydev); 1258 phy_disconnect(p->phydev);
1259 p->phydev = NULL;
962 1260
963 netif_carrier_off(netdev); 1261 netif_carrier_off(netdev);
964 1262
@@ -991,6 +1289,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
991 int rv = NETDEV_TX_BUSY; 1289 int rv = NETDEV_TX_BUSY;
992 1290
993 re.d64 = 0; 1291 re.d64 = 0;
1292 re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
994 re.s.len = skb->len; 1293 re.s.len = skb->len;
995 re.s.addr = dma_map_single(p->dev, skb->data, 1294 re.s.addr = dma_map_single(p->dev, skb->data,
996 skb->len, 1295 skb->len,
@@ -1031,6 +1330,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1031 /* Ring the bell. */ 1330 /* Ring the bell. */
1032 cvmx_write_csr(p->mix + MIX_ORING2, 1); 1331 cvmx_write_csr(p->mix + MIX_ORING2, 1);
1033 1332
1333 netdev->trans_start = jiffies;
1034 rv = NETDEV_TX_OK; 1334 rv = NETDEV_TX_OK;
1035out: 1335out:
1036 octeon_mgmt_update_tx_stats(netdev); 1336 octeon_mgmt_update_tx_stats(netdev);
@@ -1068,7 +1368,7 @@ static int octeon_mgmt_get_settings(struct net_device *netdev,
1068 if (p->phydev) 1368 if (p->phydev)
1069 return phy_ethtool_gset(p->phydev, cmd); 1369 return phy_ethtool_gset(p->phydev, cmd);
1070 1370
1071 return -EINVAL; 1371 return -EOPNOTSUPP;
1072} 1372}
1073 1373
1074static int octeon_mgmt_set_settings(struct net_device *netdev, 1374static int octeon_mgmt_set_settings(struct net_device *netdev,
@@ -1082,23 +1382,37 @@ static int octeon_mgmt_set_settings(struct net_device *netdev,
1082 if (p->phydev) 1382 if (p->phydev)
1083 return phy_ethtool_sset(p->phydev, cmd); 1383 return phy_ethtool_sset(p->phydev, cmd);
1084 1384
1085 return -EINVAL; 1385 return -EOPNOTSUPP;
1386}
1387
1388static int octeon_mgmt_nway_reset(struct net_device *dev)
1389{
1390 struct octeon_mgmt *p = netdev_priv(dev);
1391
1392 if (!capable(CAP_NET_ADMIN))
1393 return -EPERM;
1394
1395 if (p->phydev)
1396 return phy_start_aneg(p->phydev);
1397
1398 return -EOPNOTSUPP;
1086} 1399}
1087 1400
1088static const struct ethtool_ops octeon_mgmt_ethtool_ops = { 1401static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1089 .get_drvinfo = octeon_mgmt_get_drvinfo, 1402 .get_drvinfo = octeon_mgmt_get_drvinfo,
1090 .get_link = ethtool_op_get_link,
1091 .get_settings = octeon_mgmt_get_settings, 1403 .get_settings = octeon_mgmt_get_settings,
1092 .set_settings = octeon_mgmt_set_settings 1404 .set_settings = octeon_mgmt_set_settings,
1405 .nway_reset = octeon_mgmt_nway_reset,
1406 .get_link = ethtool_op_get_link,
1093}; 1407};
1094 1408
1095static const struct net_device_ops octeon_mgmt_ops = { 1409static const struct net_device_ops octeon_mgmt_ops = {
1096 .ndo_open = octeon_mgmt_open, 1410 .ndo_open = octeon_mgmt_open,
1097 .ndo_stop = octeon_mgmt_stop, 1411 .ndo_stop = octeon_mgmt_stop,
1098 .ndo_start_xmit = octeon_mgmt_xmit, 1412 .ndo_start_xmit = octeon_mgmt_xmit,
1099 .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, 1413 .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
1100 .ndo_set_mac_address = octeon_mgmt_set_mac_address, 1414 .ndo_set_mac_address = octeon_mgmt_set_mac_address,
1101 .ndo_do_ioctl = octeon_mgmt_ioctl, 1415 .ndo_do_ioctl = octeon_mgmt_ioctl,
1102 .ndo_change_mtu = octeon_mgmt_change_mtu, 1416 .ndo_change_mtu = octeon_mgmt_change_mtu,
1103#ifdef CONFIG_NET_POLL_CONTROLLER 1417#ifdef CONFIG_NET_POLL_CONTROLLER
1104 .ndo_poll_controller = octeon_mgmt_poll_controller, 1418 .ndo_poll_controller = octeon_mgmt_poll_controller,
@@ -1113,6 +1427,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1113 const u8 *mac; 1427 const u8 *mac;
1114 struct resource *res_mix; 1428 struct resource *res_mix;
1115 struct resource *res_agl; 1429 struct resource *res_agl;
1430 struct resource *res_agl_prt_ctl;
1116 int len; 1431 int len;
1117 int result; 1432 int result;
1118 1433
@@ -1120,6 +1435,8 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1120 if (netdev == NULL) 1435 if (netdev == NULL)
1121 return -ENOMEM; 1436 return -ENOMEM;
1122 1437
1438 SET_NETDEV_DEV(netdev, &pdev->dev);
1439
1123 dev_set_drvdata(&pdev->dev, netdev); 1440 dev_set_drvdata(&pdev->dev, netdev);
1124 p = netdev_priv(netdev); 1441 p = netdev_priv(netdev);
1125 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, 1442 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
@@ -1127,6 +1444,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1127 1444
1128 p->netdev = netdev; 1445 p->netdev = netdev;
1129 p->dev = &pdev->dev; 1446 p->dev = &pdev->dev;
1447 p->has_rx_tstamp = false;
1130 1448
1131 data = of_get_property(pdev->dev.of_node, "cell-index", &len); 1449 data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1132 if (data && len == sizeof(*data)) { 1450 if (data && len == sizeof(*data)) {
@@ -1159,10 +1477,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1159 goto err; 1477 goto err;
1160 } 1478 }
1161 1479
1480 res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1481 if (res_agl_prt_ctl == NULL) {
1482 dev_err(&pdev->dev, "no 'reg' resource\n");
1483 result = -ENXIO;
1484 goto err;
1485 }
1486
1162 p->mix_phys = res_mix->start; 1487 p->mix_phys = res_mix->start;
1163 p->mix_size = resource_size(res_mix); 1488 p->mix_size = resource_size(res_mix);
1164 p->agl_phys = res_agl->start; 1489 p->agl_phys = res_agl->start;
1165 p->agl_size = resource_size(res_agl); 1490 p->agl_size = resource_size(res_agl);
1491 p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1492 p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
1166 1493
1167 1494
1168 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, 1495 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
@@ -1181,10 +1508,18 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1181 goto err; 1508 goto err;
1182 } 1509 }
1183 1510
1511 if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1512 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1513 result = -ENXIO;
1514 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1515 res_agl_prt_ctl->name);
1516 goto err;
1517 }
1184 1518
1185 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); 1519 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1186 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); 1520 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1187 1521 p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1522 p->agl_prt_ctl_size);
1188 spin_lock_init(&p->lock); 1523 spin_lock_init(&p->lock);
1189 1524
1190 skb_queue_head_init(&p->tx_list); 1525 skb_queue_head_init(&p->tx_list);
@@ -1199,14 +1534,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1199 1534
1200 mac = of_get_mac_address(pdev->dev.of_node); 1535 mac = of_get_mac_address(pdev->dev.of_node);
1201 1536
1202 if (mac) 1537 if (mac && is_valid_ether_addr(mac)) {
1203 memcpy(netdev->dev_addr, mac, 6); 1538 memcpy(netdev->dev_addr, mac, ETH_ALEN);
1539 netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
1540 } else {
1541 eth_hw_addr_random(netdev);
1542 }
1204 1543
1205 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1544 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1206 1545
1207 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 1546 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1208 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 1547 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
1209 1548
1549 netif_carrier_off(netdev);
1210 result = register_netdev(netdev); 1550 result = register_netdev(netdev);
1211 if (result) 1551 if (result)
1212 goto err; 1552 goto err;
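The probe change above only keeps a MAC address from the device tree when it passes is_valid_ether_addr(), and otherwise falls back to a random locally-administered address via eth_hw_addr_random(). The same policy in stand-alone form; the helpers below mimic the kernel ones for illustration and are not the kernel implementations.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Valid: not all-zero and not multicast (LSB of the first octet clear). */
static int valid_ether_addr(const unsigned char *a)
{
	static const unsigned char zero[6];
	return memcmp(a, zero, 6) != 0 && !(a[0] & 0x01);
}

static void random_ether_addr(unsigned char *a)
{
	for (int i = 0; i < 6; i++)
		a[i] = rand() & 0xff;
	a[0] &= 0xfe;   /* clear multicast bit */
	a[0] |= 0x02;   /* set locally-administered bit */
}

int main(void)
{
	unsigned char mac[6] = {0};  /* pretend the DT property was missing or all-zero */

	srand((unsigned)time(NULL));
	if (!valid_ether_addr(mac))
		random_ether_addr(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}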
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 97302419a377..5296cc8d3cba 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -26,6 +26,9 @@ if PCH_GBE
26config PCH_PTP 26config PCH_PTP
27 bool "PCH PTP clock support" 27 bool "PCH PTP clock support"
28 default n 28 default n
29 depends on EXPERIMENTAL
30 select PPS
31 select PTP_1588_CLOCK
29 select PTP_1588_CLOCK_PCH 32 select PTP_1588_CLOCK_PCH
30 ---help--- 33 ---help---
31 Say Y here if you want to use Precision Time Protocol (PTP) in the 34 Say Y here if you want to use Precision Time Protocol (PTP) in the
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index b528e52a8ee1..2a0c9dc48eb3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -38,7 +38,7 @@ static inline void writeq(u64 val, void __iomem *addr)
38} 38}
39#endif 39#endif
40 40
41static const struct crb_128M_2M_block_map 41static struct crb_128M_2M_block_map
42crb_128M_2M_map[64] __cacheline_aligned_in_smp = { 42crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
43 {{{0, 0, 0, 0} } }, /* 0: PCI */ 43 {{{0, 0, 0, 0} } }, /* 0: PCI */
44 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ 44 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 473ce134ca63..24ad17ec7fcd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1601,7 +1601,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1601 adapter->netdev = netdev; 1601 adapter->netdev = netdev;
1602 adapter->pdev = pdev; 1602 adapter->pdev = pdev;
1603 1603
1604 if (qlcnic_alloc_adapter_resources(adapter)) 1604 err = qlcnic_alloc_adapter_resources(adapter);
1605 if (err)
1605 goto err_out_free_netdev; 1606 goto err_out_free_netdev;
1606 1607
1607 adapter->dev_rst_time = jiffies; 1608 adapter->dev_rst_time = jiffies;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 995d0cfc4c06..1c818254b7be 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -563,7 +563,7 @@ rx_next:
563 if (cpr16(IntrStatus) & cp_rx_intr_mask) 563 if (cpr16(IntrStatus) & cp_rx_intr_mask)
564 goto rx_status_loop; 564 goto rx_status_loop;
565 565
566 napi_gro_flush(napi); 566 napi_gro_flush(napi, false);
567 spin_lock_irqsave(&cp->lock, flags); 567 spin_lock_irqsave(&cp->lock, flags);
568 __napi_complete(napi); 568 __napi_complete(napi);
569 cpw16_f(IntrMask, cp_intr_mask); 569 cpw16_f(IntrMask, cp_intr_mask);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1d83565cc6af..3ed7add23c12 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -228,7 +228,7 @@ typedef enum {
228static const struct { 228static const struct {
229 const char *name; 229 const char *name;
230 u32 hw_flags; 230 u32 hw_flags;
231} board_info[] __devinitdata = { 231} board_info[] __devinitconst = {
232 { "RealTek RTL8139", RTL8139_CAPS }, 232 { "RealTek RTL8139", RTL8139_CAPS },
233 { "RealTek RTL8129", RTL8129_CAPS }, 233 { "RealTek RTL8129", RTL8129_CAPS },
234}; 234};
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bad8f2eec9b4..c8bfea0524dd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2438,6 +2438,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2438 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2438 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2439 if (!rtsu) { 2439 if (!rtsu) {
2440 dev_err(&pdev->dev, "Not found TSU resource\n"); 2440 dev_err(&pdev->dev, "Not found TSU resource\n");
2441 ret = -ENODEV;
2441 goto out_release; 2442 goto out_release;
2442 } 2443 }
2443 mdp->tsu_addr = ioremap(rtsu->start, 2444 mdp->tsu_addr = ioremap(rtsu->start,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 96bd980e828d..4f86d0cd516a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2019,14 +2019,14 @@ static void efx_set_rx_mode(struct net_device *net_dev)
2019 netdev_for_each_mc_addr(ha, net_dev) { 2019 netdev_for_each_mc_addr(ha, net_dev) {
2020 crc = ether_crc_le(ETH_ALEN, ha->addr); 2020 crc = ether_crc_le(ETH_ALEN, ha->addr);
2021 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); 2021 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2022 set_bit_le(bit, mc_hash->byte); 2022 __set_bit_le(bit, mc_hash);
2023 } 2023 }
2024 2024
2025 /* Broadcast packets go through the multicast hash filter. 2025 /* Broadcast packets go through the multicast hash filter.
2026 * ether_crc_le() of the broadcast address is 0xbe2612ff 2026 * ether_crc_le() of the broadcast address is 0xbe2612ff
2027 * so we always add bit 0xff to the mask. 2027 * so we always add bit 0xff to the mask.
2028 */ 2028 */
2029 set_bit_le(0xff, mc_hash->byte); 2029 __set_bit_le(0xff, mc_hash);
2030 } 2030 }
2031 2031
2032 if (efx->port_enabled) 2032 if (efx->port_enabled)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c1a010cda89b..576a31091165 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1101,18 +1101,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
1101 return &rx_queue->buffer[index]; 1101 return &rx_queue->buffer[index];
1102} 1102}
1103 1103
1104/* Set bit in a little-endian bitfield */
1105static inline void set_bit_le(unsigned nr, unsigned char *addr)
1106{
1107 addr[nr / 8] |= (1 << (nr % 8));
1108}
1109
1110/* Clear bit in a little-endian bitfield */
1111static inline void clear_bit_le(unsigned nr, unsigned char *addr)
1112{
1113 addr[nr / 8] &= ~(1 << (nr % 8));
1114}
1115
1116 1104
1117/** 1105/**
1118 * EFX_MAX_FRAME_LEN - calculate maximum frame length 1106 * EFX_MAX_FRAME_LEN - calculate maximum frame length
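The helpers removed here were byte-addressed little-endian bit operations; the callers in efx.c and nic.c now use the generic __set_bit_le()/__clear_bit_le() instead. Below is a stand-alone copy of the removed semantics, exercised against the multicast-hash usage shown in the efx.c hunk; the 256-entry hash width is inferred from the "bit 0xff" comment and is an assumption of this sketch.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Byte-addressed little-endian bit helpers, equivalent to the ones
 * removed from net_driver.h above. */
static void set_bit_le8(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] |= 1u << (nr % 8);
}

static void clear_bit_le8(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] &= ~(1u << (nr % 8));
}

int main(void)
{
	unsigned char hash[32];        /* assumed 256-bit multicast hash */

	memset(hash, 0, sizeof(hash));
	set_bit_le8(0xff, hash);       /* broadcast always hashes to bit 0xff */
	assert(hash[31] == 0x80);
	set_bit_le8(10, hash);         /* e.g. crc & (hash_entries - 1) == 10 */
	assert(hash[1] == 0x04);
	clear_bit_le8(10, hash);
	assert(hash[1] == 0);
	puts("ok");
	return 0;
}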
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index cdff40b65729..aab7cacb2e34 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -472,9 +472,9 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
472 472
473 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); 473 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
474 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) 474 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
475 clear_bit_le(tx_queue->queue, (void *)&reg); 475 __clear_bit_le(tx_queue->queue, &reg);
476 else 476 else
477 set_bit_le(tx_queue->queue, (void *)&reg); 477 __set_bit_le(tx_queue->queue, &reg);
478 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); 478 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
479 } 479 }
480 480
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 5b3dd028ce85..0767043f44a4 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -640,8 +640,7 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
640 evt = list_entry(cursor, struct efx_ptp_event_rx, 640 evt = list_entry(cursor, struct efx_ptp_event_rx,
641 link); 641 link);
642 if (time_after(jiffies, evt->expiry)) { 642 if (time_after(jiffies, evt->expiry)) {
643 list_del(&evt->link); 643 list_move(&evt->link, &ptp->evt_free_list);
644 list_add(&evt->link, &ptp->evt_free_list);
645 netif_warn(efx, hw, efx->net_dev, 644 netif_warn(efx, hw, efx->net_dev,
646 "PTP rx event dropped\n"); 645 "PTP rx event dropped\n");
647 } 646 }
@@ -684,8 +683,7 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
684 683
685 match->state = PTP_PACKET_STATE_MATCHED; 684 match->state = PTP_PACKET_STATE_MATCHED;
686 rc = PTP_PACKET_STATE_MATCHED; 685 rc = PTP_PACKET_STATE_MATCHED;
687 list_del(&evt->link); 686 list_move(&evt->link, &ptp->evt_free_list);
688 list_add(&evt->link, &ptp->evt_free_list);
689 break; 687 break;
690 } 688 }
691 } 689 }
@@ -820,8 +818,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
820 /* Drop any pending receive events */ 818 /* Drop any pending receive events */
821 spin_lock_bh(&efx->ptp_data->evt_lock); 819 spin_lock_bh(&efx->ptp_data->evt_lock);
822 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { 820 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
823 list_del(cursor); 821 list_move(cursor, &efx->ptp_data->evt_free_list);
824 list_add(cursor, &efx->ptp_data->evt_free_list);
825 } 822 }
826 spin_unlock_bh(&efx->ptp_data->evt_lock); 823 spin_unlock_bh(&efx->ptp_data->evt_lock);
827 824
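All three ptp.c hunks collapse a list_del()+list_add() pair into list_move(), which is exactly that pair performed on one entry. A minimal stand-alone list with the same shape as the kernel's struct list_head, reimplemented here purely to show the equivalence:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void __list_add(struct list_head *n, struct list_head *prev, struct list_head *next)
{
	next->prev = n; n->next = next; n->prev = prev; prev->next = n;
}

static void list_add(struct list_head *n, struct list_head *head) { __list_add(n, head, head->next); }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next; e->next->prev = e->prev;
}

/* list_move() is exactly the del + add pair the patch replaces. */
static void list_move(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add(e, head);
}

int main(void)
{
	struct list_head evt_list, evt_free_list, evt;

	INIT_LIST_HEAD(&evt_list);
	INIT_LIST_HEAD(&evt_free_list);
	list_add(&evt, &evt_list);             /* event pending */
	list_move(&evt, &evt_free_list);       /* expired: recycle in one step */
	printf("evt_list empty: %d, free list holds evt: %d\n",
	       evt_list.next == &evt_list, evt_free_list.next == &evt);
	return 0;
}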
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 4613591b43e7..d8166012b7d4 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1618,7 +1618,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1618static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, 1618static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1619 struct net_device *dev) 1619 struct net_device *dev)
1620{ 1620{
1621 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 }; 1621 static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
1622 struct sis190_private *tp = netdev_priv(dev); 1622 struct sis190_private *tp = netdev_priv(dev);
1623 struct pci_dev *isa_bridge; 1623 struct pci_dev *isa_bridge;
1624 u8 reg, tmp8; 1624 u8 reg, tmp8;
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 203d9c6ec23a..fb9f6b38511f 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -478,8 +478,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
478 478
479 /* IO region. */ 479 /* IO region. */
480 ioaddr = pci_iomap(pci_dev, 0, 0); 480 ioaddr = pci_iomap(pci_dev, 0, 0);
481 if (!ioaddr) 481 if (!ioaddr) {
482 ret = -ENOMEM;
482 goto err_out_cleardev; 483 goto err_out_cleardev;
484 }
483 485
484 sis_priv = netdev_priv(net_dev); 486 sis_priv = netdev_priv(net_dev);
485 sis_priv->ioaddr = ioaddr; 487 sis_priv->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e872e1da3137..7d51a65ab099 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -50,7 +50,6 @@ struct stmmac_priv {
50 unsigned int dirty_rx; 50 unsigned int dirty_rx;
51 struct sk_buff **rx_skbuff; 51 struct sk_buff **rx_skbuff;
52 dma_addr_t *rx_skbuff_dma; 52 dma_addr_t *rx_skbuff_dma;
53 struct sk_buff_head rx_recycle;
54 53
55 struct net_device *dev; 54 struct net_device *dev;
56 dma_addr_t dma_rx_phy; 55 dma_addr_t dma_rx_phy;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3be88331d17a..c6cdbc4eb05e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
747 priv->hw->ring->clean_desc3(p); 747 priv->hw->ring->clean_desc3(p);
748 748
749 if (likely(skb != NULL)) { 749 if (likely(skb != NULL)) {
750 /* 750 dev_kfree_skb(skb);
751 * If there's room in the queue (limit it to size)
752 * we add this skb back into the pool,
753 * if it's the right size.
754 */
755 if ((skb_queue_len(&priv->rx_recycle) <
756 priv->dma_rx_size) &&
757 skb_recycle_check(skb, priv->dma_buf_sz))
758 __skb_queue_head(&priv->rx_recycle, skb);
759 else
760 dev_kfree_skb(skb);
761
762 priv->tx_skbuff[entry] = NULL; 751 priv->tx_skbuff[entry] = NULL;
763 } 752 }
764 753
@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
1169 priv->eee_enabled = stmmac_eee_init(priv); 1158 priv->eee_enabled = stmmac_eee_init(priv);
1170 1159
1171 napi_enable(&priv->napi); 1160 napi_enable(&priv->napi);
1172 skb_queue_head_init(&priv->rx_recycle);
1173 netif_start_queue(dev); 1161 netif_start_queue(dev);
1174 1162
1175 return 0; 1163 return 0;
@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
1222 kfree(priv->tm); 1210 kfree(priv->tm);
1223#endif 1211#endif
1224 napi_disable(&priv->napi); 1212 napi_disable(&priv->napi);
1225 skb_queue_purge(&priv->rx_recycle);
1226 1213
1227 /* Free the IRQ lines */ 1214 /* Free the IRQ lines */
1228 free_irq(dev->irq, dev); 1215 free_irq(dev->irq, dev);
@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1388 if (likely(priv->rx_skbuff[entry] == NULL)) { 1375 if (likely(priv->rx_skbuff[entry] == NULL)) {
1389 struct sk_buff *skb; 1376 struct sk_buff *skb;
1390 1377
1391 skb = __skb_dequeue(&priv->rx_recycle); 1378 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1392 if (skb == NULL)
1393 skb = netdev_alloc_skb_ip_align(priv->dev,
1394 bfsize);
1395 1379
1396 if (unlikely(skb == NULL)) 1380 if (unlikely(skb == NULL))
1397 break; 1381 break;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8419bf385e08..275b430aeb75 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9788,6 +9788,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9788 9788
9789 if (!pci_is_pcie(pdev)) { 9789 if (!pci_is_pcie(pdev)) {
9790 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); 9790 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
9791 err = -ENODEV;
9791 goto err_out_free_res; 9792 goto err_out_free_res;
9792 } 9793 }
9793 9794
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9ae12d0c9632..6c8695ec7cb9 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2963,7 +2963,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2963 goto err_out_iounmap; 2963 goto err_out_iounmap;
2964 } 2964 }
2965 2965
2966 if (gem_get_device_address(gp)) 2966 err = gem_get_device_address(gp);
2967 if (err)
2967 goto err_out_free_consistent; 2968 goto err_out_free_consistent;
2968 2969
2969 dev->netdev_ops = &gem_netdev_ops; 2970 dev->netdev_ops = &gem_netdev_ops;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 64783a0d545a..1450e33fc250 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -811,9 +811,9 @@ static struct tty_ldisc_ops sp_ldisc = {
811 811
812/* Initialize 6pack control device -- register 6pack line discipline */ 812/* Initialize 6pack control device -- register 6pack line discipline */
813 813
814static const char msg_banner[] __initdata = KERN_INFO \ 814static const char msg_banner[] __initconst = KERN_INFO \
815 "AX.25: 6pack driver, " SIXPACK_VERSION "\n"; 815 "AX.25: 6pack driver, " SIXPACK_VERSION "\n";
816static const char msg_regfail[] __initdata = KERN_ERR \ 816static const char msg_regfail[] __initconst = KERN_ERR \
817 "6pack: can't register line discipline (err = %d)\n"; 817 "6pack: can't register line discipline (err = %d)\n";
818 818
819static int __init sixpack_init_driver(void) 819static int __init sixpack_init_driver(void)
@@ -829,7 +829,7 @@ static int __init sixpack_init_driver(void)
829 return status; 829 return status;
830} 830}
831 831
832static const char msg_unregfail[] __exitdata = KERN_ERR \ 832static const char msg_unregfail[] = KERN_ERR \
833 "6pack: can't unregister line discipline (err = %d)\n"; 833 "6pack: can't unregister line discipline (err = %d)\n";
834 834
835static void __exit sixpack_exit_driver(void) 835static void __exit sixpack_exit_driver(void)
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 76d54774ba82..c2e5497397d5 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -87,7 +87,7 @@
87 87
88#include <linux/bpqether.h> 88#include <linux/bpqether.h>
89 89
90static const char banner[] __initdata = KERN_INFO \ 90static const char banner[] __initconst = KERN_INFO \
91 "AX.25: bpqether driver version 004\n"; 91 "AX.25: bpqether driver version 004\n";
92 92
93static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; 93static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2c0894a92abd..8e01c457015b 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -997,9 +997,9 @@ static struct tty_ldisc_ops ax_ldisc = {
997 .write_wakeup = mkiss_write_wakeup 997 .write_wakeup = mkiss_write_wakeup
998}; 998};
999 999
1000static const char banner[] __initdata = KERN_INFO \ 1000static const char banner[] __initconst = KERN_INFO \
1001 "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n"; 1001 "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n";
1002static const char msg_regfail[] __initdata = KERN_ERR \ 1002static const char msg_regfail[] __initconst = KERN_ERR \
1003 "mkiss: can't register line discipline (err = %d)\n"; 1003 "mkiss: can't register line discipline (err = %d)\n";
1004 1004
1005static int __init mkiss_init_driver(void) 1005static int __init mkiss_init_driver(void)
@@ -1015,7 +1015,7 @@ static int __init mkiss_init_driver(void)
1015 return status; 1015 return status;
1016} 1016}
1017 1017
1018static const char msg_unregfail[] __exitdata = KERN_ERR \ 1018static const char msg_unregfail[] = KERN_ERR \
1019 "mkiss: can't unregister line discipline (err = %d)\n"; 1019 "mkiss: can't unregister line discipline (err = %d)\n";
1020 1020
1021static void __exit mkiss_exit_driver(void) 1021static void __exit mkiss_exit_driver(void)
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index efc6c97163a7..1b4a47bd32b7 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -182,7 +182,7 @@
182 182
183#include "z8530.h" 183#include "z8530.h"
184 184
185static const char banner[] __initdata = KERN_INFO \ 185static const char banner[] __initconst = KERN_INFO \
186 "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n"; 186 "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
187 187
188static void t_dwait(unsigned long); 188static void t_dwait(unsigned long);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5a6412ecce73..c6645f1017af 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -76,7 +76,7 @@
76/* --------------------------------------------------------------------- */ 76/* --------------------------------------------------------------------- */
77 77
78static const char yam_drvname[] = "yam"; 78static const char yam_drvname[] = "yam";
79static const char yam_drvinfo[] __initdata = KERN_INFO \ 79static const char yam_drvinfo[] __initconst = KERN_INFO \
80 "YAM driver version 0.8 by F1OAT/F6FBB\n"; 80 "YAM driver version 0.8 by F1OAT/F6FBB\n";
81 81
82/* --------------------------------------------------------------------- */ 82/* --------------------------------------------------------------------- */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 30087ca23a0f..6e4d4b62c9a8 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -459,8 +459,10 @@ static int irtty_open(struct tty_struct *tty)
459 459
460 /* allocate private device info block */ 460 /* allocate private device info block */
461 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 461 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
462 if (!priv) 462 if (!priv) {
463 ret = -ENOMEM;
463 goto out_put; 464 goto out_put;
465 }
464 466
465 priv->magic = IRTTY_MAGIC; 467 priv->magic = IRTTY_MAGIC;
466 priv->tty = tty; 468 priv->tty = tty;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 1a00b5990cb8..f07c340990da 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -920,8 +920,10 @@ static int mcs_probe(struct usb_interface *intf,
920 920
921 ndev->netdev_ops = &mcs_netdev_ops; 921 ndev->netdev_ops = &mcs_netdev_ops;
922 922
923 if (!intf->cur_altsetting) 923 if (!intf->cur_altsetting) {
924 ret = -ENOMEM;
924 goto error2; 925 goto error2;
926 }
925 927
926 ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint, 928 ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint,
927 intf->cur_altsetting->desc.bNumEndpoints); 929 intf->cur_altsetting->desc.bNumEndpoints);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 002a442bf73f..858de05bdb7d 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -846,8 +846,10 @@ static int pxa_irda_probe(struct platform_device *pdev)
846 goto err_mem_2; 846 goto err_mem_2;
847 847
848 dev = alloc_irdadev(sizeof(struct pxa_irda)); 848 dev = alloc_irdadev(sizeof(struct pxa_irda));
849 if (!dev) 849 if (!dev) {
850 err = -ENOMEM;
850 goto err_mem_3; 851 goto err_mem_3;
852 }
851 853
852 SET_NETDEV_DEV(dev, &pdev->dev); 854 SET_NETDEV_DEV(dev, &pdev->dev);
853 si = netdev_priv(dev); 855 si = netdev_priv(dev);
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index e25067552b20..42fde9ed23e1 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -940,8 +940,10 @@ static int sa1100_irda_probe(struct platform_device *pdev)
940 goto err_mem_3; 940 goto err_mem_3;
941 941
942 dev = alloc_irdadev(sizeof(struct sa1100_irda)); 942 dev = alloc_irdadev(sizeof(struct sa1100_irda));
943 if (!dev) 943 if (!dev) {
944 err = -ENOMEM;
944 goto err_mem_4; 945 goto err_mem_4;
946 }
945 947
946 SET_NETDEV_DEV(dev, &pdev->dev); 948 SET_NETDEV_DEV(dev, &pdev->dev);
947 949
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index eb315b8d07a3..4b746d9bd8e7 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -808,8 +808,8 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
808 goto err_mem_4; 808 goto err_mem_4;
809 809
810 platform_set_drvdata(pdev, ndev); 810 platform_set_drvdata(pdev, ndev);
811 811 err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
812 if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) { 812 if (err) {
813 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); 813 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
814 goto err_mem_4; 814 goto err_mem_4;
815 } 815 }
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 795109425568..624ac1939e85 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -741,6 +741,7 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
741 self->clk = clk_get(&pdev->dev, clk_name); 741 self->clk = clk_get(&pdev->dev, clk_name);
742 if (IS_ERR(self->clk)) { 742 if (IS_ERR(self->clk)) {
743 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); 743 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
744 err = -ENODEV;
744 goto err_mem_3; 745 goto err_mem_3;
745 } 746 }
746 747
@@ -760,8 +761,8 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
760 goto err_mem_4; 761 goto err_mem_4;
761 762
762 platform_set_drvdata(pdev, ndev); 763 platform_set_drvdata(pdev, ndev);
763 764 err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
764 if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) { 765 if (err) {
765 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n"); 766 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
766 goto err_mem_4; 767 goto err_mem_4;
767 } 768 }
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 170eb411ab5d..c1ef3000ea60 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/of_device.h> 28#include <linux/of_device.h>
29#include <linux/of_mdio.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
30#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
31#include <linux/skbuff.h> 32#include <linux/skbuff.h>
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 91d25888a1b9..d8b9b1e8ee02 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -26,7 +26,7 @@
26#include <linux/ethtool.h> 26#include <linux/ethtool.h>
27 27
28#define DRV_NAME "rionet" 28#define DRV_NAME "rionet"
29#define DRV_VERSION "0.2" 29#define DRV_VERSION "0.3"
30#define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>" 30#define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
31#define DRV_DESC "Ethernet over RapidIO" 31#define DRV_DESC "Ethernet over RapidIO"
32 32
@@ -47,8 +47,7 @@ MODULE_LICENSE("GPL");
47 47
48#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE 48#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
49#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE 49#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
50 50#define RIONET_MAX_NETS 8
51static LIST_HEAD(rionet_peers);
52 51
53struct rionet_private { 52struct rionet_private {
54 struct rio_mport *mport; 53 struct rio_mport *mport;
@@ -69,16 +68,14 @@ struct rionet_peer {
69 struct resource *res; 68 struct resource *res;
70}; 69};
71 70
72static int rionet_check = 0; 71struct rionet_net {
73static int rionet_capable = 1; 72 struct net_device *ndev;
73 struct list_head peers;
74 struct rio_dev **active;
75 int nact; /* number of active peers */
76};
74 77
75/* 78static struct rionet_net nets[RIONET_MAX_NETS];
76 * This is a fast lookup table for translating TX
77 * Ethernet packets into a destination RIO device. It
78 * could be made into a hash table to save memory depending
79 * on system trade-offs.
80 */
81static struct rio_dev **rionet_active;
82 79
83#define is_rionet_capable(src_ops, dst_ops) \ 80#define is_rionet_capable(src_ops, dst_ops) \
84 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 81 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
@@ -175,6 +172,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
175 struct ethhdr *eth = (struct ethhdr *)skb->data; 172 struct ethhdr *eth = (struct ethhdr *)skb->data;
176 u16 destid; 173 u16 destid;
177 unsigned long flags; 174 unsigned long flags;
175 int add_num = 1;
178 176
179 local_irq_save(flags); 177 local_irq_save(flags);
180 if (!spin_trylock(&rnet->tx_lock)) { 178 if (!spin_trylock(&rnet->tx_lock)) {
@@ -182,7 +180,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
182 return NETDEV_TX_LOCKED; 180 return NETDEV_TX_LOCKED;
183 } 181 }
184 182
185 if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) { 183 if (is_multicast_ether_addr(eth->h_dest))
184 add_num = nets[rnet->mport->id].nact;
185
186 if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
186 netif_stop_queue(ndev); 187 netif_stop_queue(ndev);
187 spin_unlock_irqrestore(&rnet->tx_lock, flags); 188 spin_unlock_irqrestore(&rnet->tx_lock, flags);
188 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n", 189 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
@@ -191,15 +192,22 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
191 } 192 }
192 193
193 if (is_multicast_ether_addr(eth->h_dest)) { 194 if (is_multicast_ether_addr(eth->h_dest)) {
195 int count = 0;
196
194 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size); 197 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
195 i++) 198 i++)
196 if (rionet_active[i]) 199 if (nets[rnet->mport->id].active[i]) {
197 rionet_queue_tx_msg(skb, ndev, 200 rionet_queue_tx_msg(skb, ndev,
198 rionet_active[i]); 201 nets[rnet->mport->id].active[i]);
202 if (count)
203 atomic_inc(&skb->users);
204 count++;
205 }
199 } else if (RIONET_MAC_MATCH(eth->h_dest)) { 206 } else if (RIONET_MAC_MATCH(eth->h_dest)) {
200 destid = RIONET_GET_DESTID(eth->h_dest); 207 destid = RIONET_GET_DESTID(eth->h_dest);
201 if (rionet_active[destid]) 208 if (nets[rnet->mport->id].active[destid])
202 rionet_queue_tx_msg(skb, ndev, rionet_active[destid]); 209 rionet_queue_tx_msg(skb, ndev,
210 nets[rnet->mport->id].active[destid]);
203 } 211 }
204 212
205 spin_unlock_irqrestore(&rnet->tx_lock, flags); 213 spin_unlock_irqrestore(&rnet->tx_lock, flags);
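The multicast path above queues one skb to every active peer and takes an extra reference (atomic_inc(&skb->users)) for each copy after the first, so the buffer survives until the last transmit completion drops the final reference; tx_cnt is likewise charged nact slots up front. A user-space illustration of that counting (a model only, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	int users;		/* plays the role of skb->users */
	char data[64];
};

static void buf_put(struct buf *b)
{
	if (--b->users == 0) {
		printf("last reference dropped, freeing buffer\n");
		free(b);
	}
}

int main(void)
{
	int nact = 3;		/* number of active peers */
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->users = 1;		/* the sender's own reference */
	for (int i = 0; i < nact; i++) {
		if (i)		/* every copy after the first takes a reference */
			b->users++;
		printf("queued to peer %d (users=%d)\n", i, b->users);
	}

	/* each completed transmission drops one reference */
	for (int i = 0; i < nact; i++)
		buf_put(b);
	return 0;
}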
@@ -218,16 +226,21 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
218 printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x", 226 printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
219 DRV_NAME, sid, tid, info); 227 DRV_NAME, sid, tid, info);
220 if (info == RIONET_DOORBELL_JOIN) { 228 if (info == RIONET_DOORBELL_JOIN) {
221 if (!rionet_active[sid]) { 229 if (!nets[rnet->mport->id].active[sid]) {
222 list_for_each_entry(peer, &rionet_peers, node) { 230 list_for_each_entry(peer,
223 if (peer->rdev->destid == sid) 231 &nets[rnet->mport->id].peers, node) {
224 rionet_active[sid] = peer->rdev; 232 if (peer->rdev->destid == sid) {
233 nets[rnet->mport->id].active[sid] =
234 peer->rdev;
235 nets[rnet->mport->id].nact++;
236 }
225 } 237 }
226 rio_mport_send_doorbell(mport, sid, 238 rio_mport_send_doorbell(mport, sid,
227 RIONET_DOORBELL_JOIN); 239 RIONET_DOORBELL_JOIN);
228 } 240 }
229 } else if (info == RIONET_DOORBELL_LEAVE) { 241 } else if (info == RIONET_DOORBELL_LEAVE) {
230 rionet_active[sid] = NULL; 242 nets[rnet->mport->id].active[sid] = NULL;
243 nets[rnet->mport->id].nact--;
231 } else { 244 } else {
232 if (netif_msg_intr(rnet)) 245 if (netif_msg_intr(rnet))
233 printk(KERN_WARNING "%s: unhandled doorbell\n", 246 printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -321,7 +334,8 @@ static int rionet_open(struct net_device *ndev)
321 netif_carrier_on(ndev); 334 netif_carrier_on(ndev);
322 netif_start_queue(ndev); 335 netif_start_queue(ndev);
323 336
324 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 337 list_for_each_entry_safe(peer, tmp,
338 &nets[rnet->mport->id].peers, node) {
325 if (!(peer->res = rio_request_outb_dbell(peer->rdev, 339 if (!(peer->res = rio_request_outb_dbell(peer->rdev,
326 RIONET_DOORBELL_JOIN, 340 RIONET_DOORBELL_JOIN,
327 RIONET_DOORBELL_LEAVE))) 341 RIONET_DOORBELL_LEAVE)))
@@ -346,7 +360,7 @@ static int rionet_close(struct net_device *ndev)
346 int i; 360 int i;
347 361
348 if (netif_msg_ifup(rnet)) 362 if (netif_msg_ifup(rnet))
349 printk(KERN_INFO "%s: close\n", DRV_NAME); 363 printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);
350 364
351 netif_stop_queue(ndev); 365 netif_stop_queue(ndev);
352 netif_carrier_off(ndev); 366 netif_carrier_off(ndev);
@@ -354,10 +368,11 @@ static int rionet_close(struct net_device *ndev)
354 for (i = 0; i < RIONET_RX_RING_SIZE; i++) 368 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
355 kfree_skb(rnet->rx_skb[i]); 369 kfree_skb(rnet->rx_skb[i]);
356 370
357 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 371 list_for_each_entry_safe(peer, tmp,
358 if (rionet_active[peer->rdev->destid]) { 372 &nets[rnet->mport->id].peers, node) {
373 if (nets[rnet->mport->id].active[peer->rdev->destid]) {
359 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); 374 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
360 rionet_active[peer->rdev->destid] = NULL; 375 nets[rnet->mport->id].active[peer->rdev->destid] = NULL;
361 } 376 }
362 rio_release_outb_dbell(peer->rdev, peer->res); 377 rio_release_outb_dbell(peer->rdev, peer->res);
363 } 378 }
@@ -373,17 +388,21 @@ static int rionet_close(struct net_device *ndev)
373static void rionet_remove(struct rio_dev *rdev) 388static void rionet_remove(struct rio_dev *rdev)
374{ 389{
375 struct net_device *ndev = rio_get_drvdata(rdev); 390 struct net_device *ndev = rio_get_drvdata(rdev);
391 unsigned char netid = rdev->net->hport->id;
376 struct rionet_peer *peer, *tmp; 392 struct rionet_peer *peer, *tmp;
377 393
378 free_pages((unsigned long)rionet_active, get_order(sizeof(void *) *
379 RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size)));
380 unregister_netdev(ndev); 394 unregister_netdev(ndev);
381 free_netdev(ndev);
382 395
383 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 396 free_pages((unsigned long)nets[netid].active, get_order(sizeof(void *) *
397 RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size)));
398 nets[netid].active = NULL;
399
400 list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) {
384 list_del(&peer->node); 401 list_del(&peer->node);
385 kfree(peer); 402 kfree(peer);
386 } 403 }
404
405 free_netdev(ndev);
387} 406}
388 407
389static void rionet_get_drvinfo(struct net_device *ndev, 408static void rionet_get_drvinfo(struct net_device *ndev,
@@ -435,13 +454,13 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
435 const size_t rionet_active_bytes = sizeof(void *) * 454 const size_t rionet_active_bytes = sizeof(void *) *
436 RIO_MAX_ROUTE_ENTRIES(mport->sys_size); 455 RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
437 456
438 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, 457 nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
439 get_order(rionet_active_bytes)); 458 get_order(rionet_active_bytes));
440 if (!rionet_active) { 459 if (!nets[mport->id].active) {
441 rc = -ENOMEM; 460 rc = -ENOMEM;
442 goto out; 461 goto out;
443 } 462 }
444 memset((void *)rionet_active, 0, rionet_active_bytes); 463 memset((void *)nets[mport->id].active, 0, rionet_active_bytes);
445 464
446 /* Set up private area */ 465 /* Set up private area */
447 rnet = netdev_priv(ndev); 466 rnet = netdev_priv(ndev);
@@ -470,60 +489,62 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
470 if (rc != 0) 489 if (rc != 0)
471 goto out; 490 goto out;
472 491
473 printk("%s: %s %s Version %s, MAC %pM\n", 492 printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
474 ndev->name, 493 ndev->name,
475 DRV_NAME, 494 DRV_NAME,
476 DRV_DESC, 495 DRV_DESC,
477 DRV_VERSION, 496 DRV_VERSION,
478 ndev->dev_addr); 497 ndev->dev_addr,
498 mport->name);
479 499
480 out: 500 out:
481 return rc; 501 return rc;
482} 502}
483 503
484/* 504static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1];
485 * XXX Make multi-net safe 505
486 */
487static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) 506static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
488{ 507{
489 int rc = -ENODEV; 508 int rc = -ENODEV;
490 u32 lsrc_ops, ldst_ops; 509 u32 lsrc_ops, ldst_ops;
491 struct rionet_peer *peer; 510 struct rionet_peer *peer;
492 struct net_device *ndev = NULL; 511 struct net_device *ndev = NULL;
512 unsigned char netid = rdev->net->hport->id;
513 int oldnet;
493 514
494 /* If local device is not rionet capable, give up quickly */ 515 if (netid >= RIONET_MAX_NETS)
495 if (!rionet_capable) 516 return rc;
496 goto out;
497 517
498 /* Allocate our net_device structure */ 518 oldnet = test_and_set_bit(netid, net_table);
499 ndev = alloc_etherdev(sizeof(struct rionet_private));
500 if (ndev == NULL) {
501 rc = -ENOMEM;
502 goto out;
503 }
504 519
505 /* 520 /*
506 * First time through, make sure local device is rionet 521 * First time through, make sure local device is rionet
507 * capable, setup netdev, and set flags so this is skipped 522 * capable, setup netdev (will be skipped on later probes)
508 * on later probes
509 */ 523 */
510 if (!rionet_check) { 524 if (!oldnet) {
511 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 525 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
512 &lsrc_ops); 526 &lsrc_ops);
513 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 527 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
514 &ldst_ops); 528 &ldst_ops);
515 if (!is_rionet_capable(lsrc_ops, ldst_ops)) { 529 if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
516 printk(KERN_ERR 530 printk(KERN_ERR
517 "%s: local device is not network capable\n", 531 "%s: local device %s is not network capable\n",
518 DRV_NAME); 532 DRV_NAME, rdev->net->hport->name);
519 rionet_check = 1;
520 rionet_capable = 0;
521 goto out; 533 goto out;
522 } 534 }
523 535
536 /* Allocate our net_device structure */
537 ndev = alloc_etherdev(sizeof(struct rionet_private));
538 if (ndev == NULL) {
539 rc = -ENOMEM;
540 goto out;
541 }
542 nets[netid].ndev = ndev;
524 rc = rionet_setup_netdev(rdev->net->hport, ndev); 543 rc = rionet_setup_netdev(rdev->net->hport, ndev);
525 rionet_check = 1; 544 INIT_LIST_HEAD(&nets[netid].peers);
526 } 545 nets[netid].nact = 0;
546 } else if (nets[netid].ndev == NULL)
547 goto out;
527 548
528 /* 549 /*
529 * If the remote device has mailbox/doorbell capabilities, 550 * If the remote device has mailbox/doorbell capabilities,
@@ -535,10 +556,10 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
535 goto out; 556 goto out;
536 } 557 }
537 peer->rdev = rdev; 558 peer->rdev = rdev;
538 list_add_tail(&peer->node, &rionet_peers); 559 list_add_tail(&peer->node, &nets[netid].peers);
539 } 560 }
540 561
541 rio_set_drvdata(rdev, ndev); 562 rio_set_drvdata(rdev, nets[netid].ndev);
542 563
543 out: 564 out:
544 return rc; 565 return rc;
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 434d5af8e6fb..c81e278629ff 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -244,8 +244,12 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
244 * - suspend: peripheral ready to suspend 244 * - suspend: peripheral ready to suspend
245 * - response: suggest N millisec polling 245 * - response: suggest N millisec polling
246 * - response complete: suggest N sec polling 246 * - response complete: suggest N sec polling
247 *
248 * Suspend is reported and maybe heeded.
247 */ 249 */
248 case 2: /* Suspend hint */ 250 case 2: /* Suspend hint */
251 usbnet_device_suggests_idle(dev);
252 continue;
249 case 3: /* Response hint */ 253 case 3: /* Response hint */
250 case 4: /* Response complete hint */ 254 case 4: /* Response complete hint */
251 continue; 255 continue;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c75e11e1b385..afb117c16d2d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -424,7 +424,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
424 424
425 netdev_dbg(kaweth->net, 425 netdev_dbg(kaweth->net,
426 "Downloading firmware at %p to kaweth device at %p\n", 426 "Downloading firmware at %p to kaweth device at %p\n",
427 fw->data, kaweth); 427 kaweth->firmware_buf, kaweth);
428 netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); 428 netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
429 429
430 return kaweth_control(kaweth, 430 return kaweth_control(kaweth,
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03c2d8d653df..cc7e72010ac3 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,6 +117,7 @@ enum {
117struct mcs7830_data { 117struct mcs7830_data {
118 u8 multi_filter[8]; 118 u8 multi_filter[8];
119 u8 config; 119 u8 config;
120 u8 link_counter;
120}; 121};
121 122
122static const char driver_name[] = "MOSCHIP usb-ethernet driver"; 123static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
632static void mcs7830_status(struct usbnet *dev, struct urb *urb) 633static void mcs7830_status(struct usbnet *dev, struct urb *urb)
633{ 634{
634 u8 *buf = urb->transfer_buffer; 635 u8 *buf = urb->transfer_buffer;
635 bool link; 636 bool link, link_changed;
637 struct mcs7830_data *data = mcs7830_get_data(dev);
636 638
637 if (urb->actual_length < 16) 639 if (urb->actual_length < 16)
638 return; 640 return;
639 641
640 link = !(buf[1] & 0x20); 642 link = !(buf[1] & 0x20);
641 if (netif_carrier_ok(dev->net) != link) { 643 link_changed = netif_carrier_ok(dev->net) != link;
642 if (link) { 644 if (link_changed) {
643 netif_carrier_on(dev->net); 645 data->link_counter++;
644 usbnet_defer_kevent(dev, EVENT_LINK_RESET); 646 /*
645 } else 647 track the link state across 20 status reports to guard against
646 netif_carrier_off(dev->net); 648 erroneous link state changes sometimes reported by the chip
647 netdev_dbg(dev->net, "Link Status is: %d\n", link); 649 */
648 } 650 if (data->link_counter > 20) {
651 data->link_counter = 0;
652 if (link) {
653 netif_carrier_on(dev->net);
654 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
655 } else
656 netif_carrier_off(dev->net);
657 netdev_dbg(dev->net, "Link Status is: %d\n", link);
658 }
659 } else
660 data->link_counter = 0;
649} 661}
650 662
651static const struct driver_info moschip_info = { 663static const struct driver_info moschip_info = {
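A user-space sketch of the debounce logic the mcs7830 hunk adds: a carrier change is applied only after it has been seen on more than 20 consecutive status reports, otherwise the counter resets. Names are illustrative, not the driver's:

#include <stdio.h>
#include <stdbool.h>

struct state {
	bool carrier;			/* what the stack currently believes */
	unsigned char link_counter;
};

static void status_report(struct state *s, bool link)
{
	if (link != s->carrier) {
		if (++s->link_counter > 20) {
			s->link_counter = 0;
			s->carrier = link;
			printf("carrier is now %s\n", link ? "on" : "off");
		}
	} else {
		s->link_counter = 0;	/* glitch over, start counting again */
	}
}

int main(void)
{
	struct state s = { .carrier = true, .link_counter = 0 };

	status_report(&s, false);	/* a single spurious "link down"... */
	status_report(&s, true);	/* ...is ignored */

	for (int i = 0; i < 25; i++)	/* a persistent change is applied */
		status_report(&s, false);
	return 0;
}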
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index fc9f578a1e25..f9819d10b1f9 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1588,10 +1588,27 @@ int usbnet_resume (struct usb_interface *intf)
1588 tasklet_schedule (&dev->bh); 1588 tasklet_schedule (&dev->bh);
1589 } 1589 }
1590 } 1590 }
1591
1592 if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
1593 usb_autopm_get_interface_no_resume(intf);
1594
1591 return 0; 1595 return 0;
1592} 1596}
1593EXPORT_SYMBOL_GPL(usbnet_resume); 1597EXPORT_SYMBOL_GPL(usbnet_resume);
1594 1598
1599/*
1600 * Either a subdriver implements manage_power, in which case it is assumed
1601 * to always be ready to be suspended, or it reports its readiness to be
1602 * suspended explicitly
1603 */
1604void usbnet_device_suggests_idle(struct usbnet *dev)
1605{
1606 if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
1607 dev->intf->needs_remote_wakeup = 1;
1608 usb_autopm_put_interface_async(dev->intf);
1609 }
1610}
1611EXPORT_SYMBOL(usbnet_device_suggests_idle);
1595 1612
1596/*-------------------------------------------------------------------------*/ 1613/*-------------------------------------------------------------------------*/
1597 1614
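A rough user-space model of the balance the new usbnet code keeps: the device may "suggest idle" at most once (the flag guards the one-shot), which drops one power usage reference, and resume takes the reference back while clearing the flag. This only models the counting, not the real USB autopm API:

#include <stdio.h>
#include <stdbool.h>

struct dev_model {
	int pm_usage;		/* stands in for the autopm usage count */
	bool report_idle;	/* stands in for EVENT_DEVICE_REPORT_IDLE */
};

static void device_suggests_idle(struct dev_model *d)
{
	if (!d->report_idle) {	/* test_and_set_bit(): act only once */
		d->report_idle = true;
		d->pm_usage--;	/* models usb_autopm_put_interface_async() */
	}
}

static void resume(struct dev_model *d)
{
	if (d->report_idle) {	/* test_and_clear_bit() */
		d->report_idle = false;
		d->pm_usage++;	/* models usb_autopm_get_interface_no_resume() */
	}
}

int main(void)
{
	struct dev_model d = { .pm_usage = 1, .report_idle = false };

	device_suggests_idle(&d);
	device_suggests_idle(&d);	/* second hint is a no-op */
	printf("after idle hints: usage=%d\n", d.pm_usage);	/* 0 */

	resume(&d);
	printf("after resume: usage=%d\n", d.pm_usage);		/* 1 */
	return 0;
}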
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 51de9edb55f5..607976c00162 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -28,7 +28,6 @@
28#include <linux/igmp.h> 28#include <linux/igmp.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <linux/version.h>
32#include <linux/hash.h> 31#include <linux/hash.h>
33#include <net/ip.h> 32#include <net/ip.h>
34#include <net/icmp.h> 33#include <net/icmp.h>
@@ -107,6 +106,8 @@ struct vxlan_dev {
107 __be32 gaddr; /* multicast group */ 106 __be32 gaddr; /* multicast group */
108 __be32 saddr; /* source address */ 107 __be32 saddr; /* source address */
109 unsigned int link; /* link to multicast over */ 108 unsigned int link; /* link to multicast over */
109 __u16 port_min; /* source port range */
110 __u16 port_max;
110 __u8 tos; /* TOS override */ 111 __u8 tos; /* TOS override */
111 __u8 ttl; 112 __u8 ttl;
112 bool learn; 113 bool learn;
@@ -229,9 +230,9 @@ static u32 eth_hash(const unsigned char *addr)
229 230
230 /* only want 6 bytes */ 231 /* only want 6 bytes */
231#ifdef __BIG_ENDIAN 232#ifdef __BIG_ENDIAN
232 value <<= 16;
233#else
234 value >>= 16; 233 value >>= 16;
234#else
235 value <<= 16;
235#endif 236#endif
236 return hash_64(value, FDB_HASH_BITS); 237 return hash_64(value, FDB_HASH_BITS);
237} 238}
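The eth_hash hunk swaps the two #ifdef branches: an 8-byte load starting at the MAC leaves the two non-address bytes in the low 16 bits on big-endian (so shift right) and in the high 16 bits on little-endian (so shift left), which is the opposite of what the old code did. For comparison, an endian-independent way to get only the six MAC bytes into a 64-bit value, shown as an illustration rather than a drop-in replacement:

#include <stdio.h>
#include <stdint.h>

static uint64_t mac_to_u64(const unsigned char *addr)
{
	uint64_t value = 0;

	for (int i = 0; i < 6; i++)	/* only want 6 bytes */
		value = (value << 8) | addr[i];
	return value;
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };

	printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
	return 0;
}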
@@ -536,7 +537,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
536 } 537 }
537 538
538 __skb_pull(skb, sizeof(struct vxlanhdr)); 539 __skb_pull(skb, sizeof(struct vxlanhdr));
539 skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));
540 540
541 /* Is this VNI defined? */ 541 /* Is this VNI defined? */
542 vni = ntohl(vxh->vx_vni) >> 8; 542 vni = ntohl(vxh->vx_vni) >> 8;
@@ -555,7 +555,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
555 /* Re-examine inner Ethernet packet */ 555 /* Re-examine inner Ethernet packet */
556 oip = ip_hdr(skb); 556 oip = ip_hdr(skb);
557 skb->protocol = eth_type_trans(skb, vxlan->dev); 557 skb->protocol = eth_type_trans(skb, vxlan->dev);
558 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
559 558
560 /* Ignore packet loops (and multicast echo) */ 559 /* Ignore packet loops (and multicast echo) */
561 if (compare_ether_addr(eth_hdr(skb)->h_source, 560 if (compare_ether_addr(eth_hdr(skb)->h_source,
@@ -567,6 +566,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
567 566
568 __skb_tunnel_rx(skb, vxlan->dev); 567 __skb_tunnel_rx(skb, vxlan->dev);
569 skb_reset_network_header(skb); 568 skb_reset_network_header(skb);
569 skb->ip_summed = CHECKSUM_NONE;
570 570
571 err = IP_ECN_decapsulate(oip, skb); 571 err = IP_ECN_decapsulate(oip, skb);
572 if (unlikely(err)) { 572 if (unlikely(err)) {
@@ -622,46 +622,89 @@ static inline u8 vxlan_ecn_encap(u8 tos,
622 return INET_ECN_encapsulate(tos, inner); 622 return INET_ECN_encapsulate(tos, inner);
623} 623}
624 624
625static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
626{
627 const struct ethhdr *eth = (struct ethhdr *) skb->data;
628 const struct vxlan_fdb *f;
629
630 if (is_multicast_ether_addr(eth->h_dest))
631 return vxlan->gaddr;
632
633 f = vxlan_find_mac(vxlan, eth->h_dest);
634 if (f)
635 return f->remote_ip;
636 else
637 return vxlan->gaddr;
638
639}
640
641static void vxlan_sock_free(struct sk_buff *skb)
642{
643 sock_put(skb->sk);
644}
645
646/* On transmit, associate with the tunnel socket */
647static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
648{
649 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
650 struct sock *sk = vn->sock->sk;
651
652 skb_orphan(skb);
653 sock_hold(sk);
654 skb->sk = sk;
655 skb->destructor = vxlan_sock_free;
656}
657
658/* Compute source port for outgoing packet
659 * first choice is the L4 flow hash since it spreads
660 * better and may be available from hardware;
661 * second choice is a jhash over the Ethernet header
662 */
663static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
664{
665 unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
666 u32 hash;
667
668 hash = skb_get_rxhash(skb);
669 if (!hash)
670 hash = jhash(skb->data, 2 * ETH_ALEN,
671 (__force u32) skb->protocol);
672
673 return (((u64) hash * range) >> 32) + vxlan->port_min;
674}
675
625/* Transmit local packets over Vxlan 676/* Transmit local packets over Vxlan
626 * 677 *
627 * Outer IP header inherits ECN and DF from inner header. 678 * Outer IP header inherits ECN and DF from inner header.
628 * Outer UDP destination is the VXLAN assigned port. 679 * Outer UDP destination is the VXLAN assigned port.
629 * source port is based on hash of flow if available 680 * source port is based on hash of flow
630 * otherwise use a random value
631 */ 681 */
632static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 682static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
633{ 683{
634 struct vxlan_dev *vxlan = netdev_priv(dev); 684 struct vxlan_dev *vxlan = netdev_priv(dev);
635 struct rtable *rt; 685 struct rtable *rt;
636 const struct ethhdr *eth;
637 const struct iphdr *old_iph; 686 const struct iphdr *old_iph;
638 struct iphdr *iph; 687 struct iphdr *iph;
639 struct vxlanhdr *vxh; 688 struct vxlanhdr *vxh;
640 struct udphdr *uh; 689 struct udphdr *uh;
641 struct flowi4 fl4; 690 struct flowi4 fl4;
642 struct vxlan_fdb *f;
643 unsigned int pkt_len = skb->len; 691 unsigned int pkt_len = skb->len;
644 u32 hash;
645 __be32 dst; 692 __be32 dst;
693 __u16 src_port;
646 __be16 df = 0; 694 __be16 df = 0;
647 __u8 tos, ttl; 695 __u8 tos, ttl;
648 int err; 696 int err;
649 697
698 dst = vxlan_find_dst(vxlan, skb);
699 if (!dst)
700 goto drop;
701
650 /* Need space for new headers (invalidates iph ptr) */ 702 /* Need space for new headers (invalidates iph ptr) */
651 if (skb_cow_head(skb, VXLAN_HEADROOM)) 703 if (skb_cow_head(skb, VXLAN_HEADROOM))
652 goto drop; 704 goto drop;
653 705
654 eth = (void *)skb->data;
655 old_iph = ip_hdr(skb); 706 old_iph = ip_hdr(skb);
656 707
657 if (!is_multicast_ether_addr(eth->h_dest) &&
658 (f = vxlan_find_mac(vxlan, eth->h_dest)))
659 dst = f->remote_ip;
660 else if (vxlan->gaddr) {
661 dst = vxlan->gaddr;
662 } else
663 goto drop;
664
665 ttl = vxlan->ttl; 708 ttl = vxlan->ttl;
666 if (!ttl && IN_MULTICAST(ntohl(dst))) 709 if (!ttl && IN_MULTICAST(ntohl(dst)))
667 ttl = 1; 710 ttl = 1;
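A worked example of the vxlan_src_port() arithmetic added above: a 32-bit hash is scaled into [port_min, port_max] with a multiply-and-shift instead of a modulo, so the full hash range maps evenly onto the configured port range. Standalone demo; the port range and hash values are made up:

#include <stdio.h>
#include <stdint.h>

static uint16_t scale_to_port(uint32_t hash, uint16_t port_min, uint16_t port_max)
{
	uint32_t range = (uint32_t)(port_max - port_min) + 1;

	/* (hash / 2^32) * range, computed in 64-bit fixed point */
	return (uint16_t)(((uint64_t)hash * range) >> 32) + port_min;
}

int main(void)
{
	uint16_t lo = 32768, hi = 61000;	/* illustrative local port range */

	printf("%u\n", scale_to_port(0x00000000u, lo, hi));	/* 32768 */
	printf("%u\n", scale_to_port(0x80000000u, lo, hi));	/* 46884, the middle */
	printf("%u\n", scale_to_port(0xffffffffu, lo, hi));	/* 61000 */
	return 0;
}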
@@ -670,11 +713,15 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
670 if (tos == 1) 713 if (tos == 1)
671 tos = vxlan_get_dsfield(old_iph, skb); 714 tos = vxlan_get_dsfield(old_iph, skb);
672 715
673 hash = skb_get_rxhash(skb); 716 src_port = vxlan_src_port(vxlan, skb);
717
718 memset(&fl4, 0, sizeof(fl4));
719 fl4.flowi4_oif = vxlan->link;
720 fl4.flowi4_tos = RT_TOS(tos);
721 fl4.daddr = dst;
722 fl4.saddr = vxlan->saddr;
674 723
675 rt = ip_route_output_gre(dev_net(dev), &fl4, dst, 724 rt = ip_route_output_key(dev_net(dev), &fl4);
676 vxlan->saddr, vxlan->vni,
677 RT_TOS(tos), vxlan->link);
678 if (IS_ERR(rt)) { 725 if (IS_ERR(rt)) {
679 netdev_dbg(dev, "no route to %pI4\n", &dst); 726 netdev_dbg(dev, "no route to %pI4\n", &dst);
680 dev->stats.tx_carrier_errors++; 727 dev->stats.tx_carrier_errors++;
@@ -703,7 +750,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
703 uh = udp_hdr(skb); 750 uh = udp_hdr(skb);
704 751
705 uh->dest = htons(vxlan_port); 752 uh->dest = htons(vxlan_port);
706 uh->source = hash ? :random32(); 753 uh->source = htons(src_port);
707 754
708 uh->len = htons(skb->len); 755 uh->len = htons(skb->len);
709 uh->check = 0; 756 uh->check = 0;
@@ -716,10 +763,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
716 iph->frag_off = df; 763 iph->frag_off = df;
717 iph->protocol = IPPROTO_UDP; 764 iph->protocol = IPPROTO_UDP;
718 iph->tos = vxlan_ecn_encap(tos, old_iph, skb); 765 iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
719 iph->daddr = fl4.daddr; 766 iph->daddr = dst;
720 iph->saddr = fl4.saddr; 767 iph->saddr = fl4.saddr;
721 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 768 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
722 769
770 vxlan_set_owner(dev, skb);
771
723 /* See __IPTUNNEL_XMIT */ 772 /* See __IPTUNNEL_XMIT */
724 skb->ip_summed = CHECKSUM_NONE; 773 skb->ip_summed = CHECKSUM_NONE;
725 ip_select_ident(iph, &rt->dst, NULL); 774 ip_select_ident(iph, &rt->dst, NULL);
@@ -929,9 +978,11 @@ static void vxlan_setup(struct net_device *dev)
929{ 978{
930 struct vxlan_dev *vxlan = netdev_priv(dev); 979 struct vxlan_dev *vxlan = netdev_priv(dev);
931 unsigned h; 980 unsigned h;
981 int low, high;
932 982
933 eth_hw_addr_random(dev); 983 eth_hw_addr_random(dev);
934 ether_setup(dev); 984 ether_setup(dev);
985 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
935 986
936 dev->netdev_ops = &vxlan_netdev_ops; 987 dev->netdev_ops = &vxlan_netdev_ops;
937 dev->destructor = vxlan_free; 988 dev->destructor = vxlan_free;
@@ -948,6 +999,10 @@ static void vxlan_setup(struct net_device *dev)
948 vxlan->age_timer.function = vxlan_cleanup; 999 vxlan->age_timer.function = vxlan_cleanup;
949 vxlan->age_timer.data = (unsigned long) vxlan; 1000 vxlan->age_timer.data = (unsigned long) vxlan;
950 1001
1002 inet_get_local_port_range(&low, &high);
1003 vxlan->port_min = low;
1004 vxlan->port_max = high;
1005
951 vxlan->dev = dev; 1006 vxlan->dev = dev;
952 1007
953 for (h = 0; h < FDB_HASH_SIZE; ++h) 1008 for (h = 0; h < FDB_HASH_SIZE; ++h)
@@ -964,6 +1019,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
964 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 1019 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
965 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 1020 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
966 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 1021 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
1022 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
967}; 1023};
968 1024
969static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 1025static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -996,6 +1052,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
996 return -EADDRNOTAVAIL; 1052 return -EADDRNOTAVAIL;
997 } 1053 }
998 } 1054 }
1055
1056 if (data[IFLA_VXLAN_PORT_RANGE]) {
1057 const struct ifla_vxlan_port_range *p
1058 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
1059
1060 if (ntohs(p->high) < ntohs(p->low)) {
1061 pr_debug("port range %u .. %u not valid\n",
1062 ntohs(p->low), ntohs(p->high));
1063 return -EINVAL;
1064 }
1065 }
1066
999 return 0; 1067 return 0;
1000} 1068}
1001 1069
@@ -1022,14 +1090,18 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1022 if (data[IFLA_VXLAN_LOCAL]) 1090 if (data[IFLA_VXLAN_LOCAL])
1023 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); 1091 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
1024 1092
1025 if (data[IFLA_VXLAN_LINK]) { 1093 if (data[IFLA_VXLAN_LINK] &&
1026 vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]); 1094 (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
1095 struct net_device *lowerdev
1096 = __dev_get_by_index(net, vxlan->link);
1027 1097
1028 if (!tb[IFLA_MTU]) { 1098 if (!lowerdev) {
1029 struct net_device *lowerdev; 1099 pr_info("ifindex %d does not exist\n", vxlan->link);
1030 lowerdev = __dev_get_by_index(net, vxlan->link); 1100 return -ENODEV;
1031 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1032 } 1101 }
1102
1103 if (!tb[IFLA_MTU])
1104 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1033 } 1105 }
1034 1106
1035 if (data[IFLA_VXLAN_TOS]) 1107 if (data[IFLA_VXLAN_TOS])
@@ -1046,6 +1118,13 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1046 if (data[IFLA_VXLAN_LIMIT]) 1118 if (data[IFLA_VXLAN_LIMIT])
1047 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 1119 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
1048 1120
1121 if (data[IFLA_VXLAN_PORT_RANGE]) {
1122 const struct ifla_vxlan_port_range *p
1123 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
1124 vxlan->port_min = ntohs(p->low);
1125 vxlan->port_max = ntohs(p->high);
1126 }
1127
1049 err = register_netdevice(dev); 1128 err = register_netdevice(dev);
1050 if (!err) 1129 if (!err)
1051 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni)); 1130 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
@@ -1074,23 +1153,28 @@ static size_t vxlan_get_size(const struct net_device *dev)
1074 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 1153 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
1075 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 1154 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
1076 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 1155 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
1156 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
1077 0; 1157 0;
1078} 1158}
1079 1159
1080static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) 1160static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1081{ 1161{
1082 const struct vxlan_dev *vxlan = netdev_priv(dev); 1162 const struct vxlan_dev *vxlan = netdev_priv(dev);
1163 struct ifla_vxlan_port_range ports = {
1164 .low = htons(vxlan->port_min),
1165 .high = htons(vxlan->port_max),
1166 };
1083 1167
1084 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni)) 1168 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
1085 goto nla_put_failure; 1169 goto nla_put_failure;
1086 1170
1087 if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr)) 1171 if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
1088 goto nla_put_failure; 1172 goto nla_put_failure;
1089 1173
1090 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link)) 1174 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
1091 goto nla_put_failure; 1175 goto nla_put_failure;
1092 1176
1093 if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) 1177 if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
1094 goto nla_put_failure; 1178 goto nla_put_failure;
1095 1179
1096 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || 1180 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
@@ -1100,6 +1184,9 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1100 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax)) 1184 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
1101 goto nla_put_failure; 1185 goto nla_put_failure;
1102 1186
1187 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
1188 goto nla_put_failure;
1189
1103 return 0; 1190 return 0;
1104 1191
1105nla_put_failure: 1192nla_put_failure:
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1a623183cbe5..b6271325f803 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -597,7 +597,7 @@ fst_q_work_item(u64 * queue, int card_index)
597 * bottom half for the card. Note the limitation of 64 cards. 597 * bottom half for the card. Note the limitation of 64 cards.
598 * That ought to be enough 598 * That ought to be enough
599 */ 599 */
600 mask = 1 << card_index; 600 mask = (u64)1 << card_index;
601 *queue |= mask; 601 *queue |= mask;
602 spin_unlock_irqrestore(&fst_work_q_lock, flags); 602 spin_unlock_irqrestore(&fst_work_q_lock, flags);
603} 603}
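Why the farsync fix matters: the literal 1 is an int, so "1 << card_index" is evaluated in 32-bit arithmetic and is undefined for card_index >= 32 even though the result is stored in a 64-bit queue bitmap; casting first keeps the shift in 64-bit arithmetic. A small user-space demo of the corrected form:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int card_index = 40;				/* a card beyond the first 32 */
	uint64_t mask = (uint64_t)1 << card_index;	/* 64-bit shift: well defined */

	/* "1 << card_index" would shift a 32-bit int by 40 bits, which is
	 * undefined behaviour; the cast must happen before the shift. */
	printf("mask for card %d = 0x%016llx\n",
	       card_index, (unsigned long long)mask);
	return 0;
}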
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 0e5769061702..feacc3b994b7 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL(z8530_queue_xmit);
1775/* 1775/*
1776 * Module support 1776 * Module support
1777 */ 1777 */
1778static const char banner[] __initdata = 1778static const char banner[] __initconst =
1779 KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n"; 1779 KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1780 1780
1781static int __init z85230_init_driver(void) 1781static int __init z85230_init_driver(void)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 89bf94d4d8a1..6f7cf49eff4d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -534,107 +534,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
534 534
535static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = { 535static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
536 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 536 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
537 {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, 537 {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
538 {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, 538 {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
539 {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, 539 {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
540 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 540 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
541 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 541 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
542 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 542 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
543 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 543 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
544 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, 544 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
545 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, 545 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
546 {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202}, 546 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
547 {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400}, 547 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
548 {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402}, 548 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
549 {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404}, 549 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
550 {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603}, 550 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
551 {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02}, 551 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
552 {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04}, 552 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
553 {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20}, 553 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
554 {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20}, 554 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
555 {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22}, 555 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
556 {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24}, 556 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
557 {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640}, 557 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
558 {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660}, 558 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
559 {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861}, 559 {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
560 {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81}, 560 {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
561 {0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83}, 561 {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
562 {0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84}, 562 {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
563 {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3}, 563 {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
564 {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5}, 564 {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
565 {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9}, 565 {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
566 {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb}, 566 {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
567 {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 567 {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
568 {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 568 {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
569 {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 569 {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
570 {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 570 {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
571 {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 571 {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
572 {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 572 {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
573 {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, 573 {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
574 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 574 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
575 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 575 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
576 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 576 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
577 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, 577 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
578 {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202}, 578 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
579 {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400}, 579 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
580 {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402}, 580 {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
581 {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404}, 581 {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
582 {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603}, 582 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
583 {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02}, 583 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
584 {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04}, 584 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
585 {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20}, 585 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
586 {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20}, 586 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
587 {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22}, 587 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
588 {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24}, 588 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
589 {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640}, 589 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
590 {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660}, 590 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
591 {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861}, 591 {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
592 {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81}, 592 {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
593 {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83}, 593 {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
594 {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84}, 594 {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
595 {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3}, 595 {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
596 {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5}, 596 {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
597 {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9}, 597 {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
598 {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb}, 598 {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
599 {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 599 {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
600 {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 600 {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
601 {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 601 {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
602 {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 602 {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
603 {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 603 {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
604 {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 604 {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
605 {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, 605 {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
606 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 606 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
607 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 607 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
608 {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, 608 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
609 {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, 609 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
610 {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, 610 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
611 {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000}, 611 {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
612 {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501}, 612 {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
613 {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501}, 613 {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
614 {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03}, 614 {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
615 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04}, 615 {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
616 {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04}, 616 {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
617 {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 617 {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
618 {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 618 {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
619 {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 619 {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
620 {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 620 {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
621 {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, 621 {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
622 {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, 622 {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
623 {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, 623 {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
624 {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, 624 {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
625 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 625 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
626 {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, 626 {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
627 {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, 627 {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
628 {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, 628 {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
629 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 629 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
630 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 630 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
631 {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, 631 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
632 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 632 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
633 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 633 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
634 {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, 634 {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
635 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 635 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
636 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 636 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
637 {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, 637 {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
638 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 638 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
639}; 639};
640 640
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 924c4616c3d9..f5dda84176c3 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -38,6 +38,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
38 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 38 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
39 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */ 39 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
40 { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */ 40 { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
41 { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
41 { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ 42 { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
42 43
43 { USB_DEVICE(0x0cf3, 0x7015), 44 { USB_DEVICE(0x0cf3, 0x7015),
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 378bd70256b2..741918a2027b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -312,6 +312,7 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
312 } 312 }
313 313
314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
315 bf->bf_next = NULL;
315 list_del(&bf->list); 316 list_del(&bf->list);
316 317
317 spin_unlock_bh(&sc->tx.txbuflock); 318 spin_unlock_bh(&sc->tx.txbuflock);
@@ -393,7 +394,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
393 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; 394 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
394 u32 ba[WME_BA_BMP_SIZE >> 5]; 395 u32 ba[WME_BA_BMP_SIZE >> 5];
395 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 396 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
396 bool rc_update = true; 397 bool rc_update = true, isba;
397 struct ieee80211_tx_rate rates[4]; 398 struct ieee80211_tx_rate rates[4];
398 struct ath_frame_info *fi; 399 struct ath_frame_info *fi;
399 int nframes; 400 int nframes;
@@ -437,13 +438,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
437 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 438 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
438 tid = ATH_AN_2_TID(an, tidno); 439 tid = ATH_AN_2_TID(an, tidno);
439 seq_first = tid->seq_start; 440 seq_first = tid->seq_start;
441 isba = ts->ts_flags & ATH9K_TX_BA;
440 442
441 /* 443 /*
442 * The hardware occasionally sends a tx status for the wrong TID. 444 * The hardware occasionally sends a tx status for the wrong TID.
443 * In this case, the BA status cannot be considered valid and all 445 * In this case, the BA status cannot be considered valid and all
444 * subframes need to be retransmitted 446 * subframes need to be retransmitted
447 *
448 * Only BlockAcks have a TID and therefore normal Acks cannot be
449 * checked
445 */ 450 */
446 if (tidno != ts->tid) 451 if (isba && tidno != ts->tid)
447 txok = false; 452 txok = false;
448 453
449 isaggr = bf_isaggr(bf); 454 isaggr = bf_isaggr(bf);
@@ -1774,6 +1779,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1774 list_add_tail(&bf->list, &bf_head); 1779 list_add_tail(&bf->list, &bf_head);
1775 bf->bf_state.bf_type = 0; 1780 bf->bf_state.bf_type = 0;
1776 1781
1782 bf->bf_next = NULL;
1777 bf->bf_lastbf = bf; 1783 bf->bf_lastbf = bf;
1778 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1784 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1779 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1785 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
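
The xmit.c change above only trusts the TID reported in the tx status when the status flags mark it as a BlockAck, since plain Acks carry no TID that could be compared. A minimal stand-alone sketch of that guard follows; the struct, flag, and function names here are illustrative stand-ins, not the driver's own types.

    /* Sketch only: simplified stand-ins for the driver's tx-status types. */
    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_TX_BA 0x01            /* status describes a BlockAck */

    struct sketch_tx_status {
        uint8_t flags;                   /* e.g. SKETCH_TX_BA */
        uint8_t tid;                     /* TID reported by hardware */
    };

    /* Trust the hardware TID only for BlockAck status; a mismatch then
     * invalidates the BA bitmap and forces the subframes to be retried. */
    static bool sketch_tx_status_ok(const struct sketch_tx_status *ts,
                                    uint8_t frame_tid, bool txok)
    {
        bool isba = ts->flags & SKETCH_TX_BA;

        if (isba && ts->tid != frame_tid)
            return false;
        return txok;
    }
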
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 73730e94e0ac..c5a99c8c8168 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5404,6 +5404,8 @@ static void b43_bcma_remove(struct bcma_device *core)
5404 cancel_work_sync(&wldev->restart_work); 5404 cancel_work_sync(&wldev->restart_work);
5405 5405
5406 B43_WARN_ON(!wl); 5406 B43_WARN_ON(!wl);
5407 if (!wldev->fw.ucode.data)
5408 return; /* NULL if firmware never loaded */
5407 if (wl->current_dev == wldev && wl->hw_registred) { 5409 if (wl->current_dev == wldev && wl->hw_registred) {
5408 b43_leds_stop(wldev); 5410 b43_leds_stop(wldev);
5409 ieee80211_unregister_hw(wl->hw); 5411 ieee80211_unregister_hw(wl->hw);
@@ -5478,6 +5480,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5478 cancel_work_sync(&wldev->restart_work); 5480 cancel_work_sync(&wldev->restart_work);
5479 5481
5480 B43_WARN_ON(!wl); 5482 B43_WARN_ON(!wl);
5483 if (!wldev->fw.ucode.data)
5484 return; /* NULL if firmware never loaded */
5481 if (wl->current_dev == wldev && wl->hw_registred) { 5485 if (wl->current_dev == wldev && wl->hw_registred) {
5482 b43_leds_stop(wldev); 5486 b43_leds_stop(wldev);
5483 ieee80211_unregister_hw(wl->hw); 5487 ieee80211_unregister_hw(wl->hw);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index a2b4b1e71017..7a6dfdc67b6c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1339,7 +1339,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
1339 } 1339 }
1340 1340
1341 ret = brcmf_bus_start(dev); 1341 ret = brcmf_bus_start(dev);
1342 if (ret == -ENOLINK) { 1342 if (ret) {
1343 brcmf_dbg(ERROR, "dongle is not responding\n"); 1343 brcmf_dbg(ERROR, "dongle is not responding\n");
1344 brcmf_detach(dev); 1344 brcmf_detach(dev);
1345 goto fail; 1345 goto fail;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index c1abaa6db59e..a6f1e8166008 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -3569,7 +3569,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3569 3569
3570 if (!request || !request->n_ssids || !request->n_match_sets) { 3570 if (!request || !request->n_ssids || !request->n_match_sets) {
3571 WL_ERR("Invalid sched scan req!! n_ssids:%d\n", 3571 WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
3572 request->n_ssids); 3572 request ? request->n_ssids : 0);
3573 return -EINVAL; 3573 return -EINVAL;
3574 } 3574 }
3575 3575
@@ -3972,7 +3972,7 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
3972 u8 *iovar_ie_buf; 3972 u8 *iovar_ie_buf;
3973 u8 *curr_ie_buf; 3973 u8 *curr_ie_buf;
3974 u8 *mgmt_ie_buf = NULL; 3974 u8 *mgmt_ie_buf = NULL;
3975 u32 mgmt_ie_buf_len = 0; 3975 int mgmt_ie_buf_len;
3976 u32 *mgmt_ie_len = 0; 3976 u32 *mgmt_ie_len = 0;
3977 u32 del_add_ie_buf_len = 0; 3977 u32 del_add_ie_buf_len = 0;
3978 u32 total_ie_buf_len = 0; 3978 u32 total_ie_buf_len = 0;
@@ -3982,7 +3982,7 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
3982 struct parsed_vndr_ie_info *vndrie_info; 3982 struct parsed_vndr_ie_info *vndrie_info;
3983 s32 i; 3983 s32 i;
3984 u8 *ptr; 3984 u8 *ptr;
3985 u32 remained_buf_len; 3985 int remained_buf_len;
3986 3986
3987 WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag); 3987 WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
3988 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); 3988 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
@@ -4606,12 +4606,13 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4606 struct brcmf_cfg80211_profile *profile = cfg->profile; 4606 struct brcmf_cfg80211_profile *profile = cfg->profile;
4607 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); 4607 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
4608 struct wiphy *wiphy = cfg_to_wiphy(cfg); 4608 struct wiphy *wiphy = cfg_to_wiphy(cfg);
4609 struct brcmf_channel_info_le channel_le; 4609 struct ieee80211_channel *notify_channel = NULL;
4610 struct ieee80211_channel *notify_channel;
4611 struct ieee80211_supported_band *band; 4610 struct ieee80211_supported_band *band;
4611 struct brcmf_bss_info_le *bi;
4612 u32 freq; 4612 u32 freq;
4613 s32 err = 0; 4613 s32 err = 0;
4614 u32 target_channel; 4614 u32 target_channel;
4615 u8 *buf;
4615 4616
4616 WL_TRACE("Enter\n"); 4617 WL_TRACE("Enter\n");
4617 4618
@@ -4619,11 +4620,22 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4619 memcpy(profile->bssid, e->addr, ETH_ALEN); 4620 memcpy(profile->bssid, e->addr, ETH_ALEN);
4620 brcmf_update_bss_info(cfg); 4621 brcmf_update_bss_info(cfg);
4621 4622
4622 brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le, 4623 buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
4623 sizeof(channel_le)); 4624 if (buf == NULL) {
4625 err = -ENOMEM;
4626 goto done;
4627 }
4628
4629 /* data sent to dongle has to be little endian */
4630 *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
4631 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
4632
4633 if (err)
4634 goto done;
4624 4635
4625 target_channel = le32_to_cpu(channel_le.target_channel); 4636 bi = (struct brcmf_bss_info_le *)(buf + 4);
4626 WL_CONN("Roamed to channel %d\n", target_channel); 4637 target_channel = bi->ctl_ch ? bi->ctl_ch :
4638 CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
4627 4639
4628 if (target_channel <= CH_MAX_2G_CHANNEL) 4640 if (target_channel <= CH_MAX_2G_CHANNEL)
4629 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 4641 band = wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -4633,6 +4645,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4633 freq = ieee80211_channel_to_frequency(target_channel, band->band); 4645 freq = ieee80211_channel_to_frequency(target_channel, band->band);
4634 notify_channel = ieee80211_get_channel(wiphy, freq); 4646 notify_channel = ieee80211_get_channel(wiphy, freq);
4635 4647
4648done:
4649 kfree(buf);
4636 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid, 4650 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
4637 conn_info->req_ie, conn_info->req_ie_len, 4651 conn_info->req_ie, conn_info->req_ie_len,
4638 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); 4652 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
@@ -5186,41 +5200,6 @@ brcmf_cfg80211_event(struct net_device *ndev,
5186 schedule_work(&cfg->event_work); 5200 schedule_work(&cfg->event_work);
5187} 5201}
5188 5202
5189static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
5190{
5191 s32 infra = 0;
5192 s32 err = 0;
5193
5194 switch (iftype) {
5195 case NL80211_IFTYPE_MONITOR:
5196 case NL80211_IFTYPE_WDS:
5197 WL_ERR("type (%d) : currently we do not support this mode\n",
5198 iftype);
5199 err = -EINVAL;
5200 return err;
5201 case NL80211_IFTYPE_ADHOC:
5202 infra = 0;
5203 break;
5204 case NL80211_IFTYPE_STATION:
5205 infra = 1;
5206 break;
5207 case NL80211_IFTYPE_AP:
5208 infra = 1;
5209 break;
5210 default:
5211 err = -EINVAL;
5212 WL_ERR("invalid type (%d)\n", iftype);
5213 return err;
5214 }
5215 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
5216 if (err) {
5217 WL_ERR("WLC_SET_INFRA error (%d)\n", err);
5218 return err;
5219 }
5220
5221 return 0;
5222}
5223
5224static s32 brcmf_dongle_eventmsg(struct net_device *ndev) 5203static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
5225{ 5204{
5226 /* Room for "event_msgs" + '\0' + bitvec */ 5205 /* Room for "event_msgs" + '\0' + bitvec */
@@ -5439,7 +5418,8 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
5439 WL_BEACON_TIMEOUT); 5418 WL_BEACON_TIMEOUT);
5440 if (err) 5419 if (err)
5441 goto default_conf_out; 5420 goto default_conf_out;
5442 err = brcmf_dongle_mode(ndev, wdev->iftype); 5421 err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
5422 NULL, NULL);
5443 if (err && err != -EINPROGRESS) 5423 if (err && err != -EINPROGRESS)
5444 goto default_conf_out; 5424 goto default_conf_out;
5445 err = brcmf_dongle_probecap(cfg); 5425 err = brcmf_dongle_probecap(cfg);
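
The roaming handler above queries the BSS record through a length-prefixed buffer: the first four bytes carry the buffer size in little endian (the dongle expects little-endian data), and the firmware writes the record immediately after them; the control channel is then taken from ctl_ch, falling back to the low bits of chanspec. A compact sketch of that layout, with hypothetical names (sketch_exec_dcmd, SKETCH_GET_BSS_INFO) standing in for the driver's calls and command id:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define SKETCH_BSS_INFO_MAX 2048     /* stands in for WL_BSS_INFO_MAX */
    #define SKETCH_GET_BSS_INFO 0        /* placeholder; real id lives in the driver headers */

    struct sketch_bss_info {             /* only the fields the hunk reads */
        uint16_t chanspec;
        uint8_t  ctl_ch;
    };

    /* Stub standing in for brcmf_exec_dcmd(); pretend the query succeeded. */
    static int sketch_exec_dcmd(int cmd, void *buf, size_t len)
    {
        (void)cmd; (void)buf; (void)len;
        return 0;
    }

    /* Length-prefixed firmware query: le32 buffer size first, record after it. */
    static int sketch_roam_channel(void)
    {
        uint8_t *buf = calloc(1, SKETCH_BSS_INFO_MAX);
        uint32_t len = SKETCH_BSS_INFO_MAX;   /* assumes a little-endian host */
        struct sketch_bss_info *bi;
        int chan;

        if (!buf)
            return -1;

        memcpy(buf, &len, sizeof(len));       /* data sent to the dongle is little endian */

        if (sketch_exec_dcmd(SKETCH_GET_BSS_INFO, buf, SKETCH_BSS_INFO_MAX)) {
            free(buf);
            return -1;
        }

        bi = (struct sketch_bss_info *)(buf + 4);  /* record follows the 4-byte length */
        chan = bi->ctl_ch ? bi->ctl_ch
                          : (bi->chanspec & 0xff); /* low bits hold the channel number */
        free(buf);
        return chan;
    }
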
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 935120fc8c93..768bf612533e 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -10472,7 +10472,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10472 } else 10472 } else
10473 len = src->len; 10473 len = src->len;
10474 10474
10475 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC); 10475 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10476 if (!dst) 10476 if (!dst)
10477 continue; 10477 continue;
10478 10478
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 349c205d5f62..da5862064195 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -518,7 +518,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
518 * See iwlagn_mac_channel_switch. 518 * See iwlagn_mac_channel_switch.
519 */ 519 */
520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
521 struct iwl6000_channel_switch_cmd cmd; 521 struct iwl6000_channel_switch_cmd *cmd;
522 u32 switch_time_in_usec, ucode_switch_time; 522 u32 switch_time_in_usec, ucode_switch_time;
523 u16 ch; 523 u16 ch;
524 u32 tsf_low; 524 u32 tsf_low;
@@ -527,18 +527,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
527 struct ieee80211_vif *vif = ctx->vif; 527 struct ieee80211_vif *vif = ctx->vif;
528 struct iwl_host_cmd hcmd = { 528 struct iwl_host_cmd hcmd = {
529 .id = REPLY_CHANNEL_SWITCH, 529 .id = REPLY_CHANNEL_SWITCH,
530 .len = { sizeof(cmd), }, 530 .len = { sizeof(*cmd), },
531 .flags = CMD_SYNC, 531 .flags = CMD_SYNC,
532 .data = { &cmd, }, 532 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
533 }; 533 };
534 int err;
534 535
535 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 536 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
537 if (!cmd)
538 return -ENOMEM;
539
540 hcmd.data[0] = cmd;
541
542 cmd->band = priv->band == IEEE80211_BAND_2GHZ;
536 ch = ch_switch->channel->hw_value; 543 ch = ch_switch->channel->hw_value;
537 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", 544 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
538 ctx->active.channel, ch); 545 ctx->active.channel, ch);
539 cmd.channel = cpu_to_le16(ch); 546 cmd->channel = cpu_to_le16(ch);
540 cmd.rxon_flags = ctx->staging.flags; 547 cmd->rxon_flags = ctx->staging.flags;
541 cmd.rxon_filter_flags = ctx->staging.filter_flags; 548 cmd->rxon_filter_flags = ctx->staging.filter_flags;
542 switch_count = ch_switch->count; 549 switch_count = ch_switch->count;
543 tsf_low = ch_switch->timestamp & 0x0ffffffff; 550 tsf_low = ch_switch->timestamp & 0x0ffffffff;
544 /* 551 /*
@@ -554,23 +561,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
554 switch_count = 0; 561 switch_count = 0;
555 } 562 }
556 if (switch_count <= 1) 563 if (switch_count <= 1)
557 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 564 cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
558 else { 565 else {
559 switch_time_in_usec = 566 switch_time_in_usec =
560 vif->bss_conf.beacon_int * switch_count * TIME_UNIT; 567 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
561 ucode_switch_time = iwl_usecs_to_beacons(priv, 568 ucode_switch_time = iwl_usecs_to_beacons(priv,
562 switch_time_in_usec, 569 switch_time_in_usec,
563 beacon_interval); 570 beacon_interval);
564 cmd.switch_time = iwl_add_beacon_time(priv, 571 cmd->switch_time = iwl_add_beacon_time(priv,
565 priv->ucode_beacon_time, 572 priv->ucode_beacon_time,
566 ucode_switch_time, 573 ucode_switch_time,
567 beacon_interval); 574 beacon_interval);
568 } 575 }
569 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 576 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
570 cmd.switch_time); 577 cmd->switch_time);
571 cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR; 578 cmd->expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
572 579
573 return iwl_dvm_send_cmd(priv, &hcmd); 580 err = iwl_dvm_send_cmd(priv, &hcmd);
581 kfree(cmd);
582 return err;
574} 583}
575 584
576struct iwl_lib_ops iwl6000_lib = { 585struct iwl_lib_ops iwl6000_lib = {
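
The devices.c hunk moves the channel-switch command off the stack: with the no-copy data flag the transport uses the caller's buffer directly instead of copying it, so the buffer must be heap memory that stays valid until the synchronous send returns, and is freed only afterwards. A small sketch of that allocate/fill/send/free pattern, with stand-in names (sketch_send_cmd is hypothetical, not the driver API):

    #include <stdlib.h>

    struct sketch_cs_cmd {
        unsigned short channel;
        unsigned int   switch_time;
    };

    /* Stub standing in for the synchronous send call. */
    static int sketch_send_cmd(const void *data, unsigned long len)
    {
        (void)data; (void)len;
        return 0;
    }

    static int sketch_channel_switch(unsigned short ch, unsigned int when)
    {
        struct sketch_cs_cmd *cmd;
        int err;

        /* A no-copy command needs a buffer that outlives this stack frame,
         * so allocate it and release it only after the send completes. */
        cmd = calloc(1, sizeof(*cmd));
        if (!cmd)
            return -1;

        cmd->channel = ch;
        cmd->switch_time = when;

        err = sketch_send_cmd(cmd, sizeof(*cmd));
        free(cmd);
        return err;
    }
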
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 0679458a1bac..780d3e168297 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1825,8 +1825,6 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1825 return -EBUSY; 1825 return -EBUSY;
1826 } 1826 }
1827 1827
1828 priv->scan_request = request;
1829
1830 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), 1828 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
1831 GFP_KERNEL); 1829 GFP_KERNEL);
1832 if (!priv->user_scan_cfg) { 1830 if (!priv->user_scan_cfg) {
@@ -1834,6 +1832,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1834 return -ENOMEM; 1832 return -ENOMEM;
1835 } 1833 }
1836 1834
1835 priv->scan_request = request;
1836
1837 priv->user_scan_cfg->num_ssids = request->n_ssids; 1837 priv->user_scan_cfg->num_ssids = request->n_ssids;
1838 priv->user_scan_cfg->ssid_list = request->ssids; 1838 priv->user_scan_cfg->ssid_list = request->ssids;
1839 1839
@@ -1870,6 +1870,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1870 ret = mwifiex_scan_networks(priv, priv->user_scan_cfg); 1870 ret = mwifiex_scan_networks(priv, priv->user_scan_cfg);
1871 if (ret) { 1871 if (ret) {
1872 dev_err(priv->adapter->dev, "scan failed: %d\n", ret); 1872 dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
1873 priv->scan_request = NULL;
1874 kfree(priv->user_scan_cfg);
1875 priv->user_scan_cfg = NULL;
1873 return ret; 1876 return ret;
1874 } 1877 }
1875 1878
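
The cfg80211.c reordering above records the pending scan request only after the user scan config has been allocated, and drops both again if the scan cannot be started, so no stale request is left for cfg80211 to wait on. A tiny sketch of that ordering; the types and helpers are placeholders, not the driver's own:

    #include <stdlib.h>

    struct sketch_priv {
        void *scan_request;
        void *scan_cfg;
    };

    /* Stub standing in for the scan kick-off; nonzero means failure. */
    static int sketch_start_scan(struct sketch_priv *p) { (void)p; return 0; }

    static int sketch_request_scan(struct sketch_priv *priv, void *request)
    {
        priv->scan_cfg = calloc(1, 64);
        if (!priv->scan_cfg)
            return -1;                    /* nothing published yet, nothing to undo */

        priv->scan_request = request;     /* publish only after allocation succeeds */

        if (sketch_start_scan(priv)) {
            priv->scan_request = NULL;    /* undo on failure so nobody waits on it */
            free(priv->scan_cfg);
            priv->scan_cfg = NULL;
            return -1;
        }
        return 0;
    }
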
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 00b658d3b6ec..9171aaedbccd 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1843,21 +1843,18 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
1843 struct cfg80211_ssid *req_ssid) 1843 struct cfg80211_ssid *req_ssid)
1844{ 1844{
1845 struct mwifiex_adapter *adapter = priv->adapter; 1845 struct mwifiex_adapter *adapter = priv->adapter;
1846 int ret = 0; 1846 int ret;
1847 struct mwifiex_user_scan_cfg *scan_cfg; 1847 struct mwifiex_user_scan_cfg *scan_cfg;
1848 1848
1849 if (!req_ssid)
1850 return -1;
1851
1852 if (adapter->scan_processing) { 1849 if (adapter->scan_processing) {
1853 dev_dbg(adapter->dev, "cmd: Scan already in process...\n"); 1850 dev_err(adapter->dev, "cmd: Scan already in process...\n");
1854 return ret; 1851 return -EBUSY;
1855 } 1852 }
1856 1853
1857 if (priv->scan_block) { 1854 if (priv->scan_block) {
1858 dev_dbg(adapter->dev, 1855 dev_err(adapter->dev,
1859 "cmd: Scan is blocked during association...\n"); 1856 "cmd: Scan is blocked during association...\n");
1860 return ret; 1857 return -EBUSY;
1861 } 1858 }
1862 1859
1863 scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL); 1860 scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index a12e84f892be..6b2e1e431dd2 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1988,6 +1988,7 @@ static struct usb_driver rt2500usb_driver = {
1988 .disconnect = rt2x00usb_disconnect, 1988 .disconnect = rt2x00usb_disconnect,
1989 .suspend = rt2x00usb_suspend, 1989 .suspend = rt2x00usb_suspend,
1990 .resume = rt2x00usb_resume, 1990 .resume = rt2x00usb_resume,
1991 .reset_resume = rt2x00usb_resume,
1991 .disable_hub_initiated_lpm = 1, 1992 .disable_hub_initiated_lpm = 1,
1992}; 1993};
1993 1994
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 01dc8891070c..59474ae0aec0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2449,7 +2449,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
2449 /* 2449 /*
2450 * Check if temperature compensation is supported. 2450 * Check if temperature compensation is supported.
2451 */ 2451 */
2452 if (tssi_bounds[4] == 0xff) 2452 if (tssi_bounds[4] == 0xff || step == 0xff)
2453 return 0; 2453 return 0;
2454 2454
2455 /* 2455 /*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index c9e9370eb789..3b8fb5a603f2 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1282,6 +1282,7 @@ static struct usb_driver rt2800usb_driver = {
1282 .disconnect = rt2x00usb_disconnect, 1282 .disconnect = rt2x00usb_disconnect,
1283 .suspend = rt2x00usb_suspend, 1283 .suspend = rt2x00usb_suspend,
1284 .resume = rt2x00usb_resume, 1284 .resume = rt2x00usb_resume,
1285 .reset_resume = rt2x00usb_resume,
1285 .disable_hub_initiated_lpm = 1, 1286 .disable_hub_initiated_lpm = 1,
1286}; 1287};
1287 1288
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index e5eb43b3eee7..24eec66e9fd2 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2535,6 +2535,7 @@ static struct usb_driver rt73usb_driver = {
2535 .disconnect = rt2x00usb_disconnect, 2535 .disconnect = rt2x00usb_disconnect,
2536 .suspend = rt2x00usb_suspend, 2536 .suspend = rt2x00usb_suspend,
2537 .resume = rt2x00usb_resume, 2537 .resume = rt2x00usb_resume,
2538 .reset_resume = rt2x00usb_resume,
2538 .disable_hub_initiated_lpm = 1, 2539 .disable_hub_initiated_lpm = 1,
2539}; 2540};
2540 2541
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 030beb45d8b0..e3ea4b346889 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -673,7 +673,7 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
673 set_hal_start(rtlhal); 673 set_hal_start(rtlhal);
674 674
675 /* Start bulk IN */ 675 /* Start bulk IN */
676 _rtl_usb_receive(hw); 676 err = _rtl_usb_receive(hw);
677 } 677 }
678 678
679 return err; 679 return err;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 682633bfe00f..f2d6b78d901d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -40,6 +40,7 @@
40 40
41#include <net/tcp.h> 41#include <net/tcp.h>
42 42
43#include <xen/xen.h>
43#include <xen/events.h> 44#include <xen/events.h>
44#include <xen/interface/memory.h> 45#include <xen/interface/memory.h>
45 46
@@ -334,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
334 335
335 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 336 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
336 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 337 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
338 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
337 unsigned long bytes; 339 unsigned long bytes;
340
341 offset &= ~PAGE_MASK;
342
338 while (size > 0) { 343 while (size > 0) {
344 BUG_ON(offset >= PAGE_SIZE);
339 BUG_ON(copy_off > MAX_BUFFER_OFFSET); 345 BUG_ON(copy_off > MAX_BUFFER_OFFSET);
340 346
341 if (start_new_rx_buffer(copy_off, size, 0)) { 347 bytes = PAGE_SIZE - offset;
348
349 if (bytes > size)
350 bytes = size;
351
352 if (start_new_rx_buffer(copy_off, bytes, 0)) {
342 count++; 353 count++;
343 copy_off = 0; 354 copy_off = 0;
344 } 355 }
345 356
346 bytes = size;
347 if (copy_off + bytes > MAX_BUFFER_OFFSET) 357 if (copy_off + bytes > MAX_BUFFER_OFFSET)
348 bytes = MAX_BUFFER_OFFSET - copy_off; 358 bytes = MAX_BUFFER_OFFSET - copy_off;
349 359
350 copy_off += bytes; 360 copy_off += bytes;
361
362 offset += bytes;
351 size -= bytes; 363 size -= bytes;
364
365 if (offset == PAGE_SIZE)
366 offset = 0;
352 } 367 }
353 } 368 }
354 return count; 369 return count;
@@ -402,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
402 unsigned long bytes; 417 unsigned long bytes;
403 418
404 /* Data must not cross a page boundary. */ 419 /* Data must not cross a page boundary. */
405 BUG_ON(size + offset > PAGE_SIZE); 420 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
406 421
407 meta = npo->meta + npo->meta_prod - 1; 422 meta = npo->meta + npo->meta_prod - 1;
408 423
424 /* Skip unused frames from start of page */
425 page += offset >> PAGE_SHIFT;
426 offset &= ~PAGE_MASK;
427
409 while (size > 0) { 428 while (size > 0) {
429 BUG_ON(offset >= PAGE_SIZE);
410 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); 430 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
411 431
412 if (start_new_rx_buffer(npo->copy_off, size, *head)) { 432 bytes = PAGE_SIZE - offset;
433
434 if (bytes > size)
435 bytes = size;
436
437 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
413 /* 438 /*
414 * Netfront requires there to be some data in the head 439 * Netfront requires there to be some data in the head
415 * buffer. 440 * buffer.
@@ -419,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
419 meta = get_next_rx_buffer(vif, npo); 444 meta = get_next_rx_buffer(vif, npo);
420 } 445 }
421 446
422 bytes = size;
423 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) 447 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
424 bytes = MAX_BUFFER_OFFSET - npo->copy_off; 448 bytes = MAX_BUFFER_OFFSET - npo->copy_off;
425 449
@@ -452,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
452 offset += bytes; 476 offset += bytes;
453 size -= bytes; 477 size -= bytes;
454 478
479 /* Next frame */
480 if (offset == PAGE_SIZE && size) {
481 BUG_ON(!PageCompound(page));
482 page++;
483 offset = 0;
484 }
485
455 /* Leave a gap for the GSO descriptor. */ 486 /* Leave a gap for the GSO descriptor. */
456 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) 487 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
457 vif->rx.req_cons++; 488 vif->rx.req_cons++;
@@ -635,9 +666,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
635 return; 666 return;
636 667
637 BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op)); 668 BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
638 ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op, 669 gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
639 npo.copy_prod);
640 BUG_ON(ret != 0);
641 670
642 while ((skb = __skb_dequeue(&rxq)) != NULL) { 671 while ((skb = __skb_dequeue(&rxq)) != NULL) {
643 sco = (struct skb_cb_overlay *)skb->cb; 672 sco = (struct skb_cb_overlay *)skb->cb;
@@ -1460,18 +1489,15 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1460static void xen_netbk_tx_action(struct xen_netbk *netbk) 1489static void xen_netbk_tx_action(struct xen_netbk *netbk)
1461{ 1490{
1462 unsigned nr_gops; 1491 unsigned nr_gops;
1463 int ret;
1464 1492
1465 nr_gops = xen_netbk_tx_build_gops(netbk); 1493 nr_gops = xen_netbk_tx_build_gops(netbk);
1466 1494
1467 if (nr_gops == 0) 1495 if (nr_gops == 0)
1468 return; 1496 return;
1469 ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
1470 netbk->tx_copy_ops, nr_gops);
1471 BUG_ON(ret);
1472 1497
1473 xen_netbk_tx_submit(netbk); 1498 gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
1474 1499
1500 xen_netbk_tx_submit(netbk);
1475} 1501}
1476 1502
1477static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) 1503static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
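
Both netback hunks above stop assuming a fragment fits in a single page: the offset is first reduced modulo PAGE_SIZE (skipping whole pages of a compound allocation), each pass copies at most the bytes remaining in the current page, and on a page boundary the walk advances to the next page. A condensed sketch of that chunking loop, with page handling reduced to an index and illustrative names:

    #define SKETCH_PAGE_SIZE 4096UL

    /* Walk a fragment that may span several pages of a compound allocation,
     * invoking copy_cb() once per contiguous in-page chunk. */
    static void sketch_walk_frag(unsigned long offset, unsigned long size,
                                 void (*copy_cb)(unsigned long page_idx,
                                                 unsigned long off,
                                                 unsigned long bytes))
    {
        unsigned long page_idx = offset / SKETCH_PAGE_SIZE;

        offset %= SKETCH_PAGE_SIZE;           /* skip unused pages at the start */

        while (size > 0) {
            unsigned long bytes = SKETCH_PAGE_SIZE - offset;

            if (bytes > size)
                bytes = size;

            copy_cb(page_idx, offset, bytes); /* copy only what this page holds */

            offset += bytes;
            size   -= bytes;

            if (offset == SKETCH_PAGE_SIZE) { /* crossed into the next page */
                page_idx++;
                offset = 0;
            }
        }
    }
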
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c934fe8583f5..caa011008cd0 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -43,6 +43,7 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <net/ip.h> 44#include <net/ip.h>
45 45
46#include <asm/xen/page.h>
46#include <xen/xen.h> 47#include <xen/xen.h>
47#include <xen/xenbus.h> 48#include <xen/xenbus.h>
48#include <xen/events.h> 49#include <xen/events.h>